// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/mm_inline.h>
#include <trace/events/writeback.h>

#include "internal.h"

/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE		max(HZ/5, 1)
/*
 * Try to keep balance_dirty_pages() call intervals higher than this many pages
 * by raising the pause time to max_pause when the interval falls below it.
 */
#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
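
/*
 * For illustration: with 4 KiB pages (PAGE_SHIFT == 12) this evaluates to
 * 128 >> 2 == 32 pages, i.e. 128 KiB of newly dirtied data between polls.
 */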

/*
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)

#define RATELIMIT_CALC_SHIFT	10
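
/*
 * Note: pos_ratio and the other ratio intermediates below are fixed-point
 * values with RATELIMIT_CALC_SHIFT fractional bits, so
 * 1 << RATELIMIT_CALC_SHIFT (1024) represents a ratio of 1.0.
 */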

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
static int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
static unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
static int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
static int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
static unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

EXPORT_SYMBOL_GPL(dirty_writeback_interval);

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

struct wb_domain global_wb_domain;

/* consolidated parameters for balance_dirty_pages() and its subroutines */
struct dirty_throttle_control {
#ifdef CONFIG_CGROUP_WRITEBACK
	struct wb_domain	*dom;
	struct dirty_throttle_control *gdtc;	/* only set in memcg dtc's */
#endif
	struct bdi_writeback	*wb;
	struct fprop_local_percpu *wb_completions;

	unsigned long		avail;		/* dirtyable */
	unsigned long		dirty;		/* file_dirty + write + nfs */
	unsigned long		thresh;		/* dirty threshold */
	unsigned long		bg_thresh;	/* dirty background threshold */

	unsigned long		wb_dirty;	/* per-wb counterparts */
	unsigned long		wb_thresh;
	unsigned long		wb_bg_thresh;

	unsigned long		pos_ratio;
};

/*
 * Length of period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the slower fractions will
 * reflect changes in current writeout rate.
 */
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)

#ifdef CONFIG_CGROUP_WRITEBACK

#define GDTC_INIT(__wb)		.wb = (__wb),				\
				.dom = &global_wb_domain,		\
				.wb_completions = &(__wb)->completions

#define GDTC_INIT_NO_WB		.dom = &global_wb_domain

#define MDTC_INIT(__wb, __gdtc)	.wb = (__wb),				\
				.dom = mem_cgroup_wb_domain(__wb),	\
				.wb_completions = &(__wb)->memcg_completions, \
				.gdtc = __gdtc

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return mdtc->gdtc;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return &wb->memcg_completions;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth);
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
	unsigned long long min = wb->bdi->min_ratio;
	unsigned long long max = wb->bdi->max_ratio;

	/*
	 * @wb may already be clean by the time control reaches here and
	 * the total may not include its bw.
	 */
	if (this_bw < tot_bw) {
		if (min) {
			min *= this_bw;
			min = div64_ul(min, tot_bw);
		}
		if (max < 100 * BDI_RATIO_SCALE) {
			max *= this_bw;
			max = div64_ul(max, tot_bw);
		}
	}

	*minp = min;
	*maxp = max;
}
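
/*
 * Worked example (illustrative, ignoring BDI_RATIO_SCALE scaling): if this
 * wb contributes this_bw = 20 of tot_bw = 100 units of bandwidth and the
 * bdi is configured with min_ratio = 10 and max_ratio = 50, the effective
 * per-wb range becomes min = 10 * 20/100 = 2 and max = 50 * 20/100 = 10,
 * i.e. the bdi-wide ratios are split among its wbs in proportion to their
 * recent write bandwidth.
 */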

#else	/* CONFIG_CGROUP_WRITEBACK */

#define GDTC_INIT(__wb)		.wb = (__wb),				\
				.wb_completions = &(__wb)->completions
#define GDTC_INIT_NO_WB
#define MDTC_INIT(__wb, __gdtc)

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return false;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return &global_wb_domain;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return NULL;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return NULL;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	*minp = wb->bdi->min_ratio;
	*maxp = wb->bdi->max_ratio;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * In a memory zone, there is a certain number of pages we consider
 * available for the page cache, which is essentially the number of
 * free and reclaimable pages, minus some zone reserves to protect
 * lowmem and the ability to uphold the zone's watermarks without
 * requiring writeback.
 *
 * This number of dirtyable pages is the base value to which the
 * user-configurable dirty ratio is applied to obtain the effective
 * number of pages that are allowed to be actually dirtied -- per
 * individual zone, or globally by using the sum of dirtyable pages
 * over all zones.
 *
 * Because the user is allowed to specify the dirty limit globally as
 * an absolute number of bytes, calculating the per-zone dirty limit can
 * require translating the configured limit into a percentage of
 * global dirtyable memory first.
 */

/**
 * node_dirtyable_memory - number of dirtyable pages in a node
 * @pgdat: the node
 *
 * Return: the node's number of pages potentially available for dirty
 * page cache. This is the base value for the per-node dirty limits.
 */
static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
{
	unsigned long nr_pages = 0;
	int z;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);

	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);

	return nr_pages;
}

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;
	int i;

	for_each_node_state(node, N_HIGH_MEMORY) {
		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
			struct zone *z;
			unsigned long nr_pages;

			if (!is_highmem_idx(i))
				continue;

			z = &NODE_DATA(node)->node_zones[i];
			if (!populated_zone(z))
				continue;

			nr_pages = zone_page_state(z, NR_FREE_PAGES);
			/* watch for underflows */
			nr_pages -= min(nr_pages, high_wmark_pages(z));
			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
			x += nr_pages;
		}
	}

	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * global_dirtyable_memory - number of globally dirtyable pages
 *
 * Return: the global number of pages potentially available for dirty
 * page cache. This is the base value for the global dirty limits.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_zone_page_state(NR_FREE_PAGES);
	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
	x -= min(x, totalreserve_pages);

	x += global_node_page_state(NR_INACTIVE_FILE);
	x += global_node_page_state(NR_ACTIVE_FILE);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

/**
 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
 * @dtc: dirty_throttle_control of interest
 *
 * Calculate @dtc->thresh and ->bg_thresh considering
 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}. The caller
 * must ensure that @dtc->avail is set before calling this function. The
 * dirty limits will be lifted by 1/4 for real-time tasks.
 */
static void domain_dirty_limits(struct dirty_throttle_control *dtc)
{
	const unsigned long available_memory = dtc->avail;
	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
	unsigned long bytes = vm_dirty_bytes;
	unsigned long bg_bytes = dirty_background_bytes;
	/* convert ratios to per-PAGE_SIZE for higher precision */
	unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
	unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
	unsigned long thresh;
	unsigned long bg_thresh;
	struct task_struct *tsk;

	/* gdtc is !NULL iff @dtc is for memcg domain */
	if (gdtc) {
		unsigned long global_avail = gdtc->avail;

		/*
		 * The byte settings can't be applied directly to memcg
		 * domains. Convert them to ratios by scaling against
		 * globally available memory. As the ratios are in
		 * per-PAGE_SIZE, they can be obtained by dividing bytes by
		 * number of pages.
		 */
		if (bytes)
			ratio = min(DIV_ROUND_UP(bytes, global_avail),
				    PAGE_SIZE);
		if (bg_bytes)
			bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
				       PAGE_SIZE);
		bytes = bg_bytes = 0;
	}

	if (bytes)
		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
	else
		thresh = (ratio * available_memory) / PAGE_SIZE;

	if (bg_bytes)
		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
	else
		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;

	if (bg_thresh >= thresh)
		bg_thresh = thresh / 2;
	tsk = current;
	if (rt_task(tsk)) {
		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
	}
	dtc->thresh = thresh;
	dtc->bg_thresh = bg_thresh;

	/* we should eventually report the domain in the TP */
	if (!gdtc)
		trace_global_dirty_state(bg_thresh, thresh);
}
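
/*
 * Illustration of the per-PAGE_SIZE precision used above: with
 * vm_dirty_ratio = 20 and PAGE_SIZE = 4096, ratio = (20 * 4096) / 100 = 819,
 * so thresh = (819 * available_memory) / 4096, i.e. ~19.995% of dirtyable
 * memory. The same per-PAGE_SIZE unit lets memcg byte limits be converted
 * to ratios at sub-percent granularity.
 */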

/**
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 * @pbackground: out parameter for bg_thresh
 * @pdirty: out parameter for thresh
 *
 * Calculate bg_thresh and thresh for global_wb_domain. See
 * domain_dirty_limits() for details.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };

	gdtc.avail = global_dirtyable_memory();
	domain_dirty_limits(&gdtc);

	*pbackground = gdtc.bg_thresh;
	*pdirty = gdtc.thresh;
}

/**
 * node_dirty_limit - maximum number of dirty pages allowed in a node
 * @pgdat: the node
 *
 * Return: the maximum number of dirty pages allowed in a node, based
 * on the node's dirtyable memory.
 */
static unsigned long node_dirty_limit(struct pglist_data *pgdat)
{
	unsigned long node_memory = node_dirtyable_memory(pgdat);
	struct task_struct *tsk = current;
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			node_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * node_memory / 100;

	if (rt_task(tsk))
		dirty += dirty / 4;

	return dirty;
}

/**
 * node_dirty_ok - tells whether a node is within its dirty limits
 * @pgdat: the node to check
 *
 * Return: %true when the dirty pages in @pgdat are within the node's
 * dirty limit, %false if the limit is exceeded.
 */
bool node_dirty_ok(struct pglist_data *pgdat)
{
	unsigned long limit = node_dirty_limit(pgdat);
	unsigned long nr_pages = 0;

	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
	nr_pages += node_page_state(pgdat, NR_WRITEBACK);

	return nr_pages <= limit;
}

#ifdef CONFIG_SYSCTL
static int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

static int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

static int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		writeback_set_ratelimit();
		vm_dirty_bytes = 0;
	}
	return ret;
}

static int dirty_bytes_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		writeback_set_ratelimit();
		vm_dirty_ratio = 0;
	}
	return ret;
}
#endif

static unsigned long wp_next_time(unsigned long cur_time)
{
	cur_time += VM_COMPLETIONS_PERIOD_LEN;
	/* 0 has a special meaning... */
	if (!cur_time)
		return 1;
	return cur_time;
}

static void wb_domain_writeout_add(struct wb_domain *dom,
				   struct fprop_local_percpu *completions,
				   unsigned int max_prop_frac, long nr)
{
	__fprop_add_percpu_max(&dom->completions, completions,
			       max_prop_frac, nr);
	/* First event after period switching was turned off? */
	if (unlikely(!dom->period_time)) {
		/*
		 * We can race with other __bdi_writeout_inc calls here but
		 * it does not cause any harm since the resulting time when
		 * the timer will fire and what is in writeout_period_time
		 * will be roughly the same.
		 */
		dom->period_time = wp_next_time(jiffies);
		mod_timer(&dom->period_timer, dom->period_time);
	}
}

/*
 * Increment @wb's writeout completion count and the global writeout
 * completion count. Called from __folio_end_writeback().
 */
static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
{
	struct wb_domain *cgdom;

	wb_stat_mod(wb, WB_WRITTEN, nr);
	wb_domain_writeout_add(&global_wb_domain, &wb->completions,
			       wb->bdi->max_prop_frac, nr);

	cgdom = mem_cgroup_wb_domain(wb);
	if (cgdom)
		wb_domain_writeout_add(cgdom, wb_memcg_completions(wb),
				       wb->bdi->max_prop_frac, nr);
}

void wb_writeout_inc(struct bdi_writeback *wb)
{
	unsigned long flags;

	local_irq_save(flags);
	__wb_writeout_add(wb, 1);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(wb_writeout_inc);

/*
 * On an idle system, we can be called long after we scheduled because we use
 * deferred timers, so count in the missed periods.
 */
static void writeout_period(struct timer_list *t)
{
	struct wb_domain *dom = from_timer(dom, t, period_timer);
	int miss_periods = (jiffies - dom->period_time) /
						 VM_COMPLETIONS_PERIOD_LEN;

	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
		dom->period_time = wp_next_time(dom->period_time +
				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
		mod_timer(&dom->period_timer, dom->period_time);
	} else {
		/*
		 * Aging has zeroed all fractions. Stop wasting CPU on period
		 * updates.
		 */
		dom->period_time = 0;
	}
}

int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
{
	memset(dom, 0, sizeof(*dom));

	spin_lock_init(&dom->lock);

	timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);

	dom->dirty_limit_tstamp = jiffies;

	return fprop_global_init(&dom->completions, gfp);
}

#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom)
{
	del_timer_sync(&dom->period_timer);
	fprop_global_destroy(&dom->completions);
}
#endif

/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, can not
 * exceed 100%.
 */
static unsigned int bdi_min_ratio;

static int bdi_check_pages_limit(unsigned long pages)
{
	unsigned long max_dirty_pages = global_dirtyable_memory();

	if (pages > max_dirty_pages)
		return -EINVAL;

	return 0;
}

static unsigned long bdi_ratio_from_pages(unsigned long pages)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long ratio;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	ratio = div64_u64(pages * 100ULL * BDI_RATIO_SCALE, dirty_thresh);

	return ratio;
}

static u64 bdi_get_bytes(unsigned int ratio)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	u64 bytes;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	bytes = (dirty_thresh * PAGE_SIZE * ratio) / BDI_RATIO_SCALE / 100;

	return bytes;
}
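
/*
 * The two helpers above are (approximate) inverses around the current
 * global dirty threshold, with configured ratios stored as percent times
 * BDI_RATIO_SCALE. Illustrative round trip: with dirty_thresh = 100000
 * pages, bdi_ratio_from_pages(1000) yields the scaled equivalent of 1%,
 * and bdi_get_bytes() of that ratio returns ~1000 * PAGE_SIZE bytes.
 */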

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	unsigned int delta;
	int ret = 0;

	min_ratio *= BDI_RATIO_SCALE;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		if (min_ratio < bdi->min_ratio) {
			delta = bdi->min_ratio - min_ratio;
			bdi_min_ratio -= delta;
			bdi->min_ratio = min_ratio;
		} else {
			delta = min_ratio - bdi->min_ratio;
			if (bdi_min_ratio + delta < 100 * BDI_RATIO_SCALE) {
				bdi_min_ratio += delta;
				bdi->min_ratio = min_ratio;
			} else {
				ret = -EINVAL;
			}
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
{
	if (max_ratio > 100)
		return -EINVAL;

	return __bdi_set_max_ratio(bdi, max_ratio * BDI_RATIO_SCALE);
}
EXPORT_SYMBOL(bdi_set_max_ratio);

u64 bdi_get_min_bytes(struct backing_dev_info *bdi)
{
	return bdi_get_bytes(bdi->min_ratio);
}

u64 bdi_get_max_bytes(struct backing_dev_info *bdi)
{
	return bdi_get_bytes(bdi->max_ratio);
}

int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes)
{
	int ret;
	unsigned long pages = max_bytes >> PAGE_SHIFT;
	unsigned long max_ratio;

	ret = bdi_check_pages_limit(pages);
	if (ret)
		return ret;

	max_ratio = bdi_ratio_from_pages(pages);
	return __bdi_set_max_ratio(bdi, max_ratio);
}

int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit)
{
	if (strict_limit > 1)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (strict_limit)
		bdi->capabilities |= BDI_CAP_STRICTLIMIT;
	else
		bdi->capabilities &= ~BDI_CAP_STRICTLIMIT;
	spin_unlock_bh(&bdi_lock);

	return 0;
}

static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}

static unsigned long hard_dirty_limit(struct wb_domain *dom,
				      unsigned long thresh)
{
	return max(thresh, dom->dirty_limit);
}

/*
 * Memory which can be further allocated to a memcg domain is capped by
 * system-wide clean memory excluding the amount being used in the domain.
 */
static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
			    unsigned long filepages, unsigned long headroom)
{
	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
	unsigned long clean = filepages - min(filepages, mdtc->dirty);
	unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
	unsigned long other_clean = global_clean - min(global_clean, clean);

	mdtc->avail = filepages + min(headroom, other_clean);
}

/**
 * __wb_calc_thresh - @wb's share of dirty throttling threshold
 * @dtc: dirty_throttle_context of interest
 *
 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 * when sleeping max_pause per page is not enough to keep the dirty pages under
 * control. For example, when the device is completely stalled due to some error
 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 * In other normal situations, it acts more gently by throttling the tasks
 * more (rather than completely blocking them) when the wb dirty pages go high.
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take a long time to sync) on slow devices
 *
 * The wb's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 *
 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty and PG_writeback pages.
 */
static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	u64 wb_thresh;
	unsigned long numerator, denominator;
	unsigned long wb_min_ratio, wb_max_ratio;

	/*
	 * Calculate this BDI's share of the thresh ratio.
	 */
	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
			      &numerator, &denominator);

	wb_thresh = (thresh * (100 * BDI_RATIO_SCALE - bdi_min_ratio)) / (100 * BDI_RATIO_SCALE);
	wb_thresh *= numerator;
	wb_thresh = div64_ul(wb_thresh, denominator);

	wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);

	wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE);
	if (wb_thresh > (thresh * wb_max_ratio) / (100 * BDI_RATIO_SCALE))
		wb_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);

	return wb_thresh;
}

unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
					       .thresh = thresh };
	return __wb_calc_thresh(&gdtc);
}
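
/*
 * Rough shape of the result above (illustrative): a wb that completed 30%
 * of recent writeout (numerator/denominator ~= 0.3) gets wb_thresh ~= 30%
 * of thresh, after which min_ratio provides a floor and max_ratio a cap,
 * both expressed as fractions of thresh.
 */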

/*
 *                           setpoint - dirty 3
 *        f(dirty) := 1.0 + (----------------)
 *                           limit - setpoint
 *
 * it's a 3rd order polynomial that is subject to
 *
 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 * (2) f(setpoint) = 1.0 => the balance point
 * (3) f(limit)    = 0   => the hard limit
 * (4) df/dx      <= 0	 => negative feedback control
 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 *     => fast response on large errors; small oscillation near setpoint
 */
static long long pos_ratio_polynom(unsigned long setpoint,
					  unsigned long dirty,
					  unsigned long limit)
{
	long long pos_ratio;
	long x;

	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
		      (limit - setpoint) | 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
}
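
/*
 * Sanity check of properties (1)-(3) above, using the callers'
 * setpoint = (freerun + limit) / 2:
 *
 *   dirty == freerun:  x = (setpoint - freerun) / (limit - setpoint) = 1,
 *                      so f = 1 + 1^3 = 2.0
 *   dirty == setpoint: x = 0,  so f = 1.0
 *   dirty == limit:    x = -1, so f = 1 - 1 = 0
 */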

/*
 * Dirty position control.
 *
 * (o) global/bdi setpoints
 *
 * We want the dirty pages be balanced around the global/wb setpoints.
 * When the number of dirty pages is higher/lower than the setpoint, the
 * dirty position control ratio (and hence task dirty ratelimit) will be
 * decreased/increased to bring the dirty pages back to the setpoint.
 *
 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 *
 *     if (dirty < setpoint) scale up   pos_ratio
 *     if (dirty > setpoint) scale down pos_ratio
 *
 *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
 *     if (wb_dirty > wb_setpoint) scale down pos_ratio
 *
 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 *
 * (o) global control line
 *
 *     ^ pos_ratio
 *     |
 *     |            |<===== global dirty control scope ======>|
 * 2.0  * * * * * * *
 *     |            .*
 *     |            . *
 *     |            .   *
 *     |            .     *
 *     |            .        *
 *     |            .            *
 * 1.0 ................................*
 *     |            .                  .     *
 *     |            .                  .          *
 *     |            .                  .              *
 *     |            .                  .                 *
 *     |            .                  .                    *
 *   0 +------------.------------------.----------------------*------------->
 *           freerun^          setpoint^                 limit^   dirty pages
 *
 * (o) wb control line
 *
 *     ^ pos_ratio
 *     |
 *     |            *
 *     |              *
 *     |                *
 *     |                  *
 *     |                    * |<=========== span ============>|
 * 1.0 .......................*
 *     |                      . *
 *     |                      .   *
 *     |                      .     *
 *     |                      .       *
 *     |                      .         *
 *     |                      .           *
 *     |                      .             *
 *     |                      .               *
 *     |                      .                 *
 *     |                      .                   *
 *     |                      .                     *
 * 1/4 ...............................................* * * * * * * * * * * *
 *     |                      .                         .
 *     |                      .                           .
 *     |                      .                             .
 *   0 +----------------------.-------------------------------.------------->
 *                wb_setpoint^                    x_intercept^
 *
 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
 * be smoothly throttled down to normal if it starts high in situations like
 * - start writing to a slow SD card and a fast disk at the same time. The SD
 *   card's wb_dirty may rush to many times higher than wb_setpoint.
 * - the wb dirty thresh drops quickly due to change of JBOD workload
 */
static void wb_position_ratio(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth);
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long wb_thresh = dtc->wb_thresh;
	unsigned long x_intercept;
	unsigned long setpoint;		/* dirty pages' target balance point */
	unsigned long wb_setpoint;
	unsigned long span;
	long long pos_ratio;		/* for scaling up/down the rate limit */
	long x;

	dtc->pos_ratio = 0;

	if (unlikely(dtc->dirty >= limit))
		return;

	/*
	 * global setpoint
	 *
	 * See comment for pos_ratio_polynom().
	 */
	setpoint = (freerun + limit) / 2;
	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);

	/*
	 * The strictlimit feature is a tool preventing mistrusted filesystems
	 * from growing a large number of dirty pages before throttling. For
	 * such filesystems balance_dirty_pages always checks wb counters
	 * against wb limits, even if the global "nr_dirty" is under "freerun".
	 * This is especially important for fuse which sets bdi->max_ratio to
	 * 1% by default. Without strictlimit feature, fuse writeback may
	 * consume arbitrary amount of RAM because it is accounted in
	 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
	 *
	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
	 * two values: wb_dirty and wb_thresh. Let's consider an example:
	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
	 * limits are set by default to 10% and 20% (background and throttle).
	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
	 * about ~6K pages (as the average of background and throttle wb
	 * limits). The 3rd order polynomial will provide positive feedback if
	 * wb_dirty is under wb_setpoint and vice versa.
	 *
	 * Note, that we cannot use global counters in these calculations
	 * because we want to throttle process writing to a strictlimit wb
	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
	 * in the example above).
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		long long wb_pos_ratio;

		if (dtc->wb_dirty < 8) {
			dtc->pos_ratio = min_t(long long, pos_ratio * 2,
					   2 << RATELIMIT_CALC_SHIFT);
			return;
		}

		if (dtc->wb_dirty >= wb_thresh)
			return;

		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
						    dtc->wb_bg_thresh);

		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
			return;

		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
						 wb_thresh);

		/*
		 * Typically, for strictlimit case, wb_setpoint << setpoint
		 * and pos_ratio >> wb_pos_ratio. In other words, the global
		 * state ("dirty") is not the limiting factor and we have to
		 * make decision based on wb counters. But there is an
		 * important case when global pos_ratio should get precedence:
		 * global limits are exceeded (e.g. due to activities on other
		 * wb's) while given strictlimit wb is below limit.
		 *
		 * "pos_ratio * wb_pos_ratio" would work for the case above,
		 * but it would look too non-natural for the case of all
		 * activity in the system coming from a single strictlimit wb
		 * with bdi->max_ratio == 100%.
		 *
		 * Note that min() below somewhat changes the dynamics of the
		 * control system. Normally, pos_ratio value can be well over 3
		 * (when globally we are at freerun and wb is well below wb
		 * setpoint). Now the maximum pos_ratio in the same situation
		 * is 2. We might want to tweak this if we observe the control
		 * system is too slow to adapt.
		 */
		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
		return;
	}

	/*
	 * We have computed basic pos_ratio above based on global situation. If
	 * the wb is over/under its share of dirty pages, we want to scale
	 * pos_ratio further down/up. That is done by the following mechanism.
	 */

	/*
	 * wb setpoint
	 *
	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
	 *
	 *                        x_intercept - wb_dirty
	 *                     := --------------------------
	 *                        x_intercept - wb_setpoint
	 *
	 * The main wb control line is a linear function that is subject to
	 *
	 * (1) f(wb_setpoint) = 1.0
	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
	 *
	 * For single wb case, the dirty pages are observed to fluctuate
	 * regularly within range
	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
	 * for various filesystems, where (2) can yield in a reasonable 12.5%
	 * fluctuation range for pos_ratio.
	 *
	 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
	 * own size, so move the slope over accordingly and choose a slope that
	 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
	 */
	if (unlikely(wb_thresh > dtc->thresh))
		wb_thresh = dtc->thresh;
	/*
	 * It's very possible that wb_thresh is close to 0 not because the
	 * device is slow, but because it has remained inactive for a long
	 * time. Honour such devices with a reasonably good (hopefully IO
	 * efficient) threshold, so that the occasional writes won't be
	 * blocked and active writes can ramp up the threshold quickly.
	 */
	wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
	/*
	 * scale global setpoint to wb's:
	 *	wb_setpoint = setpoint * wb_thresh / thresh
	 */
	x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
	wb_setpoint = setpoint * (u64)x >> 16;
	/*
	 * Use span=(8*write_bw) in single wb case as indicated by
	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
	 *
	 *        wb_thresh                    thresh - wb_thresh
	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
	 *          thresh                           thresh
	 */
	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
	x_intercept = wb_setpoint + span;

	if (dtc->wb_dirty < x_intercept - span / 4) {
		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
				      (x_intercept - wb_setpoint) | 1);
	} else
		pos_ratio /= 4;

	/*
	 * wb reserve area, safeguard against dirty pool underrun and disk idle
	 * It may push the desired control point of global dirty pages higher
	 * than setpoint.
	 */
	x_intercept = wb_thresh / 2;
	if (dtc->wb_dirty < x_intercept) {
		if (dtc->wb_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept,
					    dtc->wb_dirty);
		else
			pos_ratio *= 8;
	}

	dtc->pos_ratio = pos_ratio;
}

static void wb_update_write_bandwidth(struct bdi_writeback *wb,
				      unsigned long elapsed,
				      unsigned long written)
{
	const unsigned long period = roundup_pow_of_two(3 * HZ);
	unsigned long avg = wb->avg_write_bandwidth;
	unsigned long old = wb->write_bandwidth;
	u64 bw;

	/*
	 * bw = written * HZ / elapsed
	 *
	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
	 * write_bandwidth = ---------------------------------------------------
	 *                                          period
	 *
	 * @written may have decreased due to folio_account_redirty().
	 * Avoid underflowing @bw calculation.
	 */
	bw = written - min(written, wb->written_stamp);
	bw *= HZ;
	if (unlikely(elapsed > period)) {
		bw = div64_ul(bw, elapsed);
		avg = bw;
		goto out;
	}
	bw += (u64)wb->write_bandwidth * (period - elapsed);
	bw >>= ilog2(period);

	/*
	 * one more level of smoothing, for filtering out sudden spikes
	 */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;

	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

out:
	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
	avg = max(avg, 1LU);
	if (wb_has_dirty_io(wb)) {
		long delta = avg - wb->avg_write_bandwidth;
		WARN_ON_ONCE(atomic_long_add_return(delta,
					&wb->bdi->tot_write_bandwidth) <= 0);
	}
	wb->write_bandwidth = bw;
	WRITE_ONCE(wb->avg_write_bandwidth, avg);
}
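
/*
 * Illustration of the weighting above: period = roundup_pow_of_two(3 * HZ),
 * e.g. 1024 jiffies at HZ = 250. A sample measured over elapsed = 50
 * jiffies (200ms) then contributes elapsed/period ~= 5% to the blended
 * write_bandwidth, so one noisy interval moves the estimate only slightly.
 */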

static void update_dirty_limit(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	unsigned long limit = dom->dirty_limit;

	/*
	 * Follow up in one step.
	 */
	if (limit < thresh) {
		limit = thresh;
		goto update;
	}

	/*
	 * Follow down slowly. Use the higher one as the target, because thresh
	 * may drop below dirty. This is exactly the reason to introduce
	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
	 */
	thresh = max(thresh, dtc->dirty);
	if (limit > thresh) {
		limit -= (limit - thresh) >> 5;
		goto update;
	}
	return;
update:
	dom->dirty_limit = limit;
}
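
/*
 * The ">> 5" above makes dom->dirty_limit decay toward a lowered thresh
 * geometrically: each update closes 1/32 (~3%) of the remaining gap, e.g.
 * a gap of 3200 pages shrinks by ~100 pages on the first update.
 */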

static void domain_update_dirty_limit(struct dirty_throttle_control *dtc,
				      unsigned long now)
{
	struct wb_domain *dom = dtc_dom(dtc);

	/*
	 * check locklessly first to optimize away locking for most of the time
	 */
	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
		return;

	spin_lock(&dom->lock);
	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
		update_dirty_limit(dtc);
		dom->dirty_limit_tstamp = now;
	}
	spin_unlock(&dom->lock);
}

/*
 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
 *
 * Normal wb tasks will be curbed at or below it in long term.
 * Obviously it should be around (write_bw / N) when there are N dd tasks.
 */
static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
				      unsigned long dirtied,
				      unsigned long elapsed)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long dirty = dtc->dirty;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long step;
	unsigned long x;
	unsigned long shift;

	/*
	 * The dirty rate will match the writeout rate in long term, except
	 * when dirty pages are truncated by userspace or re-dirtied by FS.
	 */
	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;

	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
	task_ratelimit = (u64)dirty_ratelimit *
					dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */

	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
	 * formula will yield the balanced rate limit (write_bw / N).
	 *
	 * Note that the expanded form is not a pure rate feedback:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
	 * but also takes pos_ratio into account:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
	 *
	 * (1) is not realistic because pos_ratio also takes part in balancing
	 * the dirty rate.  Consider the state
	 *	pos_ratio = 0.5						     (3)
	 *	rate = 2 * (write_bw / N)				     (4)
	 * If (1) is used, it will get stuck in that state! Because each dd
	 * will be throttled at
	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
	 * yielding
	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
	 * put (6) into (1) we get
	 *	rate_(i+1) = rate_(i)					     (7)
	 *
	 * So we end up using (2) to always keep
	 *	rate_(i+1) ~= (write_bw / N)				     (8)
	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
	 * pos_ratio is able to drive itself to 1.0, which is not only where
	 * the dirty count meets the setpoint, but also where the slope of
	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
	 */
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
	/*
	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
	 */
	if (unlikely(balanced_dirty_ratelimit > write_bw))
		balanced_dirty_ratelimit = write_bw;

	/*
	 * We could safely do this and return immediately:
	 *
	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
	 *
	 * However to get a more stable dirty_ratelimit, the below elaborated
	 * code makes use of task_ratelimit to filter out singular points and
	 * limit the step size.
	 *
	 * The below code essentially only uses the relative value of
	 *
	 *	task_ratelimit - dirty_ratelimit
	 *	= (pos_ratio - 1) * dirty_ratelimit
	 *
	 * which reflects the direction and size of dirty position error.
	 */

	/*
	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
	 * task_ratelimit is on the same side of dirty_ratelimit, too.
	 * For example, when
	 * - dirty_ratelimit > balanced_dirty_ratelimit
	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
	 * lowering dirty_ratelimit will help meet both the position and rate
	 * control targets. Otherwise, don't update dirty_ratelimit if it will
	 * only help meet the rate target. After all, what the users ultimately
	 * feel and care about are a stable dirty rate and small position error.
	 *
	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
	 * and filter out the singular points of balanced_dirty_ratelimit,
	 * which keeps jumping around randomly and can even leap far away at
	 * times due to the small 200ms estimation period of dirty_rate (we
	 * want to keep that period small to reduce time lags).
	 */
	step = 0;

	/*
	 * For strictlimit case, calculations above were based on wb counters
	 * and limits (starting from pos_ratio = wb_position_ratio() and up to
	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
	 * Hence, to calculate "step" properly, we have to use wb_dirty as
	 * "dirty" and wb_setpoint as "setpoint".
	 *
	 * We ramp up dirty_ratelimit forcibly if wb_dirty is low because
	 * it's possible that wb_thresh is close to zero due to inactivity
	 * of backing device.
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		dirty = dtc->wb_dirty;
		if (dtc->wb_dirty < 8)
			setpoint = dtc->wb_dirty + 1;
		else
			setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
	}

	if (dirty < setpoint) {
		x = min3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	shift = dirty_ratelimit / (2 * step + 1);
	if (shift < BITS_PER_LONG)
		step = DIV_ROUND_UP(step >> shift, 8);
	else
		step = 0;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL));
	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
}

static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
				  struct dirty_throttle_control *mdtc,
				  bool update_ratelimit)
{
	struct bdi_writeback *wb = gdtc->wb;
	unsigned long now = jiffies;
	unsigned long elapsed;
	unsigned long dirtied;
	unsigned long written;

	spin_lock(&wb->list_lock);

	/*
	 * Lockless checks for elapsed time are racy and delayed update after
	 * IO completion doesn't do it at all (to make sure written pages are
	 * accounted reasonably quickly). Make sure elapsed >= 1 to avoid
	 * division errors.
	 */
	elapsed = max(now - wb->bw_time_stamp, 1UL);
	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);

	if (update_ratelimit) {
		domain_update_dirty_limit(gdtc, now);
		wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);

		/*
		 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
		 * compiler has no way to figure that out. Help it.
		 */
		if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
			domain_update_dirty_limit(mdtc, now);
			wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
		}
	}
	wb_update_write_bandwidth(wb, elapsed, written);

	wb->dirtied_stamp = dirtied;
	wb->written_stamp = written;
	WRITE_ONCE(wb->bw_time_stamp, now);
	spin_unlock(&wb->list_lock);
}

void wb_update_bandwidth(struct bdi_writeback *wb)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };

	__wb_update_bandwidth(&gdtc, NULL, false);
}

/* Interval after which we consider wb idle and don't estimate bandwidth */
#define WB_BANDWIDTH_IDLE_JIF (HZ)

static void wb_bandwidth_estimate_start(struct bdi_writeback *wb)
{
	unsigned long now = jiffies;
	unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp);

	if (elapsed > WB_BANDWIDTH_IDLE_JIF &&
	    !atomic_read(&wb->writeback_inodes)) {
		spin_lock(&wb->list_lock);
		wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED);
		wb->written_stamp = wb_stat(wb, WB_WRITTEN);
		WRITE_ONCE(wb->bw_time_stamp, now);
		spin_unlock(&wb->list_lock);
	}
}

/*
 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
 * (the number of pages we may dirty without exceeding the dirty limits).
 */
static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2(thresh - dirty) >> 1);

	return 1;
}
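
/*
 * Illustration of the near-sqrt scaling above: a safety margin of 1024
 * pages gives 1 << (10 >> 1) = 32 pages between polls; a margin of ~1M
 * pages gives 1 << (19 >> 1) = 512 pages (vs. sqrt(1M) = 1000).
 */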

static unsigned long wb_max_pause(struct bdi_writeback *wb,
				  unsigned long wb_dirty)
{
	unsigned long bw = READ_ONCE(wb->avg_write_bandwidth);
	unsigned long t;

	/*
	 * Limit pause time for small memory systems. If sleeping for too long
	 * time, a small pool of dirty/writeback pages may go empty and disk go
	 * idle.
	 *
	 * 8 serves as the safety ratio.
	 */
	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
	t++;

	return min_t(unsigned long, t, MAX_PAUSE);
}
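
/*
 * Equivalently, t above is roughly the time needed to write wb_dirty / 8
 * pages at the current bandwidth. E.g. at HZ = 1000,
 * roundup_pow_of_two(1 + HZ / 8) = 128, so with bw = 25600 pages/s and
 * wb_dirty = 4000 pages, t ~= 4000 / 201 ~= 20 jiffies (~20ms), vs.
 * ~156ms to write out all 4000 pages.
 */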

static long wb_min_pause(struct bdi_writeback *wb,
			 long max_pause,
			 unsigned long task_ratelimit,
			 unsigned long dirty_ratelimit,
			 int *nr_dirtied_pause)
{
	long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth));
	long lo = ilog2(READ_ONCE(wb->dirty_ratelimit));
	long t;		/* target pause */
	long pause;	/* estimated next pause */
	int pages;	/* target nr_dirtied_pause */

	/* target for 10ms pause on 1-dd case */
	t = max(1, HZ / 100);

	/*
	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
	 * overheads.
	 *
	 * (N * 10ms) on 2^N concurrent tasks.
	 */
	if (hi > lo)
		t += (hi - lo) * (10 * HZ) / 1024;

	/*
	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
	 * on the much more stable dirty_ratelimit. However the next pause time
	 * will be computed based on task_ratelimit and the two rate limits may
	 * depart considerably at some time. Especially if task_ratelimit goes
	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
	 * pause time will be max_pause*2 _trimmed down_ to max_pause. As a
	 * result task_ratelimit won't be executed faithfully, which could
	 * eventually bring down dirty_ratelimit.
	 *
	 * We apply two rules to fix it up:
	 * 1) try to estimate the next pause time and if necessary, use a lower
	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
	 * 2) limit the target pause time to max_pause/2, so that the normal
	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
	 */
	t = min(t, 1 + max_pause / 2);
	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);

	/*
	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
	 * When the 16 consecutive reads are often interrupted by some dirty
	 * throttling pause during the async writes, cfq will go idle
	 * (deadline is fine). So push nr_dirtied_pause as high as possible
	 * until it reaches DIRTY_POLL_THRESH=32 pages.
	 */
	if (pages < DIRTY_POLL_THRESH) {
		t = max_pause;
		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
		if (pages > DIRTY_POLL_THRESH) {
			pages = DIRTY_POLL_THRESH;
			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
		}
	}

	pause = HZ * pages / (task_ratelimit + 1);
	if (pause > max_pause) {
		t = max_pause;
		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
	}

	*nr_dirtied_pause = pages;
	/*
	 * The minimal pause time will normally be half the target pause time.
	 */
	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}
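
/*
 * Worked example (illustrative values, assuming HZ=1000): a single dirtier
 * targets a 10ms pause. With avg_write_bandwidth = 2^15 pages/s shared by
 * ~8 tasks so that dirty_ratelimit = 2^12 pages/s, hi - lo = 3 and the
 * target pause grows by 3 * 10 * HZ / 1024 (about 29 jiffies), trading
 * pause granularity for fewer balance_dirty_pages() calls per second.
 */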

static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long wb_reclaimable;

	/*
	 * wb_thresh is not treated as a hard limit the way dirty_thresh is,
	 * for two reasons:
	 * - in JBOD setup, wb_thresh can fluctuate a lot
	 * - in a system with HDD and USB key, the USB key may somehow
	 *   go into state (wb_dirty >> wb_thresh) either because
	 *   wb_dirty starts high, or because wb_thresh drops low.
	 *   In this case we don't want to hard throttle the USB key
	 *   dirtiers for 100 seconds until wb_dirty drops under
	 *   wb_thresh. Instead the auxiliary wb control line in
	 *   wb_position_ratio() will let the dirtier task progress
	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
	 */
	dtc->wb_thresh = __wb_calc_thresh(dtc);
	dtc->wb_bg_thresh = dtc->thresh ?
		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;

	/*
	 * In order to avoid the stacked BDI deadlock we need
	 * to ensure we accurately count the 'dirty' pages when
	 * the threshold is low.
	 *
	 * Otherwise it would be possible to get thresh+n pages
	 * reported dirty, even though there are thresh-m pages
	 * actually dirty; with m+n sitting in the percpu
	 * deltas.
	 */
	if (dtc->wb_thresh < 2 * wb_stat_error()) {
		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
	} else {
		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
	}
}

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data. It looks at the number of dirty pages in the machine and will force
 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
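/*
 * Worked example (illustrative values, assuming 4KiB pages, ~2,000,000
 * dirtyable pages and, for the arithmetic only, a 10% background and 20%
 * dirty ratio): background_thresh ~ 200,000 pages, dirty_thresh ~ 400,000
 * pages, so throttling kicks in at the 300,000-page midpoint (~1.2 GiB).
 */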
static int balance_dirty_pages(struct bdi_writeback *wb,
			       unsigned long pages_dirtied, unsigned int flags)
{
	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
	struct dirty_throttle_control * const gdtc = &gdtc_stor;
	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
						     &mdtc_stor : NULL;
	struct dirty_throttle_control *sdtc;
	unsigned long nr_reclaimable;	/* = file_dirty */
	long period;
	long pause;
	long max_pause;
	long min_pause;
	int nr_dirtied_pause;
	bool dirty_exceeded = false;
	unsigned long task_ratelimit;
	unsigned long dirty_ratelimit;
	struct backing_dev_info *bdi = wb->bdi;
	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
	unsigned long start_time = jiffies;
	int ret = 0;

	for (;;) {
		unsigned long now = jiffies;
		unsigned long dirty, thresh, bg_thresh;
		unsigned long m_dirty = 0;	/* stop bogus uninit warnings */
		unsigned long m_thresh = 0;
		unsigned long m_bg_thresh = 0;

		nr_reclaimable = global_node_page_state(NR_FILE_DIRTY);
		gdtc->avail = global_dirtyable_memory();
		gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);

		domain_dirty_limits(gdtc);

		if (unlikely(strictlimit)) {
			wb_dirty_limits(gdtc);

			dirty = gdtc->wb_dirty;
			thresh = gdtc->wb_thresh;
			bg_thresh = gdtc->wb_bg_thresh;
		} else {
			dirty = gdtc->dirty;
			thresh = gdtc->thresh;
			bg_thresh = gdtc->bg_thresh;
		}

		if (mdtc) {
			unsigned long filepages, headroom, writeback;

			/*
			 * If @wb belongs to !root memcg, repeat the same
			 * basic calculations for the memcg domain.
			 */
			mem_cgroup_wb_stats(wb, &filepages, &headroom,
					    &mdtc->dirty, &writeback);
			mdtc->dirty += writeback;
			mdtc_calc_avail(mdtc, filepages, headroom);

			domain_dirty_limits(mdtc);

			if (unlikely(strictlimit)) {
				wb_dirty_limits(mdtc);
				m_dirty = mdtc->wb_dirty;
				m_thresh = mdtc->wb_thresh;
				m_bg_thresh = mdtc->wb_bg_thresh;
			} else {
				m_dirty = mdtc->dirty;
				m_thresh = mdtc->thresh;
				m_bg_thresh = mdtc->bg_thresh;
			}
		}

		/*
		 * In laptop mode, we wait until hitting the higher threshold
		 * before starting background writeout, and then write out all
		 * the way down to the lower threshold. So slow writers cause
		 * minimal disk activity.
		 *
		 * In normal mode, we start background writeout at the lower
		 * background_thresh, to keep the amount of dirty memory low.
		 */
		if (!laptop_mode && nr_reclaimable > gdtc->bg_thresh &&
		    !writeback_in_progress(wb))
			wb_start_background_writeback(wb);

		/*
		 * Throttle it only when the background writeback cannot
		 * catch up. This avoids (excessively) small writeouts
		 * when the wb limits are ramping up in case of !strictlimit.
		 *
		 * In strictlimit case make decision based on the wb counters
		 * and limits. Small writeouts when the wb limits are ramping
		 * up are the price we consciously pay for strictlimit-ing.
		 *
		 * If memcg domain is in effect, @dirty should be under
		 * both global and memcg freerun ceilings.
		 */
		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
		    (!mdtc ||
		     m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
			unsigned long intv;
			unsigned long m_intv;

free_running:
			intv = dirty_poll_interval(dirty, thresh);
			m_intv = ULONG_MAX;

			current->dirty_paused_when = now;
			current->nr_dirtied = 0;
			if (mdtc)
				m_intv = dirty_poll_interval(m_dirty, m_thresh);
			current->nr_dirtied_pause = min(intv, m_intv);
			break;
		}

		/* Start writeback even when in laptop mode */
		if (unlikely(!writeback_in_progress(wb)))
			wb_start_background_writeback(wb);

		mem_cgroup_flush_foreign(wb);

		/*
		 * Calculate global domain's pos_ratio and select the
		 * global dtc by default.
		 */
		if (!strictlimit) {
			wb_dirty_limits(gdtc);

			if ((current->flags & PF_LOCAL_THROTTLE) &&
			    gdtc->wb_dirty <
			    dirty_freerun_ceiling(gdtc->wb_thresh,
						  gdtc->wb_bg_thresh))
				/*
				 * LOCAL_THROTTLE tasks must not be throttled
				 * when below the per-wb freerun ceiling.
				 */
				goto free_running;
		}

		dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
			((gdtc->dirty > gdtc->thresh) || strictlimit);

		wb_position_ratio(gdtc);
		sdtc = gdtc;

		if (mdtc) {
			/*
			 * If memcg domain is in effect, calculate its
			 * pos_ratio. @wb should satisfy constraints from
			 * both global and memcg domains. Choose the one
			 * w/ lower pos_ratio.
			 */
			if (!strictlimit) {
				wb_dirty_limits(mdtc);

				if ((current->flags & PF_LOCAL_THROTTLE) &&
				    mdtc->wb_dirty <
				    dirty_freerun_ceiling(mdtc->wb_thresh,
							  mdtc->wb_bg_thresh))
					/*
					 * LOCAL_THROTTLE tasks must not be
					 * throttled when below the per-wb
					 * freerun ceiling.
					 */
					goto free_running;
			}
			dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
				((mdtc->dirty > mdtc->thresh) || strictlimit);

			wb_position_ratio(mdtc);
			if (mdtc->pos_ratio < gdtc->pos_ratio)
				sdtc = mdtc;
		}

		if (dirty_exceeded != wb->dirty_exceeded)
			wb->dirty_exceeded = dirty_exceeded;

		if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
					   BANDWIDTH_INTERVAL))
			__wb_update_bandwidth(gdtc, mdtc, true);

		/* throttle according to the chosen dtc */
		dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit);
		task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
							RATELIMIT_CALC_SHIFT;
		max_pause = wb_max_pause(wb, sdtc->wb_dirty);
		min_pause = wb_min_pause(wb, max_pause,
					 task_ratelimit, dirty_ratelimit,
					 &nr_dirtied_pause);

		if (unlikely(task_ratelimit == 0)) {
			period = max_pause;
			pause = max_pause;
			goto pause;
		}
		period = HZ * pages_dirtied / task_ratelimit;
		pause = period;
		if (current->dirty_paused_when)
			pause -= now - current->dirty_paused_when;
		/*
		 * For less than 1s think time (ext3/4 may block the dirtier
		 * for up to 800ms from time to time on 1-HDD; so does xfs,
		 * however at much less frequency), try to compensate it in
		 * future periods by updating the virtual time; otherwise just
		 * do a reset, as it may be a light dirtier.
		 */
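		/*
		 * Worked example (illustrative values, assuming HZ=1000): at
		 * task_ratelimit = 1000 pages/s, a task that dirtied
		 * pages_dirtied = 32 pages owes a period of 32 jiffies; if it
		 * already "thought" for 20 jiffies since dirty_paused_when,
		 * only ~12 jiffies of sleep remain.
		 */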
		if (pause < min_pause) {
			trace_balance_dirty_pages(wb,
						  sdtc->thresh,
						  sdtc->bg_thresh,
						  sdtc->dirty,
						  sdtc->wb_thresh,
						  sdtc->wb_dirty,
						  dirty_ratelimit,
						  task_ratelimit,
						  pages_dirtied,
						  period,
						  min(pause, 0L),
						  start_time);
			if (pause < -HZ) {
				current->dirty_paused_when = now;
				current->nr_dirtied = 0;
			} else if (period) {
				current->dirty_paused_when += period;
				current->nr_dirtied = 0;
			} else if (current->nr_dirtied_pause <= pages_dirtied)
				current->nr_dirtied_pause += pages_dirtied;
			break;
		}
		if (unlikely(pause > max_pause)) {
			/* for occasional dropped task_ratelimit */
			now += min(pause - max_pause, max_pause);
			pause = max_pause;
		}

pause:
		trace_balance_dirty_pages(wb,
					  sdtc->thresh,
					  sdtc->bg_thresh,
					  sdtc->dirty,
					  sdtc->wb_thresh,
					  sdtc->wb_dirty,
					  dirty_ratelimit,
					  task_ratelimit,
					  pages_dirtied,
					  period,
					  pause,
					  start_time);
		if (flags & BDP_ASYNC) {
			ret = -EAGAIN;
			break;
		}
		__set_current_state(TASK_KILLABLE);
		wb->dirty_sleep = now;
		io_schedule_timeout(pause);

		current->dirty_paused_when = now + pause;
		current->nr_dirtied = 0;
		current->nr_dirtied_pause = nr_dirtied_pause;

		/*
		 * This is typically equal to (dirty < thresh) and can also
		 * keep "1000+ dd on a slow USB stick" under control.
		 */
		if (task_ratelimit)
			break;

		/*
		 * When an NFS server is unresponsive and its dirty pages
		 * exceed dirty_thresh, give the other good wb's a pipe
		 * to go through, so that tasks on them still remain responsive.
		 *
		 * In theory 1 page is enough to keep the consumer-producer
		 * pipe going: the flusher cleans 1 page => the task dirties 1
		 * more page. However wb_dirty has accounting errors. So use
		 * the larger and more IO friendly wb_stat_error.
		 */
		if (sdtc->wb_dirty <= wb_stat_error())
			break;

		if (fatal_signal_pending(current))
			break;
	}
	return ret;
}

static DEFINE_PER_CPU(int, bdp_ratelimits);

/*
 * Normal tasks are throttled by
 *	loop {
 *		dirty tsk->nr_dirtied_pause pages;
 *		take a snap in balance_dirty_pages();
 *	}
 * However there is a worst case. If every task exits immediately after
 * dirtying (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will
 * never be called to throttle the page dirties. The solution is to save the
 * not yet throttled page dirties in dirty_throttle_leaks on task exit and
 * charge them randomly into the running tasks. This works well for the above
 * worst case, as the new task will pick up and accumulate the old task's
 * leaked dirty count and eventually get throttled.
 */
DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
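
/*
 * Worked example (illustrative): a short-lived compiler process dirties 31
 * pages and exits while its nr_dirtied_pause is 32, so it was never
 * throttled; those 31 pages are parked in this CPU's dirty_throttle_leaks
 * and the next task dirtying pages on this CPU inherits them via
 * nr_dirtied.
 */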

/**
 * balance_dirty_pages_ratelimited_flags - Balance dirty memory state.
 * @mapping: address_space which was dirtied.
 * @flags: BDP flags.
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied. The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * See balance_dirty_pages_ratelimited() for details.
 *
 * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to
 * indicate that memory is out of balance and the caller must wait
 * for I/O to complete. Otherwise, it will return 0 to indicate
 * that either memory was already in balance, or it was able to sleep
 * until the amount of dirty memory returned to balance.
 */
int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
					  unsigned int flags)
{
	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct bdi_writeback *wb = NULL;
	int ratelimit;
	int ret = 0;
	int *p;

	if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
		return ret;

	if (inode_cgwb_enabled(inode))
		wb = wb_get_create_current(bdi, GFP_KERNEL);
	if (!wb)
		wb = &bdi->wb;

	ratelimit = current->nr_dirtied_pause;
	if (wb->dirty_exceeded)
		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
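		/* i.e. 32KB worth of pages: 32 >> 2 = 8 pages with 4KiB pages */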

	preempt_disable();
	/*
	 * This prevents one CPU from accumulating too many dirtied pages
	 * without calling into balance_dirty_pages(), which can happen when
	 * there are 1000+ tasks that all start dirtying pages at exactly the
	 * same time, hence all honoured a too large initial
	 * task->nr_dirtied_pause.
	 */
	p = this_cpu_ptr(&bdp_ratelimits);
	if (unlikely(current->nr_dirtied >= ratelimit))
		*p = 0;
	else if (unlikely(*p >= ratelimit_pages)) {
		*p = 0;
		ratelimit = 0;
	}
	/*
	 * Pick up the dirtied pages by the exited tasks. This avoids lots of
	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
	 * the dirty throttling and livelocking other long-run dirtiers.
	 */
	p = this_cpu_ptr(&dirty_throttle_leaks);
	if (*p > 0 && current->nr_dirtied < ratelimit) {
		unsigned long nr_pages_dirtied;
		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
		*p -= nr_pages_dirtied;
		current->nr_dirtied += nr_pages_dirtied;
	}
	preempt_enable();

	if (unlikely(current->nr_dirtied >= ratelimit))
		ret = balance_dirty_pages(wb, current->nr_dirtied, flags);

	wb_put(wb);
	return ret;
}
EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited_flags);

/**
 * balance_dirty_pages_ratelimited - balance dirty memory state.
 * @mapping: address_space which was dirtied.
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied. The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * Once we're over the dirty memory limit we decrease the ratelimiting
 * by a lot, to prevent individual processes from overshooting the limit
 * by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	balance_dirty_pages_ratelimited_flags(mapping, 0);
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited);

/**
 * wb_over_bg_thresh - does @wb need to be written back?
 * @wb: bdi_writeback of interest
 *
 * Determines whether background writeback should keep writing @wb or it's
 * clean enough.
 *
 * Return: %true if writeback should continue.
 */
bool wb_over_bg_thresh(struct bdi_writeback *wb)
{
	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
	struct dirty_throttle_control * const gdtc = &gdtc_stor;
	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
						     &mdtc_stor : NULL;
	unsigned long reclaimable;
	unsigned long thresh;

	/*
	 * Similar to balance_dirty_pages() but ignores pages being written
	 * as we're trying to decide whether to put more under writeback.
	 */
	gdtc->avail = global_dirtyable_memory();
	gdtc->dirty = global_node_page_state(NR_FILE_DIRTY);
	domain_dirty_limits(gdtc);

	if (gdtc->dirty > gdtc->bg_thresh)
		return true;

	thresh = wb_calc_thresh(gdtc->wb, gdtc->bg_thresh);
	if (thresh < 2 * wb_stat_error())
		reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
	else
		reclaimable = wb_stat(wb, WB_RECLAIMABLE);

	if (reclaimable > thresh)
		return true;

	if (mdtc) {
		unsigned long filepages, headroom, writeback;

		mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
				    &writeback);
		mdtc_calc_avail(mdtc, filepages, headroom);
		domain_dirty_limits(mdtc);	/* ditto, ignore writeback */

		if (mdtc->dirty > mdtc->bg_thresh)
			return true;

		thresh = wb_calc_thresh(mdtc->wb, mdtc->bg_thresh);
		if (thresh < 2 * wb_stat_error())
			reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
		else
			reclaimable = wb_stat(wb, WB_RECLAIMABLE);

		if (reclaimable > thresh)
			return true;
	}

	return false;
}

#ifdef CONFIG_SYSCTL
/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
static int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	unsigned int old_interval = dirty_writeback_interval;
	int ret;

	ret = proc_dointvec(table, write, buffer, length, ppos);

	/*
	 * Writing 0 to dirty_writeback_interval will disable periodic
	 * writeback and a different non-zero value will wake up the
	 * writeback threads. wb_wakeup_delayed() would be more appropriate,
	 * but it's a pain to iterate over all bdis and wbs.
	 * The reason we do this is to make the change take effect
	 * immediately.
	 */
	if (!ret && write && dirty_writeback_interval &&
		dirty_writeback_interval != old_interval)
		wakeup_flusher_threads(WB_REASON_PERIODIC);

	return ret;
}
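
/*
 * Usage note: from userspace this handler is reached via
 * /proc/sys/vm/dirty_writeback_centisecs; writing 0 there disables
 * periodic writeback, while any other new value wakes the flusher
 * threads so the change takes effect immediately.
 */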
#endif

void laptop_mode_timer_fn(struct timer_list *t)
{
	struct backing_dev_info *backing_dev_info =
		from_timer(backing_dev_info, t, laptop_mode_wb_timer);

	wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now. If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(struct backing_dev_info *info)
{
	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		del_timer(&bdi->laptop_mode_wb_timer);

	rcu_read_unlock();
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds.
 */

void writeback_set_ratelimit(void)
{
	struct wb_domain *dom = &global_wb_domain;
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	dom->dirty_limit = dirty_thresh;
	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
}
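
/*
 * Worked example (illustrative values): with dirty_thresh = 400,000 pages
 * and 8 online CPUs, ratelimit_pages = 400000 / (8 * 32) = 1562, so even
 * if every CPU dirties a full batch before rechecking, the combined
 * overshoot stays around 1/32 (~3%) of the threshold.
 */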

static int page_writeback_cpu_online(unsigned int cpu)
{
	writeback_set_ratelimit();
	return 0;
}

#ifdef CONFIG_SYSCTL

/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE;

static struct ctl_table vm_page_writeback_sysctls[] = {
	{
		.procname	= "dirty_background_ratio",
		.data		= &dirty_background_ratio,
		.maxlen		= sizeof(dirty_background_ratio),
		.mode		= 0644,
		.proc_handler	= dirty_background_ratio_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE_HUNDRED,
	},
	{
		.procname	= "dirty_background_bytes",
		.data		= &dirty_background_bytes,
		.maxlen		= sizeof(dirty_background_bytes),
		.mode		= 0644,
		.proc_handler	= dirty_background_bytes_handler,
		.extra1		= SYSCTL_LONG_ONE,
	},
	{
		.procname	= "dirty_ratio",
		.data		= &vm_dirty_ratio,
		.maxlen		= sizeof(vm_dirty_ratio),
		.mode		= 0644,
		.proc_handler	= dirty_ratio_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE_HUNDRED,
	},
	{
		.procname	= "dirty_bytes",
		.data		= &vm_dirty_bytes,
		.maxlen		= sizeof(vm_dirty_bytes),
		.mode		= 0644,
		.proc_handler	= dirty_bytes_handler,
		.extra1		= (void *)&dirty_bytes_min,
	},
	{
		.procname	= "dirty_writeback_centisecs",
		.data		= &dirty_writeback_interval,
		.maxlen		= sizeof(dirty_writeback_interval),
		.mode		= 0644,
		.proc_handler	= dirty_writeback_centisecs_handler,
	},
	{
		.procname	= "dirty_expire_centisecs",
		.data		= &dirty_expire_interval,
		.maxlen		= sizeof(dirty_expire_interval),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
#ifdef CONFIG_HIGHMEM
	{
		.procname	= "highmem_is_dirtyable",
		.data		= &vm_highmem_is_dirtyable,
		.maxlen		= sizeof(vm_highmem_is_dirtyable),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif
	{
		.procname	= "laptop_mode",
		.data		= &laptop_mode,
		.maxlen		= sizeof(laptop_mode),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{}
};
#endif

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers.
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHMEM memory, and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has.
 */
void __init page_writeback_init(void)
{
	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));

	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
			  page_writeback_cpu_online, NULL);
	cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
			  page_writeback_cpu_online);
#ifdef CONFIG_SYSCTL
	register_sysctl_init("vm", vm_page_writeback_sysctls);
#endif
}

/**
 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
 * @mapping: address space structure to write
 * @start: starting page index
 * @end: ending page index (inclusive)
 *
 * This function scans the page range from @start to @end (inclusive) and tags
 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
 * that write_cache_pages (or whoever calls this function) will then use
 * TOWRITE tag to identify pages eligible for writeback. This mechanism is
 * used to avoid livelocking of writeback by a process steadily creating new
 * dirty pages in the file (thus it is important for this function to be quick
 * so that it can tag pages faster than a dirtying process can create them).
 */
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	unsigned int tagged = 0;
	void *page;

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
		xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
		if (++tagged % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
}
EXPORT_SYMBOL(tag_pages_for_writeback);
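
/*
 * Usage sketch (mirrors write_cache_pages() below): for WB_SYNC_ALL or
 * tagged_writepages, the caller tags the whole range first and then walks
 * only PAGECACHE_TAG_TOWRITE, so pages dirtied after the tagging pass
 * cannot livelock the walk.
 */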

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them. If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * To avoid livelocks (when another process dirties new pages), we first tag
 * pages which should be written back with TOWRITE tag and only then start
 * writing them. For data-integrity sync we have to be careful so that we do
 * not miss some pages (e.g., because some other process has cleared TOWRITE
 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
 * by the process clearing the DIRTY tag (and submitting the page for IO).
 *
 * To avoid deadlocks between range_cyclic writeback and callers that hold
 * pages in PageWriteback to aggregate IO until write_cache_pages() returns,
 * we do not loop back to the start of the file. Doing so causes a page
 * lock/page writeback access order inversion - we should only ever lock
 * multiple pages in ascending page->index order, and looping back to the start
 * of the file violates that rule and causes deadlocks.
 *
 * Return: %0 on success, negative error code otherwise
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	int ret = 0;
	int done = 0;
	int error;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
		tag_pages_for_writeback(mapping, index, end);
		tag = PAGECACHE_TAG_TOWRITE;
	} else {
		tag = PAGECACHE_TAG_DIRTY;
	}
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			done_index = page->index;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
			error = (*writepage)(page, wbc, data);
			if (unlikely(error)) {
				/*
				 * Handle errors according to the type of
				 * writeback. There's no need to continue for
				 * background writeback. Just push done_index
				 * past this page so media errors won't choke
				 * writeout for the entire file. For integrity
				 * writeback, we must process the entire dirty
				 * set regardless of errors because the fs may
				 * still have state to clear for each page. In
				 * that case we continue processing and return
				 * the first error.
				 */
				if (error == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					error = 0;
				} else if (wbc->sync_mode != WB_SYNC_ALL) {
					ret = error;
					done_index = page->index + 1;
					done = 1;
					break;
				}
				if (!ret)
					ret = error;
			}

			/*
			 * We stop writing back only if we are not doing
			 * integrity sync. In case of integrity sync we have to
			 * keep going until we have written all the pages
			 * we tagged for writeback prior to entering this loop.
			 */
			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/*
	 * If we hit the last page and there is more work to be done: wrap
	 * the index back to the start of the file for the next
	 * time we are called.
	 */
	if (wbc->range_cyclic && !done)
		done_index = 0;
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * Return: %0 on success, negative error code otherwise
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
	blk_finish_plug(&plug);
	return ret;
}

EXPORT_SYMBOL(generic_writepages);

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;
	struct bdi_writeback *wb;

	if (wbc->nr_to_write <= 0)
		return 0;
	wb = inode_to_wb_wbc(mapping->host, wbc);
	wb_bandwidth_estimate_start(wb);
	while (1) {
		if (mapping->a_ops->writepages)
			ret = mapping->a_ops->writepages(mapping, wbc);
		else
			ret = generic_writepages(mapping, wbc);
		if ((ret != -ENOMEM) || (wbc->sync_mode != WB_SYNC_ALL))
			break;

		/*
		 * Lacking an allocation context or the locality or writeback
		 * state of any of the inode's pages, throttle based on
		 * writeback activity on the local node. It's as good a
		 * guess as any.
		 */
		reclaim_throttle(NODE_DATA(numa_node_id()),
			VMSCAN_THROTTLE_WRITEBACK);
	}
	/*
	 * Usually few pages are written by now from those we've just submitted
	 * but if there's constant writeback being submitted, this makes sure
	 * writeback bandwidth is updated once in a while.
	 */
	if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
				   BANDWIDTH_INTERVAL))
		wb_update_bandwidth(wb);
	return ret;
}

/**
 * folio_write_one - write out a single folio and wait on I/O.
 * @folio: The folio to write.
 *
 * The folio must be locked by the caller and will be unlocked upon return.
 *
 * Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this
 * function returns.
 *
 * Return: %0 on success, negative error code otherwise
 */
int folio_write_one(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = folio_nr_pages(folio),
	};

	BUG_ON(!folio_test_locked(folio));

	folio_wait_writeback(folio);

	if (folio_clear_dirty_for_io(folio)) {
		folio_get(folio);
		ret = mapping->a_ops->writepage(&folio->page, &wbc);
		if (ret == 0)
			folio_wait_writeback(folio);
		folio_put(folio);
	} else {
		folio_unlock(folio);
	}

	if (!ret)
		ret = filemap_check_errors(mapping);
	return ret;
}
EXPORT_SYMBOL(folio_write_one);

/*
 * For address_spaces which do not use buffers nor write back.
 */
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	if (!folio_test_dirty(folio))
		return !folio_test_set_dirty(folio);
	return false;
}
EXPORT_SYMBOL(noop_dirty_folio);

/*
 * Helper function for set_page_dirty family.
 *
 * Caller must hold lock_page_memcg().
 *
 * NOTE: This relies on being atomic wrt interrupts.
 */
static void folio_account_dirtied(struct folio *folio,
		struct address_space *mapping)
{
	struct inode *inode = mapping->host;

	trace_writeback_dirty_folio(folio, mapping);

	if (mapping_can_writeback(mapping)) {
		struct bdi_writeback *wb;
		long nr = folio_nr_pages(folio);

		inode_attach_wb(inode, &folio->page);
		wb = inode_to_wb(inode);

		__lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
		__zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
		__node_stat_mod_folio(folio, NR_DIRTIED, nr);
		wb_stat_mod(wb, WB_RECLAIMABLE, nr);
		wb_stat_mod(wb, WB_DIRTIED, nr);
		task_io_account_write(nr * PAGE_SIZE);
		current->nr_dirtied += nr;
		__this_cpu_add(bdp_ratelimits, nr);

		mem_cgroup_track_foreign_dirty(folio, wb);
	}
}

/*
 * Helper function for de-accounting a dirty page without writeback.
 *
 * Caller must hold lock_page_memcg().
 */
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
{
	long nr = folio_nr_pages(folio);

	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
	wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
	task_io_account_cancelled_write(nr * PAGE_SIZE);
}

/*
 * Mark the folio dirty, set the dirty tag in the page cache, and mark
 * the inode dirty.
 *
 * If warn is true, then emit a warning if the folio is not uptodate and has
 * not been truncated.
 *
 * The caller must hold lock_page_memcg(). Most callers have the folio
 * locked. A few have the folio blocked from truncation through other
 * means (eg zap_page_range() has it mapped and is holding the page table
 * lock). This can also be called from mark_buffer_dirty(), which I
 * cannot prove is always protected against truncate.
 */
void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
			     int warn)
{
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	if (folio->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
		folio_account_dirtied(folio, mapping);
		__xa_set_mark(&mapping->i_pages, folio_index(folio),
				PAGECACHE_TAG_DIRTY);
	}
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}

/**
 * filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads.
 * @mapping: Address space this folio belongs to.
 * @folio: Folio to be marked as dirty.
 *
 * Filesystems which do not use buffer heads should call this function
 * from their set_page_dirty address space operation. It ignores the
 * contents of folio_get_private(), so if the filesystem marks individual
 * blocks as dirty, the filesystem should handle that itself.
 *
 * This is also sometimes used by filesystems which use buffer_heads when
 * a single buffer is being dirtied: we want to set the folio dirty in
 * that case, but not all the buffers. This is a "bottom-up" dirtying,
 * whereas block_dirty_folio() is a "top-down" dirtying.
 *
 * The caller must ensure this doesn't race with truncation. Most will
 * simply hold the folio lock, but e.g. zap_pte_range() calls with the
 * folio mapped and the pte lock held, which also locks out truncation.
 */
bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	folio_memcg_lock(folio);
	if (folio_test_set_dirty(folio)) {
		folio_memcg_unlock(folio);
		return false;
	}

	__folio_mark_dirty(folio, mapping, !folio_test_private(folio));
	folio_memcg_unlock(folio);

	if (mapping->host) {
		/* !PageAnon && !swapper_space */
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
	return true;
}
EXPORT_SYMBOL(filemap_dirty_folio);
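
/*
 * Usage sketch (assumed, not taken from this file): a filesystem that does
 * not use buffer heads can wire this up as its dirty_folio operation:
 *
 *	static const struct address_space_operations foo_aops = {
 *		.dirty_folio	= filemap_dirty_folio,
 *		...
 *	};
 */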

/**
 * folio_account_redirty - Manually account for redirtying a page.
 * @folio: The folio which is being redirtied.
 *
 * Most filesystems should call folio_redirty_for_writepage() instead
 * of this function. If your filesystem is doing writeback outside the
 * context of a writeback_control(), it can call this when redirtying
 * a folio, to de-account the dirty counters (NR_DIRTIED, WB_DIRTIED,
 * tsk->nr_dirtied), so that they match the written counters (NR_WRITTEN,
 * WB_WRITTEN) in the long term. The mismatches will lead to systematic
 * errors in balanced_dirty_ratelimit and the dirty pages position control.
 */
void folio_account_redirty(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	if (mapping && mapping_can_writeback(mapping)) {
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
		struct wb_lock_cookie cookie = {};
		long nr = folio_nr_pages(folio);

		wb = unlocked_inode_to_wb_begin(inode, &cookie);
		current->nr_dirtied -= nr;
		node_stat_mod_folio(folio, NR_DIRTIED, -nr);
		wb_stat_mod(wb, WB_DIRTIED, -nr);
		unlocked_inode_to_wb_end(inode, &cookie);
	}
}
EXPORT_SYMBOL(folio_account_redirty);

/**
 * folio_redirty_for_writepage - Decline to write a dirty folio.
 * @wbc: The writeback control.
 * @folio: The folio.
 *
 * When a writepage implementation decides that it doesn't want to write
 * @folio for some reason, it should call this function, unlock @folio and
 * return 0.
 *
 * Return: True if we redirtied the folio. False if someone else dirtied
 * it first.
 */
bool folio_redirty_for_writepage(struct writeback_control *wbc,
				 struct folio *folio)
{
	bool ret;
	long nr = folio_nr_pages(folio);

	wbc->pages_skipped += nr;
	ret = filemap_dirty_folio(folio->mapping, folio);
	folio_account_redirty(folio);

	return ret;
}
EXPORT_SYMBOL(folio_redirty_for_writepage);

/**
 * folio_mark_dirty - Mark a folio as being modified.
 * @folio: The folio.
 *
 * The folio may not be truncated while this function is running.
 * Holding the folio lock is sufficient to prevent truncation, but some
 * callers cannot acquire a sleeping lock. These callers instead hold
 * the page table lock for a page table which contains at least one page
 * in this folio. Truncation will block on the page table lock as it
 * unmaps pages before removing the folio from its mapping.
 *
 * Return: True if the folio was newly dirtied, false if it was already dirty.
 */
bool folio_mark_dirty(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	if (likely(mapping)) {
		/*
		 * readahead/lru_deactivate_page could leave
		 * PG_readahead/PG_reclaim set due to a race with
		 * folio_end_writeback.
		 * For readahead, if the folio is written, the flags would be
		 * reset. So no problem.
		 * For lru_deactivate_page, if the folio is redirtied,
		 * the flag will be reset. So no problem. But if the
		 * folio is used by readahead it will confuse readahead
		 * and make it restart the size rampup process. That is
		 * a trivial problem.
		 */
		if (folio_test_reclaim(folio))
			folio_clear_reclaim(folio);
		return mapping->a_ops->dirty_folio(mapping, folio);
	}

	return noop_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL(folio_mark_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked. This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * This cancels just the dirty bit on the kernel page itself, it does NOT
 * actually remove dirty bits on any mmap's that may be around. It also
 * leaves the page tagged dirty, so any sync activity will still find it on
 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
 * look at the dirty bits in the VM.
 *
 * Doing this should *normally* only ever be done when a page is truncated,
 * and is not actually mapped anywhere at all. However, fs/buffer.c does
 * this when it notices that somebody has cleaned out all the buffers on a
 * page without actually doing it through the VM. Can you say "ext3 is
 * horribly ugly"? Thought you could.
 */
void __folio_cancel_dirty(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	if (mapping_can_writeback(mapping)) {
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
		struct wb_lock_cookie cookie = {};

		folio_memcg_lock(folio);
		wb = unlocked_inode_to_wb_begin(inode, &cookie);

		if (folio_test_clear_dirty(folio))
			folio_account_cleaned(folio, wb);

		unlocked_inode_to_wb_end(inode, &cookie);
		folio_memcg_unlock(folio);
	} else {
		folio_clear_dirty(folio);
	}
}
EXPORT_SYMBOL(__folio_cancel_dirty);

/*
 * Clear a folio's dirty flag, while caring for dirty memory accounting.
 * Returns true if the folio was previously dirty.
 *
 * This is for preparing to put the folio under writeout. We leave
 * the folio tagged as dirty in the xarray so that a concurrent
 * write-for-sync can discover it via a PAGECACHE_TAG_DIRTY walk.
 * The ->writepage implementation will run either folio_start_writeback()
 * or folio_mark_dirty(), at which stage we bring the folio's dirty flag
 * and xarray dirty tag back into sync.
 *
 * This incoherency between the folio's dirty flag and xarray tag is
 * unfortunate, but it only exists while the folio is locked.
 */
bool folio_clear_dirty_for_io(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);
	bool ret = false;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (mapping && mapping_can_writeback(mapping)) {
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
		struct wb_lock_cookie cookie = {};

		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole folio dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the folio again and return true to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "folio_mark_dirty(folio)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the folio "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (folio_mkclean(folio))
			folio_mark_dirty(folio);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the folio dirty
		 * at this point. We do this by having them hold the
		 * page lock while dirtying the folio, and folios are
		 * always locked coming in here, so we get the desired
		 * exclusion.
		 */
		wb = unlocked_inode_to_wb_begin(inode, &cookie);
		if (folio_test_clear_dirty(folio)) {
			long nr = folio_nr_pages(folio);
			lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
			zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
			wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
			ret = true;
		}
		unlocked_inode_to_wb_end(inode, &cookie);
		return ret;
	}
	return folio_test_clear_dirty(folio);
}
EXPORT_SYMBOL(folio_clear_dirty_for_io);
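
/*
 * Usage pattern (as in folio_write_one() above): writeback paths call
 * folio_clear_dirty_for_io() on a locked folio and submit I/O only when it
 * returns true; the ->writepage implementation then marks the folio under
 * writeback, bringing the dirty flag and xarray tag back into sync.
 */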

static void wb_inode_writeback_start(struct bdi_writeback *wb)
{
	atomic_inc(&wb->writeback_inodes);
}

static void wb_inode_writeback_end(struct bdi_writeback *wb)
{
	unsigned long flags;
	atomic_dec(&wb->writeback_inodes);
	/*
	 * Make sure estimate of writeback throughput gets updated after
	 * writeback completed. We delay the update by BANDWIDTH_INTERVAL
	 * (which is the interval other bandwidth updates use for batching) so
	 * that if multiple inodes end writeback at a similar time, they get
	 * batched into one bandwidth update.
	 */
	spin_lock_irqsave(&wb->work_lock, flags);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
	spin_unlock_irqrestore(&wb->work_lock, flags);
}

269ccca3 2992bool __folio_end_writeback(struct folio *folio)
1da177e4 2993{
269ccca3
MWO
2994 long nr = folio_nr_pages(folio);
2995 struct address_space *mapping = folio_mapping(folio);
2996 bool ret;
1da177e4 2997
269ccca3 2998 folio_memcg_lock(folio);
371a096e 2999 if (mapping && mapping_use_writeback_tags(mapping)) {
91018134
TH
3000 struct inode *inode = mapping->host;
3001 struct backing_dev_info *bdi = inode_to_bdi(inode);
1da177e4
LT
3002 unsigned long flags;
3003
b93b0163 3004 xa_lock_irqsave(&mapping->i_pages, flags);
269ccca3 3005 ret = folio_test_clear_writeback(folio);
69cb51d1 3006 if (ret) {
269ccca3 3007 __xa_clear_mark(&mapping->i_pages, folio_index(folio),
1da177e4 3008 PAGECACHE_TAG_WRITEBACK);
823423ef 3009 if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
91018134
TH
3010 struct bdi_writeback *wb = inode_to_wb(inode);
3011
269ccca3
MWO
3012 wb_stat_mod(wb, WB_WRITEBACK, -nr);
3013 __wb_writeout_add(wb, nr);
633a2abb
JK
3014 if (!mapping_tagged(mapping,
3015 PAGECACHE_TAG_WRITEBACK))
3016 wb_inode_writeback_end(wb);
04fbfdc1 3017 }
69cb51d1 3018 }
6c60d2b5
DC
3019
3020 if (mapping->host && !mapping_tagged(mapping,
3021 PAGECACHE_TAG_WRITEBACK))
3022 sb_clear_inode_writeback(mapping->host);
3023
b93b0163 3024 xa_unlock_irqrestore(&mapping->i_pages, flags);
1da177e4 3025 } else {
269ccca3 3026 ret = folio_test_clear_writeback(folio);
1da177e4 3027 }
99b12e3d 3028 if (ret) {
269ccca3
MWO
3029 lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
3030 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
3031 node_stat_mod_folio(folio, NR_WRITTEN, nr);
99b12e3d 3032 }
269ccca3 3033 folio_memcg_unlock(folio);
1da177e4
LT
3034 return ret;
3035}
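
/*
 * Callers do not use __folio_end_writeback() directly: they go through
 * folio_end_writeback() in mm/filemap.c, which wraps this function and
 * also wakes anyone sleeping in folio_wait_writeback().  Roughly (a
 * simplified sketch, omitting the PG_reclaim handling and reference
 * counting of the real implementation):
 *
 *	void folio_end_writeback(struct folio *folio)
 *	{
 *		if (!__folio_end_writeback(folio))
 *			BUG();
 *		smp_mb__after_atomic();
 *		folio_wake(folio, PG_writeback);
 *	}
 */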

bool __folio_start_writeback(struct folio *folio, bool keep_write)
{
	long nr = folio_nr_pages(folio);
	struct address_space *mapping = folio_mapping(folio);
	bool ret;
	int access_ret;

	folio_memcg_lock(folio);
	if (mapping && mapping_use_writeback_tags(mapping)) {
		XA_STATE(xas, &mapping->i_pages, folio_index(folio));
		struct inode *inode = mapping->host;
		struct backing_dev_info *bdi = inode_to_bdi(inode);
		unsigned long flags;

		xas_lock_irqsave(&xas, flags);
		xas_load(&xas);
		ret = folio_test_set_writeback(folio);
		if (!ret) {
			bool on_wblist;

			on_wblist = mapping_tagged(mapping,
						   PAGECACHE_TAG_WRITEBACK);

			xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
			if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
				struct bdi_writeback *wb = inode_to_wb(inode);

				wb_stat_mod(wb, WB_WRITEBACK, nr);
				if (!on_wblist)
					wb_inode_writeback_start(wb);
			}

			/*
			 * We can come through here when swapping
			 * anonymous folios, so we don't necessarily
			 * have an inode to track for sync.
			 */
			if (mapping->host && !on_wblist)
				sb_mark_inode_writeback(mapping->host);
		}
		if (!folio_test_dirty(folio))
			xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
		if (!keep_write)
			xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
		xas_unlock_irqrestore(&xas, flags);
	} else {
		ret = folio_test_set_writeback(folio);
	}
	if (!ret) {
		lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
		zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
	}
	folio_memcg_unlock(folio);
	access_ret = arch_make_folio_accessible(folio);
	/*
	 * If writeback has been triggered on a page that cannot be made
	 * accessible, it is too late to recover here.
	 */
	VM_BUG_ON_FOLIO(access_ret != 0, folio);

	return ret;
}
EXPORT_SYMBOL(__folio_start_writeback);
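
/*
 * Most callers reach __folio_start_writeback() through the inline
 * wrappers (in include/linux/page-flags.h at the time of writing),
 * which pin down the keep_write argument:
 *
 *	folio_start_writeback(folio);             keep_write == false
 *	folio_start_writeback_keepwrite(folio);   keep_write == true
 *
 * keep_write preserves the PAGECACHE_TAG_TOWRITE tag, so a tagged
 * write_cache_pages() sweep will still revisit the folio.
 */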

/**
 * folio_wait_writeback - Wait for a folio to finish writeback.
 * @folio: The folio to wait for.
 *
 * If the folio is currently being written back to storage, wait for the
 * I/O to complete.
 *
 * Context: Sleeps. Must be called in process context and with
 * no spinlocks held. Caller should hold a reference on the folio.
 * If the folio is not locked, writeback may start again after this
 * writeback has finished.
 */
void folio_wait_writeback(struct folio *folio)
{
	while (folio_test_writeback(folio)) {
		trace_folio_wait_writeback(folio, folio_mapping(folio));
		folio_wait_bit(folio, PG_writeback);
	}
}
EXPORT_SYMBOL_GPL(folio_wait_writeback);
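
/*
 * An illustrative sketch, not part of this file: the usual
 * lock-then-wait pattern.  Because writeback is started under the folio
 * lock, taking the lock before waiting guarantees that no new writeback
 * can begin once the wait returns, giving the caller a quiescent window.
 */
static void my_fs_modify_folio(struct folio *folio)
{
	folio_lock(folio);
	folio_wait_writeback(folio);
	/* No I/O in flight and none can start until folio_unlock(). */
	/* ... invalidate or rewrite the folio contents here ... */
	folio_unlock(folio);
}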

/**
 * folio_wait_writeback_killable - Wait for a folio to finish writeback.
 * @folio: The folio to wait for.
 *
 * If the folio is currently being written back to storage, wait for the
 * I/O to complete or a fatal signal to arrive.
 *
 * Context: Sleeps. Must be called in process context and with
 * no spinlocks held. Caller should hold a reference on the folio.
 * If the folio is not locked, writeback may start again after this
 * writeback has finished.
 * Return: 0 on success, -EINTR if we get a fatal signal while waiting.
 */
int folio_wait_writeback_killable(struct folio *folio)
{
	while (folio_test_writeback(folio)) {
		trace_folio_wait_writeback(folio, folio_mapping(folio));
		if (folio_wait_bit_killable(folio, PG_writeback))
			return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
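
/*
 * An illustrative sketch, not part of this file: the killable variant
 * suits paths such as a filesystem's ->page_mkwrite(), where a task
 * stuck behind slow storage should still respond to a fatal signal.
 * The VM_FAULT_RETRY return is a simplification; what a real fault
 * handler returns on interruption is up to the filesystem.
 */
static vm_fault_t my_fs_mkwrite_wait(struct folio *folio)
{
	folio_lock(folio);
	if (folio_wait_writeback_killable(folio)) {
		folio_unlock(folio);
		return VM_FAULT_RETRY;	/* fatal signal arrived */
	}
	/* ... dirty the folio while it is still locked ... */
	folio_unlock(folio);
	return 0;
}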

/**
 * folio_wait_stable() - wait for writeback to finish, if necessary.
 * @folio: The folio to wait on.
 *
 * This function determines if the given folio is related to a backing
 * device that requires folio contents to be held stable during writeback.
 * If so, it waits for any pending writeback to complete.
 *
 * Context: Sleeps. Must be called in process context and with
 * no spinlocks held. Caller should hold a reference on the folio.
 * If the folio is not locked, writeback may start again after this
 * writeback has finished.
 */
void folio_wait_stable(struct folio *folio)
{
	if (folio_inode(folio)->i_sb->s_iflags & SB_I_STABLE_WRITES)
		folio_wait_writeback(folio);
}
EXPORT_SYMBOL_GPL(folio_wait_stable);
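
/*
 * An illustrative sketch, not part of this file: a write path for a
 * backing device that needs stable pages (for example one computing
 * checksums or parity while the data is under DMA).  folio_wait_stable()
 * is a no-op unless the superblock sets SB_I_STABLE_WRITES, so callers
 * can use it unconditionally.
 */
static void my_fs_begin_overwrite(struct folio *folio)
{
	folio_lock(folio);
	folio_wait_stable(folio);
	/* ... copy in new data and mark the folio dirty ... */
	folio_unlock(folio);
}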