// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>	/* __set_page_dirty_buffers */
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
#include <linux/mm_inline.h>
#include <trace/events/writeback.h>

#include "internal.h"

/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE		max(HZ/5, 1)

/*
 * Try to keep balance_dirty_pages() call intervals higher than this many pages
 * by raising the pause time to max_pause when the interval falls below it.
 */
#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))
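
/*
 * Worked example (illustrative): with 4K pages, PAGE_SHIFT == 12, so
 * DIRTY_POLL_THRESH = 128 >> (12 - 10) = 32 pages, i.e. 128KB of newly
 * dirtied data between two balance_dirty_pages() polls.
 */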

/*
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)

#define RATELIMIT_CALC_SHIFT	10

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

EXPORT_SYMBOL_GPL(dirty_writeback_interval);

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

struct wb_domain global_wb_domain;

/* consolidated parameters for balance_dirty_pages() and its subroutines */
struct dirty_throttle_control {
#ifdef CONFIG_CGROUP_WRITEBACK
	struct wb_domain	*dom;
	struct dirty_throttle_control *gdtc;	/* only set in memcg dtc's */
#endif
	struct bdi_writeback	*wb;
	struct fprop_local_percpu *wb_completions;

	unsigned long		avail;		/* dirtyable */
	unsigned long		dirty;		/* file_dirty + write + nfs */
	unsigned long		thresh;		/* dirty threshold */
	unsigned long		bg_thresh;	/* dirty background threshold */

	unsigned long		wb_dirty;	/* per-wb counterparts */
	unsigned long		wb_thresh;
	unsigned long		wb_bg_thresh;

	unsigned long		pos_ratio;
};

/*
 * Length of period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the slower fractions will
 * reflect changes in current writeout rate.
 */
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)

#ifdef CONFIG_CGROUP_WRITEBACK

#define GDTC_INIT(__wb)		.wb = (__wb),				\
				.dom = &global_wb_domain,		\
				.wb_completions = &(__wb)->completions

#define GDTC_INIT_NO_WB		.dom = &global_wb_domain

#define MDTC_INIT(__wb, __gdtc)	.wb = (__wb),				\
				.dom = mem_cgroup_wb_domain(__wb),	\
				.wb_completions = &(__wb)->memcg_completions, \
				.gdtc = __gdtc

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return mdtc->gdtc;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return &wb->memcg_completions;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	unsigned long this_bw = wb->avg_write_bandwidth;
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
	unsigned long long min = wb->bdi->min_ratio;
	unsigned long long max = wb->bdi->max_ratio;

	/*
	 * @wb may already be clean by the time control reaches here and
	 * the total may not include its bw.
	 */
	if (this_bw < tot_bw) {
		if (min) {
			min *= this_bw;
			min = div64_ul(min, tot_bw);
		}
		if (max < 100) {
			max *= this_bw;
			max = div64_ul(max, tot_bw);
		}
	}

	*minp = min;
	*maxp = max;
}
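
/*
 * Example (illustrative, numbers assumed): a bdi with min_ratio=10 and
 * max_ratio=50 whose wb currently contributes this_bw=25 out of tot_bw=100
 * (in avg_write_bandwidth units) has both ratios scaled by 25/100 above,
 * yielding *minp = 2 and *maxp = 12 after the integer divisions.
 */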

#else	/* CONFIG_CGROUP_WRITEBACK */

#define GDTC_INIT(__wb)		.wb = (__wb),				\
				.wb_completions = &(__wb)->completions
#define GDTC_INIT_NO_WB
#define MDTC_INIT(__wb, __gdtc)

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return false;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return &global_wb_domain;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return NULL;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return NULL;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	*minp = wb->bdi->min_ratio;
	*maxp = wb->bdi->max_ratio;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * In a memory zone, there is a certain amount of pages we consider
 * available for the page cache, which is essentially the number of
 * free and reclaimable pages, minus some zone reserves to protect
 * lowmem and the ability to uphold the zone's watermarks without
 * requiring writeback.
 *
 * This number of dirtyable pages is the base value to which the
 * user-configurable dirty ratio is applied to arrive at the effective
 * number of pages that are allowed to be actually dirtied, either per
 * individual zone or globally by using the sum of dirtyable pages over
 * all zones.
 *
 * Because the user is allowed to specify the dirty limit globally as
 * absolute number of bytes, calculating the per-zone dirty limit can
 * require translating the configured limit into a percentage of
 * global dirtyable memory first.
 */

/**
 * node_dirtyable_memory - number of dirtyable pages in a node
 * @pgdat: the node
 *
 * Return: the node's number of pages potentially available for dirty
 * page cache. This is the base value for the per-node dirty limits.
 */
static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
{
	unsigned long nr_pages = 0;
	int z;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);

	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);

	return nr_pages;
}

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;
	int i;

	for_each_node_state(node, N_HIGH_MEMORY) {
		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
			struct zone *z;
			unsigned long nr_pages;

			if (!is_highmem_idx(i))
				continue;

			z = &NODE_DATA(node)->node_zones[i];
			if (!populated_zone(z))
				continue;

			nr_pages = zone_page_state(z, NR_FREE_PAGES);
			/* watch for underflows */
			nr_pages -= min(nr_pages, high_wmark_pages(z));
			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
			x += nr_pages;
		}
	}

	/*
	 * Unreclaimable memory (kernel memory or anonymous memory
	 * without swap) can bring down the dirtyable pages below
	 * the zone's dirty balance reserve and the above calculation
	 * will underflow.  However we still want to add in nodes
	 * which are below threshold (negative values) to get a more
	 * accurate calculation but make sure that the total never
	 * underflows.
	 */
	if ((long)x < 0)
		x = 0;

	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * global_dirtyable_memory - number of globally dirtyable pages
 *
 * Return: the global number of pages potentially available for dirty
 * page cache. This is the base value for the global dirty limits.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_zone_page_state(NR_FREE_PAGES);
	/*
	 * Pages reserved for the kernel should not be considered
	 * dirtyable, to prevent a situation where reclaim has to
	 * clean pages in order to balance the zones.
	 */
	x -= min(x, totalreserve_pages);

	x += global_node_page_state(NR_INACTIVE_FILE);
	x += global_node_page_state(NR_ACTIVE_FILE);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

/**
 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
 * @dtc: dirty_throttle_control of interest
 *
 * Calculate @dtc->thresh and ->bg_thresh considering
 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}.  The caller
 * must ensure that @dtc->avail is set before calling this function.  The
 * dirty limits will be lifted by 1/4 for real-time tasks.
 */
static void domain_dirty_limits(struct dirty_throttle_control *dtc)
{
	const unsigned long available_memory = dtc->avail;
	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
	unsigned long bytes = vm_dirty_bytes;
	unsigned long bg_bytes = dirty_background_bytes;
	/* convert ratios to per-PAGE_SIZE for higher precision */
	unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
	unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
	unsigned long thresh;
	unsigned long bg_thresh;
	struct task_struct *tsk;

	/* gdtc is !NULL iff @dtc is for memcg domain */
	if (gdtc) {
		unsigned long global_avail = gdtc->avail;

		/*
		 * The byte settings can't be applied directly to memcg
		 * domains.  Convert them to ratios by scaling against
		 * globally available memory.  As the ratios are in
		 * per-PAGE_SIZE, they can be obtained by dividing bytes by
		 * number of pages.
		 */
		if (bytes)
			ratio = min(DIV_ROUND_UP(bytes, global_avail),
				    PAGE_SIZE);
		if (bg_bytes)
			bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
				       PAGE_SIZE);
		bytes = bg_bytes = 0;
	}
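
	/*
	 * Example of the per-PAGE_SIZE ratio math below (illustrative,
	 * assuming 4K pages): vm_dirty_ratio=20 gives
	 * ratio = 20 * 4096 / 100 = 819, so thresh becomes
	 * 819 * available_memory / 4096, i.e. ~19.99% of the dirtyable
	 * pages; working in 1/PAGE_SIZE units keeps more precision than
	 * whole percentages would.
	 */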

	if (bytes)
		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
	else
		thresh = (ratio * available_memory) / PAGE_SIZE;

	if (bg_bytes)
		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
	else
		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;

	tsk = current;
	if (rt_task(tsk)) {
		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
	}
	/*
	 * Dirty throttling logic assumes the limits in page units fit into
	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
	 */
	if (thresh > UINT_MAX)
		thresh = UINT_MAX;
	/* This makes sure bg_thresh is within 32-bits as well */
	if (bg_thresh >= thresh)
		bg_thresh = thresh / 2;
	dtc->thresh = thresh;
	dtc->bg_thresh = bg_thresh;

	/* we should eventually report the domain in the TP */
	if (!gdtc)
		trace_global_dirty_state(bg_thresh, thresh);
}

/**
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 * @pbackground: out parameter for bg_thresh
 * @pdirty: out parameter for thresh
 *
 * Calculate bg_thresh and thresh for global_wb_domain.  See
 * domain_dirty_limits() for details.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };

	gdtc.avail = global_dirtyable_memory();
	domain_dirty_limits(&gdtc);

	*pbackground = gdtc.bg_thresh;
	*pdirty = gdtc.thresh;
}

/**
 * node_dirty_limit - maximum number of dirty pages allowed in a node
 * @pgdat: the node
 *
 * Return: the maximum number of dirty pages allowed in a node, based
 * on the node's dirtyable memory.
 */
static unsigned long node_dirty_limit(struct pglist_data *pgdat)
{
	unsigned long node_memory = node_dirtyable_memory(pgdat);
	struct task_struct *tsk = current;
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			node_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * node_memory / 100;

	if (rt_task(tsk))
		dirty += dirty / 4;

	/*
	 * Dirty throttling logic assumes the limits in page units fit into
	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
	 */
	return min_t(unsigned long, dirty, UINT_MAX);
}

/**
 * node_dirty_ok - tells whether a node is within its dirty limits
 * @pgdat: the node to check
 *
 * Return: %true when the dirty pages in @pgdat are within the node's
 * dirty limit, %false if the limit is exceeded.
 */
bool node_dirty_ok(struct pglist_data *pgdat)
{
	unsigned long limit = node_dirty_limit(pgdat);
	unsigned long nr_pages = 0;

	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
	nr_pages += node_page_state(pgdat, NR_WRITEBACK);

	return nr_pages <= limit;
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;
	unsigned long old_bytes = dirty_background_bytes;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write) {
		if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
								UINT_MAX) {
			dirty_background_bytes = old_bytes;
			return -ERANGE;
		}
		dirty_background_ratio = 0;
	}
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		writeback_set_ratelimit();
		vm_dirty_bytes = 0;
	}
	return ret;
}

int dirty_bytes_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
			vm_dirty_bytes = old_bytes;
			return -ERANGE;
		}
		writeback_set_ratelimit();
		vm_dirty_ratio = 0;
	}
	return ret;
}

static unsigned long wp_next_time(unsigned long cur_time)
{
	cur_time += VM_COMPLETIONS_PERIOD_LEN;
	/* 0 has a special meaning... */
	if (!cur_time)
		return 1;
	return cur_time;
}

static void wb_domain_writeout_inc(struct wb_domain *dom,
				   struct fprop_local_percpu *completions,
				   unsigned int max_prop_frac)
{
	__fprop_inc_percpu_max(&dom->completions, completions,
			       max_prop_frac);
	/* First event after period switching was turned off? */
	if (unlikely(!dom->period_time)) {
		/*
		 * We can race with other __bdi_writeout_inc calls here but
		 * it does not cause any harm since the resulting time when
		 * timer will fire and what is in writeout_period_time will be
		 * roughly the same.
		 */
		dom->period_time = wp_next_time(jiffies);
		mod_timer(&dom->period_timer, dom->period_time);
	}
}

/*
 * Increment @wb's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __wb_writeout_inc(struct bdi_writeback *wb)
{
	struct wb_domain *cgdom;

	inc_wb_stat(wb, WB_WRITTEN);
	wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
			       wb->bdi->max_prop_frac);

	cgdom = mem_cgroup_wb_domain(wb);
	if (cgdom)
		wb_domain_writeout_inc(cgdom, wb_memcg_completions(wb),
				       wb->bdi->max_prop_frac);
}

void wb_writeout_inc(struct bdi_writeback *wb)
{
	unsigned long flags;

	local_irq_save(flags);
	__wb_writeout_inc(wb);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(wb_writeout_inc);

/*
 * On an idle system, we can be called long after we scheduled because we use
 * deferred timers, so count the missed periods.
 */
static void writeout_period(struct timer_list *t)
{
	struct wb_domain *dom = from_timer(dom, t, period_timer);
	int miss_periods = (jiffies - dom->period_time) /
						 VM_COMPLETIONS_PERIOD_LEN;

	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
		dom->period_time = wp_next_time(dom->period_time +
				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
		mod_timer(&dom->period_timer, dom->period_time);
	} else {
		/*
		 * Aging has zeroed all fractions. Stop wasting CPU on period
		 * updates.
		 */
		dom->period_time = 0;
	}
}

int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
{
	memset(dom, 0, sizeof(*dom));

	spin_lock_init(&dom->lock);

	timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);

	dom->dirty_limit_tstamp = jiffies;

	return fprop_global_init(&dom->completions, gfp);
}

#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom)
{
	del_timer_sync(&dom->period_timer);
	fprop_global_destroy(&dom->completions);
}
#endif

/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, can not
 * exceed 100%.
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);

static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}
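
/*
 * Example (illustrative): with thresh=200 and bg_thresh=100 pages, the
 * freerun ceiling is 150 pages; below that, dirtiers run entirely
 * unthrottled and balance_dirty_pages() takes its early-exit path.
 */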

static unsigned long hard_dirty_limit(struct wb_domain *dom,
				      unsigned long thresh)
{
	return max(thresh, dom->dirty_limit);
}

/*
 * Memory which can be further allocated to a memcg domain is capped by
 * system-wide clean memory excluding the amount being used in the domain.
 */
static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
			    unsigned long filepages, unsigned long headroom)
{
	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
	unsigned long clean = filepages - min(filepages, mdtc->dirty);
	unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
	unsigned long other_clean = global_clean - min(global_clean, clean);

	mdtc->avail = filepages + min(headroom, other_clean);
}

/**
 * __wb_calc_thresh - @wb's share of dirty throttling threshold
 * @dtc: dirty_throttle_context of interest
 *
 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 * when sleeping max_pause per page is not enough to keep the dirty pages under
 * control. For example, when the device is completely stalled due to some error
 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 * In other normal situations, it acts more gently by throttling the tasks more
 * (rather than completely blocking them) when the wb dirty pages go high.
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The wb's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 *
 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty and PG_writeback pages.
 */
static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	u64 wb_thresh;
	unsigned long numerator, denominator;
	unsigned long wb_min_ratio, wb_max_ratio;

	/*
	 * Calculate this BDI's share of the thresh ratio.
	 */
	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
			      &numerator, &denominator);

	wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
	wb_thresh *= numerator;
	wb_thresh = div64_ul(wb_thresh, denominator);

	wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);

	wb_thresh += (thresh * wb_min_ratio) / 100;
	if (wb_thresh > (thresh * wb_max_ratio) / 100)
		wb_thresh = thresh * wb_max_ratio / 100;

	return wb_thresh;
}
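
/*
 * Example (illustrative): with thresh=1000 pages, bdi_min_ratio=0 and a wb
 * that performed 1/4 of the recent writeout completions
 * (numerator/denominator == 1/4), the raw share above is 250 pages; it is
 * then raised by this wb's min_ratio share and capped at its max_ratio
 * share, both as reported by wb_min_max_ratio().
 */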

unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
					       .thresh = thresh };
	return __wb_calc_thresh(&gdtc);
}

/*
 *                           setpoint - dirty 3
 *        f(dirty) := 1.0 + (----------------)
 *                           limit - setpoint
 *
 * it's a 3rd order polynomial that is subject to
 *
 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 * (2) f(setpoint) = 1.0 => the balance point
 * (3) f(limit)    = 0   => the hard limit
 * (4) df/dx      <= 0	 => negative feedback control
 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 *     => fast response on large errors; small oscillation near setpoint
 */
static long long pos_ratio_polynom(unsigned long setpoint,
				   unsigned long dirty,
				   unsigned long limit)
{
	long long pos_ratio;
	long x;

	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
		      (limit - setpoint) | 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
}
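
/*
 * Sanity check of the fixed-point behaviour (illustrative): with
 * RATELIMIT_CALC_SHIFT == 10, "1.0" is 1024. At dirty == setpoint, x == 0
 * and the function returns 1024; at dirty == limit, x == -1024, the cubed
 * term is -1024 and the result is 0; at dirty == freerun, x == +1024 and
 * the result is 2048 ("2.0"), matching properties (1)-(3) above.
 */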

/*
 * Dirty position control.
 *
 * (o) global/bdi setpoints
 *
 * We want the dirty pages be balanced around the global/wb setpoints.
 * When the number of dirty pages is higher/lower than the setpoint, the
 * dirty position control ratio (and hence task dirty ratelimit) will be
 * decreased/increased to bring the dirty pages back to the setpoint.
 *
 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 *
 *     if (dirty < setpoint) scale up   pos_ratio
 *     if (dirty > setpoint) scale down pos_ratio
 *
 *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
 *     if (wb_dirty > wb_setpoint) scale down pos_ratio
 *
 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 *
 * (o) global control line
 *
 *     ^ pos_ratio
 *     |
 *     |            |<===== global dirty control scope ======>|
 * 2.0 .............*
 *     |            .*
 *     |            . *
 *     |            .   *
 *     |            .     *
 *     |            .        *
 *     |            .            *
 * 1.0 ................................*
 *     |            .                  .     *
 *     |            .                  .          *
 *     |            .                  .              *
 *     |            .                  .                 *
 *     |            .                  .                    *
 *   0 +------------.------------------.----------------------*------------->
 *           freerun^          setpoint^                 limit^   dirty pages
 *
 * (o) wb control line
 *
 *     ^ pos_ratio
 *     |
 *     |            *
 *     |              *
 *     |                *
 *     |                  *
 *     |                    * |<=========== span ============>|
 * 1.0 .......................*
 *     |                      . *
 *     |                      .    *
 *     |                      .      *
 *     |                      .        *
 *     |                      .          *
 *     |                      .            *
 *     |                      .              *
 *     |                      .                *
 *     |                      .                  *
 *     |                      .                    *
 *     |                      .                      *
 * 1/4 ...............................................* * * * * * * * * * * *
 *     |                      .                         .
 *     |                      .                           .
 *     |                      .                             .
 *   0 +----------------------.-------------------------------.------------->
 *                wb_setpoint^                    x_intercept^
 *
 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
 * be smoothly throttled down to normal if it starts high in situations like
 * - start writing to a slow SD card and a fast disk at the same time. The SD
 *   card's wb_dirty may rush to many times higher than wb_setpoint.
 * - the wb dirty thresh drops quickly due to change of JBOD workload
 */
static void wb_position_ratio(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long wb_thresh = dtc->wb_thresh;
	unsigned long x_intercept;
	unsigned long setpoint;		/* dirty pages' target balance point */
	unsigned long wb_setpoint;
	unsigned long span;
	long long pos_ratio;		/* for scaling up/down the rate limit */
	long x;

	dtc->pos_ratio = 0;

	if (unlikely(dtc->dirty >= limit))
		return;

	/*
	 * global setpoint
	 *
	 * See comment for pos_ratio_polynom().
	 */
	setpoint = (freerun + limit) / 2;
	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);

	/*
	 * The strictlimit feature is a tool preventing mistrusted filesystems
	 * from growing a large number of dirty pages before throttling. For
	 * such filesystems balance_dirty_pages always checks wb counters
	 * against wb limits, even if the global "nr_dirty" is under "freerun".
	 * This is especially important for fuse which sets bdi->max_ratio to
	 * 1% by default. Without strictlimit feature, fuse writeback may
	 * consume arbitrary amount of RAM because it is accounted in
	 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
	 *
	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
	 * two values: wb_dirty and wb_thresh. Let's consider an example:
	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
	 * limits are set by default to 10% and 20% (background and throttle).
	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
	 * about ~6K pages (as the average of background and throttle wb
	 * limits). The 3rd order polynomial will provide positive feedback if
	 * wb_dirty is under wb_setpoint and vice versa.
	 *
	 * Note, that we cannot use global counters in these calculations
	 * because we want to throttle process writing to a strictlimit wb
	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
	 * in the example above).
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		long long wb_pos_ratio;

		if (dtc->wb_dirty < 8) {
			dtc->pos_ratio = min_t(long long, pos_ratio * 2,
					       2 << RATELIMIT_CALC_SHIFT);
			return;
		}

		if (dtc->wb_dirty >= wb_thresh)
			return;

		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
						    dtc->wb_bg_thresh);

		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
			return;

		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
						 wb_thresh);

		/*
		 * Typically, for strictlimit case, wb_setpoint << setpoint
		 * and pos_ratio >> wb_pos_ratio. In other words, the global
		 * state ("dirty") is not the limiting factor and we have to
		 * make the decision based on wb counters. But there is an
		 * important case when global pos_ratio should get precedence:
		 * global limits are exceeded (e.g. due to activities on other
		 * wb's) while given strictlimit wb is below limit.
		 *
		 * "pos_ratio * wb_pos_ratio" would work for the case above,
		 * but it would look too non-natural for the case of all
		 * activity in the system coming from a single strictlimit wb
		 * with bdi->max_ratio == 100%.
		 *
		 * Note that min() below somewhat changes the dynamics of the
		 * control system. Normally, pos_ratio value can be well over 3
		 * (when globally we are at freerun and wb is well below wb
		 * setpoint). Now the maximum pos_ratio in the same situation
		 * is 2. We might want to tweak this if we observe the control
		 * system is too slow to adapt.
		 */
		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
		return;
	}

	/*
	 * We have computed basic pos_ratio above based on global situation. If
	 * the wb is over/under its share of dirty pages, we want to scale
	 * pos_ratio further down/up. That is done by the following mechanism.
	 */

	/*
	 * wb setpoint
	 *
	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
	 *
	 *                        x_intercept - wb_dirty
	 *                     := --------------------------
	 *                        x_intercept - wb_setpoint
	 *
	 * The main wb control line is a linear function that is subject to
	 *
	 * (1) f(wb_setpoint) = 1.0
	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
	 *
	 * For single wb case, the dirty pages are observed to fluctuate
	 * regularly within range
	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
	 * for various filesystems, where (2) can yield in a reasonable 12.5%
	 * fluctuation range for pos_ratio.
	 *
	 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
	 * own size, so move the slope over accordingly and choose a slope that
	 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
	 */
	if (unlikely(wb_thresh > dtc->thresh))
		wb_thresh = dtc->thresh;
	/*
	 * It's very possible that wb_thresh is close to 0 not because the
	 * device is slow, but that it has remained inactive for a long time.
	 * Honour such devices a reasonably good (hopefully IO efficient)
	 * threshold, so that the occasional writes won't be blocked and active
	 * writes can rampup the threshold quickly.
	 */
	wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
	/*
	 * scale global setpoint to wb's:
	 *	wb_setpoint = setpoint * wb_thresh / thresh
	 */
	x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
	wb_setpoint = setpoint * (u64)x >> 16;
	/*
	 * Use span=(8*write_bw) in single wb case as indicated by
	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
	 *
	 *        wb_thresh                    thresh - wb_thresh
	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
	 *          thresh                          thresh
	 */
	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
	x_intercept = wb_setpoint + span;

	if (dtc->wb_dirty < x_intercept - span / 4) {
		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
				      (x_intercept - wb_setpoint) | 1);
	} else
		pos_ratio /= 4;

	/*
	 * wb reserve area, safeguard against dirty pool underrun and disk idle
	 * It may push the desired control point of global dirty pages higher
	 * than setpoint.
	 */
	x_intercept = wb_thresh / 2;
	if (dtc->wb_dirty < x_intercept) {
		if (dtc->wb_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept,
					    dtc->wb_dirty);
		else
			pos_ratio *= 8;
	}

	dtc->pos_ratio = pos_ratio;
}

static void wb_update_write_bandwidth(struct bdi_writeback *wb,
				      unsigned long elapsed,
				      unsigned long written)
{
	const unsigned long period = roundup_pow_of_two(3 * HZ);
	unsigned long avg = wb->avg_write_bandwidth;
	unsigned long old = wb->write_bandwidth;
	u64 bw;

	/*
	 * bw = written * HZ / elapsed
	 *
	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
	 * write_bandwidth = ---------------------------------------------------
	 *                                          period
	 *
	 * @written may have decreased due to account_page_redirty().
	 * Avoid underflowing @bw calculation.
	 */
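	/*
	 * Plugging in numbers (illustrative, assuming HZ=1000):
	 * period = roundup_pow_of_two(3000) = 4096. If 1000 pages were
	 * written in elapsed=200 jiffies, the instantaneous term below is
	 * 1000 * 1000 = 1000000, the old bandwidth is weighted by
	 * (4096 - 200), and the sum is divided by 4096 via the shift.
	 */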
	bw = written - min(written, wb->written_stamp);
	bw *= HZ;
	if (unlikely(elapsed > period)) {
		bw = div64_ul(bw, elapsed);
		avg = bw;
		goto out;
	}
	bw += (u64)wb->write_bandwidth * (period - elapsed);
	bw >>= ilog2(period);

	/*
	 * one more level of smoothing, for filtering out sudden spikes
	 */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;

	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

out:
	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
	avg = max(avg, 1LU);
	if (wb_has_dirty_io(wb)) {
		long delta = avg - wb->avg_write_bandwidth;
		WARN_ON_ONCE(atomic_long_add_return(delta,
					&wb->bdi->tot_write_bandwidth) <= 0);
	}
	wb->write_bandwidth = bw;
	wb->avg_write_bandwidth = avg;
}

static void update_dirty_limit(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	unsigned long limit = dom->dirty_limit;

	/*
	 * Follow up in one step.
	 */
	if (limit < thresh) {
		limit = thresh;
		goto update;
	}

	/*
	 * Follow down slowly. Use the higher one as the target, because thresh
	 * may drop below dirty. This is exactly the reason to introduce
	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
	 */
	thresh = max(thresh, dtc->dirty);
	if (limit > thresh) {
		limit -= (limit - thresh) >> 5;
		goto update;
	}
	return;
update:
	dom->dirty_limit = limit;
}
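
/*
 * Example of the "follow down slowly" branch (illustrative): with
 * limit=1200 and max(thresh, dirty)=1000, each update subtracts
 * (1200 - 1000) >> 5 = 6 pages, so the domain limit decays gradually
 * rather than dropping below the current dirty count.
 */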

static void domain_update_bandwidth(struct dirty_throttle_control *dtc,
				    unsigned long now)
{
	struct wb_domain *dom = dtc_dom(dtc);

	/*
	 * check locklessly first to optimize away locking for the most time
	 */
	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
		return;

	spin_lock(&dom->lock);
	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
		update_dirty_limit(dtc);
		dom->dirty_limit_tstamp = now;
	}
	spin_unlock(&dom->lock);
}

/*
 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
 *
 * Normal wb tasks will be curbed at or below it in long term.
 * Obviously it should be around (write_bw / N) when there are N dd tasks.
 */
static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
				      unsigned long dirtied,
				      unsigned long elapsed)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long dirty = dtc->dirty;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long step;
	unsigned long x;
	unsigned long shift;

	/*
	 * The dirty rate will match the writeout rate in long term, except
	 * when dirty pages are truncated by userspace or re-dirtied by FS.
	 */
	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;

	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
	task_ratelimit = (u64)dirty_ratelimit *
					dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */

	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
	 * formula will yield the balanced rate limit (write_bw / N).
	 *
	 * Note that the expanded form is not a pure rate feedback:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
	 * but also takes pos_ratio into account:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
	 *
	 * (1) is not realistic because pos_ratio also takes part in balancing
	 * the dirty rate.  Consider the state
	 *	pos_ratio = 0.5						     (3)
	 *	rate = 2 * (write_bw / N)				     (4)
	 * If (1) is used, it will get stuck in that state! Because each dd
	 * will be throttled at
	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
	 * yielding
	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
	 * put (6) into (1) we get
	 *	rate_(i+1) = rate_(i)					     (7)
	 *
	 * So we end up using (2) to always keep
	 *	rate_(i+1) ~= (write_bw / N)				     (8)
	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
	 * pos_ratio is able to drive itself to 1.0, which is not only where
	 * the dirty count meets the setpoint, but also where the slope of
	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
	 */
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
	/*
	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
	 */
	if (unlikely(balanced_dirty_ratelimit > write_bw))
		balanced_dirty_ratelimit = write_bw;

	/*
	 * We could safely do this and return immediately:
	 *
	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
	 *
	 * However to get a more stable dirty_ratelimit, the below elaborated
	 * code makes use of task_ratelimit to filter out singular points and
	 * limit the step size.
	 *
	 * The below code essentially only uses the relative value of
	 *
	 *	task_ratelimit - dirty_ratelimit
	 *	= (pos_ratio - 1) * dirty_ratelimit
	 *
	 * which reflects the direction and size of dirty position error.
	 */

	/*
	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
	 * task_ratelimit is on the same side of dirty_ratelimit, too.
	 * For example, when
	 * - dirty_ratelimit > balanced_dirty_ratelimit
	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
	 * lowering dirty_ratelimit will help meet both the position and rate
	 * control targets. Otherwise, don't update dirty_ratelimit if it will
	 * only help meet the rate target. After all, what the users ultimately
	 * feel and care are stable dirty rate and small position error.
	 *
	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
	 * and filter out the singular points of balanced_dirty_ratelimit,
	 * which keeps jumping around randomly and can even leap far away at
	 * times due to the small 200ms estimation period of dirty_rate (we
	 * want to keep that period small to reduce time lags).
	 */
	step = 0;

	/*
	 * For strictlimit case, calculations above were based on wb counters
	 * and limits (starting from pos_ratio = wb_position_ratio() and up to
	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
	 * Hence, to calculate "step" properly, we have to use wb_dirty as
	 * "dirty" and wb_setpoint as "setpoint".
	 *
	 * We rampup dirty_ratelimit forcibly if wb_dirty is low because
	 * it's possible that wb_thresh is close to zero due to inactivity
	 * of backing device.
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		dirty = dtc->wb_dirty;
		if (dtc->wb_dirty < 8)
			setpoint = dtc->wb_dirty + 1;
		else
			setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
	}

	if (dirty < setpoint) {
		x = min3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	shift = dirty_ratelimit / (2 * step + 1);
	if (shift < BITS_PER_LONG)
		step = DIV_ROUND_UP(step >> shift, 8);
	else
		step = 0;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	wb->dirty_ratelimit = max(dirty_ratelimit, 1UL);
	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
}
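
/*
 * Back-of-envelope check of the balanced rate (illustrative): with N=2
 * dd tasks each throttled at task_ratelimit, the measured dirty_rate is
 * ~2 * task_ratelimit, so balanced_dirty_ratelimit =
 * task_ratelimit * write_bw / dirty_rate ~= write_bw / 2, i.e. the
 * (write_bw / N) target stated above.
 */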

static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
				  struct dirty_throttle_control *mdtc,
				  unsigned long start_time,
				  bool update_ratelimit)
{
	struct bdi_writeback *wb = gdtc->wb;
	unsigned long now = jiffies;
	unsigned long elapsed = now - wb->bw_time_stamp;
	unsigned long dirtied;
	unsigned long written;

	lockdep_assert_held(&wb->list_lock);

	/*
	 * rate-limit, only update once every 200ms.
	 */
	if (elapsed < BANDWIDTH_INTERVAL)
		return;

	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);

	/*
	 * Skip quiet periods when disk bandwidth is under-utilized.
	 * (at least 1s idle time between two flusher runs)
	 */
	if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
		goto snapshot;

	if (update_ratelimit) {
		domain_update_bandwidth(gdtc, now);
		wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);

		/*
		 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
		 * compiler has no way to figure that out.  Help it.
		 */
		if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
			domain_update_bandwidth(mdtc, now);
			wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
		}
	}
	wb_update_write_bandwidth(wb, elapsed, written);

snapshot:
	wb->dirtied_stamp = dirtied;
	wb->written_stamp = written;
	wb->bw_time_stamp = now;
}

void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };

	__wb_update_bandwidth(&gdtc, NULL, start_time, false);
}

/*
 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 * global_zone_page_state() too often. So scale it near-sqrt to the safety
 * margin (the number of pages we may dirty without exceeding the dirty
 * limits).
 */
static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2(thresh - dirty) >> 1);

	return 1;
}
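
/*
 * Example (illustrative): with a safety margin of thresh - dirty = 1000
 * pages, ilog2(1000) == 9, so the poll interval is 1 << (9 >> 1) = 16
 * pages; a near-sqrt scaling (sqrt(1000) ~= 31) that bounds how often the
 * expensive global counters are read.
 */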

static unsigned long wb_max_pause(struct bdi_writeback *wb,
				  unsigned long wb_dirty)
{
	unsigned long bw = wb->avg_write_bandwidth;
	unsigned long t;

	/*
	 * Limit pause time for small memory systems. If sleeping for too long
	 * time, a small pool of dirty/writeback pages may go empty and disk go
	 * idle.
	 *
	 * 8 serves as the safety ratio.
	 */
	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
	t++;

	return min_t(unsigned long, t, MAX_PAUSE);
}
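
/*
 * Example (illustrative, assuming HZ=1000 and 4K pages): a wb with
 * avg_write_bandwidth of 25600 pages/s (~100MB/s) gives
 * roundup_pow_of_two(1 + 125) = 128, so t = wb_dirty / 201 + 1 jiffies,
 * capped at MAX_PAUSE; slow devices with few dirty pages thus get
 * proportionally shorter sleeps so the disk never goes idle.
 */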

static long wb_min_pause(struct bdi_writeback *wb,
			 long max_pause,
			 unsigned long task_ratelimit,
			 unsigned long dirty_ratelimit,
			 int *nr_dirtied_pause)
{
	long hi = ilog2(wb->avg_write_bandwidth);
	long lo = ilog2(wb->dirty_ratelimit);
	long t;		/* target pause */
	long pause;	/* estimated next pause */
	int pages;	/* target nr_dirtied_pause */

	/* target for 10ms pause on 1-dd case */
	t = max(1, HZ / 100);

	/*
	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
	 * overheads.
	 *
	 * (N * 10ms) on 2^N concurrent tasks.
	 */
	if (hi > lo)
		t += (hi - lo) * (10 * HZ) / 1024;

	/*
	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
	 * on the much more stable dirty_ratelimit. However the next pause time
	 * will be computed based on task_ratelimit and the two rate limits may
	 * depart considerably at some time. Especially if task_ratelimit goes
	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
	 * pause time will be max_pause*2 _trimmed down_ to max_pause. As a
	 * result task_ratelimit won't be executed faithfully, which could
	 * eventually bring down dirty_ratelimit.
	 *
	 * We apply two rules to fix it up:
	 * 1) try to estimate the next pause time and if necessary, use a lower
	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
	 * 2) limit the target pause time to max_pause/2, so that the normal
	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
	 */
	t = min(t, 1 + max_pause / 2);
	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);

	/*
	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
	 * When the 16 consecutive reads are often interrupted by some dirty
	 * throttling pause during the async writes, cfq will go into idles
	 * (deadline is fine). So push nr_dirtied_pause as high as possible
	 * until it reaches DIRTY_POLL_THRESH=32 pages.
	 */
	if (pages < DIRTY_POLL_THRESH) {
		t = max_pause;
		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
		if (pages > DIRTY_POLL_THRESH) {
			pages = DIRTY_POLL_THRESH;
			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
		}
	}

	pause = HZ * pages / (task_ratelimit + 1);
	if (pause > max_pause) {
		t = max_pause;
		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
	}

	*nr_dirtied_pause = pages;
	/*
	 * The minimal pause time will normally be half the target pause time.
	 */
	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}

static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long wb_reclaimable;

	/*
	 * wb_thresh is not treated as some limiting factor as
	 * dirty_thresh, due to reasons
	 * - in JBOD setup, wb_thresh can fluctuate a lot
	 * - in a system with HDD and USB key, the USB key may somehow
	 *   go into state (wb_dirty >> wb_thresh) either because
	 *   wb_dirty starts high, or because wb_thresh drops low.
	 *   In this case we don't want to hard throttle the USB key
	 *   dirtiers for 100 seconds until wb_dirty drops under
	 *   wb_thresh. Instead the auxiliary wb control line in
	 *   wb_position_ratio() will let the dirtier task progress
	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
	 */
	dtc->wb_thresh = __wb_calc_thresh(dtc);
	dtc->wb_bg_thresh = dtc->thresh ?
		div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;

	/*
	 * In order to avoid the stacked BDI deadlock we need
	 * to ensure we accurately count the 'dirty' pages when
	 * the threshold is low.
	 *
	 * Otherwise it would be possible to get thresh+n pages
	 * reported dirty, even though there are thresh-m pages
	 * actually dirty; with m+n sitting in the percpu
	 * deltas.
	 */
	if (dtc->wb_thresh < 2 * wb_stat_error()) {
		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
	} else {
		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
	}
}

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
static void balance_dirty_pages(struct bdi_writeback *wb,
				unsigned long pages_dirtied)
{
	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
	struct dirty_throttle_control * const gdtc = &gdtc_stor;
	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
						     &mdtc_stor : NULL;
	struct dirty_throttle_control *sdtc;
	unsigned long nr_reclaimable;	/* = file_dirty */
	long period;
	long pause;
	long max_pause;
	long min_pause;
	int nr_dirtied_pause;
	bool dirty_exceeded = false;
	unsigned long task_ratelimit;
	unsigned long dirty_ratelimit;
	struct backing_dev_info *bdi = wb->bdi;
	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
	unsigned long start_time = jiffies;

	for (;;) {
		unsigned long now = jiffies;
		unsigned long dirty, thresh, bg_thresh;
		unsigned long m_dirty = 0;	/* stop bogus uninit warnings */
		unsigned long m_thresh = 0;
		unsigned long m_bg_thresh = 0;

		nr_reclaimable = global_node_page_state(NR_FILE_DIRTY);
		gdtc->avail = global_dirtyable_memory();
		gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);

		domain_dirty_limits(gdtc);

		if (unlikely(strictlimit)) {
			wb_dirty_limits(gdtc);

			dirty = gdtc->wb_dirty;
			thresh = gdtc->wb_thresh;
			bg_thresh = gdtc->wb_bg_thresh;
		} else {
			dirty = gdtc->dirty;
			thresh = gdtc->thresh;
			bg_thresh = gdtc->bg_thresh;
		}

		if (mdtc) {
			unsigned long filepages, headroom, writeback;

			/*
			 * If @wb belongs to !root memcg, repeat the same
			 * basic calculations for the memcg domain.
			 */
			mem_cgroup_wb_stats(wb, &filepages, &headroom,
					    &mdtc->dirty, &writeback);
			mdtc->dirty += writeback;
			mdtc_calc_avail(mdtc, filepages, headroom);

			domain_dirty_limits(mdtc);

			if (unlikely(strictlimit)) {
				wb_dirty_limits(mdtc);
				m_dirty = mdtc->wb_dirty;
				m_thresh = mdtc->wb_thresh;
				m_bg_thresh = mdtc->wb_bg_thresh;
			} else {
				m_dirty = mdtc->dirty;
				m_thresh = mdtc->thresh;
				m_bg_thresh = mdtc->bg_thresh;
			}
		}

		/*
		 * Throttle it only when the background writeback cannot
		 * catch up. This avoids (excessively) small writeouts
		 * when the wb limits are ramping up in the !strictlimit case.
		 *
		 * In the strictlimit case, make the decision based on the wb
		 * counters and limits. Small writeouts when the wb limits are
		 * ramping up are the price we consciously pay for
		 * strictlimit-ing.
		 *
		 * If the memcg domain is in effect, @dirty should be under
		 * both the global and the memcg freerun ceilings.
		 */
		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
		    (!mdtc ||
		     m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
			unsigned long intv;
			unsigned long m_intv;

free_running:
			intv = dirty_poll_interval(dirty, thresh);
			m_intv = ULONG_MAX;

			current->dirty_paused_when = now;
			current->nr_dirtied = 0;
			if (mdtc)
				m_intv = dirty_poll_interval(m_dirty, m_thresh);
			current->nr_dirtied_pause = min(intv, m_intv);
			break;
		}

		if (unlikely(!writeback_in_progress(wb)))
			wb_start_background_writeback(wb);

		mem_cgroup_flush_foreign(wb);

		/*
		 * Calculate global domain's pos_ratio and select the
		 * global dtc by default.
		 */
		if (!strictlimit) {
			wb_dirty_limits(gdtc);

			if ((current->flags & PF_LOCAL_THROTTLE) &&
			    gdtc->wb_dirty <
			    dirty_freerun_ceiling(gdtc->wb_thresh,
						  gdtc->wb_bg_thresh))
				/*
				 * LOCAL_THROTTLE tasks must not be throttled
				 * when below the per-wb freerun ceiling.
				 */
				goto free_running;
		}

		dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
			((gdtc->dirty > gdtc->thresh) || strictlimit);

		wb_position_ratio(gdtc);
		sdtc = gdtc;

		if (mdtc) {
			/*
			 * If memcg domain is in effect, calculate its
			 * pos_ratio. @wb should satisfy constraints from
			 * both global and memcg domains. Choose the one
			 * w/ lower pos_ratio.
			 */
			if (!strictlimit) {
				wb_dirty_limits(mdtc);

				if ((current->flags & PF_LOCAL_THROTTLE) &&
				    mdtc->wb_dirty <
				    dirty_freerun_ceiling(mdtc->wb_thresh,
							  mdtc->wb_bg_thresh))
					/*
					 * LOCAL_THROTTLE tasks must not be
					 * throttled when below the per-wb
					 * freerun ceiling.
					 */
					goto free_running;
			}
			dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
				((mdtc->dirty > mdtc->thresh) || strictlimit);

			wb_position_ratio(mdtc);
			if (mdtc->pos_ratio < gdtc->pos_ratio)
				sdtc = mdtc;
		}

		if (dirty_exceeded && !wb->dirty_exceeded)
			wb->dirty_exceeded = 1;

		if (time_is_before_jiffies(wb->bw_time_stamp +
					   BANDWIDTH_INTERVAL)) {
			spin_lock(&wb->list_lock);
			__wb_update_bandwidth(gdtc, mdtc, start_time, true);
			spin_unlock(&wb->list_lock);
		}

		/* throttle according to the chosen dtc */
		dirty_ratelimit = wb->dirty_ratelimit;
		task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
							RATELIMIT_CALC_SHIFT;
		max_pause = wb_max_pause(wb, sdtc->wb_dirty);
		min_pause = wb_min_pause(wb, max_pause,
					 task_ratelimit, dirty_ratelimit,
					 &nr_dirtied_pause);

		if (unlikely(task_ratelimit == 0)) {
			period = max_pause;
			pause = max_pause;
			goto pause;
		}
		period = HZ * pages_dirtied / task_ratelimit;
		pause = period;
		if (current->dirty_paused_when)
			pause -= now - current->dirty_paused_when;
		/*
		 * For less than 1s think time (ext3/4 may block the dirtier
		 * for up to 800ms from time to time on 1-HDD; xfs does too,
		 * though much less frequently), try to compensate for it in
		 * future periods by updating the virtual time; otherwise just
		 * do a reset, as it may be a light dirtier.
		 */
		if (pause < min_pause) {
			trace_balance_dirty_pages(wb,
						  sdtc->thresh,
						  sdtc->bg_thresh,
						  sdtc->dirty,
						  sdtc->wb_thresh,
						  sdtc->wb_dirty,
						  dirty_ratelimit,
						  task_ratelimit,
						  pages_dirtied,
						  period,
						  min(pause, 0L),
						  start_time);
			if (pause < -HZ) {
				current->dirty_paused_when = now;
				current->nr_dirtied = 0;
			} else if (period) {
				current->dirty_paused_when += period;
				current->nr_dirtied = 0;
			} else if (current->nr_dirtied_pause <= pages_dirtied)
				current->nr_dirtied_pause += pages_dirtied;
			break;
		}
		if (unlikely(pause > max_pause)) {
			/* for occasional dropped task_ratelimit */
			now += min(pause - max_pause, max_pause);
			pause = max_pause;
		}

pause:
		trace_balance_dirty_pages(wb,
					  sdtc->thresh,
					  sdtc->bg_thresh,
					  sdtc->dirty,
					  sdtc->wb_thresh,
					  sdtc->wb_dirty,
					  dirty_ratelimit,
					  task_ratelimit,
					  pages_dirtied,
					  period,
					  pause,
					  start_time);
		__set_current_state(TASK_KILLABLE);
		wb->dirty_sleep = now;
		io_schedule_timeout(pause);

		current->dirty_paused_when = now + pause;
		current->nr_dirtied = 0;
		current->nr_dirtied_pause = nr_dirtied_pause;

		/*
		 * This is typically equal to (dirty < thresh) and can also
		 * keep "1000+ dd on a slow USB stick" under control.
		 */
		if (task_ratelimit)
			break;

		/*
		 * In the case of an unresponsive NFS server whose NFS dirty
		 * pages exceed dirty_thresh, give the other good wb's a pipe
		 * to go through, so that tasks on them still remain
		 * responsive.
		 *
		 * In theory 1 page is enough to keep the consumer-producer
		 * pipe going: the flusher cleans 1 page => the task dirties 1
		 * more page. However wb_dirty has accounting errors, so use
		 * the larger and more IO friendly wb_stat_error().
		 */
		if (sdtc->wb_dirty <= wb_stat_error())
			break;

		if (fatal_signal_pending(current))
			break;
	}

	if (!dirty_exceeded && wb->dirty_exceeded)
		wb->dirty_exceeded = 0;

	if (writeback_in_progress(wb))
		return;

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if (laptop_mode)
		return;

	if (nr_reclaimable > gdtc->bg_thresh)
		wb_start_background_writeback(wb);
}
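
/*
 * A worked example of the pause arithmetic above (illustrative numbers
 * only): with HZ=1000, pages_dirtied=64 and an effective task_ratelimit of
 * 1024 pages/s, period = 1000 * 64 / 1024 = 62 jiffies (~62ms).  A task
 * that just "thought" for 30ms (now - dirty_paused_when) would then sleep
 * for roughly pause = 62 - 30 = 32ms, which keeps its long-term dirtying
 * rate close to task_ratelimit.
 */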

static DEFINE_PER_CPU(int, bdp_ratelimits);

/*
 * Normal tasks are throttled by
 *	loop {
 *		dirty tsk->nr_dirtied_pause pages;
 *		take a snap in balance_dirty_pages();
 *	}
 * However there is a worst case: if every task exits immediately after
 * dirtying (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will
 * never be called to throttle the page dirties. The solution is to save the
 * not yet throttled page dirties in dirty_throttle_leaks on task exit and
 * charge them randomly to the running tasks. This works well for the above
 * worst case, as the new task will pick up and accumulate the old task's
 * leaked dirty count and eventually get throttled.
 */
DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
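
/*
 * For illustration (hypothetical numbers): a task exits with nr_dirtied = 15
 * and nr_dirtied_pause = 16, so those 15 pages were never balanced.  They
 * get parked in this CPU's dirty_throttle_leaks; the next task that dirties
 * a page on this CPU inherits up to (ratelimit - nr_dirtied) of them in
 * balance_dirty_pages_ratelimited() below, so the leaked pages are still
 * charged against somebody's pause.
 */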

/**
 * balance_dirty_pages_ratelimited - balance dirty memory state
 * @mapping: address_space which was dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct bdi_writeback *wb = NULL;
	int ratelimit;
	int *p;

	if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
		return;

	if (inode_cgwb_enabled(inode))
		wb = wb_get_create_current(bdi, GFP_KERNEL);
	if (!wb)
		wb = &bdi->wb;

	ratelimit = current->nr_dirtied_pause;
	if (wb->dirty_exceeded)
		ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));

	preempt_disable();
	/*
	 * This prevents one CPU from accumulating too many dirtied pages
	 * without calling into balance_dirty_pages(), which can happen when
	 * there are 1000+ tasks that all start dirtying pages at exactly the
	 * same time, and hence all honour a too-large initial
	 * task->nr_dirtied_pause.
	 */
	p = this_cpu_ptr(&bdp_ratelimits);
	if (unlikely(current->nr_dirtied >= ratelimit))
		*p = 0;
	else if (unlikely(*p >= ratelimit_pages)) {
		*p = 0;
		ratelimit = 0;
	}
	/*
	 * Pick up the dirtied pages by the exited tasks. This avoids lots of
	 * short-lived tasks (e.g. gcc invocations in a kernel build) escaping
	 * the dirty throttling and livelocking other long-running dirtiers.
	 */
	p = this_cpu_ptr(&dirty_throttle_leaks);
	if (*p > 0 && current->nr_dirtied < ratelimit) {
		unsigned long nr_pages_dirtied;
		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
		*p -= nr_pages_dirtied;
		current->nr_dirtied += nr_pages_dirtied;
	}
	preempt_enable();

	if (unlikely(current->nr_dirtied >= ratelimit))
		balance_dirty_pages(wb, current->nr_dirtied);

	wb_put(wb);
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
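
/*
 * Minimal usage sketch (hypothetical caller, not from this file): a write
 * path that dirties pages one at a time would typically do
 *
 *	set_page_dirty(page);
 *	unlock_page(page);
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * i.e. call in here once per newly dirtied page, with no page locks held,
 * and let the ratelimiting above decide when to actually enter
 * balance_dirty_pages().
 */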

/**
 * wb_over_bg_thresh - does @wb need to be written back?
 * @wb: bdi_writeback of interest
 *
 * Determines whether background writeback should keep writing @wb or it's
 * clean enough.
 *
 * Return: %true if writeback should continue.
 */
bool wb_over_bg_thresh(struct bdi_writeback *wb)
{
	struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
	struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
	struct dirty_throttle_control * const gdtc = &gdtc_stor;
	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
						     &mdtc_stor : NULL;

	/*
	 * Similar to balance_dirty_pages() but ignores pages being written
	 * as we're trying to decide whether to put more under writeback.
	 */
	gdtc->avail = global_dirtyable_memory();
	gdtc->dirty = global_node_page_state(NR_FILE_DIRTY);
	domain_dirty_limits(gdtc);

	if (gdtc->dirty > gdtc->bg_thresh)
		return true;

	if (wb_stat(wb, WB_RECLAIMABLE) >
	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
		return true;

	if (mdtc) {
		unsigned long filepages, headroom, writeback;

		mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
				    &writeback);
		mdtc_calc_avail(mdtc, filepages, headroom);
		domain_dirty_limits(mdtc);	/* ditto, ignore writeback */

		if (mdtc->dirty > mdtc->bg_thresh)
			return true;

		if (wb_stat(wb, WB_RECLAIMABLE) >
		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
			return true;
	}

	return false;
}
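
/*
 * Usage sketch (hypothetical flusher loop, not from this file): background
 * writeback keeps pushing pages out while this predicate holds, e.g.
 *
 *	while (wb_over_bg_thresh(wb)) {
 *		... write out a chunk of dirty pages from wb ...
 *	}
 *
 * so it stops as soon as both the global/memcg domains and this wb's own
 * share of reclaimable pages drop under their background thresholds.
 */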

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	unsigned int old_interval = dirty_writeback_interval;
	int ret;

	ret = proc_dointvec(table, write, buffer, length, ppos);

	/*
	 * Writing 0 to dirty_writeback_interval will disable periodic
	 * writeback, while writing a different non-zero value will wake up
	 * the writeback threads. wb_wakeup_delayed() would be more
	 * appropriate, but it's a pain to iterate over all bdis and wbs.
	 * We do it this way so that the change takes effect immediately.
	 */
	if (!ret && write && dirty_writeback_interval &&
		dirty_writeback_interval != old_interval)
		wakeup_flusher_threads(WB_REASON_PERIODIC);

	return ret;
}
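
/*
 * Example (illustrative value): writing 3000 to
 * /proc/sys/vm/dirty_writeback_centisecs moves the periodic wakeup interval
 * to 30 seconds and immediately kicks the flusher threads via the wakeup
 * above; writing 0 disables periodic writeback altogether.
 */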

#ifdef CONFIG_BLOCK
void laptop_mode_timer_fn(struct timer_list *t)
{
	struct backing_dev_info *backing_dev_info =
		from_timer(backing_dev_info, t, laptop_mode_wb_timer);

	wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(struct backing_dev_info *info)
{
	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
}
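
/*
 * For illustration: with HZ=1000 and laptop_mode set to 5000 jiffies, each
 * completed I/O pushes the flush out to ~5s after the last disk activity,
 * so a burst of reads is followed by one batched writeback rather than
 * many small spin-ups.
 */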

/*
 * We're in laptop mode and we've just synced.  The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		del_timer(&bdi->laptop_mode_wb_timer);

	rcu_read_unlock();
}
#endif

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds.
 */

void writeback_set_ratelimit(void)
{
	struct wb_domain *dom = &global_wb_domain;
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);
	dom->dirty_limit = dirty_thresh;
	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
}
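
/*
 * Worked example (made-up numbers): with dirty_thresh = 100000 pages and 8
 * online CPUs, ratelimit_pages = 100000 / (8 * 32) = 390, so each CPU may
 * dirty at most ~390 pages between balance checks and all CPUs together
 * can overshoot the threshold by at most ~3%.
 */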

static int page_writeback_cpu_online(unsigned int cpu)
{
	writeback_set_ratelimit();
	return 0;
}

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to the number of pages that could be allocated for buffers.
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHMEM memory, and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has..
 */
void __init page_writeback_init(void)
{
	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));

	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
			  page_writeback_cpu_online, NULL);
	cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
			  page_writeback_cpu_online);
}

/**
 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
 * @mapping: address space structure to write
 * @start: starting page index
 * @end: ending page index (inclusive)
 *
 * This function scans the page range from @start to @end (inclusive) and tags
 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
 * that write_cache_pages (or whoever calls this function) will then use
 * TOWRITE tag to identify pages eligible for writeback.  This mechanism is
 * used to avoid livelocking of writeback by a process steadily creating new
 * dirty pages in the file (thus it is important for this function to be quick
 * so that it can tag pages faster than a dirtying process can create them).
 */
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	unsigned int tagged = 0;
	void *page;

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
		xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
		if (++tagged % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
}
EXPORT_SYMBOL(tag_pages_for_writeback);
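
/*
 * Usage sketch (mirroring write_cache_pages() below): an integrity sync
 * first tags the whole range, then works only on the TOWRITE tag so pages
 * dirtied afterwards cannot livelock it:
 *
 *	tag_pages_for_writeback(mapping, index, end);
 *	... look up pages marked PAGECACHE_TAG_TOWRITE and write them ...
 */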

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * To avoid livelocks (when another process dirties new pages), we first tag
 * pages which should be written back with the TOWRITE tag and only then start
 * writing them. For data-integrity sync we have to be careful so that we do
 * not miss some pages (e.g., because some other process has cleared the
 * TOWRITE tag we set).  The rule we follow is that the TOWRITE tag can be
 * cleared only by the process clearing the DIRTY tag (and submitting the page
 * for IO).
 *
 * To avoid deadlocks between range_cyclic writeback and callers that hold
 * pages in PageWriteback to aggregate IO until write_cache_pages() returns,
 * we do not loop back to the start of the file. Doing so causes a page
 * lock/page writeback access order inversion - we should only ever lock
 * multiple pages in ascending page->index order, and looping back to the start
 * of the file violates that rule and causes deadlocks.
 *
 * Return: %0 on success, negative error code otherwise
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	int ret = 0;
	int done = 0;
	int error;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
		tag_pages_for_writeback(mapping, index, end);
		tag = PAGECACHE_TAG_TOWRITE;
	} else {
		tag = PAGECACHE_TAG_DIRTY;
	}
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			done_index = page->index;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
			error = (*writepage)(page, wbc, data);
			if (unlikely(error)) {
				/*
				 * Handle errors according to the type of
				 * writeback. There's no need to continue for
				 * background writeback. Just push done_index
				 * past this page so media errors won't choke
				 * writeout for the entire file. For integrity
				 * writeback, we must process the entire dirty
				 * set regardless of errors because the fs may
				 * still have state to clear for each page. In
				 * that case we continue processing and return
				 * the first error.
				 */
				if (error == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					error = 0;
				} else if (wbc->sync_mode != WB_SYNC_ALL) {
					ret = error;
					done_index = page->index + 1;
					done = 1;
					break;
				}
				if (!ret)
					ret = error;
			}

			/*
			 * We stop writing back only if we are not doing
			 * integrity sync. In case of integrity sync we have to
			 * keep going until we have written all the pages
			 * we tagged for writeback prior to entering this loop.
			 */
			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/*
	 * If we hit the last page and there is more work to be done: wrap
	 * the index back to the start of the file for the next time we
	 * are called.
	 */
	if (wbc->range_cyclic && !done)
		done_index = 0;
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * Return: %0 on success, negative error code otherwise
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
	blk_finish_plug(&plug);
	return ret;
}

EXPORT_SYMBOL(generic_writepages);

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	while (1) {
		if (mapping->a_ops->writepages)
			ret = mapping->a_ops->writepages(mapping, wbc);
		else
			ret = generic_writepages(mapping, wbc);
		if ((ret != -ENOMEM) || (wbc->sync_mode != WB_SYNC_ALL))
			break;
		cond_resched();
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	}
	return ret;
}

/**
 * write_one_page - write out a single page and wait on I/O
 * @page: the page to write
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this
 * function returns.
 *
 * Return: %0 on success, negative error code otherwise
 */
int write_one_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		get_page(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0)
			wait_on_page_writeback(page);
		put_page(page);
	} else {
		unlock_page(page);
	}

	if (!ret)
		ret = filemap_check_errors(mapping);
	return ret;
}
EXPORT_SYMBOL(write_one_page);
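
/*
 * Minimal usage sketch (hypothetical caller): the page comes in locked and
 * is unlocked again by the time this returns, so a typical sequence is
 *
 *	lock_page(page);
 *	err = write_one_page(page);
 *
 * with no unlock_page() needed afterwards, and err checked against the
 * usual negative error codes.
 */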

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		return !TestSetPageDirty(page);
	return 0;
}
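
/*
 * A minimal sketch of how this helper gets wired up (hypothetical
 * filesystem): address_spaces that never write anything back simply point
 * their a_ops at it:
 *
 *	static const struct address_space_operations example_aops = {
 *		.set_page_dirty	= __set_page_dirty_no_writeback,
 *	};
 */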

/*
 * Helper function for set_page_dirty family.
 *
 * Caller must hold lock_page_memcg().
 *
 * NOTE: This relies on being atomic wrt interrupts.
 */
void account_page_dirtied(struct page *page, struct address_space *mapping)
{
	struct inode *inode = mapping->host;

	trace_writeback_dirty_page(page, mapping);

	if (mapping_can_writeback(mapping)) {
		struct bdi_writeback *wb;

		inode_attach_wb(inode, page);
		wb = inode_to_wb(inode);

		__inc_lruvec_page_state(page, NR_FILE_DIRTY);
		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
		__inc_node_page_state(page, NR_DIRTIED);
		inc_wb_stat(wb, WB_RECLAIMABLE);
		inc_wb_stat(wb, WB_DIRTIED);
		task_io_account_write(PAGE_SIZE);
		current->nr_dirtied++;
		this_cpu_inc(bdp_ratelimits);

		mem_cgroup_track_foreign_dirty(page, wb);
	}
}

/*
 * Helper function for deaccounting dirty page without writeback.
 *
 * Caller must hold lock_page_memcg().
 */
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb)
{
	if (mapping_can_writeback(mapping)) {
		dec_lruvec_page_state(page, NR_FILE_DIRTY);
		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
		dec_wb_stat(wb, WB_RECLAIMABLE);
		task_io_account_cancelled_write(PAGE_SIZE);
	}
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * the xarray.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * The caller must ensure this doesn't race with truncation.  Most will simply
 * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
 * the pte lock held, which also locks out truncation.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	lock_page_memcg(page);
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		unsigned long flags;

		if (!mapping) {
			unlock_page_memcg(page);
			return 1;
		}

		xa_lock_irqsave(&mapping->i_pages, flags);
		BUG_ON(page_mapping(page) != mapping);
		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		__xa_set_mark(&mapping->i_pages, page_index(page),
			      PAGECACHE_TAG_DIRTY);
		xa_unlock_irqrestore(&mapping->i_pages, flags);
		unlock_page_memcg(page);

		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	unlock_page_memcg(page);
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
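
/*
 * Usage sketch (hypothetical example_aops): filesystems that track dirty
 * state without buffer_heads typically use this directly as their
 * ->set_page_dirty implementation:
 *
 *	static const struct address_space_operations example_aops = {
 *		.set_page_dirty	= __set_page_dirty_nobuffers,
 *		...
 *	};
 */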

/*
 * Call this whenever redirtying a page, to de-account the dirty counters
 * (NR_DIRTIED, WB_DIRTIED, tsk->nr_dirtied), so that they match the written
 * counters (NR_WRITTEN, WB_WRITTEN) in the long run. The mismatches would
 * otherwise lead to systematic errors in balanced_dirty_ratelimit and the
 * dirty pages position control.
 */
void account_page_redirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping && mapping_can_writeback(mapping)) {
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
		struct wb_lock_cookie cookie = {};

		wb = unlocked_inode_to_wb_begin(inode, &cookie);
		current->nr_dirtied--;
		dec_node_page_state(page, NR_DIRTIED);
		dec_wb_stat(wb, WB_DIRTIED);
		unlocked_inode_to_wb_end(inode, &cookie);
	}
}
EXPORT_SYMBOL(account_page_redirty);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage(), then unlock the page and return 0.
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	int ret;

	wbc->pages_skipped++;
	ret = __set_page_dirty_nobuffers(page);
	account_page_redirty(page);
	return ret;
}
EXPORT_SYMBOL(redirty_page_for_writepage);
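
/*
 * Usage sketch (hypothetical ->writepage, following the pattern described
 * above):
 *
 *	static int example_writepage(struct page *page,
 *				     struct writeback_control *wbc)
 *	{
 *		if (cannot_write_now(page)) {
 *			redirty_page_for_writepage(wbc, page);
 *			unlock_page(page);
 *			return 0;
 *		}
 *		...
 *	}
 */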

/*
 * Dirty a page.
 *
 * For pages with a mapping this should be done under the page lock for the
 * benefit of asynchronous memory errors which prefer a consistent dirty
 * state. This rule can be broken in some special cases, but it's better
 * not to.
 *
 * If the mapping doesn't provide a set_page_dirty a_op, then just fall
 * through and assume that it wants buffer_heads.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	page = compound_head(page);
	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		/*
		 * readahead/lru_deactivate_page could remain
		 * PG_readahead/PG_reclaim due to a race with
		 * end_page_writeback.  As for readahead, if the page is
		 * written, the flags would be reset, so no problem.  As for
		 * lru_deactivate_page, if the page is redirtied, the flag
		 * will be reset, so no problem either; but if the page is
		 * then used by readahead it will confuse readahead and make
		 * it restart the size rampup process.  That is a trivial
		 * problem, though.
		 */
		if (PageReclaim(page))
			ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * This cancels just the dirty bit on the kernel page itself, it does NOT
 * actually remove dirty bits on any mmap's that may be around. It also
 * leaves the page tagged dirty, so any sync activity will still find it on
 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
 * look at the dirty bits in the VM.
 *
 * Doing this should *normally* only ever be done when a page is truncated,
 * and is not actually mapped anywhere at all. However, fs/buffer.c does
 * this when it notices that somebody has cleaned out all the buffers on a
 * page without actually doing it through the VM. Can you say "ext3 is
 * horribly ugly"? Thought you could.
 */
void __cancel_dirty_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping_can_writeback(mapping)) {
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
		struct wb_lock_cookie cookie = {};

		lock_page_memcg(page);
		wb = unlocked_inode_to_wb_begin(inode, &cookie);

		if (TestClearPageDirty(page))
			account_page_cleaned(page, mapping, wb);

		unlocked_inode_to_wb_end(inode, &cookie);
		unlock_page_memcg(page);
	} else {
		ClearPageDirty(page);
	}
}
EXPORT_SYMBOL(__cancel_dirty_page);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the xarray so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and xarray dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and xarray tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret = 0;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (mapping && mapping_can_writeback(mapping)) {
		struct inode *inode = mapping->host;
		struct bdi_writeback *wb;
		struct wb_lock_cookie cookie = {};

		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point.  We do this by having them hold the
		 * page lock while dirtying the page, and pages are
		 * always locked coming in here, so we get the desired
		 * exclusion.
		 */
		wb = unlocked_inode_to_wb_begin(inode, &cookie);
		if (TestClearPageDirty(page)) {
			dec_lruvec_page_state(page, NR_FILE_DIRTY);
			dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
			dec_wb_stat(wb, WB_RECLAIMABLE);
			ret = 1;
		}
		unlocked_inode_to_wb_end(inode, &cookie);
		return ret;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	int ret;

	memcg = lock_page_memcg(page);
	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
	if (mapping && mapping_use_writeback_tags(mapping)) {
		struct inode *inode = mapping->host;
		struct backing_dev_info *bdi = inode_to_bdi(inode);
		unsigned long flags;

		xa_lock_irqsave(&mapping->i_pages, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			__xa_clear_mark(&mapping->i_pages, page_index(page),
					PAGECACHE_TAG_WRITEBACK);
			if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
				struct bdi_writeback *wb = inode_to_wb(inode);

				dec_wb_stat(wb, WB_WRITEBACK);
				__wb_writeout_inc(wb);
			}
		}

		if (mapping->host && !mapping_tagged(mapping,
						     PAGECACHE_TAG_WRITEBACK))
			sb_clear_inode_writeback(mapping->host);

		xa_unlock_irqrestore(&mapping->i_pages, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret) {
		dec_lruvec_state(lruvec, NR_WRITEBACK);
		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
		inc_node_page_state(page, NR_WRITTEN);
	}
	__unlock_page_memcg(memcg);
	return ret;
}

int __test_set_page_writeback(struct page *page, bool keep_write)
{
	struct address_space *mapping = page_mapping(page);
	int ret, access_ret;

	lock_page_memcg(page);
	if (mapping && mapping_use_writeback_tags(mapping)) {
		XA_STATE(xas, &mapping->i_pages, page_index(page));
		struct inode *inode = mapping->host;
		struct backing_dev_info *bdi = inode_to_bdi(inode);
		unsigned long flags;

		xas_lock_irqsave(&xas, flags);
		xas_load(&xas);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			bool on_wblist;

			on_wblist = mapping_tagged(mapping,
						   PAGECACHE_TAG_WRITEBACK);

			xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
			if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT)
				inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);

			/*
			 * We can come through here when swapping anonymous
			 * pages, so we don't necessarily have an inode to track
			 * for sync.
			 */
			if (mapping->host && !on_wblist)
				sb_mark_inode_writeback(mapping->host);
		}
		if (!PageDirty(page))
			xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
		if (!keep_write)
			xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
		xas_unlock_irqrestore(&xas, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret) {
		inc_lruvec_page_state(page, NR_WRITEBACK);
		inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
	}
	unlock_page_memcg(page);
	access_ret = arch_make_page_accessible(page);
	/*
	 * If writeback has been triggered on a page that cannot be made
	 * accessible, it is too late to recover here.
	 */
	VM_BUG_ON_PAGE(access_ret != 0, page);

	return ret;
}
EXPORT_SYMBOL(__test_set_page_writeback);

/*
 * Wait for a page to complete writeback
 */
void wait_on_page_writeback(struct page *page)
{
	while (PageWriteback(page)) {
		trace_wait_on_page_writeback(page, page_mapping(page));
		wait_on_page_bit(page, PG_writeback);
	}
}
EXPORT_SYMBOL_GPL(wait_on_page_writeback);

/**
 * wait_for_stable_page() - wait for writeback to finish, if necessary.
 * @page: The page to wait on.
 *
 * This function determines if the given page is related to a backing device
 * that requires page contents to be held stable during writeback.  If so, then
 * it will wait for any pending writeback to complete.
 */
void wait_for_stable_page(struct page *page)
{
	page = thp_head(page);
	if (page->mapping->host->i_sb->s_iflags & SB_I_STABLE_WRITES)
		wait_on_page_writeback(page);
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);