Lines Matching refs:wb
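
The fragments below appear to come from the kernel's dirty-page writeback throttling code (mm/page-writeback.c); each entry is a source line number followed by the line that references wb.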

129 	struct bdi_writeback	*wb;
137 unsigned long wb_dirty; /* per-wb counterparts */
153 #define GDTC_INIT(__wb) .wb = (__wb), \
159 #define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \
179 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
181 return &wb->memcg_completions;
184 static void wb_min_max_ratio(struct bdi_writeback *wb,
187 unsigned long this_bw = wb->avg_write_bandwidth;
188 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
189 unsigned long long min = wb->bdi->min_ratio;
190 unsigned long long max = wb->bdi->max_ratio;
193 * @wb may already be clean by the time control reaches here and
213 #define GDTC_INIT(__wb) .wb = (__wb), \
233 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
238 static void wb_min_max_ratio(struct bdi_writeback *wb,
241 *minp = wb->bdi->min_ratio;
242 *maxp = wb->bdi->max_ratio;
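
The two wb_min_max_ratio() definitions above (source lines 184 and 238) reflect the CONFIG_CGROUP_WRITEBACK split in the file: with cgroup writeback a wb only gets a slice of the bdi's min_ratio/max_ratio proportional to its share of the bdi's total write bandwidth, while the fallback reports the bdi values directly. A minimal user-space sketch of that proportional scaling, with illustrative numbers rather than the kernel's types and helpers:

/*
 * Sketch of the bandwidth-proportional scaling done by the cgroup-writeback
 * variant of wb_min_max_ratio().  Plain user-space C, illustrative only.
 */
#include <stdio.h>

static void scaled_min_max(unsigned long this_bw, unsigned long tot_bw,
                           unsigned long bdi_min, unsigned long bdi_max,
                           unsigned long *minp, unsigned long *maxp)
{
        unsigned long long min = bdi_min, max = bdi_max;

        /* the wb may already be clean, so tot_bw may not include this_bw */
        if (this_bw < tot_bw) {
                if (min)
                        min = min * this_bw / tot_bw;
                if (max < 100)
                        max = max * this_bw / tot_bw;
        }
        *minp = min;
        *maxp = max;
}

int main(void)
{
        unsigned long min, max;

        /* a wb writing 40 of the bdi's 100 MB/s, min_ratio=10, max_ratio=50 */
        scaled_min_max(40, 100, 10, 50, &min, &max);
        printf("min=%lu%% max=%lu%%\n", min, max);      /* min=4 max=20 */
        return 0;
}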
608 * Increment @wb's writeout completion count and the global writeout
611 static inline void __wb_writeout_inc(struct bdi_writeback *wb)
615 inc_wb_stat(wb, WB_WRITTEN);
616 wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
617 wb->bdi->max_prop_frac);
619 cgdom = mem_cgroup_wb_domain(wb);
621 wb_domain_writeout_inc(cgdom, wb_memcg_completions(wb),
622 wb->bdi->max_prop_frac);
625 void wb_writeout_inc(struct bdi_writeback *wb)
630 __wb_writeout_inc(wb);
755 * __wb_calc_thresh - @wb's share of dirty throttling threshold
763 * more (rather than completely block them) when the wb dirty pages go high.
769 * The wb's share of dirty limit will be adapting to its throughput and
772 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
793 wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
802 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
804 struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
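
wb_calc_thresh() (source line 802) wraps __wb_calc_thresh() (755-793): a wb's slice of the dirty threshold follows its fraction of recent writeout completions (the counters bumped by __wb_writeout_inc() above) and is then raised by the wb's min_ratio and capped by its max_ratio from wb_min_max_ratio(). A rough user-space sketch of that share computation, assuming the completion fraction is already available as numerator/denominator and omitting the kernel's fixed-point helpers:

/*
 * Rough sketch of a wb's share of the dirty threshold, loosely following
 * __wb_calc_thresh().  Names and the simplified arithmetic are illustrative.
 */
static unsigned long wb_share_of_thresh(unsigned long thresh,
                                        unsigned long numerator,
                                        unsigned long denominator,
                                        unsigned long wb_min_ratio,
                                        unsigned long wb_max_ratio)
{
        /* proportional share based on recent writeout completions */
        unsigned long long wb_thresh =
                (unsigned long long)thresh * numerator / denominator;

        /* raise by the wb's min_ratio, cap at its max_ratio (both in %) */
        wb_thresh += (unsigned long long)thresh * wb_min_ratio / 100;
        if (wb_thresh > (unsigned long long)thresh * wb_max_ratio / 100)
                wb_thresh = (unsigned long long)thresh * wb_max_ratio / 100;

        return wb_thresh;
}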
845 * We want the dirty pages be balanced around the global/wb setpoints.
881 * (o) wb control line
909 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
913 * - the wb dirty thresh drops quickly due to change of JBOD workload
917 struct bdi_writeback *wb = dtc->wb;
918 unsigned long write_bw = wb->avg_write_bandwidth;
945 * such filesystems balance_dirty_pages always checks wb counters
946 * against wb limits. Even if global "nr_dirty" is under "freerun".
957 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
958 * about ~6K pages (as the average of background and throttle wb
963 * because we want to throttle process writing to a strictlimit wb
967 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
992 * make decision based on wb counters. But there is an
995 * wb's) while given strictlimit wb is below limit.
999 * activity in the system coming from a single strictlimit wb
1004 * (when globally we are at freerun and wb is well below wb
1015 * the wb is over/under its share of dirty pages, we want to scale
1020 * wb setpoint
1028 * The main wb control line is a linear function that subjects to
1031 * (2) k = - 1 / (8 * write_bw) (in single wb case)
1034 * For single wb case, the dirty pages are observed to fluctuate
1055 * scale global setpoint to wb's:
1061 * Use span=(8*write_bw) in single wb case as indicated by
1078 * wb reserve area, safeguard against dirty pool underrun and disk idle
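
The comment fragments from source lines 845-1078 belong to wb_position_ratio(): a pos_ratio is first derived from the global dirty state, then scaled along a per-wb linear control line with slope k = -1/(8*write_bw), with the global setpoint rescaled to the wb and a floor so this wb line never contributes less than a factor of 1/4. A floating-point sketch of just the per-wb scaling (the kernel uses fixed-point arithmetic plus extra clamping of wb_thresh and a separate strictlimit path):

/*
 * Floating-point sketch of the per-wb control line in wb_position_ratio().
 * pos_ratio is the value already computed from the global state; wb_setpoint
 * and write_bw are in pages and pages/s.  Illustrative only.
 */
static double wb_scale_pos_ratio(double pos_ratio, unsigned long wb_dirty,
                                 unsigned long wb_setpoint,
                                 unsigned long write_bw)
{
        unsigned long span = 8 * write_bw;      /* slope k = -1 / (8 * write_bw) */
        unsigned long x_intercept = wb_setpoint + span;

        if (wb_dirty < x_intercept - span / 4)
                pos_ratio *= (double)(x_intercept - wb_dirty) /
                             (x_intercept - wb_setpoint);
        else
                pos_ratio /= 4;                 /* wb line bottoms out at 1/4 */

        return pos_ratio;
}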
1094 static void wb_update_write_bandwidth(struct bdi_writeback *wb,
1099 unsigned long avg = wb->avg_write_bandwidth;
1100 unsigned long old = wb->write_bandwidth;
1113 bw = written - min(written, wb->written_stamp);
1120 bw += (u64)wb->write_bandwidth * (period - elapsed);
1135 if (wb_has_dirty_io(wb)) {
1136 long delta = avg - wb->avg_write_bandwidth;
1138 &wb->bdi->tot_write_bandwidth) <= 0);
1140 wb->write_bandwidth = bw;
1141 wb->avg_write_bandwidth = avg;
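
wb_update_write_bandwidth() (source lines 1094-1141) turns the WB_WRITTEN delta since the last update into a pages-per-second figure, blends it with the previous estimate over a fixed period (about 3*HZ rounded up to a power of two), and folds the resulting change of avg_write_bandwidth into the bdi-wide tot_write_bandwidth. A simplified user-space sketch of the single-step estimate, leaving out the avg smoothing and the bdi total:

/*
 * Simplified sketch of the estimate in wb_update_write_bandwidth().
 * 'written' is the counter delta, 'elapsed' is in jiffies, and 'period'
 * stands in for roundup_pow_of_two(3 * HZ).  Illustrative values.
 */
#include <stdint.h>

#define HZ 100UL

static unsigned long estimate_write_bw(unsigned long old_bw,
                                       unsigned long written,
                                       unsigned long elapsed)
{
        const unsigned long period = 512;       /* 2^9 >= 3 * HZ */
        uint64_t bw = (uint64_t)written * HZ;   /* new rate scaled by elapsed */

        if (elapsed > period)                   /* sample longer than the window */
                return bw / elapsed;

        /* weighted blend: elapsed/period of the new rate, the rest of the old */
        bw += (uint64_t)old_bw * (period - elapsed);
        return bw >> 9;                         /* divide by period */
}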
1193 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1195 * Normal wb tasks will be curbed at or below it in long term.
1202 struct bdi_writeback *wb = dtc->wb;
1207 unsigned long write_bw = wb->avg_write_bandwidth;
1208 unsigned long dirty_ratelimit = wb->dirty_ratelimit;
1220 dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
1231 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
1270 * wb->dirty_ratelimit = balanced_dirty_ratelimit;
1304 * For strictlimit case, calculations above were based on wb counters
1314 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1323 x = min3(wb->balanced_dirty_ratelimit,
1328 x = max3(wb->balanced_dirty_ratelimit,
1350 wb->dirty_ratelimit = max(dirty_ratelimit, 1UL);
1351 wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
1353 trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
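
wb_update_dirty_ratelimit() (the block from source lines 1193 to 1353) maintains wb->dirty_ratelimit. The rate each dirtier was actually granted over the last period is dirty_ratelimit * pos_ratio; scaling that by write_bw / dirty_rate gives balanced_dirty_ratelimit, the per-task rate at which N throttled tasks would together dirty just as fast as the device writes back. dirty_ratelimit is then stepped toward that target conservatively (the min3/max3 clamping at lines 1323-1328). A sketch of the balanced-rate computation, ignoring the stepping and the kernel's fixed-point pos_ratio:

/*
 * Sketch of the balanced rate from wb_update_dirty_ratelimit().  pos_ratio
 * is passed as a plain fraction here; the conservative step logic and the
 * strictlimit handling are omitted.
 */
#include <stdint.h>

static unsigned long balanced_rate(unsigned long dirty_ratelimit,
                                   double pos_ratio,
                                   unsigned long write_bw,
                                   unsigned long dirty_rate)
{
        /* rate each dirtier was throttled to over the last period */
        unsigned long task_ratelimit = dirty_ratelimit * pos_ratio;

        /*
         * N tasks at task_ratelimit produced dirty_rate = N * task_ratelimit;
         * rescale so the aggregate converges toward write_bw.
         */
        return (uint64_t)task_ratelimit * write_bw / (dirty_rate | 1);
}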
1361 struct bdi_writeback *wb = gdtc->wb;
1363 unsigned long elapsed = now - wb->bw_time_stamp;
1367 lockdep_assert_held(&wb->list_lock);
1375 dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
1376 written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
1382 if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
1398 wb_update_write_bandwidth(wb, elapsed, written);
1401 wb->dirtied_stamp = dirtied;
1402 wb->written_stamp = written;
1403 wb->bw_time_stamp = now;
1406 void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
1408 struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
1430 static unsigned long wb_max_pause(struct bdi_writeback *wb,
1433 unsigned long bw = wb->avg_write_bandwidth;
1449 static long wb_min_pause(struct bdi_writeback *wb,
1455 long hi = ilog2(wb->avg_write_bandwidth);
1456 long lo = ilog2(wb->dirty_ratelimit);
1526 struct bdi_writeback *wb = dtc->wb;
1538 * wb_thresh. Instead the auxiliary wb control line in
1557 wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
1558 dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
1560 wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
1561 dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
1572 static void balance_dirty_pages(struct bdi_writeback *wb,
1575 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1576 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1590 struct backing_dev_info *bdi = wb->bdi;
1623 * If @wb belongs to !root memcg, repeat the same
1626 mem_cgroup_wb_stats(wb, &filepages, &headroom,
1648 * when the wb limits are ramping up in case of !strictlimit.
1650 * In strictlimit case make decision based on the wb counters
1651 * and limits. Small writeouts when the wb limits are ramping
1675 if (unlikely(!writeback_in_progress(wb)))
1676 wb_start_background_writeback(wb);
1678 mem_cgroup_flush_foreign(wb);
1693 * when below the per-wb freerun ceiling.
1707 * pos_ratio. @wb should satisfy constraints from
1720 * throttled when below the per-wb
1733 if (dirty_exceeded && !wb->dirty_exceeded)
1734 wb->dirty_exceeded = 1;
1736 if (time_is_before_jiffies(wb->bw_time_stamp +
1738 spin_lock(&wb->list_lock);
1740 spin_unlock(&wb->list_lock);
1744 dirty_ratelimit = wb->dirty_ratelimit;
1747 max_pause = wb_max_pause(wb, sdtc->wb_dirty);
1748 min_pause = wb_min_pause(wb, max_pause,
1769 trace_balance_dirty_pages(wb,
1798 trace_balance_dirty_pages(wb,
1811 wb->dirty_sleep = now;
1827 * pages exceeds dirty_thresh, give the other good wb's a pipe
1842 if (!dirty_exceeded && wb->dirty_exceeded)
1843 wb->dirty_exceeded = 0;
1845 if (writeback_in_progress(wb))
1860 wb_start_background_writeback(wb);
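
Within balance_dirty_pages() (source lines 1572-1860) the sleep time follows directly from the ratelimit: a task that dirtied pages_dirtied pages while only allowed task_ratelimit pages/s owes roughly HZ * pages_dirtied / task_ratelimit jiffies, clamped to the window from wb_min_pause()/wb_max_pause(). A sketch of just that arithmetic; the real loop also credits think time since the last sleep and may skip pauses below the minimum:

/*
 * Sketch of the pause arithmetic in balance_dirty_pages().  Only the basic
 * clamp is shown; names are illustrative.
 */
#define HZ 100L

static long compute_pause(unsigned long pages_dirtied,
                          unsigned long task_ratelimit,
                          long min_pause, long max_pause)
{
        /* jiffies of sleep that make the effective rate ~task_ratelimit */
        long pause = HZ * pages_dirtied / (task_ratelimit | 1);

        if (pause < min_pause)
                pause = min_pause;
        if (pause > max_pause)
                pause = max_pause;      /* bound per-iteration latency */
        return pause;
}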
1898 struct bdi_writeback *wb = NULL;
1906 wb = wb_get_create_current(bdi, GFP_KERNEL);
1907 if (!wb)
1908 wb = &bdi->wb;
1911 if (wb->dirty_exceeded)
1943 balance_dirty_pages(wb, current->nr_dirtied);
1945 wb_put(wb);
1950 * wb_over_bg_thresh - does @wb need to be written back?
1951 * @wb: bdi_writeback of interest
1953 * Determines whether background writeback should keep writing @wb or it's
1958 bool wb_over_bg_thresh(struct bdi_writeback *wb)
1960 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1961 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1977 if (wb_stat(wb, WB_RECLAIMABLE) >
1978 wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
1984 mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
1992 if (wb_stat(wb, WB_RECLAIMABLE) >
1993 wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
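
wb_over_bg_thresh() (source lines 1950-1993) is what background writeback polls to decide whether to keep going: the wb is over the threshold when its WB_RECLAIMABLE count exceeds its share of bg_thresh as computed by wb_calc_thresh(), evaluated against the global domain and, with cgroup writeback, repeated against the memcg domain via mem_cgroup_wb_stats().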
2444 struct bdi_writeback *wb;
2447 wb = inode_to_wb(inode);
2452 inc_wb_stat(wb, WB_RECLAIMABLE);
2453 inc_wb_stat(wb, WB_DIRTIED);
2458 mem_cgroup_track_foreign_dirty(page, wb);
2468 struct bdi_writeback *wb)
2473 dec_wb_stat(wb, WB_RECLAIMABLE);
2535 struct bdi_writeback *wb;
2538 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2541 dec_wb_stat(wb, WB_DIRTIED);
2647 struct bdi_writeback *wb;
2651 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2654 account_page_cleaned(page, mapping, wb);
2687 struct bdi_writeback *wb;
2725 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2729 dec_wb_stat(wb, WB_RECLAIMABLE);
2759 struct bdi_writeback *wb = inode_to_wb(inode);
2761 dec_wb_stat(wb, WB_WRITEBACK);
2762 __wb_writeout_inc(wb);
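
The tail of the listing (source lines 2444-2762) is the per-page accounting that feeds the controls above: dirtying a page increments WB_RECLAIMABLE and WB_DIRTIED on the owning wb, redirtying de-accounts WB_DIRTIED so the dirtied and written counts stay in step, cleaning a dirty page decrements WB_RECLAIMABLE, and completing writeback decrements WB_WRITEBACK and records a writeout completion via __wb_writeout_inc().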