Lines matching refs: wb

129 	struct bdi_writeback	*wb;
137 unsigned long wb_dirty; /* per-wb counterparts */
153 #define GDTC_INIT(__wb) .wb = (__wb), \
159 #define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \
179 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
181 return &wb->memcg_completions;
184 static void wb_min_max_ratio(struct bdi_writeback *wb,
187 unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth);
188 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
189 unsigned long long min = wb->bdi->min_ratio;
190 unsigned long long max = wb->bdi->max_ratio;
193 * @wb may already be clean by the time control reaches here and
213 #define GDTC_INIT(__wb) .wb = (__wb), \
233 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
238 static void wb_min_max_ratio(struct bdi_writeback *wb,
241 *minp = wb->bdi->min_ratio;
242 *maxp = wb->bdi->max_ratio;
576 * Increment @wb's writeout completion count and the global writeout
579 static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
583 wb_stat_mod(wb, WB_WRITTEN, nr);
584 wb_domain_writeout_add(&global_wb_domain, &wb->completions,
585 wb->bdi->max_prop_frac, nr);
587 cgdom = mem_cgroup_wb_domain(wb);
589 wb_domain_writeout_add(cgdom, wb_memcg_completions(wb),
590 wb->bdi->max_prop_frac, nr);
593 void wb_writeout_inc(struct bdi_writeback *wb)
598 __wb_writeout_add(wb, 1);
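For context around __wb_writeout_add()/wb_writeout_inc() above: each completed writeout bumps the per-wb WB_WRITTEN stat and adds a completion event to the global wb_domain (and, with cgroup writeback, to the memcg domain). The toy sketch below only illustrates the ratio those counters exist to track, with hypothetical names; the kernel uses flexible proportions (fprop) with periodic aging rather than raw counters.

	struct toy_domain { unsigned long completions; };
	struct toy_wb     { unsigned long completions; };

	/* Mirror of the double bump in __wb_writeout_add(): the wb and its
	 * domain count the same events, so wb->completions / dom->completions
	 * approximates this wb's recent share of writeout. */
	static void toy_writeout_add(struct toy_domain *dom, struct toy_wb *wb, long nr)
	{
		wb->completions += nr;
		dom->completions += nr;
	}
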
841 * __wb_calc_thresh - @wb's share of dirty throttling threshold
849 * more (rather than completely block them) when the wb dirty pages go high.
855 * The wb's share of dirty limit will be adapting to its throughput and
858 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
879 wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
888 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
890 struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
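wb_calc_thresh()/__wb_calc_thresh() above turn a domain-wide threshold into this wb's slice: the slice follows the wb's fraction of recent writeout completions (fed by __wb_writeout_add() earlier) and is then bounded by the per-bdi ratios from wb_min_max_ratio(). Below is a simplified userspace sketch under those assumptions, with hypothetical names and percent-based ratios; it is not the kernel's exact math.

	static unsigned long wb_share_of_thresh(unsigned long thresh,
						unsigned long wb_completions,
						unsigned long all_completions,
						unsigned long min_ratio,	/* percent */
						unsigned long max_ratio)	/* percent */
	{
		unsigned long long wb_thresh;

		if (!all_completions)
			all_completions = 1;
		/* proportional share of the domain threshold */
		wb_thresh = (unsigned long long)thresh * wb_completions / all_completions;
		/* clamp into the [min_ratio, max_ratio] band of the threshold */
		if (wb_thresh < (unsigned long long)thresh * min_ratio / 100)
			wb_thresh = (unsigned long long)thresh * min_ratio / 100;
		if (wb_thresh > (unsigned long long)thresh * max_ratio / 100)
			wb_thresh = (unsigned long long)thresh * max_ratio / 100;
		return (unsigned long)wb_thresh;
	}
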
931 * We want the dirty pages to be balanced around the global/wb setpoints.
967 * (o) wb control line
995 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
999 * - the wb dirty thresh drops quickly due to change of JBOD workload
1003 struct bdi_writeback *wb = dtc->wb;
1004 unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth);
1031 * such filesystems balance_dirty_pages always checks wb counters
1032 * against wb limits. Even if global "nr_dirty" is under "freerun".
1043 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
1044 * about ~6K pages (as the average of background and throttle wb
1049 * because we want to throttle process writing to a strictlimit wb
1053 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1078 * make decision based on wb counters. But there is an
1081 * wb's) while given strictlimit wb is below limit.
1085 * activity in the system coming from a single strictlimit wb
1090 * (when globally we are at freerun and wb is well below wb
1101 * the wb is over/under its share of dirty pages, we want to scale
1106 * wb setpoint
1114 * The main wb control line is a linear function that subjects to
1117 * (2) k = - 1 / (8 * write_bw) (in single wb case)
1120 * For single wb case, the dirty pages are observed to fluctuate
1141 * scale global setpoint to wb's:
1147 * Use span=(8*write_bw) in single wb case as indicated by
1164 * wb reserve area, safeguard against dirty pool underrun and disk idle
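The wb_position_ratio() comment excerpts above (global/wb setpoints, slope k = -1 / (8 * write_bw), span = 8 * write_bw, the 1/4 floor) describe a linear per-wb control line that scales throttling down as wb_dirty climbs past the wb setpoint. Below is a hedged userspace sketch of just that linear line, in 1/1024 fixed point with hypothetical names; the kernel combines it with a global curve and further limits.

	#define POS_SCALE 1024UL	/* pos_ratio == POS_SCALE means "no slowdown" */

	static unsigned long wb_line_pos_ratio(unsigned long wb_dirty,
					       unsigned long wb_setpoint,
					       unsigned long write_bw)
	{
		unsigned long span = 8 * write_bw;	/* distance to the x intercept */
		unsigned long pos;

		if (wb_dirty <= wb_setpoint)
			return POS_SCALE;		/* at or below setpoint: full speed */
		if (wb_dirty - wb_setpoint >= span)
			return POS_SCALE / 4;		/* the excerpted 1/4 floor */
		/* linear drop from 1.0 at wb_setpoint toward the x intercept */
		pos = POS_SCALE - (wb_dirty - wb_setpoint) * POS_SCALE / span;
		return pos > POS_SCALE / 4 ? pos : POS_SCALE / 4;
	}
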
1180 static void wb_update_write_bandwidth(struct bdi_writeback *wb,
1185 unsigned long avg = wb->avg_write_bandwidth;
1186 unsigned long old = wb->write_bandwidth;
1199 bw = written - min(written, wb->written_stamp);
1206 bw += (u64)wb->write_bandwidth * (period - elapsed);
1221 if (wb_has_dirty_io(wb)) {
1222 long delta = avg - wb->avg_write_bandwidth;
1224 &wb->bdi->tot_write_bandwidth) <= 0);
1226 wb->write_bandwidth = bw;
1227 WRITE_ONCE(wb->avg_write_bandwidth, avg);
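The wb_update_write_bandwidth() excerpts above ("bw = written - min(written, wb->written_stamp)", "bw += (u64)wb->write_bandwidth * (period - elapsed)") amount to a weighted moving average: a fresh pages/second sample over the elapsed ticks is blended with the old estimate in proportion to elapsed/period. The self-contained sketch below models that blend with illustrative HZ and period values and hypothetical names.

	#define TICKS_PER_SEC	1000UL			/* illustrative stand-in for HZ */
	#define BW_PERIOD	(3 * TICKS_PER_SEC)	/* smoothing window, illustrative */

	static unsigned long blend_write_bandwidth(unsigned long old_bw,	/* pages/sec */
						   unsigned long written_delta,	/* pages */
						   unsigned long elapsed)	/* ticks */
	{
		unsigned long long bw;

		if (!elapsed)
			elapsed = 1;
		bw = (unsigned long long)written_delta * TICKS_PER_SEC; /* == sample_rate * elapsed */
		if (elapsed >= BW_PERIOD)
			return (unsigned long)(bw / elapsed);	/* stale estimate: take the sample */
		/* new = (sample_rate * elapsed + old_bw * (period - elapsed)) / period */
		bw += (unsigned long long)old_bw * (BW_PERIOD - elapsed);
		return (unsigned long)(bw / BW_PERIOD);
	}
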
1279 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1281 * Normal wb tasks will be curbed at or below it in long term.
1288 struct bdi_writeback *wb = dtc->wb;
1293 unsigned long write_bw = wb->avg_write_bandwidth;
1294 unsigned long dirty_ratelimit = wb->dirty_ratelimit;
1306 dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
1317 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
1356 * wb->dirty_ratelimit = balanced_dirty_ratelimit;
1390 * For strictlimit case, calculations above were based on wb counters
1400 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1409 x = min3(wb->balanced_dirty_ratelimit,
1414 x = max3(wb->balanced_dirty_ratelimit,
1436 WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL));
1437 wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
1439 trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
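The wb_update_dirty_ratelimit() excerpts above (measured dirty_rate, avg_write_bandwidth, the N-tasks-at-task_ratelimit note) point at the core feedback: if tasks throttled at task_ratelimit produced dirty_rate while the device wrote back at write_bw, scaling the ratelimit by write_bw / dirty_rate moves the system toward balance. The sketch below shows that step only; the kernel additionally filters and clamps the result before committing it to wb->dirty_ratelimit.

	/* Hypothetical helper: one iteration of the proportional feedback. */
	static unsigned long balanced_ratelimit(unsigned long task_ratelimit,	/* pages/sec */
						unsigned long write_bw,		/* pages/sec */
						unsigned long dirty_rate)	/* pages/sec */
	{
		if (!dirty_rate)
			dirty_rate = 1;
		return (unsigned long)((unsigned long long)task_ratelimit *
				       write_bw / dirty_rate);
	}
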
1446 struct bdi_writeback *wb = gdtc->wb;
1452 spin_lock(&wb->list_lock);
1460 elapsed = max(now - wb->bw_time_stamp, 1UL);
1461 dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
1462 written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
1477 wb_update_write_bandwidth(wb, elapsed, written);
1479 wb->dirtied_stamp = dirtied;
1480 wb->written_stamp = written;
1481 WRITE_ONCE(wb->bw_time_stamp, now);
1482 spin_unlock(&wb->list_lock);
1485 void wb_update_bandwidth(struct bdi_writeback *wb)
1487 struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
1492 /* Interval after which we consider wb idle and don't estimate bandwidth */
1495 static void wb_bandwidth_estimate_start(struct bdi_writeback *wb)
1498 unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp);
1501 !atomic_read(&wb->writeback_inodes)) {
1502 spin_lock(&wb->list_lock);
1503 wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED);
1504 wb->written_stamp = wb_stat(wb, WB_WRITTEN);
1505 WRITE_ONCE(wb->bw_time_stamp, now);
1506 spin_unlock(&wb->list_lock);
1527 static unsigned long wb_max_pause(struct bdi_writeback *wb,
1530 unsigned long bw = READ_ONCE(wb->avg_write_bandwidth);
1546 static long wb_min_pause(struct bdi_writeback *wb,
1552 long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth));
1553 long lo = ilog2(READ_ONCE(wb->dirty_ratelimit));
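wb_max_pause() and wb_min_pause() above bound each throttling sleep using the wb's bandwidth and dirty count: sleeping longer than the device needs to retire the wb's dirty pages risks draining the pool and idling the disk. Below is a hedged sketch of a max-pause bound in that spirit, with illustrative constants and hypothetical names.

	#define MAX_PAUSE_TICKS	200UL	/* illustrative hard cap on one sleep */

	static unsigned long max_pause_ticks(unsigned long wb_dirty,		/* pages */
					     unsigned long bw_pages_per_tick)
	{
		/* roughly: how many ticks until the device catches up */
		unsigned long t = wb_dirty / (1 + bw_pages_per_tick);

		return t + 1 < MAX_PAUSE_TICKS ? t + 1 : MAX_PAUSE_TICKS;
	}
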
1623 struct bdi_writeback *wb = dtc->wb;
1635 * wb_thresh. Instead the auxiliary wb control line in
1654 wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
1655 dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
1657 wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
1658 dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
1669 static int balance_dirty_pages(struct bdi_writeback *wb,
1672 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1673 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1687 struct backing_dev_info *bdi = wb->bdi;
1721 * If @wb belongs to !root memcg, repeat the same
1724 mem_cgroup_wb_stats(wb, &filepages, &headroom,
1753 !writeback_in_progress(wb))
1754 wb_start_background_writeback(wb);
1759 * when the wb limits are ramping up in case of !strictlimit.
1761 * In strictlimit case make decision based on the wb counters
1762 * and limits. Small writeouts when the wb limits are ramping
1787 if (unlikely(!writeback_in_progress(wb)))
1788 wb_start_background_writeback(wb);
1790 mem_cgroup_flush_foreign(wb);
1805 * when below the per-wb freerun ceiling.
1819 * pos_ratio. @wb should satisfy constraints from
1832 * throttled when below the per-wb
1845 if (dirty_exceeded != wb->dirty_exceeded)
1846 wb->dirty_exceeded = dirty_exceeded;
1848 if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
1853 dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit);
1856 max_pause = wb_max_pause(wb, sdtc->wb_dirty);
1857 min_pause = wb_min_pause(wb, max_pause,
1878 trace_balance_dirty_pages(wb,
1907 trace_balance_dirty_pages(wb,
1940 * pages exceeds dirty_thresh, give the other good wb's a pipe
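Inside the balance_dirty_pages() excerpts above, the per-iteration sleep is derived from the wb's base dirty_ratelimit scaled by pos_ratio, converted through the pages this task just dirtied, and clamped between wb_min_pause() and wb_max_pause(). The simplified sketch below expresses pos_ratio in 1/1024 fixed point and assumes 1000 ticks per second; names are hypothetical.

	static long compute_pause(unsigned long dirty_ratelimit,	/* pages/sec */
				  unsigned long pos_ratio_1024,		/* fraction of 1024 */
				  unsigned long pages_dirtied,
				  long min_pause, long max_pause)	/* ticks */
	{
		unsigned long long task_ratelimit;
		long pause;

		task_ratelimit = (unsigned long long)dirty_ratelimit * pos_ratio_1024 / 1024;
		if (!task_ratelimit)
			task_ratelimit = 1;
		/* time needed to pay for this task's recent dirtying at the allowed rate */
		pause = (long)((unsigned long long)pages_dirtied * 1000 / task_ratelimit);
		if (pause < min_pause)
			pause = min_pause;
		if (pause > max_pause)
			pause = max_pause;
		return pause;
	}
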
1997 struct bdi_writeback *wb = NULL;
2006 wb = wb_get_create_current(bdi, GFP_KERNEL);
2007 if (!wb)
2008 wb = &bdi->wb;
2011 if (wb->dirty_exceeded)
2043 ret = balance_dirty_pages(wb, current->nr_dirtied, flags);
2045 wb_put(wb);
2069 * wb_over_bg_thresh - does @wb need to be written back?
2070 * @wb: bdi_writeback of interest
2072 * Determines whether background writeback should keep writing @wb or it's
2077 bool wb_over_bg_thresh(struct bdi_writeback *wb)
2079 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
2080 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
2098 thresh = wb_calc_thresh(gdtc->wb, gdtc->bg_thresh);
2100 reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2102 reclaimable = wb_stat(wb, WB_RECLAIMABLE);
2110 mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
2118 thresh = wb_calc_thresh(mdtc->wb, mdtc->bg_thresh);
2120 reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2122 reclaimable = wb_stat(wb, WB_RECLAIMABLE);
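wb_over_bg_thresh() above answers one question for the flusher: keep background writeback going while either the domain's dirty total or this wb's reclaimable pages exceed their background thresholds, checked for the global domain and, with cgroup writeback, for the memcg domain. A trivial sketch of that predicate with hypothetical names:

	#include <stdbool.h>

	static bool over_bg_thresh(unsigned long domain_dirty, unsigned long domain_bg_thresh,
				   unsigned long wb_reclaimable, unsigned long wb_bg_thresh)
	{
		return domain_dirty > domain_bg_thresh || wb_reclaimable > wb_bg_thresh;
	}
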
2545 struct bdi_writeback *wb;
2549 wb = inode_to_wb_wbc(mapping->host, wbc);
2550 wb_bandwidth_estimate_start(wb);
2582 if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
2584 wb_update_bandwidth(wb);
2614 struct bdi_writeback *wb;
2618 wb = inode_to_wb(inode);
2623 wb_stat_mod(wb, WB_RECLAIMABLE, nr);
2624 wb_stat_mod(wb, WB_DIRTIED, nr);
2629 mem_cgroup_track_foreign_dirty(folio, wb);
2638 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
2644 wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2737 struct bdi_writeback *wb;
2740 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2743 wb_stat_mod(wb, WB_DIRTIED, -nr);
2828 struct bdi_writeback *wb;
2832 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2835 folio_account_cleaned(folio, wb);
2868 struct bdi_writeback *wb;
2906 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2911 wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2921 static void wb_inode_writeback_start(struct bdi_writeback *wb)
2923 atomic_inc(&wb->writeback_inodes);
2926 static void wb_inode_writeback_end(struct bdi_writeback *wb)
2929 atomic_dec(&wb->writeback_inodes);
2937 spin_lock_irqsave(&wb->work_lock, flags);
2938 if (test_bit(WB_registered, &wb->state))
2939 queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
2940 spin_unlock_irqrestore(&wb->work_lock, flags);
2961 struct bdi_writeback *wb = inode_to_wb(inode);
2963 wb_stat_mod(wb, WB_WRITEBACK, -nr);
2964 __wb_writeout_add(wb, nr);
2967 wb_inode_writeback_end(wb);
3013 struct bdi_writeback *wb = inode_to_wb(inode);
3015 wb_stat_mod(wb, WB_WRITEBACK, nr);
3017 wb_inode_writeback_start(wb);