Lines Matching refs:wb

85 static bool wb_io_lists_populated(struct bdi_writeback *wb)
87 if (wb_has_dirty_io(wb)) {
90 set_bit(WB_has_dirty_io, &wb->state);
91 WARN_ON_ONCE(!wb->avg_write_bandwidth);
92 atomic_long_add(wb->avg_write_bandwidth,
93 &wb->bdi->tot_write_bandwidth);
98 static void wb_io_lists_depopulated(struct bdi_writeback *wb)
100 if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
101 list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
102 clear_bit(WB_has_dirty_io, &wb->state);
103 WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
104 &wb->bdi->tot_write_bandwidth) < 0);
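
wb_io_lists_populated()/_depopulated() above keep a per-wb WB_has_dirty_io flag and a bdi-wide bandwidth sum in step: the first dirty inode adds the wb's avg_write_bandwidth to bdi->tot_write_bandwidth, and the last one subtracts it again. A minimal userspace sketch of that invariant, using hypothetical fake_wb/fake_bdi types rather than the kernel structures:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_bdi { atomic_long tot_write_bandwidth; };
struct fake_wb {
	struct fake_bdi *bdi;
	long avg_write_bandwidth;
	bool has_dirty_io;
	int nr_dirty;		/* stand-in for the b_dirty/b_io/b_more_io lists */
};

static void fake_io_lists_populated(struct fake_wb *wb)
{
	if (!wb->has_dirty_io) {
		wb->has_dirty_io = true;	/* first dirty inode on this wb */
		atomic_fetch_add(&wb->bdi->tot_write_bandwidth,
				 wb->avg_write_bandwidth);
	}
}

static void fake_io_lists_depopulated(struct fake_wb *wb)
{
	if (wb->has_dirty_io && wb->nr_dirty == 0) {
		wb->has_dirty_io = false;	/* last dirty inode went away */
		atomic_fetch_sub(&wb->bdi->tot_write_bandwidth,
				 wb->avg_write_bandwidth);
	}
}

int main(void)
{
	struct fake_bdi bdi = { .tot_write_bandwidth = 0 };
	struct fake_wb wb = { .bdi = &bdi, .avg_write_bandwidth = 100 };

	wb.nr_dirty = 1;
	fake_io_lists_populated(&wb);	/* bandwidth counted exactly once */
	wb.nr_dirty = 0;
	fake_io_lists_depopulated(&wb);	/* and removed again: prints 0 */
	printf("%ld\n", atomic_load(&bdi.tot_write_bandwidth));
	return 0;
}
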
111 * @wb: target bdi_writeback
112 * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
114 * Move @inode->i_io_list to @head of @wb and set %WB_has_dirty_io.
119 struct bdi_writeback *wb,
122 assert_spin_locked(&wb->list_lock);
127 if (head != &wb->b_dirty_time)
128 return wb_io_lists_populated(wb);
130 wb_io_lists_depopulated(wb);
137 * @wb: bdi_writeback @inode is being removed from
139 * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
143 struct bdi_writeback *wb)
145 assert_spin_locked(&wb->list_lock);
150 wb_io_lists_depopulated(wb);
153 static void wb_wakeup(struct bdi_writeback *wb)
155 spin_lock_bh(&wb->work_lock);
156 if (test_bit(WB_registered, &wb->state))
157 mod_delayed_work(bdi_wq, &wb->dwork, 0);
158 spin_unlock_bh(&wb->work_lock);
161 static void finish_writeback_work(struct bdi_writeback *wb,
177 static void wb_queue_work(struct bdi_writeback *wb,
180 trace_writeback_queue(wb, work);
185 spin_lock_bh(&wb->work_lock);
187 if (test_bit(WB_registered, &wb->state)) {
188 list_add_tail(&work->list, &wb->work_list);
189 mod_delayed_work(bdi_wq, &wb->dwork, 0);
191 finish_writeback_work(wb, work);
193 spin_unlock_bh(&wb->work_lock);
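
wb_wakeup() and wb_queue_work() share one rule: only a wb that is still WB_registered gets its delayed work kicked, and a work item queued against an unregistered wb is completed on the spot. A minimal pthread-based sketch of that pattern, with hypothetical fake_wb/fake_work types (the kernel uses wb->work_lock, WB_registered and mod_delayed_work(bdi_wq, &wb->dwork, 0)):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_work { struct fake_work *next; bool done; };
struct fake_wb {
	pthread_mutex_t work_lock;
	bool registered;
	struct fake_work *work_list;
};

static void kick_worker(struct fake_wb *wb)     { (void)wb; /* wake the flusher */ }
static void finish_work(struct fake_work *work) { work->done = true; }

static void queue_work(struct fake_wb *wb, struct fake_work *work)
{
	pthread_mutex_lock(&wb->work_lock);
	if (wb->registered) {
		work->next = wb->work_list;	/* the kernel appends at the tail */
		wb->work_list = work;
		kick_worker(wb);		/* mod_delayed_work(..., 0) analogue */
	} else {
		finish_work(work);		/* shutting down: complete on the spot */
	}
	pthread_mutex_unlock(&wb->work_lock);
}

int main(void)
{
	struct fake_wb wb = {
		.work_lock = PTHREAD_MUTEX_INITIALIZER,
		.registered = true,
	};
	static struct fake_work w1, w2;

	queue_work(&wb, &w1);	/* queued and worker kicked */
	wb.registered = false;
	queue_work(&wb, &w2);	/* completed immediately instead */
	return 0;
}
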
253 struct bdi_writeback *wb = NULL;
260 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
264 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
269 if (!wb)
270 wb = &bdi->wb;
276 if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
277 wb_put(wb);
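
The attach path above publishes the candidate wb with cmpxchg(&inode->i_wb, NULL, wb) and drops its reference when another CPU wins the race. A minimal C11 sketch of that lockless hand-off, with hypothetical fake_inode/fake_wb types and a plain reference counter standing in for wb_get()/wb_put():

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct fake_wb { atomic_int refcnt; };
struct fake_inode { _Atomic(struct fake_wb *) i_wb; };

static void wb_get(struct fake_wb *wb) { atomic_fetch_add(&wb->refcnt, 1); }
static void wb_put(struct fake_wb *wb) { atomic_fetch_sub(&wb->refcnt, 1); }

static void attach_wb(struct fake_inode *inode, struct fake_wb *wb)
{
	struct fake_wb *expected = NULL;

	wb_get(wb);	/* reference that i_wb will own if we win the race */
	if (!atomic_compare_exchange_strong(&inode->i_wb, &expected, wb))
		wb_put(wb);	/* somebody else attached a wb first */
}

int main(void)
{
	struct fake_wb a = { .refcnt = 0 }, b = { .refcnt = 0 };
	struct fake_inode ino = { .i_wb = NULL };

	attach_wb(&ino, &a);	/* wins the race: i_wb = &a, a.refcnt = 1 */
	attach_wb(&ino, &b);	/* loses: the extra reference is dropped */
	printf("%d %d\n", atomic_load(&a.refcnt), atomic_load(&b.refcnt));
	return 0;
}
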
282 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
285 * Returns @inode's wb with its list_lock held. @inode->i_lock must be
286 * held on entry and is released on return. The returned wb is guaranteed
287 * to stay @inode's associated wb until its list_lock is released.
292 __acquires(&wb->list_lock)
295 struct bdi_writeback *wb = inode_to_wb(inode);
299 * @inode->i_lock and @wb->list_lock but list_lock nests
303 wb_get(wb);
305 spin_lock(&wb->list_lock);
308 if (likely(wb == inode->i_wb)) {
309 wb_put(wb); /* @inode already has ref */
310 return wb;
313 spin_unlock(&wb->list_lock);
314 wb_put(wb);
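
locked_inode_to_wb_and_lock_list() has to respect the lock order (list_lock nests outside i_lock), so it pins the wb, drops i_lock, takes list_lock and re-checks that the inode still points at the same wb, retrying if it was switched in between. A minimal userspace sketch of that retry dance, assuming hypothetical fake_inode/fake_wb types with pthread mutexes in place of spinlocks:

#include <pthread.h>
#include <stdatomic.h>

struct fake_wb {
	pthread_mutex_t list_lock;
	atomic_int refcnt;
};

struct fake_inode {
	pthread_mutex_t i_lock;
	struct fake_wb *_Atomic i_wb;
};

static void wb_get(struct fake_wb *wb) { atomic_fetch_add(&wb->refcnt, 1); }
static void wb_put(struct fake_wb *wb) { atomic_fetch_sub(&wb->refcnt, 1); }

/* Called with inode->i_lock held; returns with i_wb's list_lock held instead. */
static struct fake_wb *lock_inode_wb_list(struct fake_inode *inode)
{
	for (;;) {
		struct fake_wb *wb = atomic_load(&inode->i_wb);

		wb_get(wb);			/* keep wb alive across the unlocked window */
		pthread_mutex_unlock(&inode->i_lock);
		pthread_mutex_lock(&wb->list_lock);

		if (wb == atomic_load(&inode->i_wb)) {
			wb_put(wb);		/* the inode's own reference now covers us */
			return wb;
		}

		/* the inode switched to another wb meanwhile: undo and retry */
		pthread_mutex_unlock(&wb->list_lock);
		wb_put(wb);
		pthread_mutex_lock(&inode->i_lock);
	}
}

int main(void)
{
	struct fake_wb wb = { .list_lock = PTHREAD_MUTEX_INITIALIZER };
	struct fake_inode ino = { .i_lock = PTHREAD_MUTEX_INITIALIZER, .i_wb = &wb };

	pthread_mutex_lock(&ino.i_lock);	/* caller enters with i_lock held... */
	pthread_mutex_unlock(&lock_inode_wb_list(&ino)->list_lock);	/* ...and leaves with list_lock */
	return 0;
}
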
321 * inode_to_wb_and_lock_list - determine an inode's wb and lock it
328 __acquires(&wb->list_lock)
373 * since I_WB_SWITCH assertion and all wb stat update transactions
378 * gives us exclusion against all wb related operations on @inode
449 * ensures that the new wb is visible if they see !I_WB_SWITCH.
483 * inode_switch_wbs - change the wb association of an inode
485 * @new_wb_id: ID of the new wb
487 * Switch @inode's wb association to the wb identified by @new_wb_id. The
510 /* find and pin the new wb */
572 wbc->wb = inode_to_wb(inode);
575 wbc->wb_id = wbc->wb->memcg_css->id;
582 wb_get(wbc->wb);
586 * A dying wb indicates that either the blkcg associated with the
588 * case, a replacement wb should already be available and we should
589 * refresh the wb immediately. In the second case, trying to
592 if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
623 * current wb and the last round's winner wb (max of last round's current
624 * wb, the winner from two rounds ago, and the last round's majority
636 struct bdi_writeback *wb = wbc->wb;
642 if (!wb)
669 wb->avg_write_bandwidth);
680 * The switch verdict is reached if foreign wb's consume
697 * Switch if the current wb isn't the consistent winner.
701 * the wrong wb for an extended period of time.
715 wb_put(wbc->wb);
716 wbc->wb = NULL;
742 if (!wbc->wb || wbc->no_cgroup_owner)
780 * associated with @inode is congested; otherwise, the root wb's congestion
793 struct bdi_writeback *wb;
797 wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
798 congested = wb_congested(wb, cong_bits);
803 return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
809 * @wb: target bdi_writeback to split @nr_pages to
812 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
813 * relation to the total write bandwidth of all wb's w/ dirty inodes on
814 * @wb->bdi.
816 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
818 unsigned long this_bw = wb->avg_write_bandwidth;
819 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
825 * This may be called on clean wb's and proportional distribution
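
wb_split_bdi_pages() gives each wb a slice of nr_pages proportional to its avg_write_bandwidth over the bdi's tot_write_bandwidth, falling back to the full count when the bdi is clean or the request is unbounded. A minimal sketch of that arithmetic (the kernel's div64_ul() and exact clamping are simplified away):

#include <limits.h>
#include <stdio.h>

static long split_pages(long nr_pages, unsigned long this_bw,
			unsigned long tot_bw)
{
	if (nr_pages == LONG_MAX)	/* "write everything" is not split */
		return LONG_MAX;
	if (!tot_bw || this_bw >= tot_bw)
		return nr_pages;	/* clean bdi: proportions are meaningless */
	return (long)((unsigned long long)nr_pages * this_bw / tot_bw);
}

int main(void)
{
	/* two wbs holding 75% and 25% of the bdi's bandwidth split 1024 pages */
	printf("%ld %ld\n", split_pages(1024, 300, 400),
			    split_pages(1024, 100, 400));
	return 0;
}
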
836 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
839 * @skip_if_busy: skip wb's which already have writeback in progress
841 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
843 * distributed to the busy wbs according to each wb's proportion in the
851 struct bdi_writeback *wb = list_entry(&bdi->wb_list,
857 list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
869 if (!wb_has_dirty_io(wb) &&
871 list_empty(&wb->b_dirty_time)))
873 if (skip_if_busy && writeback_in_progress(wb))
876 nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
883 wb_queue_work(wb, work);
888 * If wb_tryget fails, the wb has been shut down, skip it.
890 * Pin @wb so that it stays on @bdi->wb_list. This allows
891 * continuing iteration from @wb after dropping and
894 if (!wb_tryget(wb))
904 wb_queue_work(wb, work);
905 last_wb = wb;
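
bdi_split_work_to_wbs() walks bdi->wb_list under RCU and, whenever it has to drop out of the read section to wait or allocate, pins the current wb with wb_tryget() so the walk can resume from it; a failed tryget means the wb is being torn down and is skipped. A minimal userspace sketch of that pin-and-continue pattern, with a hypothetical refcounted node list and a mutex standing in for RCU:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *next;
	atomic_int refcnt;	/* 0 means the node is being torn down */
};

static bool node_tryget(struct node *n)
{
	int old = atomic_load(&n->refcnt);

	while (old > 0)
		if (atomic_compare_exchange_weak(&n->refcnt, &old, old + 1))
			return true;
	return false;		/* shutting down: caller skips this node */
}

static void node_put(struct node *n) { atomic_fetch_sub(&n->refcnt, 1); }

static void blocking_work(struct node *n) { (void)n; /* e.g. wait or allocate */ }

static void walk(pthread_mutex_t *lock, struct node *head)
{
	struct node *n, *pinned = NULL;

	pthread_mutex_lock(lock);
	for (n = head; n; n = n->next) {
		if (!node_tryget(n))
			continue;	/* node going away: skip it */
		pthread_mutex_unlock(lock);

		blocking_work(n);	/* safe: n is pinned across the unlock */

		pthread_mutex_lock(lock);
		if (pinned)
			node_put(pinned);
		pinned = n;		/* stays pinned until we have moved past it */
	}
	pthread_mutex_unlock(lock);
	if (pinned)
		node_put(pinned);
}

int main(void)
{
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct node live = { .refcnt = 1 }, dying = { .next = &live, .refcnt = 0 };

	walk(&lock, &dying);	/* skips the dying node, pins and visits the live one */
	return 0;
}
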
933 struct bdi_writeback *wb;
953 * And find the associated wb. If the wb isn't there already
956 wb = wb_get_lookup(bdi, memcg_css);
957 if (!wb) {
972 mem_cgroup_wb_stats(wb, &filepages, &headroom, &dirty,
986 wb_queue_work(wb, work);
992 wb_put(wb);
1001 * cgroup_writeback_umount - flush inode wb switches for umount
1004 * flushes in-flight inode wb switches. An inode wb switch goes through
1006 * that all previously scheduled switches are finished. As wb switches are
1008 * flushing iff wb switches are in flight.
1015 * ensure that all in-flight wb switches are in the workqueue.
1039 __acquires(&wb->list_lock)
1041 struct bdi_writeback *wb = inode_to_wb(inode);
1044 spin_lock(&wb->list_lock);
1045 return wb;
1049 __acquires(&wb->list_lock)
1051 struct bdi_writeback *wb = inode_to_wb(inode);
1053 spin_lock(&wb->list_lock);
1054 return wb;
1057 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
1068 if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
1070 wb_queue_work(&bdi->wb, base_work);
1086 static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
1088 if (!wb_has_dirty_io(wb))
1099 if (test_bit(WB_start_all, &wb->state) ||
1100 test_and_set_bit(WB_start_all, &wb->state))
1103 wb->start_all_reason = reason;
1104 wb_wakeup(wb);
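
wb_start_writeback() reads WB_start_all with test_bit() before the test_and_set_bit() so the common already-pending case avoids an atomic read-modify-write, then records the reason and wakes the flusher. A minimal C11 sketch of that check-before-set pattern, with a hypothetical fake_wb in place of the kernel state bits:

#include <stdatomic.h>
#include <stdbool.h>

struct fake_wb {
	atomic_bool start_all;
	int start_all_reason;
};

static void wake_worker(struct fake_wb *wb) { (void)wb; /* wb_wakeup() analogue */ }

static void start_writeback(struct fake_wb *wb, int reason)
{
	/* plain read first: skip the atomic RMW if a request is already pending */
	if (atomic_load_explicit(&wb->start_all, memory_order_relaxed) ||
	    atomic_exchange(&wb->start_all, true))
		return;

	wb->start_all_reason = reason;
	wake_worker(wb);
}

int main(void)
{
	struct fake_wb wb = { .start_all = false };

	start_writeback(&wb, 1);	/* sets the flag and wakes the worker */
	start_writeback(&wb, 2);	/* no-op: a start-all request is pending */
	return 0;
}
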
1109 * @wb: bdi_writeback to write from
1113 * this function returns, it is only guaranteed that for given wb
1117 void wb_start_background_writeback(struct bdi_writeback *wb)
1123 trace_writeback_wake_background(wb);
1124 wb_wakeup(wb);
1132 struct bdi_writeback *wb;
1134 wb = inode_to_wb_and_lock_list(inode);
1136 inode_io_list_del_locked(inode, wb);
1138 spin_unlock(&wb->list_lock);
1187 static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
1191 if (!list_empty(&wb->b_dirty)) {
1194 tail = wb_inode(wb->b_dirty.next);
1198 inode_io_list_move_locked(inode, wb, &wb->b_dirty);
1202 static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
1205 redirty_tail_locked(inode, wb);
1212 static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
1214 inode_io_list_move_locked(inode, wb, &wb->b_more_io);
1305 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
1311 assert_spin_locked(&wb->list_lock);
1312 list_splice_init(&wb->b_more_io, &wb->b_io);
1313 moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
1316 moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
1319 wb_io_lists_populated(wb);
1320 trace_writeback_queue_io(wb, work, dirtied_before, moved);
1394 static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
1414 redirty_tail_locked(inode, wb);
1425 requeue_io(inode, wb);
1434 redirty_tail_locked(inode, wb);
1442 redirty_tail_locked(inode, wb);
1445 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
1449 inode_io_list_del_locked(inode, wb);
1546 struct bdi_writeback *wb;
1585 wb = inode_to_wb_and_lock_list(inode);
1592 inode_io_list_del_locked(inode, wb);
1593 spin_unlock(&wb->list_lock);
1600 static long writeback_chunk_size(struct bdi_writeback *wb,
1621 pages = min(wb->avg_write_bandwidth / 2,
1636 * NOTE! This is called with wb->list_lock held, and will
1641 struct bdi_writeback *wb,
1658 while (!list_empty(&wb->b_io)) {
1659 struct inode *inode = wb_inode(wb->b_io.prev);
1670 redirty_tail(inode, wb);
1689 redirty_tail_locked(inode, wb);
1704 requeue_io(inode, wb);
1708 spin_unlock(&wb->list_lock);
1719 spin_lock(&wb->list_lock);
1725 write_chunk = writeback_chunk_size(wb, work);
1756 * have been switched to another wb in the meantime.
1766 if (unlikely(tmp_wb != wb)) {
1768 spin_lock(&wb->list_lock);
1785 static long __writeback_inodes_wb(struct bdi_writeback *wb,
1791 while (!list_empty(&wb->b_io)) {
1792 struct inode *inode = wb_inode(wb->b_io.prev);
1801 redirty_tail(inode, wb);
1804 wrote += writeback_sb_inodes(sb, wb, work);
1819 static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
1831 spin_lock(&wb->list_lock);
1832 if (list_empty(&wb->b_io))
1833 queue_io(wb, &work, jiffies);
1834 __writeback_inodes_wb(wb, &work);
1835 spin_unlock(&wb->list_lock);
1856 static long wb_writeback(struct bdi_writeback *wb,
1867 spin_lock(&wb->list_lock);
1882 !list_empty(&wb->work_list))
1889 if (work->for_background && !wb_over_bg_thresh(wb))
1904 trace_writeback_start(wb, work);
1905 if (list_empty(&wb->b_io))
1906 queue_io(wb, work, dirtied_before);
1908 progress = writeback_sb_inodes(work->sb, wb, work);
1910 progress = __writeback_inodes_wb(wb, work);
1911 trace_writeback_written(wb, work);
1913 wb_update_bandwidth(wb, wb_start);
1928 if (list_empty(&wb->b_more_io))
1935 trace_writeback_wait(wb, work);
1936 inode = wb_inode(wb->b_more_io.prev);
1938 spin_unlock(&wb->list_lock);
1941 spin_lock(&wb->list_lock);
1943 spin_unlock(&wb->list_lock);
1952 static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
1956 spin_lock_bh(&wb->work_lock);
1957 if (!list_empty(&wb->work_list)) {
1958 work = list_entry(wb->work_list.next,
1962 spin_unlock_bh(&wb->work_lock);
1966 static long wb_check_background_flush(struct bdi_writeback *wb)
1968 if (wb_over_bg_thresh(wb)) {
1978 return wb_writeback(wb, &work);
1984 static long wb_check_old_data_flush(struct bdi_writeback *wb)
1995 expired = wb->last_old_flush +
2000 wb->last_old_flush = jiffies;
2012 return wb_writeback(wb, &work);
2018 static long wb_check_start_all(struct bdi_writeback *wb)
2022 if (!test_bit(WB_start_all, &wb->state))
2028 .nr_pages = wb_split_bdi_pages(wb, nr_pages),
2031 .reason = wb->start_all_reason,
2034 nr_pages = wb_writeback(wb, &work);
2037 clear_bit(WB_start_all, &wb->state);
2045 static long wb_do_writeback(struct bdi_writeback *wb)
2050 set_bit(WB_writeback_running, &wb->state);
2051 while ((work = get_next_work_item(wb)) != NULL) {
2052 trace_writeback_exec(wb, work);
2053 wrote += wb_writeback(wb, work);
2054 finish_writeback_work(wb, work);
2060 wrote += wb_check_start_all(wb);
2065 wrote += wb_check_old_data_flush(wb);
2066 wrote += wb_check_background_flush(wb);
2067 clear_bit(WB_writeback_running, &wb->state);
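
wb_do_writeback() works through writeback in a fixed order: drain the explicitly queued work items first, then handle the implicit kinds (start-all, periodic old-data flush, background flush), all bracketed by WB_writeback_running. A minimal sketch of that ordering; every helper below is a stub, not a kernel API:

#include <stdbool.h>
#include <stddef.h>

struct fake_wb { bool writeback_running; };
struct fake_work { int unused; };

/* Stubs so the sketch is self-contained. */
static struct fake_work *next_queued_work(struct fake_wb *wb)      { (void)wb; return NULL; }
static long run_work(struct fake_wb *wb, struct fake_work *w)      { (void)wb; (void)w; return 0; }
static void complete_work(struct fake_wb *wb, struct fake_work *w) { (void)wb; (void)w; }
static long check_start_all(struct fake_wb *wb)        { (void)wb; return 0; }
static long check_old_data_flush(struct fake_wb *wb)   { (void)wb; return 0; }
static long check_background_flush(struct fake_wb *wb) { (void)wb; return 0; }

static long do_writeback(struct fake_wb *wb)
{
	struct fake_work *work;
	long wrote = 0;

	wb->writeback_running = true;
	while ((work = next_queued_work(wb)) != NULL) {	/* explicit work first */
		wrote += run_work(wb, work);
		complete_work(wb, work);
	}
	wrote += check_start_all(wb);		/* then the implicit kinds of work */
	wrote += check_old_data_flush(wb);
	wrote += check_background_flush(wb);
	wb->writeback_running = false;

	return wrote;
}

int main(void)
{
	struct fake_wb wb = { .writeback_running = false };

	return (int)do_writeback(&wb);
}
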
2078 struct bdi_writeback *wb = container_of(to_delayed_work(work),
2082 set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
2086 !test_bit(WB_registered, &wb->state))) {
2088 * The normal path. Keep writing back @wb until its
2090 * if @wb is shutting down even when we're running off the
2094 pages_written = wb_do_writeback(wb);
2096 } while (!list_empty(&wb->work_list));
2103 pages_written = writeback_inodes_wb(wb, 1024,
2108 if (!list_empty(&wb->work_list))
2109 wb_wakeup(wb);
2110 else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
2111 wb_wakeup_delayed(wb);
2123 struct bdi_writeback *wb;
2128 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2129 wb_start_writeback(wb, reason);
2183 struct bdi_writeback *wb;
2185 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2186 if (!list_empty(&wb->b_dirty_time))
2187 wb_wakeup(wb);
2307 struct bdi_writeback *wb;
2311 wb = locked_inode_to_wb_and_lock_list(inode);
2318 dirty_list = &wb->b_dirty;
2320 dirty_list = &wb->b_dirty_time;
2322 wakeup_bdi = inode_io_list_move_locked(inode, wb,
2325 spin_unlock(&wb->list_lock);
2335 (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
2336 wb_wakeup_delayed(wb);
2392 * Move each inode back to the wb list before we drop the lock
2401 * do not have the mapping lock. Skip it here, wb completion
2541 /* protect against inode wb switch, see inode_switch_wbs_work_fn() */