Lines matching references to hwb (cross-reference listing; the number at the start of each entry is the source line in the hmdfs writeback code)

34 struct hmdfs_writeback *hwb = container_of(
37 try_to_writeback_inodes_sb(hwb->sbi->sb, WB_REASON_FS_FREE_SPACE);
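Lines 34-37 are the body of the whole-superblock flush handler: the delayed work item is mapped back to its owning hmdfs_writeback, then the superblock is flushed under WB_REASON_FS_FREE_SPACE. A minimal reconstruction, assuming the handler serves the dirty_sb_writeback_work item initialized at line 506 (the function name and the container_of() arguments are inferred, not visible in the matches):

    #include <linux/workqueue.h>
    #include <linux/writeback.h>

    static void hmdfs_writeback_inodes_sb_handler(struct work_struct *work)
    {
            /* Map the embedded work item back to the per-sb writeback state. */
            struct hmdfs_writeback *hwb = container_of(
                    work, struct hmdfs_writeback, dirty_sb_writeback_work.work);

            /* Best-effort flush of every dirty inode on this superblock;
             * quietly skips if a flush is already underway. */
            try_to_writeback_inodes_sb(hwb->sbi->sb, WB_REASON_FS_FREE_SPACE);
    }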
44 struct hmdfs_writeback *hwb = container_of(
47 spin_lock(&hwb->inode_list_lock);
48 while (likely(!list_empty(&hwb->inode_list_head))) {
49 info = list_first_entry(&hwb->inode_list_head,
52 spin_unlock(&hwb->inode_list_lock);
57 spin_lock(&hwb->inode_list_lock);
59 spin_unlock(&hwb->inode_list_lock);
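Lines 44-59 show the locking skeleton of the dirty-inode drain worker (the counterpart dirty_inode_writeback_work of line 508): inode_list_lock is held only while the list is manipulated and dropped around the actual writeback, so producers are never blocked behind I/O. A sketch; the list_del_init() and the flush step are assumptions, since only the lock and list calls appear in the matches:

    spin_lock(&hwb->inode_list_lock);
    while (likely(!list_empty(&hwb->inode_list_head))) {
            info = list_first_entry(&hwb->inode_list_head,
                                    struct hmdfs_inode_info, wb_list);
            list_del_init(&info->wb_list);  /* assumed: detach while locked */
            spin_unlock(&hwb->inode_list_lock);

            /* assumed flush step, e.g. write_inode_now(&info->vfs_inode, 0),
             * performed without the spinlock held */

            spin_lock(&hwb->inode_list_lock);
    }
    spin_unlock(&hwb->inode_list_lock);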
82 struct hmdfs_writeback *hwb = sbi->h_wb;
85 spin_lock(&hwb->inode_list_lock);
88 list_add_tail(&info->wb_list, &hwb->inode_list_head);
89 queue_delayed_work(hwb->dirty_inode_writeback_wq,
90 &hwb->dirty_inode_writeback_work, 0);
92 spin_unlock(&hwb->inode_list_lock);
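Lines 82-92 are the matching producer: a newly dirtied inode is appended to inode_list_head and the drain work is queued with zero delay, i.e. it runs as soon as the singlethreaded workqueue is idle. Presumably the enqueue is guarded against double insertion; a sketch with that guard marked as an assumption:

    spin_lock(&hwb->inode_list_lock);
    if (list_empty(&info->wb_list)) {       /* assumed: skip if already queued */
            list_add_tail(&info->wb_list, &hwb->inode_list_head);
            queue_delayed_work(hwb->dirty_inode_writeback_wq,
                               &hwb->dirty_inode_writeback_work, 0);
    }
    spin_unlock(&hwb->inode_list_lock);

The zero-delay queue_delayed_work() pairs with list_del_init() in the drain loop: once an entry is re-initialized, the inode can be queued again.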
130 void hmdfs_calculate_dirty_thresh(struct hmdfs_writeback *hwb)
132 hwb->dirty_fs_thresh = DIV_ROUND_UP(hwb->dirty_fs_bytes, PAGE_SIZE);
133 hwb->dirty_file_thresh = DIV_ROUND_UP(hwb->dirty_file_bytes, PAGE_SIZE);
134 hwb->dirty_fs_bg_thresh =
135 DIV_ROUND_UP(hwb->dirty_fs_bg_bytes, PAGE_SIZE);
136 hwb->dirty_file_bg_thresh =
137 DIV_ROUND_UP(hwb->dirty_file_bg_bytes, PAGE_SIZE);
139 hwb->fs_bg_ratio = hmdfs_thresh_ratio(hwb->dirty_fs_bg_thresh,
140 hwb->dirty_fs_thresh);
141 hwb->file_bg_ratio = hmdfs_thresh_ratio(hwb->dirty_file_bg_thresh,
142 hwb->dirty_file_thresh);
143 hwb->fs_file_ratio = hmdfs_thresh_ratio(hwb->dirty_file_thresh,
144 hwb->dirty_fs_thresh);
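hmdfs_calculate_dirty_thresh() (lines 130-144) converts the four byte-granular tunables into page counts with DIV_ROUND_UP, then caches three ratios (background/total for fs and file, plus file/fs) so that later bandwidth-based scaling can shrink the limits while preserving their configured proportions. For example, with 4 KiB pages a dirty_fs_bytes of 100 MiB gives dirty_fs_thresh = DIV_ROUND_UP(104857600, 4096) = 25600 pages (100 MiB is illustrative, not the driver default).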
149 struct hmdfs_writeback *hwb = hdtc->hwb;
151 hdtc->fs_thresh = hdtc->hwb->dirty_fs_thresh;
152 hdtc->file_thresh = hdtc->hwb->dirty_file_thresh;
153 hdtc->fs_bg_thresh = hdtc->hwb->dirty_fs_bg_thresh;
154 hdtc->file_bg_thresh = hdtc->hwb->dirty_file_bg_thresh;
156 if (!hwb->dirty_auto_threshold)
163 if (hwb->bw_fs_thresh < hdtc->fs_thresh) {
164 hdtc->fs_thresh = hwb->bw_fs_thresh;
165 hdtc->fs_bg_thresh = hmdfs_ratio_thresh(hwb->fs_bg_ratio,
168 if (hwb->bw_file_thresh < hdtc->file_thresh) {
169 hdtc->file_thresh = hwb->bw_file_thresh;
170 hdtc->file_bg_thresh = hmdfs_ratio_thresh(hwb->file_bg_ratio,
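Lines 149-170 seed a hmdfs_dirty_throttle_control from those cached thresholds and, unless dirty_auto_threshold is off (line 156 bails out early), clamp them to the bandwidth-derived caps bw_fs_thresh / bw_file_thresh, rescaling the matching background limit through the cached ratio. A sketch of one clamp branch; the second hmdfs_ratio_thresh() argument is cut off in the matches and assumed to be the just-lowered threshold:

    if (hwb->bw_fs_thresh < hdtc->fs_thresh) {
            hdtc->fs_thresh = hwb->bw_fs_thresh;
            /* keep fs_bg_thresh at the same fraction of fs_thresh that
             * dirty_fs_bg_thresh is of dirty_fs_thresh */
            hdtc->fs_bg_thresh = hmdfs_ratio_thresh(hwb->fs_bg_ratio,
                                                    hdtc->fs_thresh);
    }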
182 struct hmdfs_writeback *hwb = hdtc->hwb;
183 struct bdi_writeback *wb = hwb->wb;
184 unsigned int time_limit = hwb->writeback_timelimit;
188 if (!hwb->dirty_auto_threshold)
191 spin_lock(&hwb->write_bandwidth_lock);
192 if (bw > hwb->max_write_bandwidth)
193 hwb->max_write_bandwidth = bw;
195 if (bw < hwb->min_write_bandwidth)
196 hwb->min_write_bandwidth = bw;
197 hwb->avg_write_bandwidth = bw;
198 spin_unlock(&hwb->write_bandwidth_lock);
204 bw = max(bw, hwb->bw_thresh_lowerlimit);
206 if (thresh >= hwb->dirty_fs_thresh) {
207 hdtc->fs_thresh = hwb->dirty_fs_thresh;
208 hdtc->file_thresh = hwb->dirty_file_thresh;
209 hdtc->fs_bg_thresh = hwb->dirty_fs_bg_thresh;
210 hdtc->file_bg_thresh = hwb->dirty_file_bg_thresh;
214 hdtc->fs_bg_thresh = hmdfs_ratio_thresh(hwb->fs_bg_ratio,
216 hdtc->file_thresh = hmdfs_ratio_thresh(hwb->fs_file_ratio,
218 hdtc->file_bg_thresh = hmdfs_ratio_thresh(hwb->file_bg_ratio,
222 hwb->bw_fs_thresh = hdtc->fs_thresh;
223 hwb->bw_file_thresh = hdtc->file_thresh;
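Lines 182-223 recompute those caps from the measured bandwidth of the underlying bdi_writeback, again only when dirty_auto_threshold is set (line 188). The per-sb min/max/avg statistics are updated under write_bandwidth_lock, the bandwidth is floored at bw_thresh_lowerlimit, and the candidate limit is what the device can retire within writeback_timelimit. A condensed sketch; the bw * time_limit product and the assignment near line 213 are assumptions based on the visible names:

    unsigned long bw = wb->avg_write_bandwidth;     /* pages/s, bdi-smoothed */
    unsigned long thresh;

    spin_lock(&hwb->write_bandwidth_lock);
    if (bw > hwb->max_write_bandwidth)
            hwb->max_write_bandwidth = bw;
    if (bw < hwb->min_write_bandwidth)
            hwb->min_write_bandwidth = bw;
    hwb->avg_write_bandwidth = bw;
    spin_unlock(&hwb->write_bandwidth_lock);

    bw = max(bw, hwb->bw_thresh_lowerlimit);
    thresh = bw * time_limit;       /* assumed: pages writable in the limit */

    if (thresh >= hwb->dirty_fs_thresh) {
            /* fast device: fall back to the configured limits (206-210) */
            hdtc->fs_thresh = hwb->dirty_fs_thresh;
            hdtc->file_thresh = hwb->dirty_file_thresh;
            hdtc->fs_bg_thresh = hwb->dirty_fs_bg_thresh;
            hdtc->file_bg_thresh = hwb->dirty_file_bg_thresh;
    } else {
            hdtc->fs_thresh = thresh;   /* assumed assignment, not matched */
            hdtc->fs_bg_thresh = hmdfs_ratio_thresh(hwb->fs_bg_ratio, thresh);
            hdtc->file_thresh = hmdfs_ratio_thresh(hwb->fs_file_ratio, thresh);
            hdtc->file_bg_thresh = hmdfs_ratio_thresh(hwb->file_bg_ratio,
                                                      hdtc->file_thresh);
    }
    hwb->bw_fs_thresh = hdtc->fs_thresh;        /* cached for the next init */
    hwb->bw_file_thresh = hdtc->file_thresh;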
228 void hmdfs_update_ratelimit(struct hmdfs_writeback *hwb)
230 struct hmdfs_dirty_throttle_control hdtc = {.hwb = hwb};
235 hwb->ratelimit_pages = hdtc.file_bg_thresh /
237 if (hwb->ratelimit_pages < HMDFS_MIN_RATELIMIT_PAGES)
238 hwb->ratelimit_pages = HMDFS_MIN_RATELIMIT_PAGES;
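hmdfs_update_ratelimit() (lines 228-238) re-derives the fast-path batching factor from the current thresholds: a throttle-control is seeded from hwb, and ratelimit_pages is set to file_bg_thresh divided by a scale factor that is cut off at line 236 (plausibly proportional to the CPU count, as with ratelimit_pages in mm/page-writeback.c), then floored at HMDFS_MIN_RATELIMIT_PAGES so every CPU still reaches the slow path reasonably often on large machines.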
285 struct hmdfs_writeback *hwb = sbi->h_wb;
287 struct hmdfs_dirty_throttle_control hdtc = {.hwb = hwb};
293 if (hwb->dirty_writeback_interval != 0)
295 sb, hwb->dirty_writeback_interval * 10);
352 if (hwb->dirty_auto_threshold &&
373 if (dirty_exceeded && !hwb->dirty_exceeded)
374 hwb->dirty_exceeded = true;
389 if (!dirty_exceeded && hwb->dirty_exceeded)
390 hwb->dirty_exceeded = false;
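Lines 285-390 fall inside the slow-path throttle (plausibly hmdfs_balance_dirty_pages, by analogy with the core mm name in the tracepoint at line 430). When dirty_writeback_interval is non-zero, background writeback of the superblock is re-armed with a delay of interval * 10 (line 295), suggesting the interval is stored in centiseconds and converted to milliseconds, mirroring vm.dirty_writeback_centisecs. dirty_exceeded is maintained as a latch: set the first time the computed dirty totals pass the limits (lines 373-374) and cleared only once they fall back under (lines 389-390), so the fast path below can test a single flag instead of recomputing thresholds.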
403 struct hmdfs_writeback *hwb = sbi->h_wb;
407 if (!hwb->dirty_writeback_control)
411 if (hwb->dirty_writeback_interval != 0)
414 hwb->dirty_writeback_interval * 10);
417 if (hwb->dirty_exceeded)
428 bdp_ratelimits = this_cpu_ptr(hwb->bdp_ratelimits);
430 trace_hmdfs_balance_dirty_pages_ratelimited(sbi, hwb, *bdp_ratelimits);
434 } else if (unlikely(*bdp_ratelimits >= hwb->ratelimit_pages)) {
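Lines 403-434 are the per-page fast path, shaped like balance_dirty_pages_ratelimited() in mm/page-writeback.c: the feature can be switched off via dirty_writeback_control (line 407), periodic flushing is re-armed (lines 411-414, again interval * 10), and a per-CPU counter decides how often the expensive balancing runs, with dirty_exceeded forcing a much smaller batch (line 417). A condensed sketch; the preemption handling and the reduced batch size are assumptions:

    int *bdp_ratelimits;
    long ratelimit = hwb->ratelimit_pages;

    if (!hwb->dirty_writeback_control)
            return;

    if (hwb->dirty_exceeded)
            ratelimit = 8;          /* assumed small batch while over limit */

    preempt_disable();              /* assumed: pin this_cpu_ptr()'s CPU */
    bdp_ratelimits = this_cpu_ptr(hwb->bdp_ratelimits);
    if (unlikely(++*bdp_ratelimits >= ratelimit)) {
            *bdp_ratelimits = 0;    /* reset, then take the slow path */
            preempt_enable();
            /* enter the lines 285-390 slow path to throttle this writer */
    } else {
            preempt_enable();
    }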
460 struct hmdfs_writeback *hwb;
464 hwb = kzalloc(sizeof(struct hmdfs_writeback), GFP_KERNEL);
465 if (!hwb)
468 hwb->sbi = sbi;
469 hwb->wb = &sbi->sb->s_bdi->wb;
470 hwb->dirty_writeback_control = true;
471 hwb->dirty_writeback_interval = HM_DEFAULT_WRITEBACK_INTERVAL;
472 hwb->dirty_file_bg_bytes = HMDFS_FILE_BG_WB_BYTES;
473 hwb->dirty_fs_bg_bytes = HMDFS_FS_BG_WB_BYTES;
474 hwb->dirty_file_bytes = HMDFS_FILE_WB_BYTES;
475 hwb->dirty_fs_bytes = HMDFS_FS_WB_BYTES;
476 hmdfs_calculate_dirty_thresh(hwb);
477 hwb->bw_file_thresh = hwb->dirty_file_thresh;
478 hwb->bw_fs_thresh = hwb->dirty_fs_thresh;
479 spin_lock_init(&hwb->inode_list_lock);
480 INIT_LIST_HEAD(&hwb->inode_list_head);
481 hwb->dirty_exceeded = false;
482 hwb->ratelimit_pages = HMDFS_DEF_RATELIMIT_PAGES;
483 hwb->dirty_auto_threshold = true;
484 hwb->writeback_timelimit = HMDFS_DEF_WB_TIMELIMIT;
485 hwb->bw_thresh_lowerlimit = HMDFS_BW_THRESH_DEF_LIMIT;
486 spin_lock_init(&hwb->write_bandwidth_lock);
487 hwb->avg_write_bandwidth = 0;
488 hwb->max_write_bandwidth = 0;
489 hwb->min_write_bandwidth = ULONG_MAX;
490 hwb->bdp_ratelimits = alloc_percpu(int);
491 if (!hwb->bdp_ratelimits)
495 hwb->dirty_inode_writeback_wq = create_singlethread_workqueue(name);
496 if (!hwb->dirty_inode_writeback_wq) {
501 hwb->dirty_sb_writeback_wq = create_singlethread_workqueue(name);
502 if (!hwb->dirty_sb_writeback_wq) {
506 INIT_DELAYED_WORK(&hwb->dirty_sb_writeback_work,
508 INIT_DELAYED_WORK(&hwb->dirty_inode_writeback_work,
510 sbi->h_wb = hwb;
513 destroy_workqueue(hwb->dirty_inode_writeback_wq);
515 free_percpu(hwb->bdp_ratelimits);
517 kfree(hwb);
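Lines 460-517 are the constructor (plausibly hmdfs_init_writeback()): kzalloc the control structure, seed every tunable (note min_write_bandwidth starting at ULONG_MAX so the first bandwidth sample always registers as the minimum), allocate the per-CPU counter and the two singlethreaded workqueues, and only then publish the result through sbi->h_wb (line 510). Lines 513-517 are the tail of the goto-style unwind, releasing in reverse allocation order; a condensed sketch with assumed label names:

    hwb->bdp_ratelimits = alloc_percpu(int);
    if (!hwb->bdp_ratelimits)
            goto free_hwb;                  /* label names are assumptions */

    hwb->dirty_inode_writeback_wq = create_singlethread_workqueue(name);
    if (!hwb->dirty_inode_writeback_wq)
            goto free_ratelimits;

    hwb->dirty_sb_writeback_wq = create_singlethread_workqueue(name);
    if (!hwb->dirty_sb_writeback_wq)
            goto free_inode_wq;

    /* INIT_DELAYED_WORK() both handlers (lines 506-509), then publish */
    sbi->h_wb = hwb;
    return 0;

    free_inode_wq:
            destroy_workqueue(hwb->dirty_inode_writeback_wq);
    free_ratelimits:
            free_percpu(hwb->bdp_ratelimits);
    free_hwb:
            kfree(hwb);
            return -ENOMEM;                 /* assumed error value */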