Lines Matching defs:thresh
134 unsigned long thresh; /* dirty threshold */
379 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
382 * Calculate @dtc->thresh and ->bg_thresh considering
396 unsigned long thresh;
421 thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
423 thresh = (ratio * available_memory) / PAGE_SIZE;
433 thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
439 if (thresh > UINT_MAX)
440 thresh = UINT_MAX;
442 if (bg_thresh >= thresh)
443 bg_thresh = thresh / 2;
444 dtc->thresh = thresh;
449 trace_global_dirty_state(bg_thresh, thresh);
455 * @pdirty: out parameter for thresh
457 * Calculate bg_thresh and thresh for global_wb_domain. See
468 *pdirty = gdtc.thresh;
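
Taken together, the domain_dirty_limits()/global_dirty_limits() matches above amount to a small piece of arithmetic: thresh comes from vm_dirty_bytes when that sysctl is set, otherwise from vm_dirty_ratio applied to the available memory, and bg_thresh is clamped to stay below thresh. A minimal user-space sketch of that arithmetic with made-up sysctl values; it omits the kernel's per-PAGE_SIZE ratio scaling and the real-time task boost seen at line 433:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long available_memory = 4UL << 20;	/* pages; ~16GB, assumed */
	unsigned long vm_dirty_bytes = 0;		/* 0 means "use the ratio" */
	unsigned long vm_dirty_ratio = 20;		/* percent, assumed */
	unsigned long dirty_background_ratio = 10;	/* percent, assumed */
	unsigned long thresh, bg_thresh;

	if (vm_dirty_bytes)
		thresh = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else
		thresh = vm_dirty_ratio * available_memory / 100;

	bg_thresh = dirty_background_ratio * available_memory / 100;

	/* same clamps as the matches above: fit the limit in 32 bits,
	 * keep bg_thresh strictly below thresh */
	if (thresh > 0xffffffffUL)
		thresh = 0xffffffffUL;
	if (bg_thresh >= thresh)
		bg_thresh = thresh / 2;

	printf("thresh=%lu pages, bg_thresh=%lu pages\n", thresh, bg_thresh);
	return 0;
}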
727 static unsigned long dirty_freerun_ceiling(unsigned long thresh,
730 return (thresh + bg_thresh) / 2;
734 unsigned long thresh)
736 return max(thresh, dom->dirty_limit);
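
For example, with thresh = 2000 pages and bg_thresh = 1000 pages, dirty_freerun_ceiling() returns (2000 + 1000) / 2 = 1500 pages: below that point dirtying tasks are not throttled at all. hard_dirty_limit() then takes max(thresh, dom->dirty_limit), so a thresh that momentarily drops below the smoothed domain limit does not pull the hard limit down with it. (Illustrative numbers only.)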
778 unsigned long thresh = dtc->thresh;
784 * Calculate this BDI's share of the thresh ratio.
789 wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
795 wb_thresh += (thresh * wb_min_ratio) / 100;
796 if (wb_thresh > (thresh * wb_max_ratio) / 100)
797 wb_thresh = thresh * wb_max_ratio / 100;
802 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
805 .thresh = thresh };
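
The __wb_calc_thresh()/wb_calc_thresh() matches above carve a per-writeback share out of the domain thresh. A hedged sketch of just the arithmetic visible in those lines, treating the wb's completion fraction as a plain numerator/denominator input (the kernel derives it from flex proportions, which this omits):

#include <stdio.h>

static unsigned long wb_share(unsigned long thresh,
			      unsigned long numerator, unsigned long denominator,
			      unsigned long bdi_min_ratio,
			      unsigned long wb_min_ratio, unsigned long wb_max_ratio)
{
	unsigned long wb_thresh;

	/* share of the non-reserved part, scaled by recent completions */
	wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
	wb_thresh = wb_thresh * numerator / denominator;

	/* guaranteed floor from min_ratio, hard cap from max_ratio */
	wb_thresh += (thresh * wb_min_ratio) / 100;
	if (wb_thresh > (thresh * wb_max_ratio) / 100)
		wb_thresh = thresh * wb_max_ratio / 100;
	return wb_thresh;
}

int main(void)
{
	/* 100000-page domain thresh, this wb did 1/4 of recent writeback,
	 * no reserved min ratio, capped at 100% -- prints 25000 */
	printf("wb_thresh=%lu pages\n", wb_share(100000, 1, 4, 0, 0, 100));
	return 0;
}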
913 * - the wb dirty thresh drops quickly due to change of JBOD workload
919 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
920 unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1044 if (unlikely(wb_thresh > dtc->thresh))
1045 wb_thresh = dtc->thresh;
1056 * wb_setpoint = setpoint * wb_thresh / thresh
1058 x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
1062 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
1064 * span = wb_thresh / thresh * (8 * write_bw) + (thresh - wb_thresh) / thresh * wb_thresh
1068 span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
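
The wb_position_ratio() matches above map the global setpoint onto one wb with a 16.16 fixed-point factor x ~= wb_thresh / thresh, and size the control span by blending 8 * write_bw with wb_thresh through the same factor, per the span formula in the comment. A worked example of that arithmetic with made-up numbers (pages and pages per second); the surrounding control logic is not reproduced:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t thresh = 100000;	/* domain dirty threshold, pages */
	uint64_t wb_thresh = 25000;	/* this wb's share, pages */
	uint64_t setpoint = 90000;	/* global setpoint, pages */
	uint64_t write_bw = 2000;	/* wb write bandwidth, pages/s */
	uint64_t x, wb_setpoint, span;

	x = (wb_thresh << 16) / (thresh | 1);		/* ~0.25 in 16.16 fixed point */
	wb_setpoint = (setpoint * x) >> 16;		/* roughly a quarter of setpoint */
	span = ((thresh - wb_thresh + 8 * write_bw) * x) >> 16;

	printf("x=%llu/65536 wb_setpoint=%llu span=%llu\n",
	       (unsigned long long)x, (unsigned long long)wb_setpoint,
	       (unsigned long long)span);
	return 0;
}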
1147 unsigned long thresh = dtc->thresh;
1153 if (limit < thresh) {
1154 limit = thresh;
1159 * Follow down slowly. Use the higher one as the target, because thresh
1163 thresh = max(thresh, dtc->dirty);
1164 if (limit > thresh) {
1165 limit -= (limit - thresh) >> 5;
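
The update_dirty_limit() matches above show an asymmetric tracker: the domain's dirty_limit jumps up to thresh in one step, but decays toward it by only 1/32 of the gap per update, never dropping below the current dirty count. A standalone model of that behaviour, not the kernel function itself:

#include <stdio.h>

static unsigned long follow(unsigned long limit, unsigned long thresh,
			    unsigned long dirty)
{
	if (limit < thresh)
		return thresh;				/* follow up in one step */

	thresh = (thresh > dirty) ? thresh : dirty;	/* stay above dirty pages */
	if (limit > thresh)
		limit -= (limit - thresh) >> 5;		/* follow down slowly */
	return limit;
}

int main(void)
{
	unsigned long limit = 200000, thresh = 100000, dirty = 120000;
	int i;

	for (i = 0; i < 5; i++) {
		limit = follow(limit, thresh, dirty);
		printf("step %d: dirty_limit=%lu\n", i, limit);
	}
	return 0;
}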
1204 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1205 unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1422 unsigned long thresh)
1424 if (thresh > dirty)
1425 return 1UL << (ilog2(thresh - dirty) >> 1);
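
dirty_poll_interval(), matched above, spaces out dirty-page checks roughly as the square root of the remaining headroom below thresh. A worked example using a portable ilog2 substitute:

#include <stdio.h>

static unsigned long ilog2_ul(unsigned long v)
{
	unsigned long r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned long poll_interval(unsigned long dirty, unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2_ul(thresh - dirty) >> 1);
	return 1;
}

int main(void)
{
	/* 10000 pages of headroom -> ilog2 = 13 -> interval = 1 << 6 = 64 pages */
	printf("%lu\n", poll_interval(90000, 100000));
	return 0;
}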
1543 dtc->wb_bg_thresh = dtc->thresh ?
1544 div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
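
For example, with dtc->thresh = 100000 pages, dtc->bg_thresh = 50000 pages and dtc->wb_thresh = 25000 pages, this gives wb_bg_thresh = 25000 * 50000 / 100000 = 12500 pages, i.e. the wb keeps the same background-to-foreground ratio as its domain (illustrative numbers only).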
1551 * Otherwise it would be possible to get thresh+n pages
1552 * reported dirty, even though there are thresh-m pages
1596 unsigned long dirty, thresh, bg_thresh;
1611 thresh = gdtc->wb_thresh;
1615 thresh = gdtc->thresh;
1640 m_thresh = mdtc->thresh;
1657 if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
1664 intv = dirty_poll_interval(dirty, thresh);
1699 ((gdtc->dirty > gdtc->thresh) || strictlimit);
1726 ((mdtc->dirty > mdtc->thresh) || strictlimit);
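
The balance_dirty_pages() matches above switch between domain-wide and per-wb numbers: under strict limiting the freerun check uses the wb's own dirty/thresh/bg_thresh instead of the domain's, and the over-threshold condition is checked per domain (global and memcg). A rough standalone sketch of that decision structure; the struct and field names are local to the sketch, and the kernel combines the over-threshold test with a per-wb check not shown here:

#include <stdbool.h>
#include <stdio.h>

struct dom {
	unsigned long dirty, thresh, bg_thresh;
	unsigned long wb_dirty, wb_thresh, wb_bg_thresh;
};

static bool freerun(const struct dom *d, bool strictlimit)
{
	unsigned long dirty = strictlimit ? d->wb_dirty : d->dirty;
	unsigned long thresh = strictlimit ? d->wb_thresh : d->thresh;
	unsigned long bg = strictlimit ? d->wb_bg_thresh : d->bg_thresh;

	return dirty <= (thresh + bg) / 2;
}

int main(void)
{
	struct dom gdtc = { 60000, 100000, 50000, 900, 1000, 500 };

	printf("freerun (normal): %d\n", freerun(&gdtc, false));
	printf("freerun (strictlimit): %d\n", freerun(&gdtc, true));
	printf("over domain thresh: %d\n", gdtc.dirty > gdtc.thresh);
	return 0;
}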
1770 sdtc->thresh,
1799 sdtc->thresh,
1819 * This is typically equal to (dirty < thresh) and can also