Lines matching refs: WRITE (a short sketch of the READ/WRITE indexing convention follows the listing)

82 	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
209 /* Total Number of queued bios on READ and WRITE lists */
510 for (rw = READ; rw <= WRITE; rw++) {
517 tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
519 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
521 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
523 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
577 for (rw = READ; rw <= WRITE; rw++)
604 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
605 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
621 tg->bps[WRITE][LIMIT_LOW] = 0;
623 tg->iops[WRITE][LIMIT_LOW] = 0;
1120 bio = throtl_peek_queued(&sq->queued[WRITE]);
1208 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1291 sq->nr_queued[READ] + sq->nr_queued[WRITE],
1292 sq->nr_queued[READ], sq->nr_queued[WRITE]);
1353 for (rw = READ; rw <= WRITE; rw++)
1410 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1411 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1452 throtl_start_new_slice(tg, WRITE);
1570 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1582 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1632 tg->bps_conf[WRITE][off] == bps_dft &&
1634 tg->iops_conf[WRITE][off] == iops_dft &&
1643 if (tg->bps_conf[WRITE][off] != U64_MAX)
1645 tg->bps_conf[WRITE][off]);
1649 if (tg->iops_conf[WRITE][off] != UINT_MAX)
1651 tg->iops_conf[WRITE][off]);
1701 v[1] = tg->bps_conf[WRITE][index];
1703 v[3] = tg->iops_conf[WRITE][index];
1747 tg->bps_conf[WRITE][index] = v[1];
1749 tg->iops_conf[WRITE][index] = v[3];
1753 tg->bps[WRITE][index] = v[1];
1755 tg->iops[WRITE][index] = v[3];
1759 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1760 tg->bps_conf[WRITE][LIMIT_MAX]);
1763 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1764 tg->iops_conf[WRITE][LIMIT_MAX]);
1770 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1774 tg->bps[WRITE][LIMIT_LOW] = 0;
1776 tg->iops[WRITE][LIMIT_LOW] = 0;
1843 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1844 wtime = tg->last_low_overflow_time[WRITE];
1867 !parent->bps[WRITE][LIMIT_LOW] &&
1868 !parent->iops[WRITE][LIMIT_LOW])
1912 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1916 (!write_limit || sq->nr_queued[WRITE]))
1918 if (write_limit && sq->nr_queued[WRITE] &&
2087 if (tg->bps[WRITE][LIMIT_LOW]) {
2088 bps = tg->last_bytes_disp[WRITE] * HZ;
2090 if (bps >= tg->bps[WRITE][LIMIT_LOW])
2091 tg->last_low_overflow_time[WRITE] = now;
2101 if (tg->iops[WRITE][LIMIT_LOW]) {
2102 tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
2103 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2104 if (iops >= tg->iops[WRITE][LIMIT_LOW])
2105 tg->last_low_overflow_time[WRITE] = now;
2116 tg->last_bytes_disp[WRITE] = 0;
2118 tg->last_io_disp[WRITE] = 0;
2153 for (rw = READ; rw <= WRITE; rw++) {
2184 for (rw = READ; rw <= WRITE; rw++) {
2212 td->avg_buckets[WRITE][i].latency,
2213 td->avg_buckets[WRITE][i].valid);
2328 sq->nr_queued[READ], sq->nr_queued[WRITE]);
2458 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2460 if (!td->latency_buckets[WRITE]) {
2481 free_percpu(td->latency_buckets[WRITE]);
2494 free_percpu(q->td->latency_buckets[WRITE]);
2514 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
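The matches above all follow one convention: per-direction state lives in two-element arrays indexed by READ/WRITE, and the rate limits take a second LIMIT_LOW/LIMIT_MAX index (bps, bps_conf, iops, iops_conf), with the effective low limit clamped by the configured max limit (the min() calls around lines 1759-1764). Below is a minimal userspace sketch of that convention, not the kernel implementation; it assumes READ == 0 and WRITE == 1 as the kernel defines them, the field names mirror the listing, and the struct name, MIN macro, and demo values in main() are invented for illustration.

/*
 * Sketch of the indexing convention seen in the listing: arrays indexed
 * by READ (0) / WRITE (1), limits indexed by LIMIT_LOW / LIMIT_MAX.
 */
#include <stdint.h>
#include <stdio.h>

#define READ	0
#define WRITE	1

enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

#define MIN(a, b)	((a) < (b) ? (a) : (b))

struct tg_sketch {
	uint64_t	bps[2][LIMIT_CNT];	/* effective bytes/sec limits */
	uint64_t	bps_conf[2][LIMIT_CNT];	/* user-configured values     */
	unsigned int	iops[2][LIMIT_CNT];	/* effective IOs/sec limits   */
	unsigned int	iops_conf[2][LIMIT_CNT];
};

/*
 * Mirrors the clamping seen around lines 1759-1764: the effective low
 * limit of a direction is the configured low limit capped by the
 * configured max limit for that same direction.
 */
static void tg_apply_conf(struct tg_sketch *tg)
{
	int rw;

	/* The rw loop idiom from the listing relies on READ == 0, WRITE == 1. */
	for (rw = READ; rw <= WRITE; rw++) {
		tg->bps[rw][LIMIT_MAX]  = tg->bps_conf[rw][LIMIT_MAX];
		tg->iops[rw][LIMIT_MAX] = tg->iops_conf[rw][LIMIT_MAX];
		tg->bps[rw][LIMIT_LOW]  = MIN(tg->bps_conf[rw][LIMIT_LOW],
					      tg->bps_conf[rw][LIMIT_MAX]);
		tg->iops[rw][LIMIT_LOW] = MIN(tg->iops_conf[rw][LIMIT_LOW],
					      tg->iops_conf[rw][LIMIT_MAX]);
	}
}

int main(void)
{
	/* Hypothetical configuration: low limit set above the max limit. */
	struct tg_sketch tg = {
		.bps_conf  = { [WRITE] = { [LIMIT_LOW] = 10 << 20, [LIMIT_MAX] = 1 << 20 } },
		.iops_conf = { [WRITE] = { [LIMIT_LOW] = 500,      [LIMIT_MAX] = 1000 } },
	};

	tg_apply_conf(&tg);
	printf("write bps low=%llu max=%llu\n",
	       (unsigned long long)tg.bps[WRITE][LIMIT_LOW],
	       (unsigned long long)tg.bps[WRITE][LIMIT_MAX]);
	return 0;
}

Keeping the direction as the first array index is what lets nearly every limit check, reset, and per-direction allocation in the listing be written once inside a for (rw = READ; rw <= WRITE; rw++) loop rather than duplicated for reads and writes.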