Lines matching refs: READ

66 /* Total Number of queued bios on READ and WRITE lists */
332 INIT_LIST_HEAD(&sq->queued[READ]);
356 for (rw = READ; rw <= WRITE; rw++) {
362 tg->bps[READ][LIMIT_MAX] = U64_MAX;
364 tg->iops[READ][LIMIT_MAX] = UINT_MAX;
366 tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
368 tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
424 for (rw = READ; rw <= WRITE; rw++) {
457 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
458 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
478 tg->bps[READ][LIMIT_LOW] = 0;
480 tg->iops[READ][LIMIT_LOW] = 0;
653 rw == READ ? 'R' : 'W', tg->slice_start[rw],
671 rw == READ ? 'R' : 'W', tg->slice_start[rw],
687 rw == READ ? 'R' : 'W', tg->slice_start[rw],
791 rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
820 if (tg->service_queue.nr_queued[READ])
821 __tg_update_carryover(tg, READ);
827 tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
828 tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
1015 bio = throtl_peek_queued(&sq->queued[READ]);
1098 while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1142 if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
1201 sq->nr_queued[READ] + sq->nr_queued[WRITE],
1202 sq->nr_queued[READ], sq->nr_queued[WRITE]);
1263 for (rw = READ; rw <= WRITE; rw++)
1320 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1321 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1357 * Restart the slices for both READ and WRITES. It might happen
1361 throtl_start_new_slice(tg, READ, false);
1447 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1459 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1514 if (tg->bps_conf[READ][off] == bps_dft &&
1516 tg->iops_conf[READ][off] == iops_dft &&
1523 if (tg->bps_conf[READ][off] != U64_MAX)
1525 tg->bps_conf[READ][off]);
1529 if (tg->iops_conf[READ][off] != UINT_MAX)
1531 tg->iops_conf[READ][off]);
1583 v[0] = tg->bps_conf[READ][index];
1585 v[2] = tg->iops_conf[READ][index];
1629 tg->bps_conf[READ][index] = v[0];
1631 tg->iops_conf[READ][index] = v[2];
1635 tg->bps[READ][index] = v[0];
1637 tg->iops[READ][index] = v[2];
1640 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1641 tg->bps_conf[READ][LIMIT_MAX]);
1644 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1645 tg->iops_conf[READ][LIMIT_MAX]);
1652 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1656 tg->bps[READ][LIMIT_LOW] = 0;
1658 tg->iops[READ][LIMIT_LOW] = 0;
1769 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1770 rtime = tg->last_low_overflow_time[READ];
1792 if (!parent->bps[READ][LIMIT_LOW] &&
1793 !parent->iops[READ][LIMIT_LOW] &&
1846 * cgroup reaches low limit when low limit of READ and WRITE are
1850 if (throtl_low_limit_reached(tg, READ) &&
2016 if (tg->bps[READ][LIMIT_LOW]) {
2017 bps = tg->last_bytes_disp[READ] * HZ;
2019 if (bps >= tg->bps[READ][LIMIT_LOW])
2020 tg->last_low_overflow_time[READ] = now;
2030 if (tg->iops[READ][LIMIT_LOW]) {
2031 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2032 if (iops >= tg->iops[READ][LIMIT_LOW])
2033 tg->last_low_overflow_time[READ] = now;
2049 tg->last_bytes_disp[READ] = 0;
2051 tg->last_io_disp[READ] = 0;
2086 for (rw = READ; rw <= WRITE; rw++) {
2117 for (rw = READ; rw <= WRITE; rw++) {
2143 td->avg_buckets[READ][i].latency,
2144 td->avg_buckets[READ][i].valid,
2249 rw == READ ? 'R' : 'W',
2253 sq->nr_queued[READ], sq->nr_queued[WRITE]);
2377 td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
2379 if (!td->latency_buckets[READ]) {
2386 free_percpu(td->latency_buckets[READ]);
2405 free_percpu(td->latency_buckets[READ]);
2420 free_percpu(q->td->latency_buckets[READ]);
2441 td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
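The matches above all revolve around one indexing pattern: READ and WRITE are used as the first index into per-direction arrays (bps, iops, nr_queued, carryover_*, latency_buckets), with a second index selecting the limit level (LIMIT_LOW vs. LIMIT_MAX), and loops of the form "for (rw = READ; rw <= WRITE; rw++)" walk both directions. Below is a minimal standalone sketch of that pattern, assuming READ == 0 and WRITE == 1 as in the usual convention; the names tg_sketch and tg_init_defaults are invented for illustration and the struct is a simplified stand-in, not the kernel's actual struct throtl_grp.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the direction indices and limit levels;
     * values mirror the common READ == 0 / WRITE == 1 convention but are
     * defined locally for this sketch only. */
    enum { READ = 0, WRITE = 1, RW_CNT = 2 };
    enum { LIMIT_LOW = 0, LIMIT_MAX = 1, LIMIT_CNT = 2 };

    /* Simplified throttle-group shape: per-direction, per-level limits. */
    struct tg_sketch {
            uint64_t     bps[RW_CNT][LIMIT_CNT];   /* bytes per second  */
            unsigned int iops[RW_CNT][LIMIT_CNT];  /* IOs per second    */
            unsigned int nr_queued[RW_CNT];        /* queued bios per direction */
    };

    /* Walk both directions the same way the listing's
     * "for (rw = READ; rw <= WRITE; rw++)" loops do. */
    static void tg_init_defaults(struct tg_sketch *tg)
    {
            for (int rw = READ; rw <= WRITE; rw++) {
                    tg->bps[rw][LIMIT_MAX]  = UINT64_MAX;  /* "unlimited" by default */
                    tg->iops[rw][LIMIT_MAX] = UINT32_MAX;
                    tg->bps[rw][LIMIT_LOW]  = 0;           /* low limit disabled */
                    tg->iops[rw][LIMIT_LOW] = 0;
                    tg->nr_queued[rw]       = 0;
            }
    }

    int main(void)
    {
            struct tg_sketch tg;

            tg_init_defaults(&tg);
            /* Low-limit logic only engages when at least one low limit is
             * non-zero, echoing checks like
             * "tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] || ...". */
            tg.bps[READ][LIMIT_LOW] = 1024 * 1024; /* 1 MiB/s read floor */

            printf("read low bps: %llu, write low bps: %llu\n",
                   (unsigned long long)tg.bps[READ][LIMIT_LOW],
                   (unsigned long long)tg.bps[WRITE][LIMIT_LOW]);
            return 0;
    }

The sketch only shows the array layout and the two-direction loop; the actual throttling decisions seen in the listing (carryover accounting, slice restarts, last_low_overflow_time updates) sit on top of this indexing scheme.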