Lines matching refs:READ
82 struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */
209 /* Total Number of queued bios on READ and WRITE lists */
510 for (rw = READ; rw <= WRITE; rw++) {
516 tg->bps[READ][LIMIT_MAX] = U64_MAX;
518 tg->iops[READ][LIMIT_MAX] = UINT_MAX;
520 tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
522 tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
577 for (rw = READ; rw <= WRITE; rw++)
604 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
605 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
620 tg->bps[READ][LIMIT_LOW] = 0;
622 tg->iops[READ][LIMIT_LOW] = 0;
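The lines above show the throttle group keeping its bandwidth and iops limits in arrays indexed first by direction (READ/WRITE) and then by limit tier (LIMIT_LOW/LIMIT_MAX), reset with a "for (rw = READ; rw <= WRITE; rw++)" walk over both directions. A minimal userspace sketch of that indexing pattern, assuming READ=0/WRITE=1 and LIMIT_LOW=0/LIMIT_MAX=1 as in the kernel, and using a stand-in struct rather than the real struct throtl_grp:

/* Minimal sketch of the [rw][limit] indexing used above; the struct and
 * helper are stand-ins, not the kernel's struct throtl_grp. */
#include <stdint.h>
#include <stdio.h>
#include <limits.h>

enum { READ = 0, WRITE = 1 };                      /* direction index */
enum { LIMIT_LOW = 0, LIMIT_MAX = 1, LIMIT_CNT };  /* limit tiers */

struct tg_limits {                                 /* hypothetical stand-in */
	uint64_t bps[2][LIMIT_CNT];
	unsigned int iops[2][LIMIT_CNT];
};

static void tg_reset_to_unlimited(struct tg_limits *tg)
{
	int rw;

	/* Same loop shape as the source: LIMIT_MAX defaults to "no limit",
	 * LIMIT_LOW defaults to 0, i.e. the low limit is off. */
	for (rw = READ; rw <= WRITE; rw++) {
		tg->bps[rw][LIMIT_MAX] = UINT64_MAX;
		tg->iops[rw][LIMIT_MAX] = UINT_MAX;
		tg->bps[rw][LIMIT_LOW] = 0;
		tg->iops[rw][LIMIT_LOW] = 0;
	}
}

int main(void)
{
	struct tg_limits tg;

	tg_reset_to_unlimited(&tg);
	printf("read bps max: %llu, read bps low: %llu\n",
	       (unsigned long long)tg.bps[READ][LIMIT_MAX],
	       (unsigned long long)tg.bps[READ][LIMIT_LOW]);
	return 0;
}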
792 rw == READ ? 'R' : 'W', tg->slice_start[rw],
807 rw == READ ? 'R' : 'W', tg->slice_start[rw],
823 rw == READ ? 'R' : 'W', tg->slice_start[rw],
892 rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
1116 bio = throtl_peek_queued(&sq->queued[READ]);
1198 while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1291 sq->nr_queued[READ] + sq->nr_queued[WRITE],
1292 sq->nr_queued[READ], sq->nr_queued[WRITE]);
1353 for (rw = READ; rw <= WRITE; rw++)
1410 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1411 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
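The references around 1116-1292 peek and count pending bios per direction through queued[READ]/queued[WRITE] and nr_queued[READ]/nr_queued[WRITE]. A rough userspace sketch of that two-queue, budgeted dispatch shape; all names here are illustrative, not the kernel's throtl_service_queue API:

/* Sketch of per-direction FIFOs with a read budget served before writes,
 * mirroring the "peek READ queue while budget remains" loop at 1198. */
#include <stdio.h>

enum { READ = 0, WRITE = 1 };

struct fake_bio {
	int id;
	struct fake_bio *next;
};

struct fake_sq {
	struct fake_bio *queued[2];     /* per-direction FIFO heads */
	unsigned int nr_queued[2];
};

static struct fake_bio *sq_pop(struct fake_sq *sq, int rw)
{
	struct fake_bio *bio = sq->queued[rw];

	if (bio) {
		sq->queued[rw] = bio->next;
		sq->nr_queued[rw]--;
	}
	return bio;
}

static void dispatch_budgeted(struct fake_sq *sq, int max_reads, int max_writes)
{
	struct fake_bio *bio;
	int n = 0;

	while (n < max_reads && (bio = sq_pop(sq, READ))) {
		printf("dispatch R bio %d\n", bio->id);
		n++;
	}
	n = 0;
	while (n < max_writes && (bio = sq_pop(sq, WRITE))) {
		printf("dispatch W bio %d\n", bio->id);
		n++;
	}
}

int main(void)
{
	struct fake_bio r1 = { 1, NULL }, w1 = { 2, NULL };
	struct fake_sq sq = { { &r1, &w1 }, { 1, 1 } };

	dispatch_budgeted(&sq, 4, 2);
	return 0;
}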
1447 * Restart the slices for both READ and WRITES. It might happen
1451 throtl_start_new_slice(tg, READ);
1564 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1576 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1631 if (tg->bps_conf[READ][off] == bps_dft &&
1633 tg->iops_conf[READ][off] == iops_dft &&
1640 if (tg->bps_conf[READ][off] != U64_MAX)
1642 tg->bps_conf[READ][off]);
1646 if (tg->iops_conf[READ][off] != UINT_MAX)
1648 tg->iops_conf[READ][off]);
1700 v[0] = tg->bps_conf[READ][index];
1702 v[2] = tg->iops_conf[READ][index];
1746 tg->bps_conf[READ][index] = v[0];
1748 tg->iops_conf[READ][index] = v[2];
1752 tg->bps[READ][index] = v[0];
1754 tg->iops[READ][index] = v[2];
1757 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1758 tg->bps_conf[READ][LIMIT_MAX]);
1761 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1762 tg->iops_conf[READ][LIMIT_MAX]);
1769 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1773 tg->bps[READ][LIMIT_LOW] = 0;
1775 tg->iops[READ][LIMIT_LOW] = 0;
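The min() calls at 1757-1762 clamp the effective LIMIT_LOW value so it can never exceed the configured LIMIT_MAX for the same direction. A one-function sketch of that clamp, with illustrative names:

/* Effective low limit = min(configured low, configured max). */
#include <stdint.h>
#include <stdio.h>

static uint64_t effective_low(uint64_t conf_low, uint64_t conf_max)
{
	return conf_low < conf_max ? conf_low : conf_max;
}

int main(void)
{
	/* e.g. low configured at 10 MiB/s but max capped at 4 MiB/s */
	printf("%llu\n",
	       (unsigned long long)effective_low(10ULL << 20, 4ULL << 20));
	return 0;
}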
1841 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1842 rtime = tg->last_low_overflow_time[READ];
1865 if (!parent->bps[READ][LIMIT_LOW] &&
1866 !parent->iops[READ][LIMIT_LOW] &&
1911 read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1915 if (read_limit && sq->nr_queued[READ] &&
1919 (!read_limit || sq->nr_queued[READ]))
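The checks around 1911-1919 decide whether a group is being held back by its low limit: a direction counts only if it has a low limit configured and bios queued there, and the other direction is either unlimited or also backed up. A simplified sketch of that condition's shape, assuming READ=0/WRITE=1; it is not the kernel's exact upgrade logic:

#include <stdbool.h>
#include <stdio.h>

enum { READ = 0, WRITE = 1 };

/* True when a low-limited direction still has bios queued and the other
 * direction does not contradict that (no limit, or also queued). */
static bool pushing_low_limit(bool read_limit, bool write_limit,
			      unsigned int nr_queued[2])
{
	if (read_limit && nr_queued[READ] &&
	    (!write_limit || nr_queued[WRITE]))
		return true;
	if (write_limit && nr_queued[WRITE] &&
	    (!read_limit || nr_queued[READ]))
		return true;
	return false;
}

int main(void)
{
	unsigned int nr_queued[2] = { 3, 0 };

	printf("%d\n", pushing_low_limit(true, false, nr_queued));
	return 0;
}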
2080 if (tg->bps[READ][LIMIT_LOW]) {
2081 bps = tg->last_bytes_disp[READ] * HZ;
2083 if (bps >= tg->bps[READ][LIMIT_LOW])
2084 tg->last_low_overflow_time[READ] = now;
2094 if (tg->iops[READ][LIMIT_LOW]) {
2095 tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
2096 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2097 if (iops >= tg->iops[READ][LIMIT_LOW])
2098 tg->last_low_overflow_time[READ] = now;
2115 tg->last_bytes_disp[READ] = 0;
2117 tg->last_io_disp[READ] = 0;
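The lines at 2080-2098 estimate the recent read rate as dispatched bytes (or IOs) times HZ divided by the elapsed jiffies, then record last_low_overflow_time[READ] when the low limit is reached, before resetting the per-window counters at 2115-2117. A small worked sketch of that rate arithmetic, with HZ assumed to be 250 purely for the example:

/* rate = dispatched * HZ / elapsed_jiffies, i.e. a per-second estimate
 * over the last sampling window. */
#include <stdint.h>
#include <stdio.h>

#define HZ 250  /* assumed tick rate for the example */

static uint64_t rate_per_sec(uint64_t dispatched, uint64_t elapsed_jiffies)
{
	return dispatched * HZ / elapsed_jiffies;
}

int main(void)
{
	/* 1 MiB of reads dispatched over 25 jiffies (100 ms at HZ=250)
	 * is an estimated 10 MiB/s, which would trip an 8 MiB/s low limit. */
	uint64_t bps = rate_per_sec(1ULL << 20, 25);

	printf("estimated read bps: %llu\n", (unsigned long long)bps);
	printf("over 8 MiB/s low limit: %d\n", bps >= (8ULL << 20));
	return 0;
}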
2153 for (rw = READ; rw <= WRITE; rw++) {
2184 for (rw = READ; rw <= WRITE; rw++) {
2210 td->avg_buckets[READ][i].latency,
2211 td->avg_buckets[READ][i].valid,
2324 rw == READ ? 'R' : 'W',
2328 sq->nr_queued[READ], sq->nr_queued[WRITE]);
2452 td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
2454 if (!td->latency_buckets[READ]) {
2461 free_percpu(td->latency_buckets[READ]);
2480 free_percpu(td->latency_buckets[READ]);
2493 free_percpu(q->td->latency_buckets[READ]);
2513 td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
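The final references allocate per-direction latency buckets with __alloc_percpu and seed avg_buckets[READ][i].latency with DFL_HD_BASELINE_LATENCY. A userspace sketch of that per-direction setup, with plain calloc() standing in for __alloc_percpu and the bucket count and baseline value chosen only for illustration:

/* Two bucket arrays, one per direction, each entry seeded with a baseline
 * latency; not the kernel's percpu layout. */
#include <stdio.h>
#include <stdlib.h>

enum { READ = 0, WRITE = 1 };
#define LATENCY_BUCKET_SIZE 9           /* assumed bucket count */
#define BASELINE_LATENCY_NS 4000000ULL  /* assumed HD baseline, 4 ms */

struct avg_latency_bucket {
	unsigned long long latency;  /* ns */
	int valid;
};

int main(void)
{
	struct avg_latency_bucket *avg[2];
	int rw, i;

	for (rw = READ; rw <= WRITE; rw++) {
		avg[rw] = calloc(LATENCY_BUCKET_SIZE, sizeof(*avg[rw]));
		if (!avg[rw])
			return 1;
		for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
			avg[rw][i].latency = BASELINE_LATENCY_NS;
	}
	printf("read bucket 0 baseline: %llu ns\n", avg[READ][0].latency);
	for (rw = READ; rw <= WRITE; rw++)
		free(avg[rw]);
	return 0;
}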