Lines matching refs: rw
Each entry below is a source line that references the identifier rw, prefixed with its line number in the file; the fragments appear to come from the Linux kernel's block-layer throttling code (block/blk-throttle.c).
300 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
310 ret = tg->bps[rw][td->limit_index];
314 tg->iops[rw][td->limit_index])
320 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
321 tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
324 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
325 ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
330 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
340 ret = tg->iops[rw][td->limit_index];
344 tg->bps[rw][td->limit_index])
350 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
351 tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
354 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
357 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
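
A minimal userspace sketch of the limit selection in tg_bps_limit() above (tg_iops_limit() mirrors it with unsigned int and min_t()): when the group runs at LIMIT_MAX but has a distinct LIMIT_LOW configured, the low limit is scaled up and capped at the max. throtl_adjusted_limit() is replaced by a hypothetical fixed-percentage stand-in, since its body is not part of this listing:

    #include <stdint.h>
    #include <stdio.h>

    enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

    /* hypothetical stand-in for throtl_adjusted_limit(); the kernel
     * derives the scale factor from run-time state not shown here */
    static uint64_t adjusted_limit(uint64_t low, unsigned int scale_pct)
    {
        return low * scale_pct / 100;
    }

    static uint64_t effective_bps(const uint64_t bps[LIMIT_CNT],
                                  int limit_index, unsigned int scale_pct)
    {
        uint64_t ret = bps[limit_index];
        uint64_t adjusted;

        /* running at the max limit with a distinct low limit set:
         * scale the low limit up, but never past the max limit */
        if (limit_index == LIMIT_MAX && bps[LIMIT_LOW] &&
            bps[LIMIT_LOW] != bps[LIMIT_MAX]) {
            adjusted = adjusted_limit(bps[LIMIT_LOW], scale_pct);
            ret = adjusted < bps[LIMIT_MAX] ? adjusted : bps[LIMIT_MAX];
        }
        return ret;
    }

    int main(void)
    {
        uint64_t bps[LIMIT_CNT] = { 1 << 20, 8 << 20 }; /* 1 MiB/s low, 8 MiB/s max */

        printf("effective bps: %llu\n",
               (unsigned long long)effective_bps(bps, LIMIT_MAX, 150));
        return 0;
    }

The cross-checks on the opposite counter (tg->iops[] at line 314 inside tg_bps_limit(), tg->bps[] at line 344 inside tg_iops_limit()) guard the LIMIT_LOW case where only one of the two limits is configured.
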
496 int rw;
510 for (rw = READ; rw <= WRITE; rw++) {
511 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
512 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
575 int rw;
577 for (rw = READ; rw <= WRITE; rw++)
578 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
580 (tg_bps_limit(tg, rw) != U64_MAX ||
581 tg_iops_limit(tg, rw) != UINT_MAX));
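
A hedged sketch of the has_rules computation at lines 577-581 (READ == 0 and WRITE == 1, as in the kernel): a group has rules in a direction if any ancestor does, or if its own bps/iops limit is finite. The blkcg parentage test is collapsed into a plain parent pointer:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { READ, WRITE };

    struct grp {
        struct grp *parent;
        bool has_rules[2];
        uint64_t bps[2];
        unsigned int iops[2];
    };

    static void update_has_rules(struct grp *g)
    {
        for (int rw = READ; rw <= WRITE; rw++)
            g->has_rules[rw] =
                (g->parent && g->parent->has_rules[rw]) ||
                g->bps[rw] != UINT64_MAX ||
                g->iops[rw] != UINT_MAX;
    }

    int main(void)
    {
        /* root throttles writes to 1 MiB/s; child has no limits of its own */
        struct grp root = { .bps = { UINT64_MAX, 1 << 20 },
                            .iops = { UINT_MAX, UINT_MAX } };
        struct grp child = { .parent = &root,
                             .bps = { UINT64_MAX, UINT64_MAX },
                             .iops = { UINT_MAX, UINT_MAX } };

        update_has_rules(&root);
        update_has_rules(&child);
        printf("child: read=%d write=%d\n",
               child.has_rules[READ], child.has_rules[WRITE]);
        return 0;
    }
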
773 bool rw, unsigned long start)
775 tg->bytes_disp[rw] = 0;
776 tg->io_disp[rw] = 0;
778 atomic_set(&tg->io_split_cnt[rw], 0);
786 if (time_after_eq(start, tg->slice_start[rw]))
787 tg->slice_start[rw] = start;
789 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
792 rw == READ ? 'R' : 'W', tg->slice_start[rw],
793 tg->slice_end[rw], jiffies);
796 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
798 tg->bytes_disp[rw] = 0;
799 tg->io_disp[rw] = 0;
800 tg->slice_start[rw] = jiffies;
801 tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
803 atomic_set(&tg->io_split_cnt[rw], 0);
807 rw == READ ? 'R' : 'W', tg->slice_start[rw],
808 tg->slice_end[rw], jiffies);
811 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
814 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
817 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
820 throtl_set_slice_end(tg, rw, jiffy_end);
823 rw == READ ? 'R' : 'W', tg->slice_start[rw],
824 tg->slice_end[rw], jiffies);
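
A minimal userspace sketch of the slice lifecycle above, with jiffies modeled as a plain counter (the kernel's time_after_eq() additionally handles wraparound) and a made-up slice length: starting a slice zeroes the dispatch counters, the "with credit" variant keeps an earlier start so already-earned budget survives, and extending rounds the end up to a whole slice boundary:

    #include <stdio.h>

    #define THROTL_SLICE 100 /* made-up slice length, in "jiffies" */

    struct slice {
        unsigned long start, end;
        unsigned long bytes_disp, io_disp;
    };

    static unsigned long roundup_to(unsigned long x, unsigned long step)
    {
        return (x + step - 1) / step * step;
    }

    static void start_new_slice(struct slice *s, unsigned long now)
    {
        s->bytes_disp = 0;
        s->io_disp = 0;
        s->start = now;
        s->end = now + THROTL_SLICE;
    }

    /* "with credit": keep an earlier start so budget already earned
     * in the current window is not forfeited */
    static void start_new_slice_with_credit(struct slice *s,
                                            unsigned long now,
                                            unsigned long start)
    {
        s->bytes_disp = 0;
        s->io_disp = 0;
        if (start >= s->start)
            s->start = start;
        s->end = now + THROTL_SLICE;
    }

    static void extend_slice(struct slice *s, unsigned long jiffy_end)
    {
        /* round the end up to a whole slice boundary */
        s->end = roundup_to(jiffy_end, THROTL_SLICE);
    }

    int main(void)
    {
        struct slice s;

        start_new_slice(&s, 1000);
        extend_slice(&s, 1130); /* 1130 rounds up to 1200 */
        printf("slice [%lu, %lu)\n", s.start, s.end);

        /* restart at 1250 but keep credit earned since 1100 */
        start_new_slice_with_credit(&s, 1250, 1100);
        printf("slice [%lu, %lu)\n", s.start, s.end);
        return 0;
    }
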
828 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
830 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
837 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
842 BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
849 if (throtl_slice_used(tg, rw))
860 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
862 time_elapsed = jiffies - tg->slice_start[rw];
868 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
872 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
878 if (tg->bytes_disp[rw] >= bytes_trim)
879 tg->bytes_disp[rw] -= bytes_trim;
881 tg->bytes_disp[rw] = 0;
883 if (tg->io_disp[rw] >= io_trim)
884 tg->io_disp[rw] -= io_trim;
886 tg->io_disp[rw] = 0;
888 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
892 rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
893 tg->slice_start[rw], tg->slice_end[rw], jiffies);
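
throtl_trim_slice() claws back the budget granted by slices that have fully elapsed, so a long-idle group cannot burst with hoarded credit. A minimal userspace sketch of the arithmetic at lines 862-888, with made-up HZ and slice constants:

    #include <stdint.h>
    #include <stdio.h>

    #define HZ 1000
    #define THROTL_SLICE 100 /* made-up: 100 "jiffies" per slice */

    struct slice {
        unsigned long start;
        uint64_t bytes_disp;
        unsigned int io_disp;
    };

    static void trim_slice(struct slice *s, unsigned long now,
                           uint64_t bps_limit, unsigned int iops_limit)
    {
        unsigned long nr_slices = (now - s->start) / THROTL_SLICE;
        uint64_t bytes_trim, io_trim;

        if (!nr_slices)
            return;
        /* budget granted by the elapsed whole slices */
        bytes_trim = bps_limit * THROTL_SLICE * nr_slices / HZ;
        io_trim = (uint64_t)iops_limit * THROTL_SLICE * nr_slices / HZ;

        s->bytes_disp = s->bytes_disp > bytes_trim ?
                        s->bytes_disp - bytes_trim : 0;
        s->io_disp = s->io_disp > io_trim ?
                     s->io_disp - (unsigned int)io_trim : 0;
        /* slide the window forward past the trimmed slices */
        s->start += nr_slices * THROTL_SLICE;
    }

    int main(void)
    {
        struct slice s = { .start = 0, .bytes_disp = 300000, .io_disp = 50 };

        /* 1 MB/s, 100 iops; 250 jiffies elapsed -> 2 whole slices trimmed */
        trim_slice(&s, 250, 1000000, 100);
        printf("bytes_disp=%llu io_disp=%u start=%lu\n",
               (unsigned long long)s.bytes_disp, s.io_disp, s.start);
        return 0;
    }
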
899 bool rw = bio_data_dir(bio);
910 jiffy_elapsed = jiffies - tg->slice_start[rw];
930 if (tg->io_disp[rw] + 1 <= io_allowed) {
947 bool rw = bio_data_dir(bio);
958 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
968 if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
975 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
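
A hedged sketch of the wait computation in tg_with_in_bps_limit() above, with the round-up of the elapsed time to a slice boundary (line 958's jiffy_elapsed_rnd) omitted for brevity; the iops test at line 930 is the same idea with a budget of whole I/Os instead of bytes:

    #include <stdint.h>
    #include <stdio.h>

    #define HZ 1000

    static unsigned long bps_wait(uint64_t bytes_disp, uint64_t bio_size,
                                  uint64_t bps_limit, unsigned long elapsed)
    {
        /* bytes earned so far grow linearly with elapsed slice time */
        uint64_t bytes_allowed = bps_limit * elapsed / HZ;
        uint64_t extra_bytes;

        if (bytes_disp + bio_size <= bytes_allowed)
            return 0; /* within budget: no wait */
        extra_bytes = bytes_disp + bio_size - bytes_allowed;
        /* time needed to earn the excess, rounded up */
        return (unsigned long)((extra_bytes * HZ + bps_limit - 1) / bps_limit);
    }

    int main(void)
    {
        /* 1 MB/s limit, 600 KB already dispatched 500 jiffies into
         * the slice (HZ=1000, so 500 ms), next bio is 64 KiB */
        printf("wait %lu jiffies\n", bps_wait(600000, 65536, 1000000, 500));
        return 0;
    }

tg_may_dispatch() (line 998 onward) then takes the larger of the bps and iops waits and, if the slice would end too soon, extends it to cover the wait (lines 1050-1051).
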
998 bool rw = bio_data_dir(bio);
1000 u64 bps_limit = tg_bps_limit(tg, rw);
1001 u32 iops_limit = tg_iops_limit(tg, rw);
1009 BUG_ON(tg->service_queue.nr_queued[rw] &&
1010 bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
1026 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
1027 throtl_start_new_slice(tg, rw);
1029 if (time_before(tg->slice_end[rw],
1031 throtl_extend_slice(tg, rw,
1036 tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
1050 if (time_before(tg->slice_end[rw], jiffies + max_wait))
1051 throtl_extend_slice(tg, rw, jiffies + max_wait);
1058 bool rw = bio_data_dir(bio);
1062 tg->bytes_disp[rw] += bio_size;
1063 tg->io_disp[rw]++;
1064 tg->last_bytes_disp[rw] += bio_size;
1065 tg->last_io_disp[rw]++;
1090 bool rw = bio_data_dir(bio);
1093 qn = &tg->qnode_on_self[rw];
1101 if (!sq->nr_queued[rw])
1104 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1106 sq->nr_queued[rw]++;
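
The qnode_on_self/qnode_on_parent pairs initialized at lines 511-512 let a parent queue bios per source (itself, or each child) and serve the sources round-robin rather than draining one backlog first. A hedged sketch of the add/pop discipline, with fixed-size arrays standing in for the kernel's linked lists:

    #include <stdio.h>

    #define MAXB 8

    struct qnode {
        int bios[MAXB];
        int nr;
        int queued; /* currently on the service queue's list? */
    };

    struct squeue {
        struct qnode *q[MAXB];
        int head, tail;
    };

    static void add_bio(struct squeue *sq, struct qnode *qn, int bio)
    {
        qn->bios[qn->nr++] = bio;
        if (!qn->queued) {
            sq->q[sq->tail++ % MAXB] = qn;
            qn->queued = 1;
        }
    }

    static int pop_bio(struct squeue *sq)
    {
        struct qnode *qn = sq->q[sq->head++ % MAXB];
        int bio = qn->bios[0];

        for (int i = 1; i < qn->nr; i++)
            qn->bios[i - 1] = qn->bios[i];
        if (--qn->nr)
            sq->q[sq->tail++ % MAXB] = qn; /* more bios: rotate to tail */
        else
            qn->queued = 0;
        return bio;
    }

    int main(void)
    {
        struct squeue sq = { 0 };
        struct qnode self = { 0 }, child = { 0 };

        add_bio(&sq, &self, 1);
        add_bio(&sq, &self, 2);
        add_bio(&sq, &child, 3);
        /* service alternates between sources: 1, 3, 2 */
        for (int i = 0; i < 3; i++)
            printf("bio %d\n", pop_bio(&sq));
        return 0;
    }
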
1137 struct throtl_grp *parent_tg, bool rw)
1139 if (throtl_slice_used(parent_tg, rw)) {
1140 throtl_start_new_slice_with_credit(parent_tg, rw,
1141 child_tg->slice_start[rw]);
1146 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1160 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1161 sq->nr_queued[rw]--;
1173 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1174 start_parent_slice_with_credit(tg, parent_tg, rw);
1176 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1177 &parent_sq->queued[rw]);
1178 BUG_ON(tg->td->nr_queued[rw] <= 0);
1179 tg->td->nr_queued[rw]--;
1182 throtl_trim_slice(tg, rw);
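
tg_dispatch_one_bio() pops the head bio of one level and either re-queues it on the parent (lines 1173-1177) or, at the top level, hands it back for issue (lines 1178-1179); the kernel also credits the parent's slice (line 1174) and trims the child's (line 1182). A minimal sketch of the climb, with trivial ring buffers standing in for the service queues:

    #include <stdio.h>

    #define QLEN 8

    struct level {
        struct level *parent;
        int queue[QLEN];
        int head, tail;
    };

    static void push(struct level *l, int bio)
    {
        l->queue[l->tail++ % QLEN] = bio;
    }

    static int pop(struct level *l)
    {
        return l->queue[l->head++ % QLEN];
    }

    static void dispatch_one(struct level *l)
    {
        int bio = pop(l);

        if (l->parent)
            push(l->parent, bio);          /* requeue on the parent */
        else
            printf("issue bio %d\n", bio); /* top level: goes to the device */
    }

    int main(void)
    {
        struct level root = { 0 };
        struct level child = { .parent = &root };

        push(&child, 42);
        dispatch_one(&child); /* child -> root */
        dispatch_one(&root);  /* root -> issue */
        return 0;
    }
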
1348 int rw;
1353 for (rw = READ; rw <= WRITE; rw++)
1354 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
2142 int i, cpu, rw;
2153 for (rw = READ; rw <= WRITE; rw++) {
2155 struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
2161 bucket = per_cpu_ptr(td->latency_buckets[rw],
2172 latency[rw] = tmp->total_latency;
2176 latency[rw] /= samples;
2177 if (latency[rw] == 0)
2179 avg_latency[rw][i].latency = latency[rw];
2184 for (rw = READ; rw <= WRITE; rw++) {
2186 if (!avg_latency[rw][i].latency) {
2187 if (td->avg_buckets[rw][i].latency < last_latency[rw])
2188 td->avg_buckets[rw][i].latency =
2189 last_latency[rw];
2193 if (!td->avg_buckets[rw][i].valid)
2194 latency[rw] = avg_latency[rw][i].latency;
2196 latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2197 avg_latency[rw][i].latency) >> 3;
2199 td->avg_buckets[rw][i].latency = max(latency[rw],
2200 last_latency[rw]);
2201 td->avg_buckets[rw][i].valid = true;
2202 last_latency[rw] = td->avg_buckets[rw][i].latency;
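
Lines 2196-2197 fold fresh samples into the per-bucket average with a 7/8 exponentially weighted moving average, and lines 2186-2202 keep the averages monotonically non-decreasing across bucket indexes. A hedged sketch for one direction:

    #include <stdbool.h>
    #include <stdio.h>

    #define BUCKETS 4

    struct avg_bucket {
        unsigned long latency;
        bool valid;
    };

    static void update_buckets(struct avg_bucket avg[BUCKETS],
                               const unsigned long sample[BUCKETS])
    {
        unsigned long last = 0;

        for (int i = 0; i < BUCKETS; i++) {
            unsigned long lat;

            if (!sample[i]) {
                /* no fresh data: only enforce monotonicity */
                if (avg[i].latency < last)
                    avg[i].latency = last;
                continue;
            }
            /* first sample seeds the average; later ones are EWMA'd */
            lat = avg[i].valid ?
                  (avg[i].latency * 7 + sample[i]) >> 3 : sample[i];
            avg[i].latency = lat > last ? lat : last;
            avg[i].valid = true;
            last = avg[i].latency;
        }
    }

    int main(void)
    {
        struct avg_bucket avg[BUCKETS] = { { 100, true }, { 0, false },
                                           { 300, true }, { 0, false } };
        unsigned long sample[BUCKETS] = { 180, 0, 260, 400 };

        update_buckets(avg, sample);
        for (int i = 0; i < BUCKETS; i++)
            printf("bucket %d: %lu%s\n", i, avg[i].latency,
                   avg[i].valid ? "" : " (no data yet)");
        return 0;
    }
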
2226 bool rw = bio_data_dir(bio);
2229 if (!parent->has_rules[rw])
2232 atomic_inc(&parent->io_split_cnt[rw]);
2233 atomic_inc(&parent->last_io_split_cnt[rw]);
2247 bool rw = bio_data_dir(bio);
2263 if (!tg->has_rules[rw])
2276 if (tg->last_low_overflow_time[rw] == 0)
2277 tg->last_low_overflow_time[rw] = jiffies;
2281 if (sq->nr_queued[rw])
2286 tg->last_low_overflow_time[rw] = jiffies;
2308 throtl_trim_slice(tg, rw);
2315 qn = &tg->qnode_on_parent[rw];
2324 rw == READ ? 'R' : 'W',
2325 tg->bytes_disp[rw], bio->bi_iter.bi_size,
2326 tg_bps_limit(tg, rw),
2327 tg->io_disp[rw], tg_iops_limit(tg, rw),
2330 tg->last_low_overflow_time[rw] = jiffies;
2332 td->nr_queued[rw]++;
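
blk_throtl_bio() walks from the bio's group toward the root, charging and trimming each level that can pass the bio immediately and queueing it at the first level that cannot (lines 2308-2332). A minimal sketch, with a single scalar budget as a hypothetical stand-in for tg_may_dispatch():

    #include <stdio.h>

    struct grp {
        struct grp *parent;
        long budget; /* hypothetical stand-in for tg_may_dispatch() */
    };

    /* returns the group that queued the bio, or NULL if it dispatched */
    static struct grp *throttle(struct grp *tg, long bio_size)
    {
        while (tg) {
            if (tg->budget < bio_size)
                return tg;          /* over limit: queue the bio here */
            tg->budget -= bio_size; /* charge this level and climb */
            tg = tg->parent;
        }
        return NULL;                /* every level agreed: dispatch */
    }

    int main(void)
    {
        struct grp root = { 0, 100 };
        struct grp child = { &root, 1000 };
        struct grp *where = throttle(&child, 400);

        printf("%s\n", !where ? "dispatched" :
               where == &root ? "queued at root" : "queued at child");
        return 0;
    }
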
2397 int rw = bio_data_dir(bio);
2425 threshold = tg->td->avg_buckets[rw][bucket].latency +
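
Line 2425 is truncated after the "+"; in the kernels this listing appears to match, the addend is the group's configured latency target, so a bio counts as on-target when its measured latency stays within the bucket average plus that target. A sketch under that assumption:

    #include <stdbool.h>
    #include <stdio.h>

    static bool bio_on_target(unsigned long bio_latency,
                              unsigned long bucket_avg_latency,
                              unsigned long latency_target)
    {
        return bio_latency <= bucket_avg_latency + latency_target;
    }

    int main(void)
    {
        /* bucket average 1500 us, target 500 us: 1800 us is on target */
        printf("on target: %d\n", bio_on_target(1800, 1500, 500));
        return 0;
    }
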