Lines Matching defs:cfs_b (struct cfs_bandwidth, CFS bandwidth control in kernel/sched/fair.c)
5616 * requires cfs_b->lock
5618 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
5622 if (unlikely(cfs_b->quota == RUNTIME_INF))
5625 cfs_b->runtime += cfs_b->quota;
5626 runtime = cfs_b->runtime_snap - cfs_b->runtime;
5628 cfs_b->burst_time += runtime;
5629 cfs_b->nr_burst++;
5632 cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
5633 cfs_b->runtime_snap = cfs_b->runtime;
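
The fragment above (lines 5616-5633) is the per-period refill: a full quota is added, anything consumed beyond the previous snapshot is accounted as burst usage, and the pool is clamped to quota + burst. A minimal userspace model of that arithmetic, not the kernel code; the struct name and the example values are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-ins for the cfs_bandwidth fields the refill path touches. */
    struct bw {
        int64_t quota;        /* runtime added every period, ns        */
        int64_t burst;        /* extra headroom the pool may carry     */
        int64_t runtime;      /* runtime currently available           */
        int64_t runtime_snap; /* pool level right after the last refill */
        int64_t burst_time;   /* total ns that were served from burst  */
        int64_t nr_burst;     /* periods in which burst was consumed   */
    };

    static int64_t min64(int64_t a, int64_t b) { return a < b ? a : b; }

    /* Same arithmetic as __refill_cfs_bandwidth_runtime(), minus locking. */
    static void refill(struct bw *b)
    {
        int64_t over;

        b->runtime += b->quota;

        /* If the pool is still below the last snapshot even after adding a
         * full quota, the difference was served out of the burst headroom. */
        over = b->runtime_snap - b->runtime;
        if (over > 0) {
            b->burst_time += over;
            b->nr_burst++;
        }

        b->runtime = min64(b->runtime, b->quota + b->burst);
        b->runtime_snap = b->runtime;
    }

    int main(void)
    {
        /* 50ms quota, 20ms burst: last refill left 70ms, 60ms were used. */
        struct bw b = { .quota = 50000000, .burst = 20000000,
                        .runtime = 10000000, .runtime_snap = 70000000 };

        refill(&b);
        printf("runtime=%lld burst_time=%lld nr_burst=%lld\n",
               (long long)b.runtime, (long long)b.burst_time,
               (long long)b.nr_burst);
        return 0;
    }

With those numbers it prints runtime=60000000 burst_time=10000000 nr_burst=1: the 10ms by which last period's usage exceeded the 50ms quota came out of the burst allowance.
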
5642 static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b,
5647 lockdep_assert_held(&cfs_b->lock);
5652 if (cfs_b->quota == RUNTIME_INF)
5655 start_cfs_bandwidth(cfs_b);
5657 if (cfs_b->runtime > 0) {
5658 amount = min(cfs_b->runtime, min_amount);
5659 cfs_b->runtime -= amount;
5660 cfs_b->idle = 0;
5672 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5675 raw_spin_lock(&cfs_b->lock);
5676 ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
5677 raw_spin_unlock(&cfs_b->lock);
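
Lines 5642-5677 are the hand-off from the global pool to a per-CPU cfs_rq: runtime is pulled in slices (sched_cfs_bandwidth_slice(), 5ms by default), the period timer is started if needed, and the return value says whether the cfs_rq ended up with positive local runtime. A simplified model of that hand-off, with the locking and the timer left out; the struct names are made up and RUNTIME_INF is replaced by INT64_MAX here:

    #include <stdio.h>
    #include <stdint.h>

    #define RUNTIME_INF INT64_MAX    /* model stand-in for the kernel's ~0ULL */

    struct global_pool { int64_t quota, runtime; int idle; };
    struct local_rq    { int64_t runtime_remaining; };

    static int64_t min64(int64_t a, int64_t b) { return a < b ? a : b; }

    /*
     * Model of __assign_cfs_rq_runtime(): top the local pool up by roughly
     * `target` ns and report whether the cfs_rq may keep running.
     */
    static int assign_runtime(struct global_pool *g, struct local_rq *l,
                              int64_t target)
    {
        /* runtime_remaining is <= 0 on entry, so `want` covers the debt too */
        int64_t want = target - l->runtime_remaining;
        int64_t amount = 0;

        if (g->quota == RUNTIME_INF) {
            amount = want;                 /* no limit configured */
        } else if (g->runtime > 0) {
            amount = min64(g->runtime, want);
            g->runtime -= amount;
            g->idle = 0;                   /* quota was consumed this period */
        }

        l->runtime_remaining += amount;
        return l->runtime_remaining > 0;
    }

    int main(void)
    {
        struct global_pool g = { .quota = 50000000, .runtime = 3000000 };
        struct local_rq    l = { .runtime_remaining = -1000000 };

        /* Ask for a 5ms slice; only 3ms is left globally, which still
         * clears the 1ms local debt, so the cfs_rq is not throttled. */
        int ok = assign_runtime(&g, &l, 5000000);
        printf("can_run=%d local=%lld global=%lld\n", ok,
               (long long)l.runtime_remaining, (long long)g.runtime);
        return 0;
    }
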
5788 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5792 raw_spin_lock(&cfs_b->lock);
5794 if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) {
5801 * for 1ns of runtime rather than just check cfs_b.
5806 &cfs_b->throttled_cfs_rq);
5808 raw_spin_unlock(&cfs_b->lock);
5879 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5890 raw_spin_lock(&cfs_b->lock);
5892 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
5896 raw_spin_unlock(&cfs_b->lock);
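
Lines 5788-5808 are the throttle side: the cfs_rq asks for just 1ns so a late race with newly available bandwidth cannot strand it for a whole period; if even that fails it is added to cfs_b->throttled_cfs_rq. Lines 5879-5896 are the unthrottle side, which closes the stopwatch and adds the elapsed time to cfs_b->throttled_time, the accumulator that (converted to microseconds) surfaces as throttled_usec in cpu.stat. A tiny model of just that bookkeeping; field and function names are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    /* Stopwatch model of the throttled-time statistic kept on cfs_b. */
    struct throttle_stats {
        uint64_t throttled_clock;   /* rq clock when throttling began, ns */
        uint64_t throttled_time;    /* total ns spent throttled           */
    };

    static void throttle(struct throttle_stats *s, uint64_t now)
    {
        /* the real path also moves the cfs_rq onto cfs_b->throttled_cfs_rq */
        s->throttled_clock = now;
    }

    static void unthrottle(struct throttle_stats *s, uint64_t now)
    {
        s->throttled_time += now - s->throttled_clock;
    }

    int main(void)
    {
        struct throttle_stats s = { 0 };

        throttle(&s, 1000000000);    /* throttled at t = 1.000s  */
        unthrottle(&s, 1030000000);  /* runs again at t = 1.030s */
        printf("throttled for %llu us\n",
               (unsigned long long)(s.throttled_time / 1000));
        return 0;
    }
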
6043 static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
6054 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
6076 raw_spin_lock(&cfs_b->lock);
6078 if (runtime > cfs_b->runtime)
6079 runtime = cfs_b->runtime;
6080 cfs_b->runtime -= runtime;
6081 remaining = cfs_b->runtime;
6082 raw_spin_unlock(&cfs_b->lock);
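
Lines 6043-6082 are the unthrottle distribution: each throttled cfs_rq is topped up to just 1ns above zero, bounded by what the global pool still holds, and the return value reports whether anything stayed throttled. A userspace sketch of that loop, without the RCU walk and per-rq locking; the types are illustrative:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* One throttled runqueue: how far into debt its local pool is (ns). */
    struct trq { int64_t runtime_remaining; bool throttled; };

    static int64_t min64(int64_t a, int64_t b) { return a < b ? a : b; }

    /*
     * Model of distribute_cfs_runtime(): walk the throttled list and give
     * each entry just enough to reach +1ns, capped by what is left in the
     * global pool.  Returns true if anything is still throttled afterwards.
     */
    static bool distribute(int64_t *pool, struct trq *rqs, int nr)
    {
        bool still_throttled = false;

        for (int i = 0; i < nr; i++) {
            int64_t want, give;

            if (!rqs[i].throttled)
                continue;
            if (*pool <= 0) {
                still_throttled = true;
                break;
            }

            want = -rqs[i].runtime_remaining + 1;    /* reach exactly +1ns */
            give = min64(want, *pool);
            *pool -= give;
            rqs[i].runtime_remaining += give;

            if (rqs[i].runtime_remaining > 0)
                rqs[i].throttled = false;            /* would be unthrottled */
            else
                still_throttled = true;
        }
        return still_throttled;
    }

    int main(void)
    {
        struct trq rqs[] = {
            { .runtime_remaining = -2000000, .throttled = true },
            { .runtime_remaining = -4000000, .throttled = true },
        };
        int64_t pool = 5000000;        /* 5ms left after the refill */

        bool t = distribute(&pool, rqs, 2);
        printf("still_throttled=%d pool=%lld rq0=%lld rq1=%lld\n", t,
               (long long)pool,
               (long long)rqs[0].runtime_remaining,
               (long long)rqs[1].runtime_remaining);
        return 0;
    }

Topping each cfs_rq up to only +1ns keeps the distribution cheap; an unthrottled cfs_rq pulls a full slice through assign_cfs_rq_runtime() on its own once it runs again.
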
6116 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
6119 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
6124 if (cfs_b->quota == RUNTIME_INF)
6127 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
6128 cfs_b->nr_periods += overrun;
6130 /* Refill extra burst quota even if cfs_b->idle */
6131 __refill_cfs_bandwidth_runtime(cfs_b);
6137 if (cfs_b->idle && !throttled)
6142 cfs_b->idle = 1;
6147 cfs_b->nr_throttled += overrun;
6150 * This check is repeated as we release cfs_b->lock while we unthrottle.
6152 while (throttled && cfs_b->runtime > 0) {
6153 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6154 /* we can't nest cfs_b->lock while distributing bandwidth */
6155 throttled = distribute_cfs_runtime(cfs_b);
6156 raw_spin_lock_irqsave(&cfs_b->lock, flags);
6165 cfs_b->idle = 0;
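
Lines 6116-6165 are the period timer's decision flow: refill, deactivate the timer if the group has gone quiet for two periods, otherwise mark it as potentially idle, and, when something is throttled, keep redistributing (dropping cfs_b->lock around each pass) until the pool runs dry or everyone is unthrottled. A condensed, lock-free model of just the control flow; the struct and return convention are illustrative:

    #include <stdio.h>
    #include <stdbool.h>

    /* Condensed decision state of do_sched_cfs_period_timer(). */
    struct period_state {
        bool quota_unlimited;   /* cfs_b->quota == RUNTIME_INF           */
        bool idle;              /* no runtime was handed out last period */
        bool throttled;         /* cfs_b->throttled_cfs_rq is non-empty  */
    };

    /* Returns true when the period timer may be deactivated. */
    static bool period_tick(struct period_state *s)
    {
        if (s->quota_unlimited)
            return true;            /* nothing to enforce, stop the timer */

        /* the refill (__refill_cfs_bandwidth_runtime) would happen here */

        if (s->idle && !s->throttled)
            return true;            /* group went quiet, stop the timer */

        if (!s->throttled) {
            s->idle = true;         /* candidate for going idle next tick */
            return false;
        }

        /* distribute_cfs_runtime() would loop here, dropping and retaking
         * cfs_b->lock, until the pool runs dry or nothing is throttled */
        s->idle = false;            /* throttling forces the timer to stay */
        return false;
    }

    int main(void)
    {
        struct period_state s = { .idle = true, .throttled = false };

        printf("deactivate=%d\n", period_tick(&s));    /* 1: group went quiet */
        return 0;
    }
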
6183 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
6187 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
6189 struct hrtimer *refresh_timer = &cfs_b->period_timer;
6204 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
6209 if (runtime_refresh_within(cfs_b, min_left))
6213 if (cfs_b->slack_started)
6215 cfs_b->slack_started = true;
6217 hrtimer_start(&cfs_b->slack_timer,
6225 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
6231 raw_spin_lock(&cfs_b->lock);
6232 if (cfs_b->quota != RUNTIME_INF) {
6233 cfs_b->runtime += slack_runtime;
6236 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
6237 !list_empty(&cfs_b->throttled_cfs_rq))
6238 start_cfs_slack_bandwidth(cfs_b);
6240 raw_spin_unlock(&cfs_b->lock);
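
Lines 6204-6240 are the slack path: a cfs_rq that dequeues its last task hands back everything above a 1ms cushion (min_cfs_rq_runtime), and if the pool then holds more than one slice while siblings are throttled, the slack timer is armed instead of unthrottling under rq->lock. A sketch of that bookkeeping; the constants shown are the kernel defaults, and the struct is illustrative:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define MIN_CFS_RQ_RUNTIME    1000000LL    /* keep 1ms locally            */
    #define BANDWIDTH_SLICE       5000000LL    /* default hand-out slice, 5ms */

    struct global_pool { int64_t runtime; bool has_throttled; bool slack_armed; };

    /*
     * Model of __return_cfs_rq_runtime(): when a cfs_rq goes idle, give
     * everything above a 1ms cushion back to the global pool, and arm the
     * slack timer if the pool is now worth distributing to throttled siblings.
     */
    static void return_slack(struct global_pool *g, int64_t *local_remaining)
    {
        int64_t slack = *local_remaining - MIN_CFS_RQ_RUNTIME;

        if (slack <= 0)
            return;

        g->runtime += slack;
        if (g->runtime > BANDWIDTH_SLICE && g->has_throttled)
            g->slack_armed = true;    /* defer unthrottling to the slack timer */

        *local_remaining -= slack;
    }

    int main(void)
    {
        struct global_pool g = { .runtime = 3000000, .has_throttled = true };
        int64_t local = 4000000;    /* 4ms left when the cfs_rq went idle */

        return_slack(&g, &local);
        printf("pool=%lld local=%lld slack_timer=%d\n",
               (long long)g.runtime, (long long)local, g.slack_armed);
        return 0;
    }
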
6261 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
6267 raw_spin_lock_irqsave(&cfs_b->lock, flags);
6268 cfs_b->slack_started = false;
6270 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
6271 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6275 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
6276 runtime = cfs_b->runtime;
6278 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6283 distribute_cfs_runtime(cfs_b);
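
Lines 6261-6283 (together with the runtime_refresh_within() check at 6187) are the slack timer handler: returning leftover runtime to throttled siblings is only worth it if the periodic refill is not about to fire anyway, and only when more than one slice has accumulated. A sketch of that gate; the constants are the kernel defaults (2ms and 5ms), the function name is made up:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define MIN_BANDWIDTH_EXPIRATION    2000000LL    /* 2ms */
    #define BANDWIDTH_SLICE             5000000LL    /* 5ms */

    /*
     * Model of the checks in do_sched_cfs_slack_timer(): skip the work if
     * the period timer is about to refill and distribute anyway, and only
     * bother when more than one hand-out slice has accumulated.
     */
    static bool slack_should_distribute(int64_t ns_until_period_refresh,
                                        int64_t pool_runtime)
    {
        if (ns_until_period_refresh < MIN_BANDWIDTH_EXPIRATION)
            return false;        /* let the imminent period refill do it */

        return pool_runtime > BANDWIDTH_SLICE;
    }

    int main(void)
    {
        printf("%d\n", slack_should_distribute(10000000, 8000000));    /* 1 */
        printf("%d\n", slack_should_distribute(1000000,  8000000));    /* 0 */
        return 0;
    }
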
6348 struct cfs_bandwidth *cfs_b =
6351 do_sched_cfs_slack_timer(cfs_b);
6360 struct cfs_bandwidth *cfs_b =
6367 raw_spin_lock_irqsave(&cfs_b->lock, flags);
6369 overrun = hrtimer_forward_now(timer, cfs_b->period);
6373 idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
6376 u64 new, old = ktime_to_ns(cfs_b->period);
6385 cfs_b->period = ns_to_ktime(new);
6386 cfs_b->quota *= 2;
6387 cfs_b->burst *= 2;
6393 div_u64(cfs_b->quota, NSEC_PER_USEC));
6399 div_u64(cfs_b->quota, NSEC_PER_USEC));
6407 cfs_b->period_active = 0;
6408 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
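
Lines 6360-6408 are the hrtimer callback for the period timer: it forwards the timer, runs do_sched_cfs_period_timer() for each overrun, and if it keeps overrunning it doubles period, quota and burst together (warning once per scaling step) so the configured CPU share is preserved while the timer becomes serviceable again, up to a 1s period cap. A model of that scaling fallback; the 1s cap matches the kernel's max_cfs_quota_period, the struct is illustrative:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define MAX_CFS_QUOTA_PERIOD    1000000000ULL    /* 1s cap on the period */

    struct bw_cfg { uint64_t period, quota, burst; };    /* all in ns */

    /*
     * Model of the fallback in sched_cfs_period_timer(): double period,
     * quota and burst so the quota/period ratio (the allowed CPU share)
     * stays the same.  Returns false once the 1s cap would be exceeded.
     */
    static bool scale_up(struct bw_cfg *c)
    {
        uint64_t new_period = c->period * 2;

        if (new_period >= MAX_CFS_QUOTA_PERIOD)
            return false;    /* cannot scale further without losing precision */

        c->period = new_period;
        c->quota *= 2;
        c->burst *= 2;
        return true;
    }

    int main(void)
    {
        /* 100us period with 50us quota: a 50% share with a very short period */
        struct bw_cfg c = { .period = 100000, .quota = 50000, .burst = 0 };

        while (scale_up(&c))
            ;    /* keep doubling, as repeated overruns would */

        printf("period=%lluus quota=%lluus (share still %.0f%%)\n",
               (unsigned long long)(c.period / 1000),
               (unsigned long long)(c.quota / 1000),
               100.0 * c.quota / c.period);
        return 0;
    }
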
6413 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent)
6415 raw_spin_lock_init(&cfs_b->lock);
6416 cfs_b->runtime = 0;
6417 cfs_b->quota = RUNTIME_INF;
6418 cfs_b->period = ns_to_ktime(default_cfs_period());
6419 cfs_b->burst = 0;
6420 cfs_b->hierarchical_quota = parent ? parent->hierarchical_quota : RUNTIME_INF;
6422 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
6423 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
6424 cfs_b->period_timer.function = sched_cfs_period_timer;
6427 hrtimer_set_expires(&cfs_b->period_timer,
6428 get_random_u32_below(cfs_b->period));
6429 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6430 cfs_b->slack_timer.function = sched_cfs_slack_timer;
6431 cfs_b->slack_started = false;
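
Lines 6413-6431 are init_cfs_bandwidth(): the defaults are no limit (RUNTIME_INF), a 100ms period (default_cfs_period()), zero burst, hierarchical_quota inherited from the parent, a CPU-pinned absolute period hrtimer given a randomized first expiry so different groups' timers interleave, and a relative slack hrtimer. These fields are what the cgroup interface ultimately configures. A hedged user-space example of setting them through cgroup v2, where cpu.max takes "<quota> <period>" in microseconds and cpu.max.burst the burst in microseconds; the cgroup path is hypothetical and error handling is minimal:

    #include <stdio.h>

    int main(void)
    {
        const char *grp = "/sys/fs/cgroup/demo";    /* hypothetical cgroup */
        char path[256];
        FILE *f;

        /* 50ms of CPU time per 100ms period: half a CPU on average */
        snprintf(path, sizeof(path), "%s/cpu.max", grp);
        f = fopen(path, "w");
        if (f) {
            fprintf(f, "50000 100000\n");
            fclose(f);
        }

        /* allow banking up to 20ms of unused quota for bursts */
        snprintf(path, sizeof(path), "%s/cpu.max.burst", grp);
        f = fopen(path, "w");
        if (f) {
            fprintf(f, "20000\n");
            fclose(f);
        }

        return 0;
    }
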
6444 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
6446 lockdep_assert_held(&cfs_b->lock);
6448 if (cfs_b->period_active)
6451 cfs_b->period_active = 1;
6452 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
6453 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
6456 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
6461 if (!cfs_b->throttled_cfs_rq.next)
6464 hrtimer_cancel(&cfs_b->period_timer);
6465 hrtimer_cancel(&cfs_b->slack_timer);
6508 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
6511 raw_spin_lock(&cfs_b->lock);
6512 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
6513 raw_spin_unlock(&cfs_b->lock);
6628 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent) {}
6636 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}