Lines Matching defs:weight

173 lw->weight += inc;
179 lw->weight -= dec;
185 lw->weight = w;
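The three matches above (173-185) all pair a weight update with resetting the cached inverse. A minimal userspace sketch of those helpers, assuming the kernel's two-field struct load_weight (weight plus a cached inverse):

#include <stdint.h>

struct load_weight {
	unsigned long weight;
	uint32_t inv_weight;	/* cached 2^32 / weight; 0 means stale */
};

/* Any change to ->weight invalidates the cached inverse, so it is
 * recomputed lazily the next time the fast division path needs it. */
static void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}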
247 w = scale_load_down(lw->weight);
258 * delta_exec * weight / lw.weight
260 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
262 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
266 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
267 * weight/lw.weight <= 1, and therefore our shift will also be positive.
269 static u64 fair_calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
271 u64 fact = scale_load_down(weight);
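The comment block above (258-267) explains why delta_exec * weight / lw.weight can be computed as a multiply by the cached inverse followed by a shift. A compilable sketch of that trick; the example values are chosen here for illustration, and the in-kernel version additionally guards overflow of the intermediate product by trimming the shift as the factors grow:

#include <stdint.h>
#include <stdio.h>

#define WMULT_CONST	0xffffffffUL	/* ~0U, as in the kernel */
#define WMULT_SHIFT	32

int main(void)
{
	uint64_t delta_exec = 4000000;	/* 4ms of runtime, in ns (example) */
	uint64_t weight     = 1024;	/* entity weight (example) */
	uint64_t lw_weight  = 3072;	/* runqueue weight (example) */

	/* what the inv_weight cache holds: 2^32 / lw.weight */
	uint64_t inv_weight = WMULT_CONST / lw_weight;

	/* delta_exec * weight / lw.weight, with no 64-bit division */
	uint64_t approx = (delta_exec * weight * inv_weight) >> WMULT_SHIFT;
	uint64_t exact  = delta_exec * weight / lw_weight;

	printf("approx=%llu exact=%llu\n",
	       (unsigned long long)approx, (unsigned long long)exact);
	return 0;
}

With these inputs the shift-based result lands within one unit of the exact quotient of 1333333, which is why the kernel can afford to skip the division on this hot path.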
710 if (unlikely(se->load.weight != NICE_0_LOAD)) {
736 * proportional to the weight.
761 update_load_add(&lw, se->load.weight);
764 slice = fair_calc_delta(slice, se->load.weight, load);
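The matches at 736-764 show the slice computation: the entity's weight is folded into the runqueue weight and the period is scaled by the resulting ratio via fair_calc_delta(). A hedged sketch of that proportionality; the function and parameter names here are illustrative, not kernel API:

#include <stdint.h>

/* slice = period * se->load.weight / cfs_rq->load.weight,
 * cf. the fair_calc_delta() call matched at line 764. */
static uint64_t sched_slice_sketch(uint64_t period,
				   unsigned long se_weight,
				   unsigned long rq_weight)
{
	return period * se_weight / rq_weight;
}

For example, with a 6ms period and an entity holding a third of the total queue weight, the slice works out to 2ms.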
804 sa->load_avg = scale_load_down(se->load.weight);
816 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
848 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
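Lines 816-848 initialize a new task's util_avg in proportion to its weight relative to the runqueue's load, per the formula quoted at 816. A sketch of that arithmetic; the helper name and the cap parameter are illustrative, and the real code clamps the result similarly:

/* util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight,
 * then clamped; the +1 keeps the division defined on an idle queue. */
static unsigned long init_task_util_sketch(unsigned long cfs_util_avg,
					   unsigned long cfs_load_avg,
					   unsigned long se_weight,
					   unsigned long util_avg_cap)
{
	unsigned long util = cfs_util_avg * se_weight / (cfs_load_avg + 1);

	return util < util_avg_cap ? util : util_avg_cap;
}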
1433 * task group, on a particular numa node. The group weight is given a
1574 unsigned int weight;
1621 if ((ns->nr_running > ns->weight) &&
1627 if ((ns->nr_running < ns->weight) ||
1700 ns->weight = cpumask_weight(cpumask_of_node(nid));
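In the NUMA stats above, ns->weight is simply the node's CPU count (line 1700), so the tests at 1621 and 1627 ask whether a node runs more or fewer tasks than it has CPUs. A trivial sketch of that capacity test, with an illustrative name:

/* A node has spare capacity while its runnable tasks fit its CPUs. */
static int node_has_capacity_sketch(unsigned int nr_running,
				    unsigned int nr_cpus /* ns->weight */)
{
	return nr_running < nr_cpus;
}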
1874 * (not part of a group), use the task weight instead.
3109 update_load_add(&cfs_rq->load, se->load.weight);
3123 update_load_sub(&cfs_rq->load, se->load.weight);
3205 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight)
3212 update_load_sub(&cfs_rq->load, se->load.weight);
3216 update_load_set(&se->load, weight);
3228 update_load_add(&cfs_rq->load, se->load.weight);
3237 unsigned long weight = scale_load(sched_prio_to_weight[prio]);
3239 reweight_entity(cfs_rq, se, weight);
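reweight_entity() (matched at 3205-3239) must keep the runqueue weight consistent while changing an entity's weight: subtract the old weight, install the new one, add it back. A self-contained sketch of that ordering, reusing the load_weight helpers from the first sketch above; the struct names are simplified stand-ins:

struct sched_entity_sketch {
	struct load_weight load;
	int on_rq;
};

struct cfs_rq_sketch {
	struct load_weight load;
};

static void reweight_entity_sketch(struct cfs_rq_sketch *cfs_rq,
				   struct sched_entity_sketch *se,
				   unsigned long weight)
{
	if (se->on_rq)				/* dequeue side of the swap */
		update_load_sub(&cfs_rq->load, se->load.weight);

	update_load_set(&se->load, weight);	/* install the new weight */

	if (se->on_rq)				/* enqueue side of the swap */
		update_load_add(&cfs_rq->load, weight);
}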
3249 * That is, the weight of a group entity, is the proportional share of the
3250 * group weight based on the group runqueue weights. That is:
3252 * tg->weight * grq->load.weight
3253 * ge->load.weight = ----------------------------- (1)
3254 * \Sum grq->load.weight
3262 * grq->load.weight -> grq->avg.load_avg (2)
3266 * tg->weight * grq->avg.load_avg
3267 * ge->load.weight = ------------------------------ (3)
3282 * tg->weight * grq->load.weight
3283 * ge->load.weight = ----------------------------- = tg->weight (4)
3284 * grq->load.weight
3291 * ge->load.weight =
3293 * tg->weight * grq->load.weight
3295 * tg->load_avg - grq->avg.load_avg + grq->load.weight
3297 * But because grq->load.weight can drop to 0, resulting in a divide by zero,
3301 * tg->weight * grq->load.weight
3302 * ge->load.weight = ----------------------------- (6)
3308 * max(grq->load.weight, grq->avg.load_avg)
3312 * overestimates the ge->load.weight and therefore:
3314 * \Sum ge->load.weight >= tg->weight
3325 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
3341 * assigned as a minimum load.weight to the sched_entity representing
3375 if (likely(se->load.weight == shares)) {
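The derivation above lands on approximation (6), which the code matched at line 3325 implements while forcing the denominator to stay at least as large as the local load. A hedged sketch of that arithmetic; the clamping to MIN_SHARES mentioned around line 3341 is noted but elided:

/*
 *                        tg->shares * load
 * shares = ---------------------------------------------------
 *          tg->load_avg - grq->tg_load_avg_contrib + load
 *
 * load = max(grq->load.weight, grq->avg.load_avg), cf. line 3325.
 */
static long calc_group_shares_sketch(long tg_shares, long tg_load_avg,
				     long grq_contrib,
				     long grq_weight, long grq_load_avg)
{
	long load = grq_weight > grq_load_avg ? grq_weight : grq_load_avg;
	long tg_weight = tg_load_avg - grq_contrib + load;
	long shares = tg_shares * load;

	if (tg_weight)
		shares /= tg_weight;

	/* the real function clamps to [MIN_SHARES, tg_shares] */
	return shares;
}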
3511 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
3516 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
3524 * ge->load.weight * grq->avg.load_avg
3526 * grq->load.weight
3530 * Because while for entities historical weight is not important and we
3557 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
3644 if (scale_load_down(gcfs_rq->load.weight)) {
3645 load_sum = div_s64(gcfs_rq->avg.load_sum, scale_load_down(gcfs_rq->load.weight));
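Lines 3511-3645 recover a runnable_sum for the group runqueue by dividing its load_sum by its weight, guarding the weight against zero exactly as the test matched at 3644 does. A sketch of just that division, with illustrative names:

#include <stdint.h>

static int64_t grq_runnable_sum_sketch(int64_t grq_load_sum,
				       unsigned long grq_weight)
{
	/* grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight,
	 * only meaningful while the queue actually carries weight. */
	if (!grq_weight)
		return 0;
	return grq_load_sum / (int64_t)grq_weight;
}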
4202 * Where 'w' is the weight of new samples, which is configured to be
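The util_est comment at 4202 fixes the sample weight at w = 1/4, so the exponential moving average reduces to shifts. A sketch of that update; the function name is illustrative, while the scale-up/add/scale-down staging mirrors how the kernel keeps the math in fixed point:

#define UTIL_EST_WEIGHT_SHIFT	2	/* w = 1/4 */

static unsigned long ewma_update_sketch(unsigned long ewma,
					unsigned long sample)
{
	/* ewma(t) = ewma(t-1) + w * (sample - ewma(t-1)) */
	long diff = (long)(sample - ewma);

	ewma <<= UTIL_EST_WEIGHT_SHIFT;
	ewma  += diff;
	ewma >>= UTIL_EST_WEIGHT_SHIFT;
	return ewma;
}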
4337 * however the extra weight of the new task will slow them down a
4445 * - For group_entity, update its weight to reflect the new share of
4447 * - Add its new weight to cfs_rq->load.weight
4544 * - Subtract its previous weight from cfs_rq->load.weight.
4545 * - For group entity, update its weight to reflect the new share
4648 * least twice that of our own weight (i.e. don't track it
4649 * when there are only lesser-weight tasks around):
4651 if (schedstat_enabled() && rq_of(cfs_rq)->cfs.load.weight >= 2 * se->load.weight) {
5037 if (qcfs_rq->load.weight) {
5078 if (!cfs_rq->load.weight) {
5936 if (cfs_rq->load.weight) {
6118 * wake_affine_weight() - considers the weight to reflect the average
6179 * If sync, adjust the weight of prev_eff_load such that if
6295 int weight;
6317 weight = sd->span_weight;
6321 if (weight <= tmp->span_weight) {
7697 * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
7702 * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
7705 * The weight average is an exponential decay average of the instantaneous
7706 * weight:
7721 * a few fun cases generally called infeasible weight scenarios.
7795 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
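The balancing comments above (7697-7795) define a CPU's instantaneous weight as the sum of its runnable task weights, then smooth it with an exponential decay average. A sketch of both steps under those definitions; the names and the decay factor of 1/2 are picked purely for illustration:

#include <stddef.h>

/* W_i = \Sum_j w_i,j: instantaneous weight of CPU i's runnable tasks */
static unsigned long cpu_weight_sketch(const unsigned long *task_weights,
				       size_t nr_running)
{
	unsigned long w = 0;
	size_t j;

	for (j = 0; j < nr_running; j++)
		w += task_weights[j];
	return w;
}

/* W_i,n = d * W_i + (1 - d) * W_i,n-1: exponential decay of the
 * instantaneous weight, here with d = 1/2 for illustration. */
static unsigned long cpu_weight_avg_sketch(unsigned long prev_avg,
					   unsigned long inst_weight)
{
	return (prev_avg + inst_weight) / 2;
}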
8431 if (cfs_rq->load.weight) {
8998 /* Isolated CPU has no weight */
11981 /* guarantee group entities always have weight */
11993 * We can't change the weight of the root cgroup.
12056 if (rq->cfs.load.weight) {