Lines matching refs: util (kernel/sched/fair.c)
1570 unsigned long util;
1622 (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) ||
1628 (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) &&
1682 ns->util += cpu_util(cpu);
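
The two conditions at 1622/1628 classify a NUMA node as overloaded or as having
spare capacity by weighing the node's summed utilization (accumulated at 1682)
against its compute capacity with an imbalance_pct margin. A minimal userspace
sketch of the overload test, assuming the usual imbalance_pct of 125 (a 25%
margin); the names are illustrative, not kernel API:

    #include <stdio.h>

    /* Model of the capacity-vs-utilization test at 1622: a node is
     * overloaded once capacity * 100 < util * imbalance_pct. */
    static int node_overloaded(unsigned long compute_capacity,
                               unsigned long util,
                               unsigned int imbalance_pct)
    {
        return (compute_capacity * 100) < (util * imbalance_pct);
    }

    int main(void)
    {
        /* 4 CPUs of capacity 1024 each, vs. two utilization sums. */
        printf("%d\n", node_overloaded(4 * 1024, 3500, 125)); /* 1: overloaded */
        printf("%d\n", node_overloaded(4 * 1024, 3000, 125)); /* 0: within margin */
        return 0;
    }
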
3405 * As is, the util number is not freq-invariant (we'd have to
3513 * And since, like util, the runnable part should be directly transferable,
3759 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
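
The comment fragments at 3405-3759 belong to the PELT machinery that produces
util_avg in the first place: runnable/running time is accumulated in 1024us
periods and decayed geometrically with a factor y chosen so that y^32 = 0.5.
A rough floating-point sketch of that decay (the kernel uses a fixed-point
lookup table, and this ignores the frequency invariance the 3405 comment
discusses); build with -lm:

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        /* Decay factor with a half-life of 32 periods: y^32 == 0.5. */
        const double y = pow(0.5, 1.0 / 32.0);
        /* Maximum attainable sum: 1024 contributed every period, forever. */
        const double max_sum = 1024.0 / (1.0 - y);
        double sum = 0.0;

        /* A task that has been running for the last 64 periods only. */
        for (int n = 0; n < 64; n++)
            sum += 1024.0 * pow(y, n);

        printf("util ~= %.0f (out of 1024)\n", 1024.0 * sum / max_sum); /* ~768 */
        return 0;
    }
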
6031 /* Discount task's util from CPU's util */
6285 * We need task's util for cpu_util_without, sync it up to
6714 unsigned int util;
6725 util = READ_ONCE(cfs_rq->avg.util_avg);
6728 util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
6731 return min_t(unsigned long, util, capacity_orig_of(cpu));
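
Lines 6714-6731 are the body of cpu_util(): take the larger of the decayed
PELT average and the util_est.enqueued snapshot, then clamp to the CPU's
original capacity. A self-contained model of just that arithmetic, assuming
UTIL_EST is enabled (struct and names are illustrative):

    #include <stdio.h>

    struct cfs_avg_model {
        unsigned long util_avg;          /* decayed PELT average */
        unsigned long util_est_enqueued; /* faster-reacting estimate */
    };

    static unsigned long cpu_util_model(const struct cfs_avg_model *avg,
                                        unsigned long capacity_orig)
    {
        unsigned long util = avg->util_avg;

        /* util_est recovers quickly after wakeups; trust whichever is higher. */
        if (util < avg->util_est_enqueued)
            util = avg->util_est_enqueued;

        /* Clamp, mirroring the min_t() against capacity_orig_of() at 6731. */
        return util < capacity_orig ? util : capacity_orig;
    }

    int main(void)
    {
        struct cfs_avg_model avg = { .util_avg = 300, .util_est_enqueued = 450 };
        printf("%lu\n", cpu_util_model(&avg, 1024)); /* 450 */
        return 0;
    }
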
6750 unsigned int util;
6771 util = max_t(long, cpu_util(cpu) - task_util(p), 0);
6772 return min_t(unsigned long, util, capacity_orig_of(cpu));
6777 util = READ_ONCE(cfs_rq->avg.util_avg);
6779 /* Discount task's util from CPU's util */
6780 lsub_positive(&util, task_util(p));
6832 util = max(util, estimated);
6840 return min_t(unsigned long, util, capacity_orig_of(cpu));
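
Lines 6750-6840 come from cpu_util_without(): when placing a waking task, its
own contribution is discounted from its previous CPU so it is not counted
twice (the same discount appears at 6031, and the comment at 6285 notes the
task's util must be synced first for the subtraction to be meaningful).
lsub_positive() is a saturating subtract, so the result never underflows. A
sketch of that core step, with the real function's util_est handling elided:

    #include <stdio.h>

    /* Saturating subtract, modeled on the kernel's lsub_positive(). */
    static void lsub_positive_model(unsigned long *val, unsigned long sub)
    {
        *val = (*val > sub) ? (*val - sub) : 0;
    }

    static unsigned long cpu_util_without_model(unsigned long cpu_util_avg,
                                                unsigned long task_util,
                                                unsigned long capacity_orig)
    {
        unsigned long util = cpu_util_avg;

        /* Discount task's util from CPU's util, as at 6780. */
        lsub_positive_model(&util, task_util);
        return util < capacity_orig ? util : capacity_orig;
    }

    int main(void)
    {
        printf("%lu\n", cpu_util_without_model(500, 200, 1024)); /* 300 */
        printf("%lu\n", cpu_util_without_model(150, 200, 1024)); /* 0 (saturated) */
        return 0;
    }
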
6856 unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg);
6865 sub_positive(&util, task_util(p));
6867 util += task_util(p);
6883 util = max(util, util_est);
6886 return min(util, capacity_orig_of(cpu));
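
Lines 6856-6886 are cpu_util_next(cpu, p, dst_cpu), which predicts a CPU's
utilization under a proposed migration: subtract the task if it would leave
this CPU, add it if it would arrive, otherwise leave the average untouched.
A sketch under the same elision of util_est as above:

    #include <stdio.h>

    static void sub_positive_model(unsigned long *val, unsigned long sub)
    {
        *val = (*val > sub) ? (*val - sub) : 0;
    }

    static unsigned long cpu_util_next_model(int cpu, int task_cpu, int dst_cpu,
                                             unsigned long cpu_util_avg,
                                             unsigned long task_util,
                                             unsigned long capacity_orig)
    {
        unsigned long util = cpu_util_avg;

        if (task_cpu == cpu && dst_cpu != cpu)
            sub_positive_model(&util, task_util); /* task would migrate away */
        else if (task_cpu != cpu && dst_cpu == cpu)
            util += task_util;                    /* task would migrate here */

        return util < capacity_orig ? util : capacity_orig;
    }

    int main(void)
    {
        /* Task (util 200) currently on CPU0; what if it moved to CPU1? */
        printf("cpu0: %lu\n", cpu_util_next_model(0, 0, 1, 500, 200, 1024)); /* 300 */
        printf("cpu1: %lu\n", cpu_util_next_model(1, 0, 1, 400, 200, 1024)); /* 600 */
        return 0;
    }
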
6994 unsigned long cpu_cap, util, base_energy = 0;
7037 util = cpu_util_next(cpu, p, cpu);
7040 lsub_positive(&spare_cap, util);
7049 util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
7050 if (!fits_capacity(util, cpu_cap)) {
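
Lines 6994-7050 sit in find_energy_efficient_cpu(): compute the prospective
util via cpu_util_next(), derive spare capacity from it, apply the rq's uclamp
restrictions, and skip any CPU where the clamped util does not fit.
fits_capacity() leaves roughly 20% headroom; the kernel defines it as
cap * 1280 < max * 1024. A sketch of that margin check, with uclamp elided:

    #include <stdio.h>

    /* Mirrors fits_capacity(): utilization fits only while it stays
     * below ~80% of the CPU's capacity (1024/1280). */
    static int fits_capacity_model(unsigned long util, unsigned long capacity)
    {
        return util * 1280 < capacity * 1024;
    }

    int main(void)
    {
        /* A little-core CPU with capacity 512: */
        printf("%d\n", fits_capacity_model(400, 512)); /* 1: fits, ~78% */
        printf("%d\n", fits_capacity_model(420, 512)); /* 0: above the margin */
        return 0;
    }
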
8138 * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
8146 unsigned long util, load;
8223 util = task_util_est(p);
8224 if (util > env->imbalance) {
8228 env->imbalance -= util;
8263 * load/util/tasks.
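
Lines 8138-8263 are detach_tasks() in its utilization mode: each candidate's
task_util_est() (8223) is charged against env->imbalance, tasks too big for
the remaining budget are skipped (8224), and the walk stops once the requested
amount of util has been detached. A linearized sketch with made-up task
values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long task_util[] = { 120, 300, 80, 250 };
        unsigned long imbalance = 400; /* util the balancer wants moved */
        int detached = 0;

        for (int i = 0; i < 4 && imbalance; i++) {
            if (task_util[i] > imbalance)
                continue;              /* too big: the "goto next" case */
            imbalance -= task_util[i]; /* as at 8228 */
            detached++;
        }
        printf("detached %d tasks, %lu util left\n", detached, imbalance);
        return 0; /* prints: detached 2 tasks, 200 util left */
    }
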
9923 unsigned long capacity, load, util;
10001 util = cpu_util(cpu_of(rq));
10012 if (busiest_util < util) {
10013 busiest_util = util;
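
Finally, lines 9923-10013 are the find_busiest_queue() scan that feeds the
imbalance above: under util-based balancing it simply remembers the runqueue
with the highest cpu_util(). A sketch with illustrative per-CPU values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long cpu_util[] = { 310, 740, 520, 660 };
        unsigned long busiest_util = 0;
        int busiest = -1;

        /* Keep the CPU whose utilization is highest, as at 10012-10013. */
        for (int cpu = 0; cpu < 4; cpu++) {
            if (busiest_util < cpu_util[cpu]) {
                busiest_util = cpu_util[cpu];
                busiest = cpu;
            }
        }
        printf("busiest cpu %d (util %lu)\n", busiest, busiest_util);
        return 0; /* prints: busiest cpu 1 (util 740) */
    }
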