Searched refs:shares (Results 1 - 3 of 3) sorted by relevance
/device/soc/rockchip/common/sdk_linux/kernel/sched/
fair.c
    3320   long tg_weight, tg_shares, load, shares;                    in calc_group_shares() local
    3323   tg_shares = READ_ONCE(tg->shares);                          in calc_group_shares()
    3333   shares = (tg_shares * load);                                in calc_group_shares()
    3335   shares /= tg_weight;                                        in calc_group_shares()
    3340   * of a group with small tg->shares value. It is a floor value which is    in calc_group_shares()
    3344   * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024    in calc_group_shares()
    3345   * on an 8-core system with 8 tasks each runnable on one CPU shares has    in calc_group_shares()
    3350   return clamp_t(long, shares, MIN_SHARES, tg_shares);        in calc_group_shares()
    3363   long shares;                                                in update_cfs_group() local
    3374   shares                                                      in update_cfs_group()
    3382   reweight_entity(cfs_rq_of(se), se, shares);                 in update_cfs_group() local
   11988   sched_group_set_shares(struct task_group *tg, unsigned long shares)       sched_group_set_shares() argument
    [all...]
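The calc_group_shares() hits above show the shape of the per-CPU share computation: the group's configured tg->shares is scaled by this CPU's portion of the group load (shares = tg_shares * load / tg_weight) and then clamped between a small floor and tg_shares itself. The following is a minimal standalone C sketch of that arithmetic; the load/tg_weight inputs are simplified stand-ins (the kernel derives them from cfs_rq load averages) and MIN_SHARES is restated here by hand.

#include <stdio.h>

/*
 * Sketch of the approximation visible in the calc_group_shares() hits above:
 *
 *     shares = tg_shares * load / tg_weight, clamped to [MIN_SHARES, tg_shares]
 *
 * tg_shares is the group's configured weight (tg->shares), load stands in for
 * this CPU's portion of the group's load, and tg_weight for the group-wide
 * load sum.  These inputs and the constant are simplified assumptions.
 */
#define MIN_SHARES	2L	/* floor weight, per the "floor value" comment at fair.c:3340 */

static long clamp_long(long v, long lo, long hi)
{
	if (v < lo)
		return lo;
	if (v > hi)
		return hi;
	return v;
}

static long calc_group_shares_sketch(long tg_shares, long load, long tg_weight)
{
	long shares = tg_shares * load;

	if (tg_weight)
		shares /= tg_weight;

	return clamp_long(shares, MIN_SHARES, tg_shares);
}

int main(void)
{
	/*
	 * Mirror the example from the comment at fair.c:3344: tg->shares of
	 * scale_load(15) = 15*1024 on an 8-CPU system with the load spread
	 * evenly, so each CPU's group entity gets roughly 1/8 of the shares.
	 */
	long tg_shares = 15 * 1024;

	printf("per-CPU group shares: %ld\n",
	       calc_group_shares_sketch(tg_shares, 1, 8));	/* 15*1024/8 = 1920 */
	return 0;
}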
sched.h
     175   * The extra resolution improves shares distribution and load balancing of
     178   * and does not change the user-interface for setting shares/weights.
     456   unsigned long shares;                                       member
     516   * too large, so as the shares value of a task group.
     571   extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
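The sched.h hits refer to the extra fixed-point resolution used for tg->shares on 64-bit kernels: values are shifted up when stored and shifted back down at the user interface, so cpu.shares keeps its usual meaning. The sketch below assumes the common 64-bit definitions (a shift of SCHED_FIXEDPOINT_SHIFT = 10) and omits the minimum-value handling the real scale_load_down() applies; 32-bit builds define both macros as no-ops.

#include <stdio.h>

/*
 * Simplified assumption of the 64-bit scale_load()/scale_load_down() pair:
 * store tg->shares at higher resolution, report it back at user resolution.
 */
#define SCHED_FIXEDPOINT_SHIFT	10
#define scale_load(w)		((unsigned long)(w) << SCHED_FIXEDPOINT_SHIFT)
#define scale_load_down(w)	((unsigned long)(w) >> SCHED_FIXEDPOINT_SHIFT)

int main(void)
{
	unsigned long user_shares = 1024;			/* value written to cpu.shares */
	unsigned long tg_shares = scale_load(user_shares);	/* value kept in tg->shares */

	printf("stored tg->shares   = %lu\n", tg_shares);			/* 1048576 */
	printf("reported cpu.shares = %lu\n", scale_load_down(tg_shares));	/* 1024 */
	return 0;
}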
core.c
    8077   root_task_group.shares = ROOT_TASK_GROUP_LOAD;              in sched_init()
    8510   /* End participation in shares distribution: */             in sched_offline_group()
    8952   return (u64)scale_load_down(tg->shares);                    in cpu_shares_read_u64()
    9273   .name = "shares",
    9358   u64 weight = scale_load_down(tg->shares);                   in cpu_weight_read_u64()
    9369   * a bit of range on both ends, it maps pretty well onto the shares    in cpu_weight_write_u64()
    9385   unsigned long weight = scale_load_down(css_tg(css)->shares);    in cpu_weight_nice_read_s64()
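The core.c hits are the cgroup glue: cgroup v1's cpu.shares reads back scale_load_down(tg->shares) directly, while cgroup v2's cpu.weight is mapped linearly onto the same scale, with the default weight corresponding to 1024 shares. The helpers below are hypothetical names sketching that linear mapping under the assumption of round-to-nearest division and the usual cgroup weight range (1..10000, default 100); the exact kernel conversion lives in cpu_weight_read_u64()/cpu_weight_write_u64().

#include <stdio.h>

/*
 * Hypothetical sketch of the cpu.weight <-> shares mapping referred to by
 * the comment at core.c:9369.  Constants are restated here as assumptions.
 */
#define CGROUP_WEIGHT_DFL	100UL	/* default cpu.weight */
#define DEFAULT_SHARES		1024UL	/* unscaled nice-0 weight */

static unsigned long weight_to_shares(unsigned long weight)
{
	/* cpu.weight 100 -> 1024 shares, 200 -> 2048, 50 -> 512, ... */
	return (weight * DEFAULT_SHARES + CGROUP_WEIGHT_DFL / 2) / CGROUP_WEIGHT_DFL;
}

static unsigned long shares_to_weight(unsigned long shares)
{
	return (shares * CGROUP_WEIGHT_DFL + DEFAULT_SHARES / 2) / DEFAULT_SHARES;
}

int main(void)
{
	unsigned long w;

	/* Walk the weight range and show that the mapping round-trips. */
	for (w = 1; w <= 10000; w *= 10)
		printf("cpu.weight %5lu -> shares %6lu -> cpu.weight %5lu\n",
		       w, weight_to_shares(w), shares_to_weight(weight_to_shares(w)));
	return 0;
}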
Completed in 28 milliseconds