/kernel/linux/linux-6.6/kernel/sched/

  fair.c
      438:   * cfs rq without parent should be put   [in list_add_leaf_cfs_rq()]
      579:   for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
      2127:  ns->nr_running += rq->cfs.h_nr_running;   [in update_numa_stats()]
      4014:  if (&rq->cfs == cfs_rq) {   [in cfs_rq_util_change()]
      4470:  * - cfs->throttled_clock_pelt_time@cfs_rq_idle   [in migrate_se_pelt_lag()]
      4480:  * now = rq_clock_pelt()@rq_idle - cfs->throttled_clock_pelt_time +   [in migrate_se_pelt_lag()]
      4485:  * cfs->throttled_clock_pelt_time@cfs_rq_idle   [in migrate_se_pelt_lag()]
      4565:  * cfs->util_sum. Although this is not a problem by itself, detaching   [in update_cfs_rq_load_avg()]
      4567:  * util_avg (~1ms) can make cfs->util_sum becoming null whereas   [in update_cfs_rq_load_avg()]
      5472:  rq_of(cfs_rq)->cfs   [in set_next_entity()]
      6739:  check_preempt_from_idle(struct cfs_rq *cfs, struct sched_entity *se)   [check_preempt_from_idle() argument]
      [all...]
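The `if (&rq->cfs == cfs_rq)` hit at line 4014 in cfs_rq_util_change() works because the root CFS runqueue is embedded by value in struct rq, so comparing a cfs_rq pointer against &rq->cfs is enough to tell the root apart from any task group's cfs_rq; that is how root-level changes are singled out for the cpufreq utilization update. Below is a minimal, runnable userspace sketch of that idiom; the names toy_rq, toy_cfs_rq and is_root_cfs_rq are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-ins for the kernel's struct cfs_rq / struct rq. */
struct toy_cfs_rq {
    unsigned int nr_running;
};

struct toy_rq {
    struct toy_cfs_rq cfs;   /* root CFS runqueue, embedded by value */
};

/* Mirrors the `&rq->cfs == cfs_rq` test seen in cfs_rq_util_change(). */
static bool is_root_cfs_rq(struct toy_rq *rq, struct toy_cfs_rq *cfs_rq)
{
    return &rq->cfs == cfs_rq;
}

int main(void)
{
    struct toy_rq rq = { .cfs = { .nr_running = 0 } };
    struct toy_cfs_rq group_cfs = { .nr_running = 0 };   /* e.g. a task group's cfs_rq */

    printf("root cfs_rq:  %d\n", is_root_cfs_rq(&rq, &rq.cfs));      /* prints 1 */
    printf("group cfs_rq: %d\n", is_root_cfs_rq(&rq, &group_cfs));   /* prints 0 */
    return 0;
}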
  pelt.h
      128:   * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
      136:   u32 util_sum = rq->cfs.avg.util_sum;   [in update_idle_rq_clock_pelt()]

  sched_avg.c
      166:   util = rq->cfs.avg.util_avg;   [in sched_get_cpu_util()]

  sched.h
      1102:  struct cfs_rq cfs;   [member]
      1318:  return container_of(cfs_rq, struct rq, cfs);   [in rq_of()]
      1584:  return &task_rq(p)->cfs;   [in task_cfs_rq()]
      1592:  return &rq->cfs;   [in cfs_rq_of()]
      1692:  * an update_rq_clock() for several cfs and rt runqueues (Typically
      2518:  return rq->cfs.nr_running > 0;   [in sched_fair_runnable()]
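The sched.h hits above are the accessors built around that embedded member: rq_of() recovers the owning struct rq from its root cfs_rq via container_of(), task_cfs_rq() and one variant of cfs_rq_of() resolve to &task_rq(p)->cfs / &rq->cfs, and sched_fair_runnable() just checks rq->cfs.nr_running. A runnable sketch of the container_of() round trip, again with toy names rather than the kernel's:

#include <stdio.h>
#include <stddef.h>

/* Userspace container_of(): recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_cfs_rq {
    unsigned int nr_running;
};

struct toy_rq {
    struct toy_cfs_rq cfs;   /* "struct cfs_rq cfs;" member, as in sched.h */
};

/* Like rq_of(): container_of(cfs_rq, struct rq, cfs). */
static struct toy_rq *rq_of(struct toy_cfs_rq *cfs_rq)
{
    return container_of(cfs_rq, struct toy_rq, cfs);
}

/* Like the "return &rq->cfs;" lines above: pick the root cfs_rq embedded
 * in the runqueue when no task group indirection is involved. */
static struct toy_cfs_rq *root_cfs_rq(struct toy_rq *rq)
{
    return &rq->cfs;
}

int main(void)
{
    struct toy_rq rq = { .cfs = { .nr_running = 2 } };

    /* Embedding means both directions are pure pointer arithmetic. */
    printf("round trip ok: %d\n", rq_of(root_cfs_rq(&rq)) == &rq);
    printf("fair runnable: %d\n", rq.cfs.nr_running > 0);   /* sched_fair_runnable() */
    return 0;
}

Embedding the root cfs_rq by value, rather than pointing at a separately allocated one, keeps both conversions free of indirection and lets sched_init() initialize it in place with init_cfs_rq(&rq->cfs), as the core.c hits below show.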
  core.c
      5706:   struct sched_entity *curr = (&task_rq(p)->cfs)->curr;   [in prefetch_curr_exec_start()]
      6184:   rq->nr_running == rq->cfs.h_nr_running)) {   [in __pick_next_task()]
      7581:   * cpu_util_{cfs,rt,dl,irq}()
      7584:   * Where the cfs,rt and dl util numbers are tracked with the same metric and
      7587:   * The cfs,rt,dl utilization are the running times measured with rq->clock_task
      10668:  init_cfs_rq(&rq->cfs);   [in sched_init()]
      10691:  * directly in rq->cfs (i.e root_task_group->se[] = NULL).   [in sched_init()]
      10693:  init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);   [in sched_init()]
/kernel/linux/linux-5.10/kernel/sched/

  fair.c
      370:   * cfs rq without parent should be put   [in list_add_leaf_cfs_rq()]
      487:   return &task_rq(p)->cfs;   [in task_cfs_rq()]
      495:   return &rq->cfs;   [in cfs_rq_of()]
      524:   for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
      736:   nr_running = rq_of(cfs_rq)->cfs.h_nr_running;   [in sched_slice()]
      1656:  ns->nr_running += rq->cfs.h_nr_running;   [in update_numa_stats()]
      3309:  if (&rq->cfs == cfs_rq) {   [in cfs_rq_util_change()]
      3707:  * cfs->util_sum. Although this is not a problem by itself, detaching   [in update_cfs_rq_load_avg()]
      3709:  * util_avg (~1ms) can make cfs->util_sum becoming null whereas   [in update_cfs_rq_load_avg()]
      4542:  * When bandwidth control is enabled, cfs migh   [in enqueue_entity()]
      5781:  check_preempt_from_idle(struct cfs_rq *cfs, struct sched_entity *se)   [check_preempt_from_idle() argument]
      [all...]
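Several of the hits above read rq->cfs.h_nr_running rather than nr_running (sched_slice(), update_numa_stats(), and the rq->nr_running == rq->cfs.h_nr_running test in pick_next_task()). The difference is that cfs_rq->nr_running counts entities queued directly on that cfs_rq, while h_nr_running counts every runnable task in that cfs_rq and in all group runqueues below it, so the root's h_nr_running is the CPU-wide CFS task count. A small runnable sketch of that hierarchical bookkeeping; the structures are toys and omit the group sched_entity that the kernel would also enqueue on the parent.

#include <stdio.h>

/* Toy cfs_rq: nr_running counts entities queued directly here,
 * h_nr_running counts tasks here plus in every child group below. */
struct toy_cfs_rq {
    unsigned int nr_running;
    unsigned int h_nr_running;
    struct toy_cfs_rq *parent;   /* NULL for the root (&rq->cfs) */
};

/* Enqueue one task on `cfs`: it sits directly on `cfs`, but it
 * contributes to h_nr_running all the way up to the root. */
static void enqueue_task_toy(struct toy_cfs_rq *cfs)
{
    cfs->nr_running++;
    for (; cfs; cfs = cfs->parent)
        cfs->h_nr_running++;
}

int main(void)
{
    struct toy_cfs_rq root  = { 0 };                /* stands in for rq->cfs */
    struct toy_cfs_rq group = { .parent = &root };  /* a task group's cfs_rq */

    enqueue_task_toy(&root);    /* task in the root group  */
    enqueue_task_toy(&group);   /* task in a child group   */
    enqueue_task_toy(&group);

    /* In the real kernel the group's own sched_entity would also be
     * enqueued on the root and bump root.nr_running; elided here. */
    printf("root.h_nr_running = %u\n", root.h_nr_running);   /* 3: CPU-wide CFS tasks */
    printf("group.nr_running  = %u\n", group.nr_running);    /* 2 */
    return 0;
}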
  pelt.h
      110:   * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
      118:   u32 util_sum = rq->cfs.avg.util_sum;   [in update_idle_rq_clock_pelt()]

  sched_avg.c
      166:   util = rq->cfs.avg.util_avg;   [in sched_get_cpu_util()]

  sched.h
      1035:  struct cfs_rq cfs;   [member]
      1203:  return container_of(cfs_rq, struct rq, cfs);   [in rq_of()]
      2034:  return rq->cfs.nr_running > 0;   [in sched_fair_runnable()]
      2780:  unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);   [in cpu_util_cfs()]
      2784:  READ_ONCE(rq->cfs.avg.util_est.enqueued));   [in cpu_util_cfs()]
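The cpu_util_cfs() hits at lines 2780 and 2784 show how the 5.10 helper derives a CPU's CFS utilization: it reads the root cfs_rq's PELT util_avg and, when the UTIL_EST scheduler feature is enabled, takes the maximum with the enqueued util_est value so that recently woken tasks whose util_avg has decayed are not under-reported. A hedged, runnable sketch of that combination; the field and function names are simplified stand-ins.

#include <stdio.h>

/* Toy mirror of the PELT fields read by cpu_util_cfs() in the hits above. */
struct toy_sched_avg {
    unsigned long util_avg;            /* rq->cfs.avg.util_avg          */
    unsigned long util_est_enqueued;   /* rq->cfs.avg.util_est.enqueued */
};

static unsigned long max_ul(unsigned long a, unsigned long b)
{
    return a > b ? a : b;
}

/* Sketch of the 5.10 logic: start from util_avg and, with UTIL_EST
 * enabled, never report less than the enqueued utilization estimate. */
static unsigned long cpu_util_cfs_sketch(const struct toy_sched_avg *avg,
                                         int util_est_enabled)
{
    unsigned long util = avg->util_avg;

    if (util_est_enabled)
        util = max_ul(util, avg->util_est_enqueued);

    return util;
}

int main(void)
{
    /* A CPU whose decayed util_avg lags behind what is actually enqueued. */
    struct toy_sched_avg avg = { .util_avg = 120, .util_est_enqueued = 300 };

    printf("without UTIL_EST: %lu\n", cpu_util_cfs_sketch(&avg, 0));   /* 120 */
    printf("with UTIL_EST:    %lu\n", cpu_util_cfs_sketch(&avg, 1));   /* 300 */
    return 0;
}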
  debug.c
      580:   rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;   [in print_cfs_rq()]

  core.c
      4126:  struct sched_entity *curr = (&task_rq(p)->cfs)->curr;   [in prefetch_curr_exec_start()]
      4550:  rq->nr_running == rq->cfs.h_nr_running)) {   [in pick_next_task()]
      7733:  init_cfs_rq(&rq->cfs);   [in sched_init()]
      7756:  * directly in rq->cfs (i.e root_task_group->se[] = NULL).   [in sched_init()]
      7758:  init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);   [in sched_init()]
/kernel/linux/linux-5.10/drivers/pci/endpoint/

  pci-epf-core.c
      16:  #include <linux/pci-ep-cfs.h>

  pci-epc-core.c
      16:  #include <linux/pci-ep-cfs.h>

  pci-ep-cfs.c
      15:  #include <linux/pci-ep-cfs.h>
/kernel/linux/linux-5.10/net/netfilter/

  xt_set.c
      38:  #define ADT_OPT(n, f, d, fs, cfs, t, p, b, po, bo) \
      43:  .cmdflags = cfs, \
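Note that the xt_set.c hits are unrelated to the CFS scheduler: cfs here is just the ADT_OPT() macro parameter that lands in the cmdflags field of the ipset option block (.cmdflags = cfs on line 43). A small runnable sketch of that designated-initializer macro pattern, using a hypothetical toy struct rather than the real struct ip_set_adt_opt:

#include <stdio.h>

/* Toy option block; only a few illustrative fields are kept.
 * The real struct ip_set_adt_opt has more members than shown here. */
struct toy_adt_opt {
    unsigned char dim;
    unsigned char flags;
    unsigned int  cmdflags;
    unsigned long timeout;
};

/* Same shape as the xt_set.c macro in the hit above: each parameter is
 * dropped into one field via a designated initializer; cfs -> .cmdflags. */
#define TOY_ADT_OPT(n, d, fs, cfs, t)   \
    struct toy_adt_opt n = {            \
        .dim      = d,                  \
        .flags    = fs,                 \
        .cmdflags = cfs,                \
        .timeout  = t,                  \
    }

int main(void)
{
    TOY_ADT_OPT(opt, 3, 0x1, 0x2, 1000);
    printf("cmdflags = %u\n", opt.cmdflags);   /* prints 2 */
    return 0;
}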
/kernel/linux/linux-6.6/net/netfilter/

  xt_set.c
      38:  #define ADT_OPT(n, f, d, fs, cfs, t, p, b, po, bo) \
      43:  .cmdflags = cfs, \
/kernel/linux/linux-6.6/drivers/pci/endpoint/

  pci-epf-core.c
      16:  #include <linux/pci-ep-cfs.h>

  pci-ep-cfs.c
      15:  #include <linux/pci-ep-cfs.h>

  pci-epc-core.c
      15:  #include <linux/pci-ep-cfs.h>