Searched refs:rq_clock (Results 1 - 14 of 14) sorted by relevance
/kernel/linux/linux-5.10/kernel/sched/
stats.h
    179: unsigned long long now = rq_clock(rq), delta = 0;  in sched_info_dequeued()
    198: unsigned long long now = rq_clock(rq), delta = 0;  in sched_info_arrive()
    219: t->sched_info.last_queued = rq_clock(rq);  in sched_info_queued()
    233: unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;  in sched_info_depart()
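Note: all four hits share one pattern — sample rq_clock(rq) once, subtract a saved timestamp, and feed the delta into delay accounting. A condensed paraphrase of sched_info_depart() (v5.10; details from memory, not a verbatim copy):

    static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
    {
    	/* time spent on the runqueue since sched_info_arrive() stamped it */
    	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

    	rq_sched_info_depart(rq, delta);

    	/* still runnable: the task immediately starts waiting again */
    	if (t->state == TASK_RUNNING)
    		sched_info_queued(rq, t);
    }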
deadline.c
    328: zerolag_time -= rq_clock(rq);  in task_non_contending()
    726: WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));  in setup_new_dl_entity()
    741: dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;  in setup_new_dl_entity()
    775: dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;  in replenish_dl_entity()
    802: if (dl_time_before(dl_se->deadline, rq_clock(rq))) {  in replenish_dl_entity()
    804: dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;  in replenish_dl_entity()
    888: u64 laxity = dl_se->deadline - rq_clock(rq);  in update_dl_revised_wakeup()
    896: WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));  in update_dl_revised_wakeup()
    952: if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||  in update_dl_entity()
    953: dl_entity_overflow(dl_se, rq_clock(rq))) {  in update_dl_entity()
    [all ...]
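Note: every deadline.c hit either rebases dl_se->deadline from the current rq clock or compares against it with dl_time_before(). The pivotal case, a paraphrase built around hit lines 802-804 (the printk line is reconstructed from memory):

    if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
    	/* deadline already in the past: start a fresh CBS period from now */
    	printk_deferred_once("sched: DL replenish lagged too much\n");
    	dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
    	dl_se->runtime = pi_of(dl_se)->dl_runtime;
    }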
fair.c
    928: wait_start = rq_clock(rq_of(cfs_rq));  in update_stats_wait_start()
    947: delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);  in update_stats_wait_end()
    986: u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;  in update_stats_enqueue_sleeper()
    1003: u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;  in update_stats_enqueue_sleeper()
    1077: rq_clock(rq_of(cfs_rq)));  in update_stats_dequeue()
    1080: rq_clock(rq_of(cfs_rq)));  in update_stats_dequeue()
    5110: cfs_rq->throttled_clock = rq_clock(rq);  in throttle_cfs_rq()
    5129: cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;  in unthrottle_cfs_rq()
    11243: this_rq->idle_stamp = rq_clock(this_rq);  in newidle_balance()
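Note: hits 5110 and 5129 are the two halves of one measurement — throttle_cfs_rq() stamps when a cfs_rq was throttled, and unthrottle_cfs_rq() turns the stamp into accumulated throttled time (both lines verbatim from the hits above):

    /* throttle_cfs_rq(): remember when throttling began */
    cfs_rq->throttled_clock = rq_clock(rq);

    /* unthrottle_cfs_rq(): fold the elapsed interval into bandwidth stats */
    cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;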
sched.h
    1281: static inline u64 rq_clock(struct rq *rq)  in rq_clock() (function definition)
    2561: clock = rq_clock(rq);  in cpufreq_update_util()
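Note: line 1281 is the definition every other hit resolves to. Paraphrased from v5.10 (assertion names from memory): rq_clock() is only valid while rq->lock is held and after update_rq_clock() has refreshed the value.

    static inline u64 rq_clock(struct rq *rq)
    {
    	lockdep_assert_held(&rq->lock);	/* caller must hold the rq lock */
    	assert_clock_updated(rq);	/* and have called update_rq_clock() */
    	return rq->clock;
    }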
core.c
    2614: u64 delta = rq_clock(rq) - rq->idle_stamp;  in ttwu_do_wakeup()
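Note: this closes the loop opened by newidle_balance() (fair.c hit 11243): idle_stamp marks when the CPU went idle, and the next wakeup folds the idle interval into rq->avg_idle. Paraphrased from ttwu_do_wakeup() (v5.10; the clamping detail is from memory):

    if (rq->idle_stamp) {
    	u64 delta = rq_clock(rq) - rq->idle_stamp;
    	u64 max = 2*rq->max_idle_balance_cost;

    	update_avg(&rq->avg_idle, delta);
    	if (rq->avg_idle > max)		/* cap the running average */
    		rq->avg_idle = max;

    	rq->idle_stamp = 0;
    }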
/kernel/linux/linux-6.6/kernel/sched/
stats.h
    212: delta = rq_clock(rq) - t->sched_info.last_queued;  in sched_info_dequeue()
    231: now = rq_clock(rq);  in sched_info_arrive()
    249: t->sched_info.last_queued = rq_clock(rq);  in sched_info_enqueue()
    262: unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;  in sched_info_depart()
stats.c
    11: wait_start = rq_clock(rq);  in __update_stats_wait_start()
    23: u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);  in __update_stats_wait_end()
    56: u64 delta = rq_clock(rq) - sleep_start;  in __update_stats_enqueue_sleeper()
    74: u64 delta = rq_clock(rq) - block_start;  in __update_stats_enqueue_sleeper()
deadline.c
    421: zerolag_time -= rq_clock(rq);  in task_non_contending()
    773: dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;  in replenish_dl_new_period()
    795: WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));  in setup_new_dl_entity()
    868: if (dl_time_before(dl_se->deadline, rq_clock(rq))) {  in replenish_dl_entity()
    953: u64 laxity = dl_se->deadline - rq_clock(rq);  in update_dl_revised_wakeup()
    961: WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));  in update_dl_revised_wakeup()
    1017: if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||  in update_dl_entity()
    1018: dl_entity_overflow(dl_se, rq_clock(rq))) {  in update_dl_entity()
    1021: !dl_time_before(dl_se->deadline, rq_clock(rq)) &&  in update_dl_entity()
    1063: delta = ktime_to_ns(now) - rq_clock(rq);  in start_dl_timer()
    [all ...]
pelt.h
    77: u64_u32_store(rq->clock_idle, rq_clock(rq));  in _update_idle_rq_clock_pelt()
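Note: this snapshot lets a later lockless cross-CPU reader (see migrate_se_pelt_lag() in the fair.c hits below) estimate PELT lag; u64_u32_store() keeps the store tear-free on 32-bit. Paraphrased from v6.6 (barrier comment from memory):

    static inline void _update_idle_rq_clock_pelt(struct rq *rq)
    {
    	rq->clock_pelt = rq_clock_task(rq);

    	u64_u32_store(rq->clock_idle, rq_clock(rq));
    	/* paired with smp_rmb() in migrate_se_pelt_lag() */
    	smp_wmb();
    	u64_u32_store(rq->clock_pelt_idle, rq_clock_pelt(rq));
    }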
core_sched.c
    243: u64 delta, now = rq_clock(rq->core);  in __sched_core_account_forceidle()
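Note: here the clock of the core-wide runqueue (rq->core) is read rather than the local one, since force-idle time is a property of the whole SMT core. A rough paraphrase of the accounting step (reconstructed from memory, not verbatim):

    u64 delta, now = rq_clock(rq->core);

    if (rq->core->core_forceidle_start == 0)
    	return;

    /* time the SMT siblings spent forced idle since the last stamp */
    delta = now - rq->core->core_forceidle_start;
    rq->core->core_forceidle_start = now;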
fair.c
    1367: rq_clock(rq_of(cfs_rq)));  in update_stats_dequeue_fair()
    1370: rq_clock(rq_of(cfs_rq)));  in update_stats_dequeue_fair()
    4476: * = sched_clock_cpu() - rq_clock()@rq_idle  in migrate_se_pelt_lag() (comment)
    4481: * sched_clock_cpu() - rq_clock()@rq_idle  in migrate_se_pelt_lag() (comment)
    4484: * rq_clock()@rq_idle is rq->clock_idle  in migrate_se_pelt_lag() (comment)
    5361: cfs_rq->throttled_clock = rq_clock(rq);  in enqueue_entity()
    5363: cfs_rq->throttled_clock_self = rq_clock(rq);  in enqueue_entity()
    5752: u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self;  in tg_unthrottle_up()
    5778: cfs_rq->throttled_clock_self = rq_clock(rq);  in tg_throttle_down()
    5872: cfs_rq->throttled_clock = rq_clock(rq);  in throttle_cfs_rq()
    [all ...]
core.c
    3928: u64 delta = rq_clock(rq) - rq->idle_stamp;  in ttwu_do_activate()
    5760: u64 resched_latency, now = rq_clock(rq);  in cpu_resched_latency()
    6405: rq->core->core_forceidle_start = rq_clock(rq->core);  in pick_next_task()
    12681: u64 rq_clock;  in sched_mm_cid_remote_clear_old() (local variable)
    12687: rq_clock = READ_ONCE(rq->clock);  in sched_mm_cid_remote_clear_old()
    12698: WRITE_ONCE(pcpu_cid->time, rq_clock);  in sched_mm_cid_remote_clear_old()
    12704: if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)  in sched_mm_cid_remote_clear_old()
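Note: hits 12681-12704 are the exception that proves the rule: sched_mm_cid_remote_clear_old() runs without the remote runqueue's lock, so calling rq_clock() would trip its lockdep assertion; it snapshots the raw field instead (read line verbatim from hit 12687):

    u64 rq_clock;	/* local variable deliberately shadows the helper's name */

    /* lockless cross-CPU read; a slightly stale value is acceptable here */
    rq_clock = READ_ONCE(rq->clock);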
sched.h
    1640: static inline u64 rq_clock(struct rq *rq)  in rq_clock() (function definition)
    3144: clock = rq_clock(rq);  in cpufreq_update_util()
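Note: the v6.6 definition (hit line 1640) matches v5.10 except for the lock assertion, which moved to the lockdep_assert_rq_held() helper when rq->lock went behind the raw_spin_rq_*() wrappers. Paraphrased:

    static inline u64 rq_clock(struct rq *rq)
    {
    	lockdep_assert_rq_held(rq);
    	assert_clock_updated(rq);
    	return rq->clock;
    }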
rt.c
    1437: rq_clock(rq_of_rt_rq(rt_rq)));  in update_stats_dequeue_rt()
    1441: rq_clock(rq_of_rt_rq(rt_rq)));  in update_stats_dequeue_rt()
Completed in 71 milliseconds