Searched refs:vtime (Results 1 - 25 of 37) sorted by relevance

/kernel/linux/linux-5.10/kernel/sched/
cputime.c
650 static u64 vtime_delta(struct vtime *vtime) in vtime_delta() argument
655 if (clock < vtime->starttime) in vtime_delta()
658 return clock - vtime->starttime; in vtime_delta()
661 static u64 get_vtime_delta(struct vtime *vtime) in get_vtime_delta() argument
663 u64 delta = vtime_delta(vtime); in get_vtime_delta()
667 * Unlike tick based timing, vtime based timing never has lost in get_vtime_delta()
671 * errors from causing elapsed vtime to go negative. in get_vtime_delta()
674 WARN_ON_ONCE(vtime in get_vtime_delta()
680 vtime_account_system(struct task_struct *tsk, struct vtime *vtime) vtime_account_system() argument
690 vtime_account_guest(struct task_struct *tsk, struct vtime *vtime) vtime_account_guest() argument
700 __vtime_account_kernel(struct task_struct *tsk, struct vtime *vtime) __vtime_account_kernel() argument
712 struct vtime *vtime = &tsk->vtime; vtime_account_kernel() local
724 struct vtime *vtime = &tsk->vtime; vtime_user_enter() local
734 struct vtime *vtime = &tsk->vtime; vtime_user_exit() local
748 struct vtime *vtime = &tsk->vtime; vtime_guest_enter() local
766 struct vtime *vtime = &tsk->vtime; vtime_guest_exit() local
783 struct vtime *vtime = &prev->vtime; vtime_task_switch_generic() local
810 struct vtime *vtime = &t->vtime; vtime_init_idle() local
824 struct vtime *vtime = &t->vtime; task_gtime() local
850 struct vtime *vtime = &t->vtime; task_cputime() local
883 vtime_state_fetch(struct vtime *vtime, int cpu) vtime_state_fetch() argument
909 kcpustat_user_vtime(struct vtime *vtime) kcpustat_user_vtime() argument
923 struct vtime *vtime = &tsk->vtime; kcpustat_field_vtime() local
1011 struct vtime *vtime = &tsk->vtime; kcpustat_cpu_fetch_vtime() local
[all...]
/kernel/linux/linux-6.6/kernel/sched/
cputime.c
667 static u64 vtime_delta(struct vtime *vtime) in vtime_delta() argument
672 if (clock < vtime->starttime) in vtime_delta()
675 return clock - vtime->starttime; in vtime_delta()
678 static u64 get_vtime_delta(struct vtime *vtime) in get_vtime_delta() argument
680 u64 delta = vtime_delta(vtime); in get_vtime_delta()
684 * Unlike tick based timing, vtime based timing never has lost in get_vtime_delta()
688 * errors from causing elapsed vtime to go negative. in get_vtime_delta()
691 WARN_ON_ONCE(vtime in get_vtime_delta()
697 vtime_account_system(struct task_struct *tsk, struct vtime *vtime) vtime_account_system() argument
707 vtime_account_guest(struct task_struct *tsk, struct vtime *vtime) vtime_account_guest() argument
717 __vtime_account_kernel(struct task_struct *tsk, struct vtime *vtime) __vtime_account_kernel() argument
729 struct vtime *vtime = &tsk->vtime; vtime_account_kernel() local
741 struct vtime *vtime = &tsk->vtime; vtime_user_enter() local
751 struct vtime *vtime = &tsk->vtime; vtime_user_exit() local
765 struct vtime *vtime = &tsk->vtime; vtime_guest_enter() local
783 struct vtime *vtime = &tsk->vtime; vtime_guest_exit() local
800 struct vtime *vtime = &prev->vtime; vtime_task_switch_generic() local
827 struct vtime *vtime = &t->vtime; vtime_init_idle() local
841 struct vtime *vtime = &t->vtime; task_gtime() local
867 struct vtime *vtime = &t->vtime; task_cputime() local
905 vtime_state_fetch(struct vtime *vtime, int cpu) vtime_state_fetch() argument
931 kcpustat_user_vtime(struct vtime *vtime) kcpustat_user_vtime() argument
945 struct vtime *vtime = &tsk->vtime; kcpustat_field_vtime() local
1033 struct vtime *vtime = &tsk->vtime; kcpustat_cpu_fetch_vtime() local
[all...]
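
The cputime.c hits in both kernel versions center on vtime_delta()/get_vtime_delta(): the elapsed virtual time is the difference between a fresh clock sample and vtime->starttime, clamped to zero if the clock reads behind starttime, and get_vtime_delta() advances starttime by the amount it consumes. A minimal standalone sketch of that pattern, using hypothetical names rather than the kernel's actual struct vtime layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-task vtime bookkeeping: starttime
 * records the clock value at the last accounting point. */
struct vtime_sketch {
	uint64_t starttime;
	uint64_t accrued;	/* time charged so far, in ns */
};

/* Elapsed virtual time since the last snapshot; clamped to zero when the
 * clock sample lands behind starttime (the clock < vtime->starttime check
 * in vtime_delta()). */
static uint64_t vtime_delta_sketch(const struct vtime_sketch *vt, uint64_t clock)
{
	if (clock < vt->starttime)
		return 0;
	return clock - vt->starttime;
}

/* Consume the delta and move starttime forward, the way get_vtime_delta()
 * does before the caller charges the time to a system/user/guest bucket. */
static uint64_t get_vtime_delta_sketch(struct vtime_sketch *vt, uint64_t clock)
{
	uint64_t delta = vtime_delta_sketch(vt, clock);

	vt->starttime += delta;
	return delta;
}

int main(void)
{
	struct vtime_sketch vt = { .starttime = 1000, .accrued = 0 };

	vt.accrued += get_vtime_delta_sketch(&vt, 1500);	/* charges 500 */
	vt.accrued += get_vtime_delta_sketch(&vt, 1400);	/* clamped, charges 0 */
	printf("accrued=%llu starttime=%llu\n",
	       (unsigned long long)vt.accrued, (unsigned long long)vt.starttime);
	return 0;
}
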
/kernel/linux/linux-6.6/include/trace/events/
iocost.h
17 u64 last_period, u64 cur_period, u64 vtime),
19 TP_ARGS(iocg, path, now, last_period, cur_period, vtime),
29 __field(u64, vtime)
44 __entry->vtime = vtime;
52 "period=%llu->%llu vtime=%llu "
57 __entry->vtime, __entry->inuse, __entry->weight,
64 u64 last_period, u64 cur_period, u64 vtime),
66 TP_ARGS(iocg, path, now, last_period, cur_period, vtime)
71 u64 last_period, u64 cur_period, u64 vtime),
[all...]
/kernel/linux/linux-5.10/block/
bfq-wf2q.c
117 !bfq_gt(new_entity->start, st->vtime) in bfq_update_next_in_service()
679 !bfq_gt(last_idle->finish, st->vtime)) { in bfq_forget_idle()
681 * Forget the whole idle tree, increasing the vtime past in bfq_forget_idle()
684 st->vtime = last_idle->finish; in bfq_forget_idle()
687 if (first_idle && !bfq_gt(first_idle->finish, st->vtime)) in bfq_forget_idle()
779 * when entity->finish <= old_st->vtime). in __bfq_entity_update_weight_prio()
808 entity->start = new_st->vtime; in __bfq_entity_update_weight_prio()
841 st->vtime += bfq_delta(served, st->wsum); in bfq_bfqq_served()
939 if (backshifted && bfq_gt(st->vtime, entity->finish)) { in bfq_update_fin_time_enqueue()
940 unsigned long delta = st->vtime in bfq_update_fin_time_enqueue()
1350 bfq_first_active_entity(struct bfq_service_tree *st, u64 vtime) bfq_first_active_entity() argument
[all...]
blk-iocost.c
51 * The device virtual time (vtime) is used as the primary control metric.
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
75 * against the device vtime - an IO which takes 10ms on the underlying
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO iff doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
144 * snapback mechanism in case the cgroup needs more IO vtime fo
506 atomic64_t vtime; global() member
1208 u64 vtime, vtarget; iocg_activate() local
1692 u64 vtime = atomic64_read(&iocg->vtime); hweight_after_donation() local
2166 u64 vtime = atomic64_read(&iocg->vtime); ioc_timer_fn() local
2200 u64 vdone, vtime, usage_us, usage_dur; ioc_timer_fn() local
2427 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, u64 abs_cost, struct ioc_now *now) adjust_inuse_and_calc_cost() argument
2569 u64 abs_cost, cost, vtime; ioc_rqos_throttle() local
2704 u64 vtime, abs_cost, cost; ioc_rqos_merge() local
[all...]
/kernel/linux/linux-6.6/block/
bfq-wf2q.c
117 !bfq_gt(new_entity->start, st->vtime) in bfq_update_next_in_service()
662 !bfq_gt(last_idle->finish, st->vtime)) { in bfq_forget_idle()
664 * Forget the whole idle tree, increasing the vtime past in bfq_forget_idle()
667 st->vtime = last_idle->finish; in bfq_forget_idle()
670 if (first_idle && !bfq_gt(first_idle->finish, st->vtime)) in bfq_forget_idle()
746 * when entity->finish <= old_st->vtime). in __bfq_entity_update_weight_prio()
771 entity->start = new_st->vtime; in __bfq_entity_update_weight_prio()
804 st->vtime += bfq_delta(served, st->wsum); in bfq_bfqq_served()
902 if (backshifted && bfq_gt(st->vtime, entity->finish)) { in bfq_update_fin_time_enqueue()
903 unsigned long delta = st->vtime in bfq_update_fin_time_enqueue()
1296 bfq_first_active_entity(struct bfq_service_tree *st, u64 vtime) bfq_first_active_entity() argument
[all...]
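
The bfq-wf2q.c matches (identical in 5.10 and 6.6 apart from line numbers) expose BFQ's per-service-tree virtual clock: bfq_bfqq_served() advances st->vtime by bfq_delta(served, st->wsum), i.e. by the service received divided by the aggregate weight of the active entities, and an entity is only considered for dispatch while its start timestamp does not exceed that vtime. A rough standalone sketch of the same weighted-fair-queueing bookkeeping, with simplified fixed-point math rather than BFQ's real bfq_delta()/bfq_gt() helpers:

#include <stdint.h>
#include <stdio.h>

#define VT_SHIFT 22	/* fixed-point shift; BFQ uses a similar service shift */

/* Hypothetical, heavily simplified service tree: one shared virtual clock
 * plus the summed weight of the active entities hanging off it. */
struct st_sketch {
	uint64_t vtime;		/* virtual time, fixed point */
	uint64_t wsum;		/* total weight of active entities */
};

/* Virtual-time increment for `served` units of service spread across a
 * total weight of `wsum` (roughly the role of bfq_delta()). */
static uint64_t delta_sketch(uint64_t served, uint64_t wsum)
{
	return (served << VT_SHIFT) / wsum;
}

int main(void)
{
	struct st_sketch st = { .vtime = 0, .wsum = 300 };	/* e.g. weights 100 + 200 */
	uint64_t entity_start = st.vtime;			/* entity enqueued "now" */

	/* Charge 4096 sectors of service to the tree: the clock advances in
	 * inverse proportion to the aggregate weight, mirroring
	 * st->vtime += bfq_delta(served, st->wsum) in bfq_bfqq_served(). */
	st.vtime += delta_sketch(4096, st.wsum);

	/* An entity stays a dispatch candidate only while its start does not
	 * exceed the tree's vtime (the !bfq_gt(start, st->vtime) test). */
	printf("vtime=%llu eligible=%d\n",
	       (unsigned long long)st.vtime, entity_start <= st.vtime);
	return 0;
}
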
blk-iocost.c
51 * The device virtual time (vtime) is used as the primary control metric.
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
75 * against the device vtime - an IO which takes 10ms on the underlying
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO if doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
144 * snapback mechanism in case the cgroup needs more IO vtime fo
504 atomic64_t vtime; global() member
1265 u64 vtime, vtarget; iocg_activate() local
1743 u64 vtime = atomic64_read(&iocg->vtime); hweight_after_donation() local
2198 u64 vtime = atomic64_read(&iocg->vtime); ioc_check_iocgs() local
2270 u64 vdone, vtime, usage_us; ioc_timer_fn() local
2456 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, u64 abs_cost, struct ioc_now *now) adjust_inuse_and_calc_cost() argument
2602 u64 abs_cost, cost, vtime; ioc_rqos_throttle() local
2737 u64 vtime, abs_cost, cost; ioc_rqos_merge() local
[all...]
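
The blk-iocost.c comment block excerpted above (present in both versions) summarizes the controller: every IO is translated into a cost against a device-wide virtual clock, each cgroup's vtime advances at a rate scaled by its hierarchical weight (hweight), and an IO may only be issued if charging its cost would not push the cgroup's vtime past the current device vtime; otherwise it is held until the device vtime catches up. A toy sketch of that admission test, with made-up cost and hweight values and none of the real code's vrate adjustment, donation, or debt handling:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define HWEIGHT_WHOLE 65536	/* hweight expressed as a fraction of this */

/* Hypothetical per-cgroup state: vtime already consumed plus the group's
 * hierarchical share of the device. */
struct iocg_sketch {
	uint64_t vtime;		/* vtime charged to this group so far */
	uint32_t hweight;	/* share of the device, out of HWEIGHT_WHOLE */
};

/* Scale an absolute cost by the inverse of hweight: a group owning a
 * quarter of the device pays 4x vtime per unit of absolute cost, so its
 * budget drains proportionally faster. */
static uint64_t abs_cost_to_cost(uint64_t abs_cost, uint32_t hweight)
{
	return abs_cost * HWEIGHT_WHOLE / hweight;
}

/* Admission test in the spirit of ioc_rqos_throttle(): issue the IO only
 * if the charged vtime stays within the current device vtime. */
static bool may_issue(struct iocg_sketch *iocg, uint64_t device_vtime,
		      uint64_t abs_cost)
{
	uint64_t cost = abs_cost_to_cost(abs_cost, iocg->hweight);

	if (iocg->vtime + cost > device_vtime)
		return false;	/* would outrun the device vtime: throttle */
	iocg->vtime += cost;
	return true;
}

int main(void)
{
	struct iocg_sketch iocg = { .vtime = 0, .hweight = HWEIGHT_WHOLE / 4 };
	uint64_t device_vtime = 1000000;	/* advances with wall clock * vrate */

	printf("first IO issued: %d\n", may_issue(&iocg, device_vtime, 200000));
	printf("second IO issued: %d\n", may_issue(&iocg, device_vtime, 200000));
	return 0;
}
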
/kernel/linux/linux-5.10/include/trace/events/
iocost.h
17 u64 last_period, u64 cur_period, u64 vtime),
19 TP_ARGS(iocg, path, now, last_period, cur_period, vtime),
29 __field(u64, vtime)
44 __entry->vtime = vtime;
52 "period=%llu->%llu vtime=%llu "
57 __entry->vtime, __entry->inuse, __entry->weight,
/kernel/linux/linux-5.10/init/
init_task.c
173 .vtime.seqcount = SEQCNT_ZERO(init_task.vtime_seqcount),
174 .vtime.starttime = 0,
175 .vtime.state = VTIME_SYS,
/kernel/linux/linux-6.6/init/
init_task.c
175 .vtime.seqcount = SEQCNT_ZERO(init_task.vtime_seqcount),
176 .vtime.starttime = 0,
177 .vtime.state = VTIME_SYS,
/kernel/linux/linux-5.10/tools/cgroup/
iocost_monitor.py
154 vtime = iocg.vtime.counter.value_()
158 self.inflight_pct = (vtime - vdone) * 100 / period_vtime
/kernel/linux/linux-6.6/tools/cgroup/
iocost_monitor.py
157 vtime = iocg.vtime.counter.value_()
161 self.inflight_pct = (vtime - vdone) * 100 / period_vtime
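
The iocost_monitor.py hits read the per-group vtime counter and report how far issued vtime has run ahead of completed vtime (vdone), relative to one period's worth of vtime. As a purely illustrative example, with vtime = 1,500,000, vdone = 1,200,000 and period_vtime = 1,000,000, inflight_pct = (1,500,000 - 1,200,000) * 100 / 1,000,000 = 30, i.e. about a third of a period's budget is still in flight (the numbers are made up, not taken from the script).
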
/kernel/linux/linux-5.10/arch/s390/kernel/
vtime.c
18 #include <asm/vtime.h>
104 static inline u64 scale_vtime(u64 vtime) in scale_vtime() argument
110 return vtime * mult / div; in scale_vtime()
111 return vtime; in scale_vtime()
/kernel/linux/linux-6.6/arch/s390/kernel/
vtime.c
18 #include <asm/vtime.h>
104 static inline u64 scale_vtime(u64 vtime) in scale_vtime() argument
110 return vtime * mult / div; in scale_vtime()
111 return vtime; in scale_vtime()
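
In both s390 copies of vtime.c, scale_vtime() rescales a raw timer delta by a mult/div ratio before it is accounted; as a purely illustrative example, with mult = 2 and div = 3 a raw delta of 900 would be accounted as 900 * 2 / 3 = 600, and when no scaling applies the value passes through unchanged.
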
syscall.c
37 #include <asm/vtime.h>
/kernel/linux/linux-5.10/include/linux/
hardirq.h
9 #include <linux/vtime.h>
vtime.h
7 #include <asm/vtime.h>
24 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
26 * For now vtime state is tied to context tracking. We might want to decouple
61 * Common vtime APIs
kernel_stat.h
11 #include <linux/vtime.h>
context_tracking.h
6 #include <linux/vtime.h>
/kernel/linux/linux-6.6/include/linux/
hardirq.h
10 #include <linux/vtime.h>
context_tracking.h
6 #include <linux/vtime.h>
kernel_stat.h
11 #include <linux/vtime.h>
vtime.h
9 #include <asm/vtime.h>
13 * Common vtime APIs
70 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
72 * For now vtime state is tied to context tracking. We might want to decouple
/kernel/linux/linux-5.10/tools/testing/selftests/x86/
test_vsyscall.c
65 const time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400); variable
278 t_vsys = vtime(&t2_vsys); in test_time()
538 vtime(&tmp); in test_emulation()
/kernel/linux/linux-6.6/tools/testing/selftests/x86/
test_vsyscall.c
65 const time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400); variable
275 t_vsys = vtime(&t2_vsys); in test_time()
535 vtime(&tmp); in test_emulation()
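
Both test_vsyscall.c hits bind vtime to the legacy time() vsyscall at the fixed address 0xffffffffff600400 and then call it through that pointer. A minimal sketch of the same calling pattern, assuming an x86-64 kernel where the vsyscall page is still mapped or emulated (with vsyscall=none it will simply fault); the typedef below is my own, not necessarily the selftest's exact definition:

#include <stdio.h>
#include <time.h>

/* The legacy x86-64 vsyscall page places time() at a fixed address.
 * Illustration only: modern kernels emulate or disable this page. */
typedef time_t (*time_func_t)(time_t *t);

#define VSYS_TIME ((time_func_t)0xffffffffff600400UL)

int main(void)
{
	time_t t_out = 0;
	time_t ret = VSYS_TIME(&t_out);	/* may fault if vsyscall is disabled */

	printf("vsyscall time() returned %lld (out param %lld)\n",
	       (long long)ret, (long long)t_out);
	return 0;
}
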
