core.c

   190  struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
   201  rq_pin_lock(rq, rf);
   215  struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
   221  raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
   242  rq_pin_lock(rq, rf);
   246  raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
   366  struct rq_flags rf;        in hrtick()  (local)
   370  rq_lock(rq, &rf);          in hrtick()
   373  rq_unlock(rq, &rf);        in hrtick()
   394  struct rq_flags rf;        in __hrtick_start()  (local)
  1138  struct rq_flags rf;        in uclamp_update_util_min_rt_default()  (local)
  1430  struct rq_flags rf;        in uclamp_update_active()  (local)
  1899  move_queued_task(struct rq *rq, struct rq_flags *rf, struct task_struct *p, int new_cpu)  (argument)
  1942  __migrate_task(struct rq *rq, struct rq_flags *rf, struct task_struct *p, int dest_cpu)  (argument)
  1966  struct rq_flags rf;        in migration_cpu_stop()  (local)
  2058  struct rq_flags rf;        in __set_cpus_allowed_ptr()  (local)
  2359  struct rq_flags rf;        in wait_task_inactive()  (local)
  2729  ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, struct rq_flags *rf)  (argument)
  2762  ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, struct rq_flags *rf)  (argument)
  2818  struct rq_flags rf;        in ttwu_runnable()  (local)
  2840  struct rq_flags rf;        in sched_ttwu_pending()  (local)
  2903  struct rq_flags rf;        in wake_up_if_idle()  (local)
  2986  struct rq_flags rf;        in ttwu_queue()  (local)
  3085  struct rq_flags rf;        in walt_try_to_wake_up()  (local)
  3347  struct rq_flags rf;        in try_invoke_on_locked_down_task()  (local)
  3715  struct rq_flags rf;        in wake_up_new_task()  (local)
  3880  prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)  (argument)
  4121  context_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next, struct rq_flags *rf)  (argument)
  4338  struct rq_flags rf;        in task_sched_runtime()  (local)
  4385  struct rq_flags rf;        in scheduler_tick()  (local)
  4467  struct rq_flags rf;        in sched_tick_remote()  (local)
  4734  put_prev_task_balance(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)  (argument)
  4762  pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)  (argument)
  4848  struct rq_flags rf;        in __schedule()  (local)
  5305  struct rq_flags rf;        in rt_mutex_setprio()  (local)
  5436  struct rq_flags rf;        in set_user_nice()  (local)
  5677  struct rq_flags rf;        in __sched_setscheduler()  (local)
  6675  struct rq_flags rf;        in do_sched_yield()  (local)
  6945  struct rq_flags rf;        in sched_rr_get_interval()  (local)
  7255  struct rq_flags rf;        in sched_setnuma()  (local)
  7389  migrate_tasks(struct rq *dead_rq, struct rq_flags *rf, bool migrate_pinned_tasks)  (argument)
  7536  struct rq_flags rf;        in do_isolation_work_cpu_stop()  (local)
  7858  struct rq_flags rf;        in sched_cpu_activate()  (local)
  7955  struct rq_flags rf;        in sched_cpu_dying()  (local)
  8552  struct rq_flags rf;        in sched_move_task()  (local)
  8659  struct rq_flags rf;        in cpu_cgroup_fork()  (local)
  9036  struct rq_flags rf;        in tg_set_cfs_bandwidth()  (local)
  9619  struct rq_flags rf;        in sched_exit()  (local)
[all...]
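The common pattern behind these hits: a caller declares a struct rq_flags on its own stack and passes it to the locking helpers, which use it to carry the saved IRQ state and the lockdep pin cookie from lock to unlock. Below is a minimal sketch of that pattern, assuming the rq_lock_irqsave()/rq_unlock_irqrestore() and task_rq_lock()/task_rq_unlock() helpers declared in kernel/sched/sched.h; the example_* functions are hypothetical illustrations, not code from core.c.

#include "sched.h"	/* struct rq, struct rq_flags, rq_lock_irqsave(), task_rq_lock() */

/* Hypothetical example: lock a runqueue directly (compare hrtick() at line 366 above). */
static void example_touch_rq(struct rq *rq)
{
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);	/* take rq->lock, save IRQ state in rf.flags, pin via rq_pin_lock() */
	/* ... read or update the runqueue while it is locked ... */
	rq_unlock_irqrestore(rq, &rf);	/* unpin, drop rq->lock, restore IRQ state */
}

/* Hypothetical example: lock the runqueue a task is on (compare task_rq_lock() at line 215 above). */
static void example_touch_task_rq(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);	/* takes p->pi_lock, then the rq lock, rechecking task_rq(p) */
	/* ... p cannot migrate off this rq while both locks are held ... */
	task_rq_unlock(rq, p, &rf);
}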