
Searched defs:rq (Results 1 - 11 of 11) sorted by relevance

/device/soc/rockchip/common/sdk_linux/kernel/sched/
pelt.c
383 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running) in update_rt_rq_load_avg() argument
405 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) in update_dl_rq_load_avg() argument
432 update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) update_thermal_load_avg() argument
456 update_irq_load_avg(struct rq *rq, u64 running) update_irq_load_avg() argument
[all...]
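
The pelt.c entries above are the per-class PELT update hooks, and the rt.c hits further down show the call pattern (set_next_task_rt at 1708, put_prev_task_rt at 1761). A minimal sketch of that pattern, assuming the kernel/sched build context (sched.h and pelt.h in scope); the wrapper names are illustrative only, not kernel functions:

    /* Illustrative callers only, mirroring the rt.c hits below.
     * Assumes kernel/sched/sched.h and pelt.h are in scope. */
    static void example_rt_start_running(struct rq *rq)
    {
        /* the period since the last update had no RT task running */
        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
    }

    static void example_rt_stop_running(struct rq *rq)
    {
        /* the period since the last update was spent running an RT task */
        update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
    }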
rt.c
174 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
264 struct rq *rq = rq_of_rt_se(rt_se); rt_rq_of_se() local
283 need_pull_rt_task(struct rq *rq, struct task_struct *prev) need_pull_rt_task() argument
292 rt_overloaded(struct rq *rq) rt_overloaded() argument
297 rt_set_overload(struct rq *rq) rt_set_overload() argument
317 rt_clear_overload(struct rq *rq) rt_clear_overload() argument
379 has_pushable_tasks(struct rq *rq) has_pushable_tasks() argument
390 rt_queue_push_tasks(struct rq *rq) rt_queue_push_tasks() argument
399 rt_queue_pull_task(struct rq *rq) rt_queue_pull_task() argument
404 enqueue_pushable_task(struct rq *rq, struct task_struct *p) enqueue_pushable_task() argument
416 dequeue_pushable_task(struct rq *rq, struct task_struct *p) dequeue_pushable_task() argument
431 enqueue_pushable_task(struct rq *rq, struct task_struct *p) enqueue_pushable_task() argument
435 dequeue_pushable_task(struct rq *rq, struct task_struct *p) dequeue_pushable_task() argument
447 need_pull_rt_task(struct rq *rq, struct task_struct *prev) need_pull_rt_task() argument
456 rt_queue_push_tasks(struct rq *rq) rt_queue_push_tasks() argument
559 struct rq *rq = rq_of_rt_rq(rt_rq); sched_rt_rq_enqueue() local
660 struct rq *rq = rq_of_rt_rq(rt_rq); sched_rt_rq_enqueue() local
764 __disable_runtime(struct rq *rq) __disable_runtime() argument
850 __enable_runtime(struct rq *rq) __enable_runtime() argument
914 struct rq *rq = rq_of_rt_rq(rt_rq); do_sched_rt_period_timer() local
1059 update_curr_rt(struct rq *rq) update_curr_rt() argument
1109 struct rq *rq = rq_of_rt_rq(rt_rq); dequeue_top_rt_rq() local
1125 struct rq *rq = rq_of_rt_rq(rt_rq); enqueue_top_rt_rq() local
1150 struct rq *rq = rq_of_rt_rq(rt_rq); inc_rt_prio_smp() local
1167 struct rq *rq = rq_of_rt_rq(rt_rq); dec_rt_prio_smp() local
1425 struct rq *rq = rq_of_rt_se(rt_se); enqueue_rt_entity() local
1434 struct rq *rq = rq_of_rt_se(rt_se); dequeue_rt_entity() local
1449 should_honor_rt_sync(struct rq *rq, struct task_struct *p, bool sync) should_honor_rt_sync() argument
1459 should_honor_rt_sync(struct rq *rq, struct task_struct *p, bool sync) should_honor_rt_sync() argument
1468 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) enqueue_task_rt() argument
1485 dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) dequeue_task_rt() argument
1514 requeue_task_rt(struct rq *rq, struct task_struct *p, int head) requeue_task_rt() argument
1525 yield_task_rt(struct rq *rq) yield_task_rt() argument
1536 struct rq *rq; select_task_rq_rt() local
1617 check_preempt_equal_prio(struct rq *rq, struct task_struct *p) check_preempt_equal_prio() argument
1644 balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf) balance_rt() argument
1665 check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) check_preempt_curr_rt() argument
1691 set_next_task_rt(struct rq *rq, struct task_struct *p, bool first) set_next_task_rt() argument
1708 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0); set_next_task_rt() local
1714 pick_next_rt_entity(struct rq *rq, struct rt_rq *rt_rq) pick_next_rt_entity() argument
1730 _pick_next_task_rt(struct rq *rq) _pick_next_task_rt() argument
1744 pick_next_task_rt(struct rq *rq) pick_next_task_rt() argument
1757 put_prev_task_rt(struct rq *rq, struct task_struct *p) put_prev_task_rt() argument
1761 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); put_prev_task_rt() local
1777 pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) pick_rt_task() argument
1790 pick_highest_pushable_task(struct rq *rq, int cpu) pick_highest_pushable_task() argument
2078 pick_next_pushable_task(struct rq *rq) pick_next_pushable_task() argument
2099 find_lock_lowest_rq(struct task_struct *task, struct rq *rq) find_lock_lowest_rq() argument
2155 push_rt_task(struct rq *rq) push_rt_task() argument
2239 push_rt_tasks(struct rq *rq) push_rt_tasks() argument
2347 tell_cpu_to_push(struct rq *rq) tell_cpu_to_push() argument
2386 struct rq *rq; rto_push_irq_work_func() local
2521 task_woken_rt(struct rq *rq, struct task_struct *p) task_woken_rt() argument
2532 rq_online_rt(struct rq *rq) rq_online_rt() argument
2544 rq_offline_rt(struct rq *rq) rq_offline_rt() argument
2559 switched_from_rt(struct rq *rq, struct task_struct *p) switched_from_rt() argument
2591 switched_to_rt(struct rq *rq, struct task_struct *p) switched_to_rt() argument
2598 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0); switched_to_rt() local
2623 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) prio_changed_rt() argument
2665 watchdog(struct rq *rq, struct task_struct *p) watchdog() argument
2688 watchdog(struct rq *rq, struct task_struct *p) watchdog() argument
2701 task_tick_rt(struct rq *rq, struct task_struct *p, int queued) task_tick_rt() argument
2706 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); task_tick_rt() local
2772 check_for_migration_rt(struct rq *rq, struct task_struct *p) check_for_migration_rt() argument
2817 stop_one_cpu_nowait(task_cpu(p), rt_active_load_balance_cpu_stop, rq, &rq->rt_active_balance_work); check_for_migration_rt() local
2823 get_rr_interval_rt(struct rq *rq, struct task_struct *task) get_rr_interval_rt() argument
[all...]
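
Taken together, the rt.c hits above are the RT policy's scheduling-class callbacks. Below is a sketch of how such callbacks are conventionally grouped into a sched_class table; the field names follow the usual kernel convention and are assumptions on top of this listing, which only records the function definitions:

    /* Sketch of the conventional sched_class wiring for the callbacks
     * listed above; field names assumed, kernel/sched context assumed. */
    const struct sched_class example_rt_sched_class = {
        .enqueue_task       = enqueue_task_rt,       /* rt.c:1468 */
        .dequeue_task       = dequeue_task_rt,       /* rt.c:1485 */
        .yield_task         = yield_task_rt,         /* rt.c:1525 */
        .check_preempt_curr = check_preempt_curr_rt, /* rt.c:1665 */
        .pick_next_task     = pick_next_task_rt,     /* rt.c:1744 */
        .put_prev_task      = put_prev_task_rt,      /* rt.c:1757 */
        .set_next_task      = set_next_task_rt,      /* rt.c:1691 */
        .task_tick          = task_tick_rt,          /* rt.c:2701 */
        .prio_changed       = prio_changed_rt,       /* rt.c:2623 */
        .switched_to        = switched_to_rt,        /* rt.c:2591 */
        .get_rr_interval    = get_rr_interval_rt,    /* rt.c:2823 */
    };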
topology.c
470 void rq_attach_root(struct rq *rq, struct root_domain *rd) in rq_attach_root() argument
713 struct rq *rq = cpu_rq(cpu); cpu_attach_domain() local
2117 struct rq *rq = NULL; build_sched_domains() local
[all...]
core.c
193 struct rq *rq; global() variable
218 struct rq *rq; global() variable
258 update_rq_clock_task(struct rq *rq, s64 delta) update_rq_clock_task() argument
314 update_rq_clock(struct rq *rq) update_rq_clock() argument
339 rq_csd_init(struct rq *rq, struct __call_single_data *csd, smp_call_func_t func) rq_csd_init() argument
352 hrtick_clear(struct rq *rq) hrtick_clear() argument
365 struct rq *rq = container_of(timer, struct rq, hrtick_timer); hrtick() local
380 __hrtick_restart(struct rq *rq) __hrtick_restart() argument
393 struct rq *rq = arg; __hrtick_start() local
406 hrtick_start(struct rq *rq, u64 delay) hrtick_start() argument
431 hrtick_start(struct rq *rq, u64 delay) hrtick_start() argument
444 hrtick_rq_init(struct rq *rq) hrtick_rq_init() argument
453 hrtick_clear(struct rq *rq) hrtick_clear() argument
457 hrtick_rq_init(struct rq *rq) hrtick_rq_init() argument
631 resched_curr(struct rq *rq) resched_curr() argument
658 struct rq *rq = cpu_rq(cpu); resched_cpu() local
746 struct rq *rq = cpu_rq(cpu); wake_up_idle_cpu() local
794 struct rq *rq = info; nohz_csd_func() local
814 sched_can_stop_tick(struct rq *rq) sched_can_stop_tick() argument
1068 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id, unsigned int clamp_value) uclamp_idle_value() argument
1085 uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id, unsigned int clamp_value) uclamp_idle_reset() argument
1096 uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id, unsigned int clamp_value) uclamp_rq_max_value() argument
1139 struct rq *rq; uclamp_update_util_min_rt_default() local
1252 uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, enum uclamp_id clamp_id) uclamp_rq_inc_id() argument
1292 uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, enum uclamp_id clamp_id) uclamp_rq_dec_id() argument
1361 uclamp_rq_inc(struct rq *rq, struct task_struct *p) uclamp_rq_inc() argument
1387 uclamp_rq_dec(struct rq *rq, struct task_struct *p) uclamp_rq_dec() argument
1408 uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p, enum uclamp_id clamp_id) uclamp_rq_reinc_id() argument
1431 struct rq *rq; uclamp_update_active() local
1649 init_uclamp_rq(struct rq *rq) init_uclamp_rq() argument
1688 uclamp_rq_inc(struct rq *rq, struct task_struct *p) uclamp_rq_inc() argument
1691 uclamp_rq_dec(struct rq *rq, struct task_struct *p) uclamp_rq_dec() argument
1714 enqueue_task(struct rq *rq, struct task_struct *p, int flags) enqueue_task() argument
1729 dequeue_task(struct rq *rq, struct task_struct *p, int flags) dequeue_task() argument
1744 activate_task(struct rq *rq, struct task_struct *p, int flags) activate_task() argument
1751 deactivate_task(struct rq *rq, struct task_struct *p, int flags) deactivate_task() argument
1825 check_class_changed(struct rq *rq, struct task_struct *p, const struct sched_class *prev_class, int oldprio) check_class_changed() argument
1840 check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) check_preempt_curr() argument
1899 move_queued_task(struct rq *rq, struct rq_flags *rf, struct task_struct *p, int new_cpu) move_queued_task() argument
1913 double_rq_unlock(cpu_rq(new_cpu), rq); move_queued_task() local
1942 __migrate_task(struct rq *rq, struct rq_flags *rf, struct task_struct *p, int dest_cpu) __migrate_task() argument
1965 struct rq *rq = this_rq(); migration_cpu_stop() local
2014 struct rq *rq = task_rq(p); do_set_cpus_allowed() local
2059 struct rq *rq; __set_cpus_allowed_ptr() local
2361 struct rq *rq; wait_task_inactive() local
2687 struct rq *rq; ttwu_stat() local
2729 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, struct rq_flags *rf) ttwu_do_wakeup() argument
2762 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, struct rq_flags *rf) ttwu_do_activate() argument
2819 struct rq *rq; ttwu_runnable() local
2838 struct rq *rq = this_rq(); sched_ttwu_pending() local
2874 struct rq *rq = cpu_rq(cpu); send_call_function_single_ipi() local
2892 struct rq *rq = cpu_rq(cpu); __ttwu_queue_wakelist() local
2902 struct rq *rq = cpu_rq(cpu); wake_up_if_idle() local
2985 struct rq *rq = cpu_rq(cpu); ttwu_queue() local
3084 struct rq *rq = cpu_rq(task_cpu(p)); walt_try_to_wake_up() local
3349 struct rq *rq; try_invoke_on_locked_down_task() local
3716 struct rq *rq; wake_up_new_task() local
3880 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) prepare_lock_switch() argument
3897 finish_lock_switch(struct rq *rq) finish_lock_switch() argument
3937 prepare_task_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) prepare_task_switch() argument
3971 struct rq *rq = this_rq(); global() variable
4009 finish_lock_switch(rq); global() variable
4054 __balance_callback(struct rq *rq) __balance_callback() argument
4074 balance_callback(struct rq *rq) balance_callback() argument
4083 balance_callback(struct rq *rq) balance_callback() argument
4096 struct rq *rq; global() variable
4108 balance_callback(rq); global() variable
4121 context_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next, struct rq_flags *rf) context_switch() argument
4339 struct rq *rq; task_sched_runtime() local
4383 struct rq *rq = cpu_rq(cpu); scheduler_tick() local
4399 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); scheduler_tick() local
4465 struct rq *rq = cpu_rq(cpu); sched_tick_remote() local
4734 put_prev_task_balance(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) put_prev_task_balance() argument
4762 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) pick_next_task() argument
4849 struct rq *rq; __schedule() local
5306 struct rq *rq; rt_mutex_setprio() local
5437 struct rq *rq; set_user_nice() local
5562 struct rq *rq = cpu_rq(cpu); idle_cpu() local
5680 struct rq *rq; __sched_setscheduler() local
6676 struct rq *rq; do_sched_yield() local
6785 struct rq *rq, *p_rq; yield_to() local
6946 struct rq *rq; sched_rr_get_interval() local
7120 struct rq *rq = cpu_rq(cpu); init_idle() local
7256 struct rq *rq; sched_setnuma() local
7310 calc_load_migrate(struct rq *rq) calc_load_migrate() argument
7318 __pick_migrate_task(struct rq *rq) __pick_migrate_task() argument
7342 detach_one_task_core(struct task_struct *p, struct rq *rq, struct list_head *tasks) detach_one_task_core() argument
7352 attach_tasks_core(struct list_head *tasks, struct rq *rq) attach_tasks_core() argument
7370 detach_one_task_core(struct task_struct *p, struct rq *rq, struct list_head *tasks) detach_one_task_core() argument
7375 attach_tasks_core(struct list_head *tasks, struct rq *rq) attach_tasks_core() argument
7392 struct rq *rq = dead_rq; migrate_tasks() local
7505 struct rq *rq = cpu_rq(cpu); clear_eas_migration_request() local
7535 struct rq *rq = cpu_rq(cpu); do_isolation_work_cpu_stop() local
7639 struct rq *rq; sched_isolate_cpu() local
7770 set_rq_online(struct rq *rq) set_rq_online() argument
7787 set_rq_offline(struct rq *rq) set_rq_offline() argument
7857 struct rq *rq = cpu_rq(cpu); sched_cpu_activate() local
7932 struct rq *rq = cpu_rq(cpu); sched_rq_cpu_starting() local
7954 struct rq *rq = cpu_rq(cpu); sched_cpu_dying() local
8124 struct rq *rq; sched_init() local
8553 struct rq *rq; sched_move_task() local
8660 struct rq *rq; cpu_cgroup_fork() local
9035 struct rq *rq = cfs_rq->rq; tg_set_cfs_bandwidth() local
9599 call_trace_sched_update_nr_running(struct rq *rq, int count) call_trace_sched_update_nr_running() argument
9620 struct rq *rq; sched_exit() local
[all...]
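
core.c carries the class-independent plumbing (update_rq_clock at 314, enqueue_task/dequeue_task at 1714/1729, activate_task at 1744, check_preempt_curr at 1840). A simplified sketch of the activate-then-preempt-check sequence these hits suggest (cf. ttwu_do_activate at 2762, wake_up_new_task at 3716); statistics, pinning, and locking details are omitted and the wrapper name is hypothetical:

    /* Hypothetical wrapper, kernel/sched context assumed; the caller
     * is expected to hold the runqueue lock. */
    static void example_activate_and_check(struct rq *rq,
                                           struct task_struct *p,
                                           int wake_flags)
    {
        update_rq_clock(rq);                    /* core.c:314 */
        activate_task(rq, p, ENQUEUE_WAKEUP);   /* enqueue + runnable bookkeeping */
        check_preempt_curr(rq, p, wake_flags);  /* may end in resched_curr(rq) */
    }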
sched.h
651 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
727 struct rq *r member
979 struct rq { global() struct
1186 cpu_of(struct rq *rq) cpu_of() argument
1198 update_idle_core(struct rq *rq) update_idle_core() argument
1206 update_idle_core(struct rq *rq) update_idle_core() argument
1221 __rq_clock_broken(struct rq *rq) __rq_clock_broken() argument
1253 assert_clock_updated(struct rq *rq) assert_clock_updated() argument
1262 rq_clock(struct rq *rq) rq_clock() argument
1270 rq_clock_task(struct rq *rq) rq_clock_task() argument
1291 rq_clock_thermal(struct rq *rq) rq_clock_thermal() argument
1296 rq_clock_skip_update(struct rq *rq) rq_clock_skip_update() argument
1306 rq_clock_cancel_skipupdate(struct rq *rq) rq_clock_cancel_skipupdate() argument
1335 rq_pin_lock(struct rq *rq, struct rq_flags *rf) rq_pin_lock() argument
1345 rq_unpin_lock(struct rq *rq, struct rq_flags *rf) rq_unpin_lock() argument
1356 rq_repin_lock(struct rq *rq, struct rq_flags *rf) rq_repin_lock() argument
1430 struct rq *rq; global() variable
1482 queue_balance_callback(struct rq *rq, struct callback_head *head, void (*func)(struct rq *rq)) queue_balance_callback() argument
1790 task_current(struct rq *rq, struct task_struct *p) task_current() argument
1795 task_running(struct rq *rq, struct task_struct *p) task_running() argument
1942 put_prev_task(struct rq *rq, struct task_struct *prev) put_prev_task() argument
1948 set_next_task(struct rq *rq, struct task_struct *next) set_next_task() argument
1971 sched_stop_runnable(struct rq *rq) sched_stop_runnable() argument
1976 sched_dl_runnable(struct rq *rq) sched_dl_runnable() argument
1981 sched_rt_runnable(struct rq *rq) sched_rt_runnable() argument
1986 sched_fair_runnable(struct rq *rq) sched_fair_runnable() argument
2005 idle_set_state(struct rq *rq, struct cpuidle_state *idle_state) idle_set_state() argument
2010 idle_get_state(struct rq *rq) idle_get_state() argument
2017 idle_set_state(struct rq *rq, struct cpuidle_state *idle_state) idle_set_state() argument
2021 idle_get_state(struct rq *rq) idle_get_state() argument
2069 sched_update_tick_dependency(struct rq *rq) sched_update_tick_dependency() argument
2087 sched_update_tick_dependency(struct rq *rq) sched_update_tick_dependency() argument
2092 add_nr_running(struct rq *rq, unsigned count) add_nr_running() argument
2112 sub_nr_running(struct rq *rq, unsigned count) sub_nr_running() argument
2138 hrtick_enabled(struct rq *rq) hrtick_enabled() argument
2153 hrtick_enabled(struct rq *rq) hrtick_enabled() argument
2412 nohz_balance_exit_idle(struct rq *rq) nohz_balance_exit_idle() argument
2426 struct rq *rq = cpu_rq(i); __dl_update() local
2495 cpufreq_update_util(struct rq *rq, unsigned int flags) cpufreq_update_util() argument
2515 cpufreq_update_util(struct rq *rq, unsigned int flags) cpufreq_update_util() argument
2540 uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p) uclamp_rq_util_with() argument
2595 uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p) uclamp_rq_util_with() argument
2651 cpu_bw_dl(struct rq *rq) cpu_bw_dl() argument
2656 cpu_util_dl(struct rq *rq) cpu_util_dl() argument
2661 cpu_util_cfs(struct rq *rq) cpu_util_cfs() argument
2672 cpu_util_rt(struct rq *rq) cpu_util_rt() argument
2685 cpu_util_irq(struct rq *rq) cpu_util_irq() argument
2698 cpu_util_irq(struct rq *rq) cpu_util_irq() argument
2737 membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm) membarrier_switch_mm() argument
2753 membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm) membarrier_switch_mm() argument
2812 walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta) walt_fixup_cum_window_demand() argument
2823 struct rq *rq = cpu_rq(src_cpu); same_freq_domain() local
2837 struct rq *rq = cpu_rq(cpu); is_reserved() local
2844 struct rq *rq = cpu_rq(cpu); mark_reserved() local
2851 struct rq *rq = cpu_rq(cpu); clear_reserved() local
2988 struct rq *rq = cpu_rq(cpu); cpu_util_freq_walt() local
3006 walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta) walt_fixup_cum_window_demand() argument
[all...]
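
Most of the sched.h hits are small inline accessors over struct rq (cpu_of at 1186, rq_clock at 1262, rq_clock_task at 1270, add_nr_running at 2092). A deliberately cut-down, self-contained sketch of that accessor idiom follows; the struct is illustrative and omits nearly all of the real struct rq (sched.h:979):

    /* Self-contained illustration; assumes only <linux/types.h>.
     * The field set is a tiny subset chosen for the example. */
    struct rq_example {
        unsigned int nr_running;
        u64          clock;        /* advanced by an update_rq_clock()-style helper */
        u64          clock_task;   /* clock with irq/steal time removed */
        int          cpu;
    };

    static inline int example_cpu_of(struct rq_example *rq)
    {
        return rq->cpu;            /* cf. cpu_of(), sched.h:1186 */
    }

    static inline u64 example_rq_clock(struct rq_example *rq)
    {
        return rq->clock;          /* cf. rq_clock(), sched.h:1262 */
    }

    static inline void example_add_nr_running(struct rq_example *rq, unsigned int count)
    {
        rq->nr_running += count;   /* cf. add_nr_running(), sched.h:2092 */
    }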
fair.c
343 struct rq *rq = rq_of(cfs_rq); in list_add_leaf_cfs_rq() local
410 struct rq *rq = rq_of(cfs_rq); list_del_leaf_cfs_rq() local
428 assert_list_leaf_cfs_rq(struct rq *rq) assert_list_leaf_cfs_rq() argument
500 struct rq *rq = task_rq(p); cfs_rq_of() local
527 assert_list_leaf_cfs_rq(struct rq *rq) assert_list_leaf_cfs_rq() argument
930 update_curr_fair(struct rq *rq) update_curr_fair() argument
1254 account_numa_enqueue(struct rq *rq, struct task_struct *p) account_numa_enqueue() argument
1260 account_numa_dequeue(struct rq *rq, struct task_struct *p) account_numa_dequeue() argument
1678 struct rq *rq = cpu_rq(cpu); update_numa_stats() local
1711 struct rq *rq = cpu_rq(env->dst_cpu); task_numa_assign() local
3018 task_tick_numa(struct rq *rq, struct task_struct *curr) task_tick_numa() argument
3089 task_tick_numa(struct rq *rq, struct task_struct *curr) task_tick_numa() argument
3093 account_numa_enqueue(struct rq *rq, struct task_struct *p) account_numa_enqueue() argument
3097 account_numa_dequeue(struct rq *rq, struct task_struct *p) account_numa_dequeue() argument
3112 struct rq *rq = rq_of(cfs_rq); account_entity_enqueue() local
3393 struct rq *rq = rq_of(cfs_rq); cfs_rq_util_change() local
4233 update_misfit_status(struct task_struct *p, struct rq *rq) update_misfit_status() argument
4294 newidle_balance(struct rq *rq, struct rq_flags *rf) newidle_balance() argument
4310 update_misfit_status(struct task_struct *p, struct rq *rq) update_misfit_status() argument
4951 struct rq *rq = data; tg_unthrottle_up() local
4969 struct rq *rq = data; tg_throttle_down() local
4984 struct rq *rq = rq_of(cfs_rq); throttle_cfs_rq() local
5058 struct rq *rq = rq_of(cfs_rq); unthrottle_cfs_rq() local
5158 struct rq *rq = rq_of(cfs_rq); distribute_cfs_runtime() local
5566 update_runtime_enabled(struct rq *rq) update_runtime_enabled() argument
5586 unthrottle_offline_cfs_rqs(struct rq *rq) unthrottle_offline_cfs_rqs() argument
5675 update_runtime_enabled(struct rq *rq) update_runtime_enabled() argument
5678 unthrottle_offline_cfs_rqs(struct rq *rq) unthrottle_offline_cfs_rqs() argument
5689 hrtick_start_fair(struct rq *rq, struct task_struct *p) hrtick_start_fair() argument
5716 hrtick_update(struct rq *rq) hrtick_update() argument
5729 hrtick_start_fair(struct rq *rq, struct task_struct *p) hrtick_start_fair() argument
5733 hrtick_update(struct rq *rq) hrtick_update() argument
5744 update_overutilized_status(struct rq *rq) update_overutilized_status() argument
5752 update_overutilized_status(struct rq *rq) update_overutilized_status() argument
5758 sched_idle_rq(struct rq *rq) sched_idle_rq() argument
5792 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) enqueue_task_fair() argument
5912 dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) dequeue_task_fair() argument
6000 cpu_load(struct rq *rq) cpu_load() argument
6018 cpu_load_without(struct rq *rq, struct task_struct *p) cpu_load_without() argument
6037 cpu_runnable(struct rq *rq) cpu_runnable() argument
6042 cpu_runnable_without(struct rq *rq, struct task_struct *p) cpu_runnable_without() argument
6244 struct rq *rq = cpu_rq(i); find_idlest_group_cpu() local
6366 fair_update_idle_core(struct rq *rq) fair_update_idle_core() argument
7253 balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) balance_fair() argument
7369 check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) check_preempt_wakeup() argument
7459 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) pick_next_task_fair() argument
7612 fair_pick_next_task_fair(struct rq *rq) fair_pick_next_task_fair() argument
7620 put_prev_task_fair(struct rq *rq, struct task_struct *prev) put_prev_task_fair() argument
7636 yield_task_fair(struct rq *rq) yield_task_fair() argument
7668 yield_to_task_fair(struct rq *rq, struct task_struct *p) yield_to_task_fair() argument
8296 attach_task(struct rq *rq, struct task_struct *p) attach_task() argument
8309 attach_one_task(struct rq *rq, struct task_struct *p) attach_one_task() argument
8356 others_have_blocked(struct rq *rq) others_have_blocked() argument
8379 update_blocked_load_status(struct rq *rq, bool has_blocked) update_blocked_load_status() argument
8392 others_have_blocked(struct rq *rq) others_have_blocked() argument
8396 update_blocked_load_status(struct rq *rq, bool has_blocked) update_blocked_load_status() argument
8401 fair_update_blocked_others(struct rq *rq, bool *done) fair_update_blocked_others() argument
8450 fair_update_blocked_fair(struct rq *rq, bool *done) fair_update_blocked_fair() argument
8501 struct rq *rq = rq_of(cfs_rq); update_cfs_rq_h_load() local
8541 fair_update_blocked_fair(struct rq *rq, bool *done) fair_update_blocked_fair() argument
8563 struct rq *rq = cpu_rq(cpu); update_blocked_averages() local
8643 struct rq *rq = cpu_rq(cpu); scale_rt_capacity() local
8758 check_cpu_capacity(struct rq *rq, struct sched_domain *sd) check_cpu_capacity() argument
8768 check_misfit_status(struct rq *rq, struct sched_domain *sd) check_misfit_status() argument
8905 update_nohz_stats(struct rq *rq, bool force) update_nohz_stats() argument
8948 struct rq *rq = cpu_rq(i); update_sg_lb_stats() local
9163 fbq_classify_rq(struct rq *rq) fbq_classify_rq() argument
9179 fbq_classify_rq(struct rq *rq) fbq_classify_rq() argument
9214 struct rq *rq = cpu_rq(cpu); idle_cpu_without() local
9251 struct rq *rq = cpu_rq(i); update_sg_wakeup_stats() local
9916 struct rq *busiest = NULL, *rq; find_busiest_queue() local
10632 rebalance_domains(struct rq *rq, enum cpu_idle_type idle) rebalance_domains() argument
10735 on_null_domain(struct rq *rq) on_null_domain() argument
10810 nohz_balancer_kick(struct rq *rq) nohz_balancer_kick() argument
10951 nohz_balance_exit_idle(struct rq *rq) nohz_balance_exit_idle() argument
10988 struct rq *rq = cpu_rq(cpu); nohz_balance_enter_idle() local
11073 struct rq *rq; _nohz_idle_balance() local
11234 nohz_balancer_kick(struct rq *rq) nohz_balancer_kick() argument
11416 trigger_load_balance(struct rq *rq) trigger_load_balance() argument
11432 rq_online_fair(struct rq *rq) rq_online_fair() argument
11439 rq_offline_fair(struct rq *rq) rq_offline_fair() argument
11448 kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu) kick_active_balance() argument
11471 check_for_migration_fair(struct rq *rq, struct task_struct *p) check_for_migration_fair() argument
11544 task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) task_tick_fair() argument
11571 struct rq *rq = this_rq(); task_fork_fair() local
11602 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) prio_changed_fair() argument
11747 switched_from_fair(struct rq *rq, struct task_struct *p) switched_from_fair() argument
11752 switched_to_fair(struct rq *rq, struct task_struct *p) switched_to_fair() argument
11775 set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) set_next_task_fair() argument
11912 struct rq *rq; online_fair_sched_group() local
11930 struct rq *rq; unregister_fair_sched_group() local
11958 struct rq *rq = cpu_rq(cpu); init_tg_cfs_entry() local
12008 struct rq *rq = cpu_rq(i); sched_group_set_shares() local
12047 get_rr_interval_fair(struct rq *rq, struct task_struct *task) get_rr_interval_fair() argument
12186 struct rq *rq = rq_of(tcfs_rq); walt_inc_throttled_cfs_rq_stats() local
12197 struct rq *rq = rq_of(tcfs_rq); walt_dec_throttled_cfs_rq_stats() local
12211 walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p, u16 updated_demand_scaled) walt_fixup_sched_stats_fair() argument
12234 walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p, u16 updated_demand_scaled) walt_fixup_sched_stats_fair() argument
12276 sched_trace_rq_avg_rt(struct rq *rq) sched_trace_rq_avg_rt() argument
12286 sched_trace_rq_avg_dl(struct rq *rq) sched_trace_rq_avg_dl() argument
12296 sched_trace_rq_avg_irq(struct rq *rq) sched_trace_rq_avg_irq() argument
12306 sched_trace_rq_cpu(struct rq *rq) sched_trace_rq_cpu() argument
12312 sched_trace_rq_cpu_capacity(struct rq *rq) sched_trace_rq_cpu_capacity() argument
12334 sched_trace_rq_nr_running(struct rq *rq) sched_trace_rq_nr_running() argument
[all...]
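
Among the fair.c hits, cpu_load/cpu_load_without (6000/6018) and cpu_runnable/cpu_runnable_without (6037/6042) follow the same discounting idiom: when evaluating a CPU for a task already queued there, subtract that task's own contribution first. A hedged, kernel-independent sketch of the idiom; the real helpers read the cfs_rq PELT averages rather than taking plain arguments:

    /* Stand-alone sketch of the "*_without()" discount, with the runqueue
     * and task contributions passed in as plain numbers for illustration. */
    static unsigned long example_load_without(unsigned long rq_load,
                                              unsigned long task_load,
                                              int task_queued_here)
    {
        if (!task_queued_here)
            return rq_load;        /* the task contributes nothing to this rq */

        /* discount the task, guarding against underflow */
        return rq_load >= task_load ? rq_load - task_load : 0;
    }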
/device/soc/rockchip/common/vendor/drivers/staging/android/fiq_debugger/
fiq_debugger.c
401 static void fiq_debugger_do_sysrq(struct fiq_debugger_state *state, char rq) in fiq_debugger_do_sysrq() argument
/device/soc/rockchip/common/sdk_linux/drivers/gpu/drm/i915/gt/
intel_engine_cs.c
737 struct i915_request rq; member
1315 static void print_request(struct drm_printer *m, struct i915_request *rq, const char *prefix) in print_request() argument
1333 get_timeline(struct i915_request *rq) get_timeline() argument
1355 print_ring(char *buf, int sz, struct i915_request *rq) print_ring() argument
1465 struct i915_request *const *port, *rq; intel_engine_print_registers() local
1530 print_request_ring(struct drm_printer *m, struct i915_request *rq) print_request_ring() argument
1575 struct i915_request *rq; intel_engine_dump() local
1693 match_ring(struct i915_request *rq) match_ring() argument
[all...]
/device/soc/rockchip/rk3588/kernel/drivers/staging/android/fiq_debugger/
fiq_debugger.c
388 static void fiq_debugger_do_sysrq(struct fiq_debugger_state *state, char rq) in fiq_debugger_do_sysrq() argument
/device/soc/rockchip/common/kernel/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd_wifi6/
wl_iw.c
3390 wl_iw_ioctl( struct net_device *dev, struct ifreq *rq, int cmd ) wl_iw_ioctl() argument
/device/soc/rockchip/rk3588/kernel/drivers/net/ethernet/realtek/r8168/
r8168_n.c
902 static inline struct mii_ioctl_data *if_mii(struct ifreq *rq) in if_mii() argument
[all...]
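
The single r8168_n.c hit defines if_mii(), the helper that reinterprets an ioctl's struct ifreq payload as MII register data. A sketch of the usual caller shape in a driver ioctl handler follows; the handler name and the placeholder PHY address are hypothetical, and only if_mii() plus the standard SIOC*MII* cases come from known kernel convention:

    /* Hypothetical handler shape; not r8168's actual ioctl code. */
    static int example_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
    {
        struct mii_ioctl_data *data = if_mii(rq);   /* r8168_n.c:902 */

        switch (cmd) {
        case SIOCGMIIPHY:
            data->phy_id = 1;       /* hypothetical PHY address */
            return 0;
        case SIOCGMIIREG:           /* would read data->reg_num; omitted here */
        case SIOCSMIIREG:           /* would write data->val_in; omitted here */
        default:
            return -EOPNOTSUPP;
        }
    }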

Completed in 61 milliseconds