/device/soc/rockchip/common/sdk_linux/kernel/
    cpu.c | 17 #include <linux/cpu.h> 50 * cpuhp_cpu_state - Per cpu hotplug state storage 51 * @state: The current cpu state 60 * @done_up: Signal completion to the issuer of the task for cpu-up 61 * @done_down: Signal completion to the issuer of the task for cpu-down 124 int (*single)(unsigned int cpu); 125 int (*multi)(unsigned int cpu, struct hlist_node *node); 128 int (*single)(unsigned int cpu); 129 int (*multi)(unsigned int cpu, struct hlist_node *node); 146 * @cpu 154 cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, bool bringup, struct hlist_node *node, struct hlist_node **lastp) cpuhp_invoke_callback() argument 448 cpu_smt_allowed(unsigned int cpu) cpu_smt_allowed() argument 474 cpu_smt_allowed(unsigned int cpu) cpu_smt_allowed() argument 547 bringup_wait_for_ap(unsigned int cpu) bringup_wait_for_ap() argument 578 bringup_cpu(unsigned int cpu) bringup_cpu() argument 605 finish_cpu(unsigned int cpu) finish_cpu() argument 625 undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) undo_cpu_up() argument 647 cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target) cpuhp_up_callbacks() argument 669 cpuhp_create(unsigned int cpu) cpuhp_create() argument 677 cpuhp_should_run(unsigned int cpu) cpuhp_should_run() argument 698 cpuhp_thread_fun(unsigned int cpu) cpuhp_thread_fun() argument 773 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, struct hlist_node *node) cpuhp_invoke_ap_callback() argument 824 cpuhp_kick_ap_work(unsigned int cpu) cpuhp_kick_ap_work() argument 922 clear_tasks_mm_cpumask(int cpu) clear_tasks_mm_cpumask() argument 958 int err, cpu = smp_processor_id(); take_cpu_down() local 991 takedown_cpu(unsigned int cpu) takedown_cpu() argument 1061 undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) undo_cpu_down() argument 1068 cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target) cpuhp_down_callbacks() argument 1087 _cpu_down(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) _cpu_down() argument 1158 cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target) cpu_down_maps_locked() argument 1166 cpu_down(unsigned int cpu, enum cpuhp_state target) cpu_down() argument 1189 remove_cpu(unsigned int cpu) remove_cpu() argument 1203 unsigned int cpu; smp_shutdown_nonboot_cpus() local 1256 notify_cpu_starting(unsigned int cpu) notify_cpu_starting() argument 1299 _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) _cpu_up() argument 1361 cpu_up(unsigned int cpu, enum cpuhp_state target) cpu_up() argument 1408 add_cpu(unsigned int cpu) add_cpu() argument 1445 unsigned int cpu; bringup_nonboot_cpus() local 1463 int cpu, error = 0; freeze_secondary_cpus() local 1534 int cpu, error; thaw_secondary_cpus() local 1864 cpuhp_store_callbacks(enum cpuhp_state state, const char *name, int (*startup)(unsigned int cpu), int (*teardown)(unsigned int cpu), bool multi_instance) cpuhp_store_callbacks() argument 1909 cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, struct hlist_node *node) cpuhp_issue_call() argument 1945 int cpu; cpuhp_rollback_install() local 1967 int cpu; __cpuhp_state_add_instance_cpuslocked() local 2040 __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name, bool invoke, int (*startup)(unsigned int cpu), int (*teardown)(unsigned int cpu), bool multi_instance) __cpuhp_setup_state_cpuslocked() argument 2044 int cpu, ret = 0; __cpuhp_setup_state_cpuslocked() local 2102 __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke, int (*startup)(unsigned int cpu), int (*teardown)(unsigned int cpu), bool multi_instance) __cpuhp_setup_state() argument 2117 int cpu; __cpuhp_state_remove_instance() local 2168 int cpu; __cpuhp_remove_state_cpuslocked() local 2213 cpuhp_offline_cpu_device(unsigned int cpu) cpuhp_offline_cpu_device() argument 2222 cpuhp_online_cpu_device(unsigned int cpu) cpuhp_online_cpu_device() argument 2233 int cpu, ret = 0; cpuhp_smt_disable() local 2269 int cpu, ret = 0; cpuhp_smt_enable() local 2520 int cpu, ret; cpuhp_sysfs_init() local 2621 set_cpu_online(unsigned int cpu, bool online) set_cpu_online() argument 2649 int cpu = smp_processor_id(); boot_cpu_init() local …
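    The cpu.c hits above are the core of the kernel's CPU hotplug (cpuhp) state machine: paired startup/teardown callbacks are stored per state and invoked as each CPU steps up or down through the states. For orientation, a minimal sketch of how a driver hooks into this machinery through the public cpuhp_setup_state() API; the "demo" names are hypothetical.

    #include <linux/cpu.h>
    #include <linux/cpuhotplug.h>
    #include <linux/module.h>

    /* Hypothetical per-driver hooks, run for each CPU that reaches
     * (startup) or leaves (teardown) the registered state. */
    static int demo_cpu_online(unsigned int cpu)
    {
            pr_info("demo: cpu %u came online\n", cpu);
            return 0;               /* non-zero rolls the bring-up back */
    }

    static int demo_cpu_offline(unsigned int cpu)
    {
            pr_info("demo: cpu %u going offline\n", cpu);
            return 0;
    }

    static enum cpuhp_state demo_state;

    static int __init demo_init(void)
    {
            int ret;

            /* CPUHP_AP_ONLINE_DYN asks the core to allocate a dynamic
             * state; the startup callback also runs immediately for every
             * CPU that is already online. */
            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
                                    demo_cpu_online, demo_cpu_offline);
            if (ret < 0)
                    return ret;
            demo_state = ret;       /* remember the state for removal */
            return 0;
    }

    static void __exit demo_exit(void)
    {
            /* Runs the teardown callback on all online CPUs. */
            cpuhp_remove_state(demo_state);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");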
/device/soc/rockchip/common/sdk_linux/include/trace/events/
    cpuhp.h | 12 TP_PROTO(unsigned int cpu, int target, int idx, int (*fun)(unsigned int)), 14 TP_ARGS(cpu, target, idx, fun), 16 TP_STRUCT__entry(__field(unsigned int, cpu) __field(int, target) __field(int, idx) __field(void *, fun)), 18 TP_fast_assign(__entry->cpu = cpu; __entry->target = target; __entry->idx = idx; __entry->fun = fun;), 20 TP_printk("cpu: %04u target: %3d step: %3d (%ps)", __entry->cpu, __entry->target, __entry->idx, 25 TP_PROTO(unsigned int cpu, int target, int idx, int (*fun)(unsigned int, struct hlist_node *), 28 TP_ARGS(cpu, target, idx, fun, node), 30 TP_STRUCT__entry(__field(unsigned int, cpu) __field…
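    These fragments are TRACE_EVENT() definitions. Reassembled from the fields shown above, the first event (cpuhp_enter in mainline include/trace/events/cpuhp.h) reads as follows; this tree may differ slightly from mainline.

    TRACE_EVENT(cpuhp_enter,

            TP_PROTO(unsigned int cpu, int target, int idx,
                     int (*fun)(unsigned int)),

            TP_ARGS(cpu, target, idx, fun),

            TP_STRUCT__entry(
                    __field(unsigned int, cpu)
                    __field(int,          target)
                    __field(int,          idx)
                    __field(void *,       fun)
            ),

            TP_fast_assign(
                    __entry->cpu    = cpu;
                    __entry->target = target;
                    __entry->idx    = idx;
                    __entry->fun    = fun;
            ),

            /* One line per hotplug step: which CPU, the target state,
             * the current step index, and the callback being run. */
            TP_printk("cpu: %04u target: %3d step: %3d (%ps)",
                      __entry->cpu, __entry->target, __entry->idx,
                      __entry->fun)
    );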
    sched.h | 409 * Tracepoint for waking a polling cpu without an IPI. 413 TP_PROTO(int cpu), 415 TP_ARGS(cpu), 417 TP_STRUCT__entry(__field(int, cpu)), 419 TP_fast_assign(__entry->cpu = cpu;), 421 TP_printk("cpu=%d", __entry->cpu)); 426 TP_PROTO(unsigned int cpu, unsigned int old_need, unsigned int new_need, unsigned int updated), 427 TP_ARGS(cpu, old_need, …
/device/soc/rockchip/common/sdk_linux/kernel/sched/
    topology.c | 42 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, struct cpumask *groupmask) in sched_domain_debug_one() argument 53 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { in sched_domain_debug_one() 54 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu); in sched_domain_debug_one() 56 if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) { in sched_domain_debug_one() 57 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu); in sched_domain_debug_one() 130 static void sched_domain_debug(struct sched_domain *sd, int cpu) in sched_domain_debug() argument 139 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); in sched_domain_debug() 143 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu); in sched_domain_debug() 146 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) { in sched_domain_debug() 159 #define sched_domain_debug(sd, cpu) \ 260 find_pd(struct perf_domain *pd, int cpu) find_pd() argument 272 pd_init(int cpu) pd_init() argument 363 int cpu = cpumask_first(cpu_map); build_perf_domains() local 678 update_top_cache_domain(int cpu) update_top_cache_domain() argument 711 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) cpu_attach_domain() argument 935 build_group_from_child_sched_domain(struct sched_domain *sd, int cpu) build_group_from_child_sched_domain() argument 961 int cpu; init_overlap_sched_group() local 1006 build_overlap_sched_groups(struct sched_domain *sd, int cpu) build_overlap_sched_groups() argument 1175 get_group(int cpu, struct sd_data *sdd) get_group() argument 1221 build_sched_groups(struct sched_domain *sd, int cpu) build_sched_groups() argument 1270 init_sched_groups_capacity(int cpu, struct sched_domain *sd) init_sched_groups_capacity() argument 1280 int cpu, max_cpu = -1; init_sched_groups_capacity() local 1397 claim_allocations(int cpu, struct sched_domain *sd) claim_allocations() argument 1447 sd_init(struct sched_domain_topology_level *tl, const struct cpumask *cpu_map, struct sched_domain *child, int dflags, int cpu) sd_init() argument 1574 sd_numa_mask(int cpu) sd_numa_mask() argument 1822 sched_domains_numa_masks_set(unsigned int cpu) sched_domains_numa_masks_set() argument 1836 sched_domains_numa_masks_clear(unsigned int cpu) sched_domains_numa_masks_clear() argument 1855 sched_numa_find_closest(const struct cpumask *cpus, int cpu) sched_numa_find_closest() argument 1984 build_sched_domain(struct sched_domain_topology_level *tl, const struct cpumask *cpu_map, struct sched_domain_attr *attr, struct sched_domain *child, int dflags, int cpu) build_sched_domain() argument 2013 topology_span_sane(struct sched_domain_topology_level *tl, const struct cpumask *cpu_map, int cpu) topology_span_sane() argument 2319 unsigned int cpu = cpumask_any(cpu_map); detach_destroy_domains() local …
    rt.c | 171 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int cpu, in init_tg_rt_entry() argument 174 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() 181 tg->rt_rq[cpu] = rt_rq; in init_tg_rt_entry() 182 tg->rt_se[cpu] = rt_se; in init_tg_rt_entry() 286 * Try to pull RT tasks here if we lower this rq's prio and cpu is not in need_pull_rt_task() 303 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask); in rt_set_overload() 325 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); in rt_clear_overload() 396 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks); in rt_queue_push_tasks() 401 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task); in rt_queue_pull_task() 471 * Verify the fitness of task @p to run on @cpu taking … 484 rt_task_fits_capacity(struct task_struct *p, int cpu) rt_task_fits_capacity() argument 503 rt_task_fits_capacity(struct task_struct *p, int cpu) rt_task_fits_capacity() argument 562 int cpu = cpu_of(rq); sched_rt_rq_enqueue() local 582 int cpu = cpu_of(rq_of_rt_rq(rt_rq)); sched_rt_rq_dequeue() local 625 sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) sched_rt_period_rt_rq() argument 685 sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) sched_rt_period_rt_rq() argument 1533 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags) select_task_rq_rt() argument 1777 pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) pick_rt_task() argument 1790 pick_highest_pushable_task(struct rq *rq, int cpu) pick_highest_pushable_task() argument 1817 int cpu = -1; find_cas_cpu() local 1981 int cpu = task_cpu(task); find_lowest_rq() local 2103 int cpu; find_lock_lowest_rq() local 2293 int cpu; rto_next_cpu() local 2349 int cpu = -1; tell_cpu_to_push() local 2387 int cpu; rto_push_irq_work_func() local 2419 int this_cpu = this_rq->cpu, cpu; pull_rt_task() local 2776 int cpu = task_cpu(p); check_for_migration_rt() local 3212 print_rt_stats(struct seq_file *m, int cpu) print_rt_stats() argument …
    core.c | 230 * [S] ->cpu = new_cpu [L] task_rq() 634 int cpu; in resched_curr() local 642 cpu = cpu_of(rq); in resched_curr() 643 if (cpu == smp_processor_id()) { in resched_curr() 650 smp_send_reschedule(cpu); in resched_curr() 652 trace_sched_wake_idle_without_ipi(cpu); in resched_curr() 656 void resched_cpu(int cpu) in resched_cpu() argument 658 struct rq *rq = cpu_rq(cpu); in resched_cpu() 662 if (cpu_online(cpu) || cpu… in resched_cpu() 680 int i, cpu = smp_processor_id(), default_cpu = -1; get_nohz_timer_target() local 744 wake_up_idle_cpu(int cpu) wake_up_idle_cpu() argument 759 wake_up_full_nohz_cpu(int cpu) wake_up_full_nohz_cpu() argument 785 wake_up_nohz_cpu(int cpu) wake_up_nohz_cpu() argument 795 int cpu = cpu_of(rq); nohz_csd_func() local 1665 int cpu; init_uclamp() local 1863 is_cpu_allowed(struct task_struct *p, int cpu) is_cpu_allowed() argument 2217 __migrate_swap_task(struct task_struct *p, int cpu) __migrate_swap_task() argument 2466 int cpu; kick_process() local 2500 select_fallback_rq(int cpu, struct task_struct *p, bool allow_iso) select_fallback_rq() argument 2605 select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) select_task_rq() argument 2644 sched_set_stop_task(int cpu, struct task_struct *stop) sched_set_stop_task() argument 2685 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) ttwu_stat() argument 2872 send_call_function_single_ipi(int cpu) send_call_function_single_ipi() argument 2889 __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) __ttwu_queue_wakelist() argument 2900 wake_up_if_idle(int cpu) wake_up_if_idle() argument 2935 ttwu_queue_cond(int cpu, int wake_flags) ttwu_queue_cond() argument 2958 ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) ttwu_queue_wakelist() argument 2975 ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) ttwu_queue_wakelist() argument 2983 ttwu_queue(struct task_struct *p, int cpu, int wake_flags) ttwu_queue() argument 3142 int cpu, success = 0; try_to_wake_up() local 4232 nr_iowait_cpu(int cpu) nr_iowait_cpu() argument 4382 int cpu = smp_processor_id(); scheduler_tick() local 4426 int cpu; global() member 4464 int cpu = twork->cpu; sched_tick_remote() local 4518 sched_tick_start(int cpu) sched_tick_start() argument 4540 sched_tick_stop(int cpu) sched_tick_stop() argument 4567 sched_tick_start(int cpu) sched_tick_start() argument 4570 sched_tick_stop(int cpu) sched_tick_stop() argument 4850 int cpu; __schedule() local 5560 idle_cpu(int cpu) idle_cpu() argument 5587 available_idle_cpu(int cpu) available_idle_cpu() argument 5606 idle_task(int cpu) idle_task() argument 7118 init_idle(struct task_struct *idle, int cpu) init_idle() argument 7216 int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus); task_can_attach() local 7503 clear_eas_migration_request(int cpu) clear_eas_migration_request() argument 7526 clear_eas_migration_request(int cpu) clear_eas_migration_request() argument 7534 unsigned int cpu = smp_processor_id(); do_isolation_work_cpu_stop() local 7576 sched_update_group_capacities(int cpu) sched_update_group_capacities() argument 7637 sched_isolate_cpu(int cpu) sched_isolate_cpu() argument 7715 sched_unisolate_cpu_unlocked(int cpu) sched_unisolate_cpu_unlocked() argument 7756 sched_unisolate_cpu(int cpu) sched_unisolate_cpu() argument 7840 cpuset_cpu_inactive(unsigned int cpu) cpuset_cpu_inactive() argument 7855 sched_cpu_activate(unsigned int cpu) sched_cpu_activate() argument 7894 sched_cpu_deactivate(unsigned int cpu) sched_cpu_deactivate() argument 7930 sched_rq_cpu_starting(unsigned int cpu) sched_rq_cpu_starting() argument 7943 sched_cpu_starting(unsigned int cpu) sched_cpu_starting() argument 7952 sched_cpu_dying(unsigned int cpu) sched_cpu_dying() argument 8393 curr_task(int cpu) curr_task() argument 8416 ia64_set_curr_task(int cpu, struct task_struct *p) ia64_set_curr_task() argument 9536 dump_cpu_task(int cpu) dump_cpu_task() argument …
    sched.h | 152 extern void init_sched_groups_capacity(int cpu, struct sched_domain *sd); 348 * - store the maximum -deadline bandwidth of each cpu; 389 * Verify the fitness of task @p to run on @cpu taking into account the 393 * @cpu scaled by SCHED_CAPACITY_SCALE >= runtime/deadline ratio of the 396 static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) in dl_task_fits_capacity() argument 398 unsigned long cap = arch_scale_cpu_capacity(cpu); in dl_task_fits_capacity() 413 extern int dl_cpu_busy(int cpu, struct task_struct *p); 545 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, 555 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int cpu, 1082 int cpu; member 1455 sched_domains_numa_masks_set(unsigned int cpu) sched_domains_numa_masks_set() argument 1458 sched_domains_numa_masks_clear(unsigned int cpu) sched_domains_numa_masks_clear() argument 1461 sched_numa_find_closest(const struct cpumask *cpus, int cpu) sched_numa_find_closest() argument 1516 highest_flag_domain(int cpu, int flag) highest_flag_domain() argument 1530 lowest_flag_domain(int cpu, int flag) lowest_flag_domain() argument 1621 dirty_sched_domain_sysctl(int cpu) dirty_sched_domain_sysctl() argument 1661 set_task_rq(struct task_struct *p, unsigned int cpu) set_task_rq() argument 1681 set_task_rq(struct task_struct *p, unsigned int cpu) set_task_rq() argument 1691 __set_task_cpu(struct task_struct *p, unsigned int cpu) __set_task_cpu() argument 2071 int cpu = cpu_of(rq); sched_update_tick_dependency() local 2186 arch_scale_freq_capacity(int cpu) arch_scale_freq_capacity() argument 2455 irq_time_read(int cpu) irq_time_read() argument 2620 capacity_of(int cpu) capacity_of() argument 2625 capacity_orig_of(int cpu) capacity_orig_of() argument 2677 schedutil_cpu_util(int cpu, unsigned long util_cfs, unsigned long max, enum schedutil_type type, struct task_struct *p) schedutil_cpu_util() argument 2835 is_reserved(int cpu) is_reserved() argument 2842 mark_reserved(int cpu) mark_reserved() argument 2849 clear_reserved(int cpu) clear_reserved() argument 2856 cpu_capacity(int cpu) cpu_capacity() argument 2861 cpu_max_possible_capacity(int cpu) cpu_max_possible_capacity() argument 2866 cpu_load_scale_factor(int cpu) cpu_load_scale_factor() argument 2939 is_max_capacity_cpu(int cpu) is_max_capacity_cpu() argument 2944 is_min_capacity_cpu(int cpu) is_min_capacity_cpu() argument 2980 power_cost(int cpu, u64 demand) power_cost() argument 2985 cpu_util_freq_walt(int cpu) cpu_util_freq_walt() argument 3015 is_reserved(int cpu) is_reserved() argument 3020 clear_reserved(int cpu) clear_reserved() argument …
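    Lines 389-398 document the deadline-fitness rule: a task fits @cpu when the CPU's capacity scaled by SCHED_CAPACITY_SCALE is at least the task's runtime/deadline ratio. Cross-multiplying to avoid a division gives the check below; this is a sketch matching the mainline helper (cap_scale() normally lives in fair.c), so minor version drift is possible.

    /* cap_scale(v, cap) = v * cap / SCHED_CAPACITY_SCALE (1024). */
    #define cap_scale(v, cap) (((v) * (cap)) >> SCHED_CAPACITY_SHIFT)

    static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
    {
            unsigned long cap = arch_scale_cpu_capacity(cpu);

            /*
             * cap/1024 >= runtime/deadline, rewritten without a division:
             * deadline * cap / 1024 >= runtime.
             */
            return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime;
    }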
    fair.c | 143 int __weak arch_asym_cpu_priority(int cpu) in arch_asym_cpu_priority() argument 145 return -cpu; in arch_asym_cpu_priority() 344 int cpu = cpu_of(rq); in list_add_leaf_cfs_rq() local 361 if (cfs_rq->tg->parent && cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { in list_add_leaf_cfs_rq() 368 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); in list_add_leaf_cfs_rq() 787 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu); 1579 static inline bool is_core_idle(int cpu) in is_core_idle() argument 1584 for_each_cpu(sibling, cpu_smt_mask(cpu)) in is_core_idle() 1586 if (cpu == sibling) { in is_core_idle() 1638 static inline bool test_idle_cores(int cpu, bool def) 1639 numa_idle_core(int idle_core, int cpu) numa_idle_core() argument 1656 numa_idle_core(int idle_core, int cpu) numa_idle_core() argument 1670 int cpu, idle_core = -1; update_numa_stats() local 1715 int cpu; task_numa_assign() local 1941 int cpu = env->dst_stats.idle_cpu; task_numa_compare() local 1986 int cpu; task_numa_find_cpu() local 2575 int cpu = cpupid_to_cpu(cpupid); task_numa_group() local 4221 task_fits_max(struct task_struct *p, int cpu) task_fits_max() argument 4237 int cpu = cpu_of(rq); update_misfit_status() local 5409 sync_throttle(struct task_group *tg, int cpu) sync_throttle() argument 5636 sync_throttle(struct task_group *tg, int cpu) sync_throttle() argument 5739 cpu_overutilized(int cpu) cpu_overutilized() argument 5764 sched_idle_cpu(int cpu) sched_idle_cpu() argument 6276 find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, int cpu, int prev_cpu, int sd_flag) find_idlest_cpu() argument 6337 set_idle_cores(int cpu, int val) set_idle_cores() argument 6347 test_idle_cores(int cpu, bool def) test_idle_cores() argument 6369 int cpu; fair_update_idle_core() local 6400 int core, cpu; select_idle_core() local 6446 int cpu; select_idle_smt() local 6494 int cpu, nr = INT_MAX; select_idle_cpu() local 6552 int cpu, best_cpu = -1; select_idle_capacity() local 6584 asym_fits_capacity(int task_util, int cpu) asym_fits_capacity() argument 6711 cpu_util(int cpu) cpu_util() argument 6747 cpu_util_without(int cpu, struct task_struct *p) cpu_util_without() argument 6844 capacity_spare_without(int cpu, struct task_struct *p) capacity_spare_without() argument 6853 cpu_util_next(int cpu, struct task_struct *p, int dst_cpu) cpu_util_next() argument 6893 capacity_curr_of(int cpu) capacity_curr_of() argument 6913 int cpu; compute_energy() local 6995 int cpu, best_energy_cpu = prev_cpu; find_energy_efficient_cpu() local 7119 int cpu = smp_processor_id(); select_task_rq_fair() local 8015 int cpu; can_migrate_task() local 8454 int cpu = cpu_of(rq); fair_update_blocked_fair() local 8560 update_blocked_averages(int cpu) update_blocked_averages() argument 8641 scale_rt_capacity(int cpu) scale_rt_capacity() argument 8671 update_cpu_capacity(struct sched_domain *sd, int cpu) update_cpu_capacity() argument 8690 update_group_capacity(struct sched_domain *sd, int cpu) update_group_capacity() argument 8908 unsigned int cpu = rq->cpu; update_nohz_stats() local 9191 task_running_on_cpu(int cpu, struct task_struct *p) task_running_on_cpu() argument 9212 idle_cpu_without(int cpu, struct task_struct *p) idle_cpu_without() argument 10114 int cpu; should_we_balance() local 10635 int cpu = rq->cpu; rebalance_domains() local 10815 int nr_busy, i, cpu = rq->cpu; nohz_balancer_kick() local 10935 set_cpu_sd_state_busy(int cpu) set_cpu_sd_state_busy() argument 10966 set_cpu_sd_state_idle(int cpu) set_cpu_sd_state_idle() argument 10986 nohz_balance_enter_idle(int cpu) nohz_balance_enter_idle() argument 11931 int cpu; unregister_fair_sched_group() local 11955 init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, struct sched_entity *parent) init_tg_cfs_entry() argument 12117 print_cfs_stats(struct seq_file *m, int cpu) print_cfs_stats() argument …
/device/soc/rockchip/common/kernel/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd_wifi6/
    dhd_linux_lb.c | 134 uint32 napi_cpu = 0; /* cpu selected for napi rx processing */ in dhd_select_cpu_candidacy() 135 uint32 compl_cpu = 0; /* cpu selected for completion jobs */ in dhd_select_cpu_candidacy() 136 uint32 tx_cpu = 0; /* cpu selected for tx processing job */ in dhd_select_cpu_candidacy() 233 int dhd_cpu_startup_callback(unsigned int cpu) in dhd_cpu_startup_callback() argument 237 DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu)); in dhd_cpu_startup_callback() 238 DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]); in dhd_cpu_startup_callback() 239 cpumask_set_cpu(cpu, dhd->cpumask_curr_avail); in dhd_cpu_startup_callback() 245 int dhd_cpu_teardown_callback(unsigned int cpu) in dhd_cpu_teardown_callback() argument 249 DHD_INFO(("%s(): \r\n cpu… in dhd_cpu_teardown_callback() 260 unsigned long int cpu = (unsigned long int)hcpu; dhd_cpu_callback() local 666 dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu) dhd_lb_stats_update_histo() argument 690 int cpu; dhd_lb_stats_update_napi_histo() local 702 int cpu; dhd_lb_stats_update_txc_histo() local 714 int cpu; dhd_lb_stats_update_rxc_histo() local 820 int cpu; dhd_tx_compl_dispatcher_fn() local 872 int cpu; dhd_rx_compl_dispatcher_fn() local 902 int cpu; dhd_tx_dispatcher_fn() local …
/device/soc/rockchip/rk3588/kernel/include/trace/hooks/
    cpu.h | 3 #define TRACE_SYSTEM cpu 14 TP_PROTO(unsigned int cpu), 15 TP_ARGS(cpu)); 18 TP_PROTO(unsigned int cpu), 19 TP_ARGS(cpu));
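    DECLARE_HOOK() declares an Android GKI vendor hook, a restricted tracepoint that a vendor module can attach a handler to. A sketch of registering one, assuming the hook here is named android_vh_cpu_up (the fragment above does not show the hook names, so that name is an assumption):

    #include <linux/module.h>
    #include <trace/hooks/cpu.h>

    /* Vendor-hook handlers receive the opaque cookie passed at
     * registration first, then the hook's TP_PROTO arguments. */
    static void demo_cpu_up_handler(void *data, unsigned int cpu)
    {
            pr_info("vendor hook: cpu %u is being brought up\n", cpu);
    }

    static int __init demo_hook_init(void)
    {
            /* register_trace_android_vh_<name>() is generated by
             * DECLARE_HOOK(); hook name assumed as noted above. */
            return register_trace_android_vh_cpu_up(demo_cpu_up_handler,
                                                    NULL);
    }
    module_init(demo_hook_init);
    MODULE_LICENSE("GPL");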
    sched.h | 23 TP_PROTO(int cpu, struct task_struct *p, int *new_cpu), 24 TP_ARGS(cpu, p, new_cpu), 1); 74 TP_PROTO(int cpu, u64 clock, ktime_t rt_period, u64 rt_runtime, 76 TP_ARGS(cpu, clock, rt_period, rt_runtime, rt_period_timer_expires)); 121 TP_PROTO(int cpu, int *overutilized), 122 TP_ARGS(cpu, overutilized), 1); 170 TP_PROTO(int cpu), 171 TP_ARGS(cpu), 1); 174 TP_PROTO(int cpu), 175 TP_ARGS(cpu), …
    wqlockup.h | 15 TP_PROTO(int cpu, unsigned long pool_ts), 16 TP_ARGS(cpu, pool_ts));
/device/soc/rockchip/common/sdk_linux/drivers/cpufreq/
    cpufreq-dt.c | 10 #include <linux/cpu.h> 46 static struct private_data *cpufreq_dt_find_data(int cpu) in cpufreq_dt_find_data() argument 52 if (cpumask_test_cpu(cpu, priv->cpus)) { in cpufreq_dt_find_data() 80 int cpu = dev->id; in find_supply_name() local 90 if (!cpu) { in find_supply_name() 98 pp = of_find_property(np, "cpu-supply", NULL); in find_supply_name() 100 name = "cpu"; in find_supply_name() 104 dev_dbg(dev, "no regulator for cpu%d\n", cpu); in find_supply_name() 118 priv = cpufreq_dt_find_data(policy->cpu); in cpufreq_init() 200 dt_cpufreq_early_init(struct device *dev, int cpu) dt_cpufreq_early_init() argument 342 int ret, cpu; dt_cpufreq_probe() local …
    cpufreq_userspace.c | 33 pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); in cpufreq_set() 36 if (!per_cpu(cpu_is_managed, policy->cpu)) { in cpufreq_set() 79 pr_debug("started managing cpu %u\n", policy->cpu); in cpufreq_userspace_policy_start() 82 per_cpu(cpu_is_managed, policy->cpu) = 1; in cpufreq_userspace_policy_start() 92 pr_debug("managing cpu %u stopped\n", policy->cpu); in cpufreq_userspace_policy_stop() 95 per_cpu(cpu_is_managed, policy->cpu) = 0; in cpufreq_userspace_policy_stop() 105 pr_debug("limit event for cpu… in cpufreq_userspace_policy_limits() …
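    The governor's bookkeeping above rests on the per-CPU variable idiom: one cpu_is_managed flag per CPU, indexed with per_cpu(). A minimal sketch of that pattern with hypothetical "demo" helpers; the real governor additionally serializes these accesses with a mutex.

    #include <linux/percpu.h>

    /* One flag per CPU, as in cpufreq_userspace's cpu_is_managed. */
    static DEFINE_PER_CPU(unsigned int, demo_is_managed_flag);

    static void demo_mark_managed(unsigned int cpu, bool managed)
    {
            /* per_cpu() reaches another CPU's copy by number; no locking
             * is shown here. */
            per_cpu(demo_is_managed_flag, cpu) = managed;
    }

    static bool demo_is_managed(unsigned int cpu)
    {
            return per_cpu(demo_is_managed_flag, cpu);
    }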
/device/soc/rockchip/common/vendor/drivers/rockchip/
    rk_fiq_debugger.c | 20 #include <linux/cpu.h> 464 static void _rk_fiq_dbg_sdei_switch_cpu(unsigned int cpu, int cpu_off) in _rk_fiq_dbg_sdei_switch_cpu() argument 466 if (cpu == rk_fiq_sdei.cur_cpu) { in _rk_fiq_dbg_sdei_switch_cpu() 469 rk_fiq_sdei.sw_cpu = cpu; in _rk_fiq_dbg_sdei_switch_cpu() 472 sip_fiq_debugger_sdei_switch_cpu(rk_fiq_sdei.cur_cpu, cpu, cpu_off); in _rk_fiq_dbg_sdei_switch_cpu() 475 static void rk_fiq_dbg_sdei_switch_cpu(struct platform_device *pdev, unsigned int cpu) in rk_fiq_dbg_sdei_switch_cpu() argument 477 _rk_fiq_dbg_sdei_switch_cpu(cpu, 0); in rk_fiq_dbg_sdei_switch_cpu() 480 static int fiq_dbg_sdei_cpu_off_migrate_fiq(unsigned int cpu) in fiq_dbg_sdei_cpu_off_migrate_fiq() argument 485 if (rk_fiq_sdei.cur_cpu == cpu) { in fiq_dbg_sdei_cpu_off_migrate_fiq() 489 while (rk_fiq_sdei.cur_cpu == cpu… in fiq_dbg_sdei_cpu_off_migrate_fiq() 526 int ret, cpu, i; fiq_debugger_sdei_enable() local 610 rk_fiq_debugger_switch_cpu(struct platform_device *pdev, unsigned int cpu) rk_fiq_debugger_switch_cpu() argument 620 fiq_debugger_uart_irq_tf(struct pt_regs _pt_regs, u64 cpu) fiq_debugger_uart_irq_tf() argument 662 fiq_debugger_cpu_offine_migrate_fiq(unsigned int cpu) fiq_debugger_cpu_offine_migrate_fiq() argument …
/device/soc/rockchip/rk3588/kernel/drivers/soc/rockchip/
    rk_fiq_debugger.c | 20 #include <linux/cpu.h> 464 static void _rk_fiq_dbg_sdei_switch_cpu(unsigned int cpu, int cpu_off) in _rk_fiq_dbg_sdei_switch_cpu() argument 466 if (cpu == rk_fiq_sdei.cur_cpu) in _rk_fiq_dbg_sdei_switch_cpu() 468 rk_fiq_sdei.sw_cpu = cpu; in _rk_fiq_dbg_sdei_switch_cpu() 471 sip_fiq_debugger_sdei_switch_cpu(rk_fiq_sdei.cur_cpu, cpu, cpu_off); in _rk_fiq_dbg_sdei_switch_cpu() 475 unsigned int cpu) in rk_fiq_dbg_sdei_switch_cpu() 477 _rk_fiq_dbg_sdei_switch_cpu(cpu, 0); in rk_fiq_dbg_sdei_switch_cpu() 480 static int fiq_dbg_sdei_cpu_off_migrate_fiq(unsigned int cpu) in fiq_dbg_sdei_cpu_off_migrate_fiq() argument 485 if (rk_fiq_sdei.cur_cpu == cpu) { in fiq_dbg_sdei_cpu_off_migrate_fiq() 489 while (rk_fiq_sdei.cur_cpu == cpu… in fiq_dbg_sdei_cpu_off_migrate_fiq() 474 rk_fiq_dbg_sdei_switch_cpu(struct platform_device *pdev, unsigned int cpu) rk_fiq_dbg_sdei_switch_cpu() argument 526 int ret, cpu, i; fiq_debugger_sdei_enable() local 627 rk_fiq_debugger_switch_cpu(struct platform_device *pdev, unsigned int cpu) rk_fiq_debugger_switch_cpu() argument 638 fiq_debugger_uart_irq_tf(struct pt_regs _pt_regs, u64 cpu) fiq_debugger_uart_irq_tf() argument 682 fiq_debugger_cpu_offine_migrate_fiq(unsigned int cpu) fiq_debugger_cpu_offine_migrate_fiq() argument …
/device/soc/rockchip/common/sdk_linux/kernel/power/
    energy_model.c | 12 #include <linux/cpu.h> 189 int cpu, ret; in em_create_pd() local 212 for_each_cpu(cpu, cpus) in em_create_pd() 214 cpu_dev = get_cpu_device(cpu); in em_create_pd() 243 * @cpu : CPU to find the performance domain for 245 * Returns the performance domain to which @cpu belongs, or NULL if it doesn't 248 struct em_perf_domain *em_cpu_get(int cpu) in em_cpu_get() argument 252 cpu_dev = get_cpu_device(cpu); in em_cpu_get() 288 int cpu, ret; in em_dev_register_perf_domain() local 312 for_each_cpu(cpu, cpu… in em_dev_register_perf_domain() …
/device/soc/rockchip/common/vendor/drivers/cpufreq/
    rockchip-cpufreq.c | 17 #include <linux/cpu.h> 32 #include <linux/rockchip/cpu.h> 300 static struct cluster_info *rockchip_cluster_info_lookup(int cpu) in rockchip_cluster_info_lookup() argument 306 if (cpumask_test_cpu(cpu, &cluster->cpus)) { in rockchip_cluster_info_lookup() 408 static int rockchip_cpufreq_cluster_init(int cpu, struct cluster_info *cluster) in rockchip_cpufreq_cluster_init() argument 416 const char *const reg_names[] = {"cpu", "mem"}; in rockchip_cpufreq_cluster_init() 423 dev = get_cpu_device(cpu); in rockchip_cpufreq_cluster_init() 428 if (of_find_property(dev->of_node, "cpu-supply", NULL)) { in rockchip_cpufreq_cluster_init() 429 reg_name = "cpu"; in rockchip_cpufreq_cluster_init() 467 if (of_find_property(dev->of_node, "cpu… in rockchip_cpufreq_cluster_init() 599 int cpu, ret; rockchip_cpufreq_driver_init() local …
/device/soc/rockchip/common/vendor/drivers/firmware/
    rockchip_sip.c | 270 * don't change SIP_UARTDBG_FN to SIP_UARTDBG_CFG64 even when cpu is AArch32 285 static void (*sip_fiq_debugger_uart_irq_tf)(struct pt_regs _pt_regs, unsigned long cpu); 302 /* copy cpu context: x0 ~ spsr_el3 */ in sip_fiq_debugger_get_pt_regs() 370 static void sip_fiq_debugger_uart_irq_tf_cb(unsigned long sp_el1, unsigned long offset, unsigned long cpu) in sip_fiq_debugger_uart_irq_tf_cb() argument 379 sip_fiq_debugger_uart_irq_tf(fiq_pt_regs, cpu); in sip_fiq_debugger_uart_irq_tf_cb() 416 static ulong cpu_logical_map_mpidr(u32 cpu) in cpu_logical_map_mpidr() argument 422 if (cpu < 0x4) { in cpu_logical_map_mpidr() 424 mpidr = cpu; in cpu_logical_map_mpidr() 425 } else if (cpu < 0x8) { in cpu_logical_map_mpidr() 427 mpidr = 0x100 | (cpu… in cpu_logical_map_mpidr() 438 sip_fiq_debugger_switch_cpu(u32 cpu) sip_fiq_debugger_switch_cpu() argument …
/device/soc/rockchip/rk3588/kernel/drivers/firmware/
    rockchip_sip.c | 271 * don't change SIP_UARTDBG_FN to SIP_UARTDBG_CFG64 even when cpu is AArch32 287 unsigned long cpu); 305 /* copy cpu context: x0 ~ spsr_el3 */ in sip_fiq_debugger_get_pt_regs() 375 unsigned long cpu) in sip_fiq_debugger_uart_irq_tf_cb() 384 sip_fiq_debugger_uart_irq_tf(fiq_pt_regs, cpu); in sip_fiq_debugger_uart_irq_tf_cb() 424 static ulong cpu_logical_map_mpidr(u32 cpu) in cpu_logical_map_mpidr() argument 430 if (cpu < 4) in cpu_logical_map_mpidr() 432 mpidr = cpu; in cpu_logical_map_mpidr() 433 else if (cpu < 8) in cpu_logical_map_mpidr() 435 mpidr = 0x100 | (cpu… in cpu_logical_map_mpidr() 373 sip_fiq_debugger_uart_irq_tf_cb(unsigned long sp_el1, unsigned long offset, unsigned long cpu) sip_fiq_debugger_uart_irq_tf_cb() argument 445 sip_fiq_debugger_switch_cpu(u32 cpu) sip_fiq_debugger_switch_cpu() argument …
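    Both copies of rockchip_sip.c translate a logical CPU number into an MPIDR affinity value before asking the trusted firmware to move the debug FIQ: on this two-cluster big.LITTLE layout, CPUs 0-3 map to cluster 0 (Aff1 = 0) and CPUs 4-7 to cluster 1 (Aff1 = 1, bit 8 set). A sketch of the mapping; the "- 4" rebase in the second branch is an assumption, since both excerpts truncate after "0x100 | (cpu".

    #include <linux/types.h>

    static unsigned long demo_cpu_to_mpidr(u32 cpu)
    {
            if (cpu < 4)
                    return cpu;                 /* cluster 0: 0x000..0x003 */
            if (cpu < 8)
                    return 0x100 | (cpu - 4);   /* cluster 1: 0x100..0x103,
                                                 * rebase assumed */
            return (unsigned long)-1;           /* no such core */
    }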
/device/soc/rockchip/common/sdk_linux/include/linux/
    energy_model.h | 100 struct em_perf_domain *em_cpu_get(int cpu); 124 int i, cpu; in em_cpu_energy() local 135 cpu = cpumask_first(to_cpumask(pd->cpus)); in em_cpu_energy() 136 scale_cpu = arch_scale_cpu_capacity(cpu); in em_cpu_energy() 223 static inline struct em_perf_domain *em_cpu_get(int cpu) in em_cpu_get() argument
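    em_cpu_energy() estimates a performance domain's energy in two steps: pick the lowest performance state whose frequency covers the domain's maximum utilization, then scale that state's pre-computed cost (power * max_freq / freq) by total utilization over CPU capacity. A simplified, self-contained sketch of that arithmetic; names prefixed demo_ are hypothetical stand-ins for the real em_perf_domain structures.

    /* One entry of a perf domain's state table, sorted by frequency. */
    struct demo_perf_state {
            unsigned long frequency;    /* kHz */
            unsigned long cost;         /* power * max_freq / frequency */
    };

    static unsigned long demo_em_energy(const struct demo_perf_state *table,
                                        int nr_states,
                                        unsigned long max_util,
                                        unsigned long sum_util,
                                        unsigned long scale_cpu)
    {
            unsigned long freq, max_freq = table[nr_states - 1].frequency;
            int i;

            /* Frequency a schedutil-like governor would pick for max_util
             * (scale_cpu is arch_scale_cpu_capacity() of a domain CPU). */
            freq = max_freq * max_util / scale_cpu;

            /* Lowest state that satisfies the request. */
            for (i = 0; i < nr_states; i++)
                    if (table[i].frequency >= freq)
                            break;
            if (i == nr_states)
                    i = nr_states - 1;

            /* cost already folds in the frequency, so energy reduces to: */
            return table[i].cost * sum_util / scale_cpu;
    }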
/device/soc/rockchip/common/sdk_linux/arch/arm64/kernel/
    cpuinfo.c | 9 #include <asm/cpu.h> 266 static int cpuid_cpu_online(unsigned int cpu) in cpuid_cpu_online() argument 270 struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu); in cpuid_cpu_online() 272 dev = get_cpu_device(cpu); in cpuid_cpu_online() 289 static int cpuid_cpu_offline(unsigned int cpu) in cpuid_cpu_offline() argument 292 struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu); in cpuid_cpu_offline() 294 dev = get_cpu_device(cpu); in cpuid_cpu_offline() 308 int cpu, ret; in cpuinfo_regs_init() local 310 for_each_possible_cpu(cpu) in cpuinfo_regs_init() 312 struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu); in cpuinfo_regs_init() 328 unsigned int cpu = smp_processor_id(); cpuinfo_detect_icache_policy() local …
/device/soc/rockchip/rk3588/kernel/drivers/cpufreq/
    rockchip-cpufreq.c | 17 #include <linux/cpu.h> 32 #include <linux/rockchip/cpu.h> 314 static struct cluster_info *rockchip_cluster_info_lookup(int cpu) in rockchip_cluster_info_lookup() argument 319 if (cpumask_test_cpu(cpu, &cluster->cpus)) in rockchip_cluster_info_lookup() 449 static int rockchip_cpufreq_cluster_init(int cpu, struct cluster_info *cluster) in rockchip_cpufreq_cluster_init() argument 457 const char * const reg_names[] = {"cpu", "mem"}; in rockchip_cpufreq_cluster_init() 464 dev = get_cpu_device(cpu); in rockchip_cpufreq_cluster_init() 468 if (of_find_property(dev->of_node, "cpu-supply", NULL)) in rockchip_cpufreq_cluster_init() 469 reg_name = "cpu"; in rockchip_cpufreq_cluster_init() 511 if (of_find_property(dev->of_node, "cpu… in rockchip_cpufreq_cluster_init() 639 int cpu, ret; rockchip_cpufreq_driver_init() local …
/device/soc/hisilicon/common/platform/wifi/hi3881v100/driver/oal/
    oal_timer.h | 140 Description: (re)start the timer on the specified cpu; the timer must be inactive when this is called, otherwise the system will crash 146 static inline hi_void oal_timer_start_on(oal_timer_list_stru *pst_timer, unsigned long ui_delay, hi_s32 cpu) in oal_timer_start_on() argument 149 add_timer_on(pst_timer, cpu); in oal_timer_start_on() 221 Description: (re)start the timer on the specified cpu; the timer must be inactive when this is called, otherwise the system will crash 227 static inline hi_void oal_timer_start_on(oal_timer_list_stru *pst_timer, hi_u64 ui_delay, hi_s32 cpu) in oal_timer_start_on() argument 229 hi_unref_param(cpu); in oal_timer_start_on()
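    oal_timer_start_on() is a thin wrapper over the kernel's add_timer_on(), which arms a timer so that it expires on a specific CPU; per the translated comment it must only be called while the timer is inactive. A minimal usage sketch with a hypothetical callback:

    #include <linux/jiffies.h>
    #include <linux/smp.h>
    #include <linux/timer.h>

    static struct timer_list demo_timer;

    static void demo_timer_fn(struct timer_list *t)
    {
            pr_info("demo timer fired on cpu %d\n", smp_processor_id());
    }

    /* One-time initialisation, e.g. from the driver's probe path:
     *   timer_setup(&demo_timer, demo_timer_fn, 0);
     */

    static void demo_start_on(int cpu, unsigned long delay_jiffies)
    {
            /* add_timer_on() must only see an inactive timer: arming an
             * already-pending timer corrupts the timer wheel, which is
             * the crash the OAL comment warns about. */
            demo_timer.expires = jiffies + delay_jiffies;
            add_timer_on(&demo_timer, cpu);    /* queue on @cpu's base */
    }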
/device/soc/rockchip/common/sdk_linux/drivers/irqchip/
    irq-gic-v3-its.c | 11 #include <linux/cpu.h> 195 #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) 275 int cpu; in irq_to_cpuid_lock() local 278 cpu = vpe_to_cpuid_lock(map->vpe, flags); in irq_to_cpuid_lock() 282 cpu = its_dev->event_map.col_map[its_get_event_id(d)]; in irq_to_cpuid_lock() 287 return cpu; in irq_to_cpuid_lock() 1423 int cpu; in direct_lpi_inv() local 1438 cpu = irq_to_cpuid_lock(d, &flags); in direct_lpi_inv() 1439 raw_spin_lock(&gic_data_rdist_cpu(cpu)… in direct_lpi_inv() 1510 its_read_lpi_count(struct irq_data *d, int cpu) its_read_lpi_count() argument 1519 its_inc_lpi_count(struct irq_data *d, int cpu) its_inc_lpi_count() argument 1528 its_dec_lpi_count(struct irq_data *d, int cpu) its_dec_lpi_count() argument 1539 unsigned int cpu = nr_cpu_ids, tmp; cpumask_pick_least_loaded() local 1562 int cpu, node; its_select_cpu() local 1636 int cpu, prev_cpu; its_set_affinity() local 2702 int cpu; inherit_vpe_l1_table_from_rd() local 2738 allocate_vpe_l2_table(int cpu, u32 id) allocate_vpe_l2_table() argument 2997 int err, cpu; allocate_lpi_tables() local 3195 int cpu = smp_processor_id(); its_cpu_init_collection() local 3341 int cpu; its_alloc_vpe_table() local 3608 int cpu; its_irq_domain_activate() local 3800 int from, cpu = cpumask_first(mask_val); its_vpe_set_affinity() local 4137 int cpu; its_vpe_4_1_invall() local 4264 int cpu; its_sgi_get_irqchip_state() local …
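    The its_*_lpi_count() and cpumask_pick_least_loaded() hits show how the ITS driver balances LPIs: it keeps a per-CPU count of routed interrupts and targets the allowed online CPU with the smallest count. A generic sketch of that selection, assuming a simple atomic per-CPU counter in place of the driver's rdist bookkeeping:

    #include <linux/atomic.h>
    #include <linux/cpumask.h>
    #include <linux/limits.h>
    #include <linux/percpu.h>

    /* One interrupt count per CPU, bumped whenever an IRQ is targeted
     * there (hypothetical stand-in for the driver's per-rdist count). */
    static DEFINE_PER_CPU(atomic_t, demo_lpi_count);

    static unsigned int demo_pick_least_loaded(const struct cpumask *mask)
    {
            unsigned int cpu, best = nr_cpu_ids;
            unsigned int best_count = UINT_MAX;

            for_each_cpu_and(cpu, mask, cpu_online_mask) {
                    unsigned int count =
                            atomic_read(per_cpu_ptr(&demo_lpi_count, cpu));

                    if (count < best_count) {
                            best_count = count;
                            best = cpu;
                    }
            }
            return best;    /* nr_cpu_ids if the mask had no online CPU */
    }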