Searched refs:sched (Results 1 - 25 of 6815, page 1 of 273) sorted by relevance

/kernel/linux/linux-5.10/drivers/gpu/drm/scheduler/
sched_main.c
49 #include <linux/sched.h>
51 #include <uapi/linux/sched/types.h>
72 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, in drm_sched_rq_init() argument
78 rq->sched = sched; in drm_sched_rq_init()
95 atomic_inc(&rq->sched->score); in drm_sched_rq_add_entity()
114 atomic_dec(&rq->sched->score); in drm_sched_rq_remove_entity()
176 struct drm_gpu_scheduler *sched = entity->rq->sched; in drm_sched_dependency_optimized() local
184 if (s_fence && s_fence->sched in drm_sched_dependency_optimized()
198 drm_sched_start_timeout(struct drm_gpu_scheduler *sched) drm_sched_start_timeout() argument
212 drm_sched_fault(struct drm_gpu_scheduler *sched) drm_sched_fault() argument
230 drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched) drm_sched_suspend_timeout() argument
256 drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, unsigned long remaining) drm_sched_resume_timeout() argument
272 struct drm_gpu_scheduler *sched = s_job->sched; drm_sched_job_begin() local
282 struct drm_gpu_scheduler *sched; drm_sched_job_timedout() local
334 struct drm_gpu_scheduler *sched = bad->sched; drm_sched_increase_karma() local
377 drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) drm_sched_stop() argument
455 drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) drm_sched_start() argument
501 drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) drm_sched_resubmit_jobs() argument
552 struct drm_gpu_scheduler *sched; drm_sched_job_init() local
593 drm_sched_ready(struct drm_gpu_scheduler *sched) drm_sched_ready() argument
605 drm_sched_wakeup(struct drm_gpu_scheduler *sched) drm_sched_wakeup() argument
619 drm_sched_select_entity(struct drm_gpu_scheduler *sched) drm_sched_select_entity() argument
649 struct drm_gpu_scheduler *sched = s_fence->sched; drm_sched_process_job() local
671 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) drm_sched_get_cleanup_job() argument
715 struct drm_gpu_scheduler *sched, *picked_sched = NULL; drm_sched_pick_best() local
746 drm_sched_blocked(struct drm_gpu_scheduler *sched) drm_sched_blocked() argument
765 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; drm_sched_main() local
842 drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, unsigned hw_submission, unsigned hang_limit, long timeout, const char *name) drm_sched_init() argument
888 drm_sched_fini(struct drm_gpu_scheduler *sched) drm_sched_fini() argument
[all...]
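
The drm_sched_init() signature captured above (5.10) takes the backend ops, the number of in-flight hardware submissions, a hang limit, and a job timeout. A minimal bring-up sketch under those assumptions; the my_* names are hypothetical, the ops bodies are placeholders, and a real driver would also wire up .timedout_job:

    #include <drm/gpu_scheduler.h>
    #include <linux/jiffies.h>

    /* Placeholder backend: hand a job to the hardware, free it later. */
    static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
    {
            return NULL;            /* would return the hardware fence */
    }

    static void my_free_job(struct drm_sched_job *sched_job)
    {
            /* release driver-side job state here */
    }

    static const struct drm_sched_backend_ops my_ops = {
            .run_job  = my_run_job,
            .free_job = my_free_job,
    };

    static int my_ring_init(struct drm_gpu_scheduler *sched)
    {
            /* 16 jobs in flight, karma limit 2, 5 s job timeout */
            return drm_sched_init(sched, &my_ops, 16, 2,
                                  msecs_to_jiffies(5000), "my-ring");
    }
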
sched_entity.c
44 * @num_sched_list: number of drm sched in sched_list
87 * drm_sched_entity_modify_sched - Modify sched of an entity
91 * @num_sched_list: number of drm sched in sched_list
155 struct drm_gpu_scheduler *sched; in drm_sched_entity_flush() local
162 sched = entity->rq->sched; in drm_sched_entity_flush()
170 sched->job_scheduled, in drm_sched_entity_flush()
174 wait_event_killable(sched->job_scheduled, in drm_sched_entity_flush()
208 job->sched->ops->free_job(job); in drm_sched_entity_kill_jobs_cb()
229 while ((f = job->sched in drm_sched_entity_kill_jobs()
266 struct drm_gpu_scheduler *sched = NULL; drm_sched_entity_fini() local
368 struct drm_gpu_scheduler *sched = entity->rq->sched; drm_sched_entity_add_dependency_cb() local
419 struct drm_gpu_scheduler *sched = entity->rq->sched; drm_sched_entity_pop_job() local
456 struct drm_gpu_scheduler *sched; drm_sched_entity_select_rq() local
[all...]
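
The entity helpers above are the submission side of the same API. A hedged sketch of the 5.10 flow, reusing the hypothetical scheduler from the previous sketch (note that drm_sched_entity_push_job() still takes the entity in 5.10; later kernels drop that argument):

    static int my_submit(struct drm_gpu_scheduler *sched,
                         struct drm_sched_entity *entity,
                         struct drm_sched_job *job)
    {
            int ret;

            ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
                                        &sched, 1, NULL);
            if (ret)
                    return ret;
            ret = drm_sched_job_init(job, entity, NULL);
            if (ret)
                    return ret;
            drm_sched_entity_push_job(job, entity);
            return 0;       /* drm_sched_entity_fini() on teardown */
    }
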
/kernel/linux/linux-6.6/drivers/gpu/drm/scheduler/
sched_main.c
53 #include <linux/sched.h>
56 #include <uapi/linux/sched/types.h>
122 * @sched: scheduler instance to associate with this run queue
127 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, in drm_sched_rq_init() argument
134 rq->sched = sched; in drm_sched_rq_init()
153 atomic_inc(rq->sched->score); in drm_sched_rq_add_entity()
175 atomic_dec(rq->sched->score); in drm_sched_rq_remove_entity()
268 struct drm_gpu_scheduler *sched = s_fence->sched; in drm_sched_job_done() local
300 drm_sched_start_timeout(struct drm_gpu_scheduler *sched) drm_sched_start_timeout() argument
314 drm_sched_fault(struct drm_gpu_scheduler *sched) drm_sched_fault() argument
333 drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched) drm_sched_suspend_timeout() argument
359 drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, unsigned long remaining) drm_sched_resume_timeout() argument
375 struct drm_gpu_scheduler *sched = s_job->sched; drm_sched_job_begin() local
385 struct drm_gpu_scheduler *sched; drm_sched_job_timedout() local
438 drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) drm_sched_stop() argument
519 drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) drm_sched_start() argument
576 drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) drm_sched_resubmit_jobs() argument
666 struct drm_gpu_scheduler *sched; drm_sched_job_arm() local
856 drm_sched_can_queue(struct drm_gpu_scheduler *sched) drm_sched_can_queue() argument
868 drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched) drm_sched_wakeup_if_can_queue() argument
882 drm_sched_select_entity(struct drm_gpu_scheduler *sched) drm_sched_select_entity() argument
911 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) drm_sched_get_cleanup_job() argument
957 struct drm_gpu_scheduler *sched, *picked_sched = NULL; drm_sched_pick_best() local
988 drm_sched_blocked(struct drm_gpu_scheduler *sched) drm_sched_blocked() argument
1007 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; drm_sched_main() local
1085 drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, unsigned hw_submission, unsigned hang_limit, long timeout, struct workqueue_struct *timeout_wq, atomic_t *score, const char *name, struct device *dev) drm_sched_init() argument
1133 drm_sched_fini(struct drm_gpu_scheduler *sched) drm_sched_fini() argument
1180 struct drm_gpu_scheduler *sched = bad->sched; drm_sched_increase_karma() local
[all...]
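
Relative to the 5.10 listing earlier, the 6.6 drm_sched_init() shown above gained three parameters: an optional timeout workqueue, an externally supplied score counter, and the device used for error reporting. Under the same hypothetical bring-up as before, the call becomes:

    ret = drm_sched_init(sched, &my_ops, 16, 2, msecs_to_jiffies(5000),
                         NULL,        /* timeout_wq: fall back to system_wq */
                         NULL,        /* score: let the scheduler keep its own */
                         "my-ring", dev);
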
sched_entity.c
44 * @num_sched_list: number of drm sched in sched_list
97 * drm_sched_entity_modify_sched - Modify sched of an entity
101 * @num_sched_list: number of drm sched in sched_list
170 job->sched->ops->free_job(job); in drm_sched_entity_kill_jobs_work()
261 struct drm_gpu_scheduler *sched; in drm_sched_entity_flush() local
268 sched = entity->rq->sched; in drm_sched_entity_flush()
276 sched->job_scheduled, in drm_sched_entity_flush()
280 wait_event_killable(sched->job_scheduled, in drm_sched_entity_flush()
361 drm_sched_wakeup_if_can_queue(entity->rq->sched); in drm_sched_entity_wakeup()
387 struct drm_gpu_scheduler *sched = entity->rq->sched; drm_sched_entity_add_dependency_cb() local
509 struct drm_gpu_scheduler *sched; drm_sched_entity_select_rq() local
[all...]
/kernel/linux/linux-6.6/net/mptcp/
sched.c
42 struct mptcp_sched_ops *sched, *ret = NULL; in mptcp_sched_find() local
44 list_for_each_entry_rcu(sched, &mptcp_sched_list, list) { in mptcp_sched_find()
45 if (!strcmp(sched->name, name)) { in mptcp_sched_find()
46 ret = sched; in mptcp_sched_find()
54 int mptcp_register_scheduler(struct mptcp_sched_ops *sched) in mptcp_register_scheduler() argument
56 if (!sched->get_subflow) in mptcp_register_scheduler()
60 if (mptcp_sched_find(sched->name)) { in mptcp_register_scheduler()
64 list_add_tail_rcu(&sched->list, &mptcp_sched_list); in mptcp_register_scheduler()
67 pr_debug("%s registered", sched->name); in mptcp_register_scheduler()
71 void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched) in mptcp_unregister_scheduler() argument
86 mptcp_init_sched(struct mptcp_sock *msk, struct mptcp_sched_ops *sched) mptcp_init_sched() argument
106 struct mptcp_sched_ops *sched = msk->sched; mptcp_release_sched() local
[all...]
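
mptcp_register_scheduler() above rejects ops without a get_subflow hook and refuses duplicate names via mptcp_sched_find(). A hedged sketch of a pluggable scheduler module against the 6.6 API; the my_* names are hypothetical, and the exact mptcp_sched_ops layout and header location should be checked against net/mptcp/protocol.h:

    #include <linux/module.h>
    #include "protocol.h"   /* net/mptcp/protocol.h; location is an assumption */

    static int my_get_subflow(struct mptcp_sock *msk,
                              struct mptcp_sched_data *data)
    {
            return 0;       /* choose the subflow(s) to send on; placeholder */
    }

    static struct mptcp_sched_ops my_mptcp_sched = {
            .name        = "my_sched",
            .owner       = THIS_MODULE,
            .get_subflow = my_get_subflow,
    };

    static int __init my_sched_module_init(void)
    {
            return mptcp_register_scheduler(&my_mptcp_sched);
    }

    static void __exit my_sched_module_exit(void)
    {
            mptcp_unregister_scheduler(&my_mptcp_sched);
    }

    module_init(my_sched_module_init);
    module_exit(my_sched_module_exit);
    MODULE_LICENSE("GPL");
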
/kernel/linux/linux-6.6/tools/perf/
builtin-sched.c
97 /* task state bitmask, copied from include/linux/sched.h */
147 int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
150 int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
153 int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
157 int (*fork_event)(struct perf_sched *sched, union perf_event *event,
160 int (*migrate_task_event)(struct perf_sched *sched,
254 u64 last_time; /* time of previous sched in/out event */
259 u64 dt_delay; /* time between wakeup and sched-in */
306 static void burn_nsecs(struct perf_sched *sched, u64 nsecs) in burn_nsecs() argument
312 } while (T1 + sched in burn_nsecs()
325 calibrate_run_measurement_overhead(struct perf_sched *sched) calibrate_run_measurement_overhead() argument
342 calibrate_sleep_measurement_overhead(struct perf_sched *sched) calibrate_sleep_measurement_overhead() argument
388 add_sched_event_run(struct perf_sched *sched, struct task_desc *task, u64 timestamp, u64 duration) add_sched_event_run() argument
411 add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task, u64 timestamp, struct task_desc *wakee) add_sched_event_wakeup() argument
438 add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task, u64 timestamp, u64 task_state __maybe_unused) add_sched_event_sleep() argument
448 register_pid(struct perf_sched *sched, unsigned long pid, const char *comm) register_pid() argument
494 print_task_traces(struct perf_sched *sched) print_task_traces() argument
506 add_cross_task_wakeups(struct perf_sched *sched) add_cross_task_wakeups() argument
521 perf_sched__process_event(struct perf_sched *sched, struct sched_atom *atom) perf_sched__process_event() argument
562 self_open_counters(struct perf_sched *sched, unsigned long cur_task) self_open_counters() argument
617 struct perf_sched *sched; global() member
625 struct perf_sched *sched = parms->sched; thread_func() local
774 wait_for_tasks(sched); global() variable
815 test_calibrations(struct perf_sched *sched) test_calibrations() argument
833 replay_wakeup_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine __maybe_unused) replay_wakeup_event() argument
854 replay_switch_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine __maybe_unused) replay_switch_event() argument
900 replay_fork_event(struct perf_sched *sched, union perf_event *event, struct machine *machine) replay_fork_event() argument
1039 thread_atoms_insert(struct perf_sched *sched, struct thread *thread) thread_atoms_insert() argument
1128 latency_switch_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) latency_switch_event() argument
1198 latency_runtime_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) latency_runtime_event() argument
1233 latency_wakeup_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) latency_wakeup_event() argument
1294 latency_migrate_task_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) latency_migrate_task_event() argument
1344 output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list) output_lat_thread() argument
1492 perf_sched__sort_lat(struct perf_sched *sched) perf_sched__sort_lat() argument
1518 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); process_sched_wakeup_event() local
1549 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid) map__findnew_thread() argument
1566 map_switch_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) map_switch_event() argument
1716 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); process_sched_switch_event() local
1742 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); process_sched_runtime_event() local
1755 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); perf_sched__process_fork_event() local
1772 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); process_sched_migrate_task_event() local
1832 perf_sched__read_events(struct perf_sched *sched) perf_sched__read_events() argument
1977 timehist_header(struct perf_sched *sched) timehist_header() argument
2048 timehist_print_sample(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct addr_location *al, struct thread *thread, u64 t, int state) timehist_print_sample() argument
2194 save_task_callchain(struct perf_sched *sched, struct perf_sample *sample, struct evsel *evsel, struct machine *machine) save_task_callchain() argument
2339 save_idle_callchain(struct perf_sched *sched, struct idle_thread_runtime *itr, struct perf_sample *sample) save_idle_callchain() argument
2355 timehist_get_thread(struct perf_sched *sched, struct perf_sample *sample, struct machine *machine, struct evsel *evsel) timehist_get_thread() argument
2402 timehist_skip_sample(struct perf_sched *sched, struct thread *thread, struct evsel *evsel, struct perf_sample *sample) timehist_skip_sample() argument
2425 timehist_print_wakeup_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine, struct thread *awakened) timehist_print_wakeup_event() argument
2474 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); timehist_sched_wakeup_event() local
2499 timehist_print_migration_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine, struct thread *migrated) timehist_print_migration_event() argument
2558 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); timehist_migrate_task_event() local
2586 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); timehist_sched_change_event() local
2774 struct perf_sched *sched; global() member
2857 timehist_print_summary(struct perf_sched *sched, struct perf_session *session) timehist_print_summary() argument
2972 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); perf_timehist__process_sample() local
2990 timehist_check_attr(struct perf_sched *sched, struct evlist *evlist) timehist_check_attr() argument
3013 perf_sched__timehist(struct perf_sched *sched) perf_sched__timehist() argument
3129 print_bad_events(struct perf_sched *sched) print_bad_events() argument
3192 perf_sched__merge_lat(struct perf_sched *sched) perf_sched__merge_lat() argument
3207 perf_sched__lat(struct perf_sched *sched) perf_sched__lat() argument
3246 setup_map_cpus(struct perf_sched *sched) setup_map_cpus() argument
3271 setup_color_pids(struct perf_sched *sched) setup_color_pids() argument
3288 setup_color_cpus(struct perf_sched *sched) setup_color_cpus() argument
3305 perf_sched__map(struct perf_sched *sched) perf_sched__map() argument
3323 perf_sched__replay(struct perf_sched *sched) perf_sched__replay() argument
3361 setup_sorting(struct perf_sched *sched, const struct option *options, const char * const usage_msg[]) setup_sorting() argument
3470 struct perf_sched sched = { cmd_sched() local
[all...]
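
The functions above back the documented perf sched subcommands. A typical workflow records scheduler tracepoints once and then analyzes the same perf.data several ways:

    perf sched record -- sleep 1    # capture sched_switch/wakeup/... events
    perf sched latency              # per-task wakeup-to-run latency (perf_sched__lat)
    perf sched map                  # per-CPU context-switch map (perf_sched__map)
    perf sched timehist             # time-ordered event history (perf_sched__timehist)
    perf sched replay               # re-simulate the workload (perf_sched__replay)
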
/kernel/linux/linux-5.10/tools/perf/
builtin-sched.c
96 /* task state bitmask, copied from include/linux/sched.h */
146 int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
149 int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
152 int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
156 int (*fork_event)(struct perf_sched *sched, union perf_event *event,
159 int (*migrate_task_event)(struct perf_sched *sched,
252 u64 last_time; /* time of previous sched in/out event */
257 u64 dt_delay; /* time between wakeup and sched-in */
304 static void burn_nsecs(struct perf_sched *sched, u64 nsecs) in burn_nsecs() argument
310 } while (T1 + sched in burn_nsecs()
323 calibrate_run_measurement_overhead(struct perf_sched *sched) calibrate_run_measurement_overhead() argument
340 calibrate_sleep_measurement_overhead(struct perf_sched *sched) calibrate_sleep_measurement_overhead() argument
386 add_sched_event_run(struct perf_sched *sched, struct task_desc *task, u64 timestamp, u64 duration) add_sched_event_run() argument
409 add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task, u64 timestamp, struct task_desc *wakee) add_sched_event_wakeup() argument
436 add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task, u64 timestamp, u64 task_state __maybe_unused) add_sched_event_sleep() argument
446 register_pid(struct perf_sched *sched, unsigned long pid, const char *comm) register_pid() argument
492 print_task_traces(struct perf_sched *sched) print_task_traces() argument
504 add_cross_task_wakeups(struct perf_sched *sched) add_cross_task_wakeups() argument
519 perf_sched__process_event(struct perf_sched *sched, struct sched_atom *atom) perf_sched__process_event() argument
560 self_open_counters(struct perf_sched *sched, unsigned long cur_task) self_open_counters() argument
615 struct perf_sched *sched; global() member
623 struct perf_sched *sched = parms->sched; thread_func() local
663 create_tasks(struct perf_sched *sched) create_tasks() argument
694 wait_for_tasks(struct perf_sched *sched) wait_for_tasks() argument
747 run_one_test(struct perf_sched *sched) run_one_test() argument
793 test_calibrations(struct perf_sched *sched) test_calibrations() argument
811 replay_wakeup_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine __maybe_unused) replay_wakeup_event() argument
832 replay_switch_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine __maybe_unused) replay_switch_event() argument
878 replay_fork_event(struct perf_sched *sched, union perf_event *event, struct machine *machine) replay_fork_event() argument
1017 thread_atoms_insert(struct perf_sched *sched, struct thread *thread) thread_atoms_insert() argument
1106 latency_switch_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) latency_switch_event() argument
1176 latency_runtime_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) latency_runtime_event() argument
1211 latency_wakeup_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) latency_wakeup_event() argument
1272 latency_migrate_task_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) latency_migrate_task_event() argument
1322 output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list) output_lat_thread() argument
1463 perf_sched__sort_lat(struct perf_sched *sched) perf_sched__sort_lat() argument
1489 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); process_sched_wakeup_event() local
1512 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid) map__findnew_thread() argument
1529 map_switch_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) map_switch_event() argument
1674 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); process_sched_switch_event() local
1700 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); process_sched_runtime_event() local
1713 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); perf_sched__process_fork_event() local
1730 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); process_sched_migrate_task_event() local
1790 perf_sched__read_events(struct perf_sched *sched) perf_sched__read_events() argument
1930 timehist_header(struct perf_sched *sched) timehist_header() argument
2001 timehist_print_sample(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct addr_location *al, struct thread *thread, u64 t, int state) timehist_print_sample() argument
2147 save_task_callchain(struct perf_sched *sched, struct perf_sample *sample, struct evsel *evsel, struct machine *machine) save_task_callchain() argument
2290 save_idle_callchain(struct perf_sched *sched, struct idle_thread_runtime *itr, struct perf_sample *sample) save_idle_callchain() argument
2300 timehist_get_thread(struct perf_sched *sched, struct perf_sample *sample, struct machine *machine, struct evsel *evsel) timehist_get_thread() argument
2347 timehist_skip_sample(struct perf_sched *sched, struct thread *thread, struct evsel *evsel, struct perf_sample *sample) timehist_skip_sample() argument
2370 timehist_print_wakeup_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine, struct thread *awakened) timehist_print_wakeup_event() argument
2419 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); timehist_sched_wakeup_event() local
2444 timehist_print_migration_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine, struct thread *migrated) timehist_print_migration_event() argument
2503 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); timehist_migrate_task_event() local
2531 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); timehist_sched_change_event() local
2717 struct perf_sched *sched; global() member
2816 timehist_print_summary(struct perf_sched *sched, struct perf_session *session) timehist_print_summary() argument
2936 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); perf_timehist__process_sample() local
2952 timehist_check_attr(struct perf_sched *sched, struct evlist *evlist) timehist_check_attr() argument
2975 perf_sched__timehist(struct perf_sched *sched) perf_sched__timehist() argument
3093 print_bad_events(struct perf_sched *sched) print_bad_events() argument
3156 perf_sched__merge_lat(struct perf_sched *sched) perf_sched__merge_lat() argument
3171 perf_sched__lat(struct perf_sched *sched) perf_sched__lat() argument
3210 setup_map_cpus(struct perf_sched *sched) setup_map_cpus() argument
3235 setup_color_pids(struct perf_sched *sched) setup_color_pids() argument
3252 setup_color_cpus(struct perf_sched *sched) setup_color_cpus() argument
3269 perf_sched__map(struct perf_sched *sched) perf_sched__map() argument
3287 perf_sched__replay(struct perf_sched *sched) perf_sched__replay() argument
3322 setup_sorting(struct perf_sched *sched, const struct option *options, const char * const usage_msg[]) setup_sorting() argument
3417 struct perf_sched sched = { cmd_sched() local
[all...]
/kernel/liteos_a/kernel/base/sched/
los_deadline.c
81 SchedEDF *sched = (SchedEDF *)&taskCB->sp; in EDFTimeSliceUpdate() local
102 if ((sched->finishTime > currTime) && (taskCB->timeSlice > 0)) { in EDFTimeSliceUpdate()
107 if (sched->finishTime <= currTime) { in EDFTimeSliceUpdate()
109 EDFDebugRecord((UINTPTR *)taskCB, sched->finishTime); in EDFTimeSliceUpdate()
114 (INT32)OS_SYS_CYCLE_TO_US((UINT64)sched->runTime), OS_SYS_CYCLE_TO_US(sched->period)); in EDFTimeSliceUpdate()
120 SchedEDF *sched = (SchedEDF *)&taskCB->sp; in EDFTimeSliceGet() local
122 return (endTime > sched->finishTime) ? sched->finishTime : endTime; in EDFTimeSliceGet()
151 SchedEDF *sched in EDFEnqueue() local
247 const SchedEDF *sched = (const SchedEDF *)&taskCB->sp; EDFWaitTimeGet() local
256 SchedEDF *sched = (SchedEDF *)&runTask->sp; EDFWait() local
300 SchedEDF *sched = (SchedEDF *)&taskCB->sp; EDFSchedParamModify() local
328 SchedEDF *sched = (SchedEDF *)&taskCB->sp; EDFSchedParamGet() local
372 SchedEDF *sched = (SchedEDF *)&taskCB->sp; EDFTaskSchedParamInit() local
[all...]
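
The EDF class above keeps per-task deadline state (runTime, period, finishTime) in SchedEDF. Its key invariant is visible in EDFTimeSliceGet(): a task's slice never extends past its deadline. Restated outside the kernel for clarity:

    /* Clamp a proposed slice end to the EDF deadline (finishTime),
     * mirroring EDFTimeSliceGet() in the listing above. */
    static inline unsigned long long
    edf_slice_end(unsigned long long end_time, unsigned long long finish_time)
    {
            return (end_time > finish_time) ? finish_time : end_time;
    }
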
los_priority.c
90 SchedHPF *sched = (SchedHPF *)&taskCB->sp; in HPFTimeSliceUpdate() local
97 if (sched->policy == LOS_SCHED_RR) { in HPFTimeSliceUpdate()
119 SchedHPF *sched = (SchedHPF *)&taskCB->sp; in HPFTimeSliceGet() local
122 timeSlice = (timeSlice <= OS_TIME_SLICE_MIN) ? sched->initTimeSlice : timeSlice; in HPFTimeSliceGet()
210 SchedHPF *sched = (SchedHPF *)&taskCB->sp; in PriQueInsert() local
212 switch (sched->policy) { in PriQueInsert()
215 PriQueHeadInsert(rq, sched->basePrio, &taskCB->pendList, sched->priority); in PriQueInsert()
217 sched->initTimeSlice = TimeSliceCalculate(rq, sched in PriQueInsert()
259 SchedHPF *sched = (SchedHPF *)&taskCB->sp; HPFDequeue() local
358 SchedHPF *sched = (SchedHPF *)&taskCB->sp; BasePriorityModify() local
378 SchedHPF *sched = (SchedHPF *)&taskCB->sp; HPFSchedParamModify() local
412 SchedHPF *sched = (SchedHPF *)&taskCB->sp; HPFSchedParamGet() local
521 SchedHPF *sched = (SchedHPF *)&taskCB->sp; HPFTaskSchedParamInit() local
[all...]
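
In the HPF (highest-priority-first) class above, LOS_SCHED_RR tasks run on a time slice that HPFTimeSliceGet() refills from initTimeSlice once it drops to the minimum. The refill rule, restated:

    /* Round-robin refill mirroring HPFTimeSliceGet(): an exhausted
     * slice is replaced by the task's initial slice. */
    static inline unsigned int
    hpf_time_slice(unsigned int remaining, unsigned int init_slice,
                   unsigned int min_slice)
    {
            return (remaining <= min_slice) ? init_slice : remaining;
    }
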
/kernel/linux/linux-5.10/drivers/slimbus/
sched.c
29 struct slim_sched *sched = &ctrl->sched; in slim_ctrl_clk_pause() local
38 mutex_lock(&sched->m_reconf); in slim_ctrl_clk_pause()
40 if (sched->clk_state == SLIM_CLK_ACTIVE) { in slim_ctrl_clk_pause()
41 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
49 ret = wait_for_completion_timeout(&sched->pause_comp, in slim_ctrl_clk_pause()
52 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
63 if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup) in slim_ctrl_clk_pause()
66 sched->clk_state = SLIM_CLK_ACTIVE; in slim_ctrl_clk_pause()
67 mutex_unlock(&sched in slim_ctrl_clk_pause()
[all...]
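
slim_ctrl_clk_pause() above serializes clock-state changes with the m_reconf mutex and waits for the controller's acknowledgement on a completion. A generic sketch of that mutex-plus-completion idiom, detached from the SLIMbus state machine (names are illustrative):

    #include <linux/mutex.h>
    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static DEFINE_MUTEX(reconf_lock);
    static DECLARE_COMPLETION(pause_comp);

    static int wait_for_pause_ack(void)
    {
            unsigned long left;

            /* Hold the lock across the wait so no other reconfiguration
             * can start until the pause is acknowledged or times out. */
            mutex_lock(&reconf_lock);
            left = wait_for_completion_timeout(&pause_comp,
                                               msecs_to_jiffies(100));
            mutex_unlock(&reconf_lock);
            return left ? 0 : -ETIMEDOUT;
    }
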
/kernel/linux/linux-6.6/drivers/slimbus/
sched.c
29 struct slim_sched *sched = &ctrl->sched; in slim_ctrl_clk_pause() local
38 mutex_lock(&sched->m_reconf); in slim_ctrl_clk_pause()
40 if (sched->clk_state == SLIM_CLK_ACTIVE) { in slim_ctrl_clk_pause()
41 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
49 ret = wait_for_completion_timeout(&sched->pause_comp, in slim_ctrl_clk_pause()
52 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
63 if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup) in slim_ctrl_clk_pause()
66 sched->clk_state = SLIM_CLK_ACTIVE; in slim_ctrl_clk_pause()
67 mutex_unlock(&sched in slim_ctrl_clk_pause()
[all...]
/kernel/linux/linux-5.10/net/netfilter/ipvs/
ip_vs_sched.c
61 struct ip_vs_scheduler *sched) in ip_vs_unbind_scheduler()
66 /* This check proves that old 'sched' was installed */ in ip_vs_unbind_scheduler()
70 if (sched->done_service) in ip_vs_unbind_scheduler()
71 sched->done_service(svc); in ip_vs_unbind_scheduler()
81 struct ip_vs_scheduler *sched; in ip_vs_sched_getbyname() local
87 list_for_each_entry(sched, &ip_vs_schedulers, n_list) { in ip_vs_sched_getbyname()
91 if (sched->module && !try_module_get(sched->module)) { in ip_vs_sched_getbyname()
97 if (strcmp(sched_name, sched->name)==0) { in ip_vs_sched_getbyname()
100 return sched; in ip_vs_sched_getbyname()
60 ip_vs_unbind_scheduler(struct ip_vs_service *svc, struct ip_vs_scheduler *sched) ip_vs_unbind_scheduler() argument
115 struct ip_vs_scheduler *sched; ip_vs_scheduler_get() local
145 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); ip_vs_scheduler_err() local
169 struct ip_vs_scheduler *sched; register_ip_vs_scheduler() local
[all...]
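
ip_vs_sched_getbyname() above resolves schedulers by name with a strcmp() walk and pins the owning module with try_module_get(). A hedged sketch of the registration side; the my_* names are hypothetical and the ops layout should be checked against include/net/ip_vs.h:

    #include <net/ip_vs.h>
    #include <linux/module.h>

    static struct ip_vs_dest *my_schedule(struct ip_vs_service *svc,
                                          const struct sk_buff *skb,
                                          struct ip_vs_iphdr *iph)
    {
            return NULL;    /* pick a real server; placeholder */
    }

    static struct ip_vs_scheduler my_scheduler = {
            .name     = "mysched",
            .module   = THIS_MODULE,
            .schedule = my_schedule,
    };

    /* register_ip_vs_scheduler() refuses names the lookup above
     * would already find. */
    static int __init my_ipvs_init(void)
    {
            return register_ip_vs_scheduler(&my_scheduler);
    }
    module_init(my_ipvs_init);
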
/kernel/linux/linux-6.6/net/netfilter/ipvs/
ip_vs_sched.c
61 struct ip_vs_scheduler *sched) in ip_vs_unbind_scheduler()
66 /* This check proves that old 'sched' was installed */ in ip_vs_unbind_scheduler()
70 if (sched->done_service) in ip_vs_unbind_scheduler()
71 sched->done_service(svc); in ip_vs_unbind_scheduler()
81 struct ip_vs_scheduler *sched; in ip_vs_sched_getbyname() local
87 list_for_each_entry(sched, &ip_vs_schedulers, n_list) { in ip_vs_sched_getbyname()
91 if (sched->module && !try_module_get(sched->module)) { in ip_vs_sched_getbyname()
97 if (strcmp(sched_name, sched->name)==0) { in ip_vs_sched_getbyname()
100 return sched; in ip_vs_sched_getbyname()
60 ip_vs_unbind_scheduler(struct ip_vs_service *svc, struct ip_vs_scheduler *sched) ip_vs_unbind_scheduler() argument
115 struct ip_vs_scheduler *sched; ip_vs_scheduler_get() local
145 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); ip_vs_scheduler_err() local
169 struct ip_vs_scheduler *sched; register_ip_vs_scheduler() local
[all...]
/kernel/linux/linux-6.6/net/sctp/
stream_sched.c
116 void sctp_sched_ops_register(enum sctp_sched_type sched, in sctp_sched_ops_register() argument
119 sctp_sched_ops[sched] = sched_ops; in sctp_sched_ops_register()
133 struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); in sctp_sched_free_sched() local
137 sched->unsched_all(stream); in sctp_sched_free_sched()
142 sched->free_sid(stream, i); in sctp_sched_free_sched()
149 enum sctp_sched_type sched) in sctp_sched_set_sched()
151 struct sctp_sched_ops *old = asoc->outqueue.sched; in sctp_sched_set_sched()
157 if (sched > SCTP_SS_MAX) in sctp_sched_set_sched()
160 n = sctp_sched_ops[sched]; in sctp_sched_set_sched()
167 asoc->outqueue.sched in sctp_sched_set_sched()
148 sctp_sched_set_sched(struct sctp_association *asoc, enum sctp_sched_type sched) sctp_sched_set_sched() argument
266 struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); sctp_sched_init_sid() local
[all...]
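
sctp_sched_set_sched() above swaps an association's stream scheduler and bounds-checks against SCTP_SS_MAX, with FCFS as the always-safe default. Userspace selects the same schedulers through the SCTP_STREAM_SCHEDULER socket option; a minimal sketch, assuming the uapi constants are visible to your toolchain:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/sctp.h> /* sctp_assoc_value, SCTP_SS_*, SCTP_STREAM_SCHEDULER */

    static int use_prio_scheduler(int fd)
    {
            struct sctp_assoc_value val = {
                    .assoc_id    = 0,            /* socket-wide */
                    .assoc_value = SCTP_SS_PRIO, /* or SCTP_SS_FCFS / SCTP_SS_RR */
            };

            return setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
                              &val, sizeof(val));
    }
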
/kernel/linux/linux-5.10/net/sctp/
stream_sched.c
121 void sctp_sched_ops_register(enum sctp_sched_type sched, in sctp_sched_ops_register() argument
124 sctp_sched_ops[sched] = sched_ops; in sctp_sched_ops_register()
135 enum sctp_sched_type sched) in sctp_sched_set_sched()
137 struct sctp_sched_ops *n = sctp_sched_ops[sched]; in sctp_sched_set_sched()
138 struct sctp_sched_ops *old = asoc->outqueue.sched; in sctp_sched_set_sched()
146 if (sched > SCTP_SS_MAX) in sctp_sched_set_sched()
165 asoc->outqueue.sched = n; in sctp_sched_set_sched()
188 asoc->outqueue.sched = &sctp_sched_fcfs; /* Always safe */ in sctp_sched_set_sched()
198 if (asoc->outqueue.sched == sctp_sched_ops[i]) in sctp_sched_get_sched()
218 return asoc->outqueue.sched in sctp_sched_set_value()
134 sctp_sched_set_sched(struct sctp_association *asoc, enum sctp_sched_type sched) sctp_sched_set_sched() argument
264 struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); sctp_sched_init_sid() local
[all...]
/kernel/linux/linux-5.10/include/drm/
gpu_scheduler.h
110 * @sched: the scheduler to which this rq belongs to.
120 struct drm_gpu_scheduler *sched; member
154 * @sched: the scheduler instance to which the job having this struct
157 struct drm_gpu_scheduler *sched; member
174 * @sched: the scheduler instance on which this job is scheduled.
192 struct drm_gpu_scheduler *sched; member
267 * @score: score to help loadbalancer pick a idle sched
293 int drm_sched_init(struct drm_gpu_scheduler *sched,
298 void drm_sched_fini(struct drm_gpu_scheduler *sched);
307 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
[all...]
/kernel/linux/linux-5.10/drivers/net/wireless/ath/ath9k/
channel.c
261 if (likely(sc->sched.channel_switch_time)) in ath_chanctx_check_active()
263 usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
311 ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
382 mod_timer(&sc->sched.timer, jiffies + timeout); in ath_chanctx_setup_timer()
399 if (ctx->active && sc->sched.extend_absence) { in ath_chanctx_handle_bmiss()
401 sc->sched.extend_absence = false; in ath_chanctx_handle_bmiss()
408 if (ctx->active && sc->sched.beacon_miss >= 2) { in ath_chanctx_handle_bmiss()
410 sc->sched.extend_absence = true; in ath_chanctx_handle_bmiss()
423 avp->offchannel_duration = sc->sched.offchannel_duration; in ath_chanctx_offchannel_noa()
451 if (sc->sched in ath_chanctx_set_periodic_noa()
[all...]
/kernel/linux/linux-6.6/drivers/net/wireless/ath/ath9k/
channel.c
261 if (likely(sc->sched.channel_switch_time)) in ath_chanctx_check_active()
263 usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
311 ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
382 mod_timer(&sc->sched.timer, jiffies + timeout); in ath_chanctx_setup_timer()
399 if (ctx->active && sc->sched.extend_absence) { in ath_chanctx_handle_bmiss()
401 sc->sched.extend_absence = false; in ath_chanctx_handle_bmiss()
408 if (ctx->active && sc->sched.beacon_miss >= 2) { in ath_chanctx_handle_bmiss()
410 sc->sched.extend_absence = true; in ath_chanctx_handle_bmiss()
423 avp->offchannel_duration = sc->sched.offchannel_duration; in ath_chanctx_offchannel_noa()
451 if (sc->sched in ath_chanctx_set_periodic_noa()
[all...]
/kernel/linux/build/test/moduletest/runtest/bin/cpuisolation_t/testcases/bin/
cpuisolation05.sh
29 eval_need=$(cat /sys/kernel/debug/tracing/events/sched/core_ctl_eval_need/enable)
30 set_busy=$(cat /sys/kernel/debug/tracing/events/sched/core_ctl_set_busy/enable)
31 update_nr_need=$(cat /sys/kernel/debug/tracing/events/sched/core_ctl_update_nr_need/enable)
38 echo 1 > /sys/kernel/debug/tracing/events/sched/core_ctl_eval_need/enable
39 echo 1 > /sys/kernel/debug/tracing/events/sched/core_ctl_set_busy/enable
40 echo 1 > /sys/kernel/debug/tracing/events/sched/core_ctl_update_nr_need/enable
43 bytrace -t 10 -b 32000 --overwrite sched ace app disk ohos graphic sync \
71 echo $eval_need > /sys/kernel/debug/tracing/events/sched/core_ctl_eval_need/enable
72 echo $set_busy > /sys/kernel/debug/tracing/events/sched/core_ctl_set_busy/enable
73 echo $update_nr_need > /sys/kernel/debug/tracing/events/sched/core_ctl_update_nr_nee
[all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
26 #include <linux/sched.h>
36 struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); in amdgpu_job_timedout()
45 __func__, s_job->sched->name); in amdgpu_job_timedout()
57 s_job->sched->name); in amdgpu_job_timedout()
63 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
82 drm_sched_suspend_timeout(&ring->sched); in amdgpu_job_timedout()
108 (*job)->base.sched = &adev->rings[0]->sched; in amdgpu_job_alloc()
162 struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); in amdgpu_job_free_resources()
242 job->base.sched in amdgpu_job_submit_direct()
321 amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) amdgpu_job_stop_all_jobs_on_sched() argument
[all...]
/kernel/linux/linux-6.6/include/drm/
gpu_scheduler.h
247 * @sched: the scheduler to which this rq belongs to.
258 struct drm_gpu_scheduler *sched; member
299 * @sched: the scheduler instance to which the job having this struct
302 struct drm_gpu_scheduler *sched; member
320 * @sched: the scheduler instance on which this job is scheduled.
339 struct drm_gpu_scheduler *sched; member
490 * @score: score to help loadbalancer pick a idle sched
521 int drm_sched_init(struct drm_gpu_scheduler *sched,
527 void drm_sched_fini(struct drm_gpu_scheduler *sched);
551 void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
[all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
26 #include <linux/sched.h>
33 struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); in amdgpu_job_timedout()
43 s_job->sched->name); in amdgpu_job_timedout()
49 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
57 drm_sched_suspend_timeout(&ring->sched); in amdgpu_job_timedout()
81 (*job)->base.sched = &adev->rings[0]->sched; in amdgpu_job_alloc()
113 struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); in amdgpu_job_free_resources()
117 /* use sched fence if available */ in amdgpu_job_free_resources()
170 job->base.sched in amdgpu_job_submit_direct()
247 amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) amdgpu_job_stop_all_jobs_on_sched() argument
[all...]
/kernel/linux/linux-5.10/crypto/
fcrypt.c
54 __be32 sched[ROUNDS]; member
226 #define F_ENCRYPT(R, L, sched) \
229 u.l = sched ^ R; \
245 F_ENCRYPT(X.r, X.l, ctx->sched[0x0]); in fcrypt_encrypt()
246 F_ENCRYPT(X.l, X.r, ctx->sched[0x1]); in fcrypt_encrypt()
247 F_ENCRYPT(X.r, X.l, ctx->sched[0x2]); in fcrypt_encrypt()
248 F_ENCRYPT(X.l, X.r, ctx->sched[0x3]); in fcrypt_encrypt()
249 F_ENCRYPT(X.r, X.l, ctx->sched[0x4]); in fcrypt_encrypt()
250 F_ENCRYPT(X.l, X.r, ctx->sched[0x5]); in fcrypt_encrypt()
251 F_ENCRYPT(X.r, X.l, ctx->sched[ in fcrypt_encrypt()
[all...]
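
fcrypt above is a 16-round Feistel cipher: each F_ENCRYPT() invocation folds one subkey from the sched[ROUNDS] key schedule into one half of the block, alternating halves each round. The generic shape, with a stand-in for fcrypt's table-driven round function:

    #include <stdint.h>

    /* Stand-in only; the real fcrypt f() is a set of S-box lookups. */
    static uint32_t F(uint32_t half, uint32_t subkey)
    {
            return half ^ subkey;
    }

    /* F_ENCRYPT(R, L, k) computes L ^= f(R ^ k); decryption is the same
     * walk with the key schedule reversed. */
    static void feistel16(uint32_t *l, uint32_t *r, const uint32_t sched[16])
    {
            int i;

            for (i = 0; i < 16; i += 2) {
                    *l ^= F(*r, sched[i]);
                    *r ^= F(*l, sched[i + 1]);
            }
    }
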
/kernel/linux/linux-6.6/crypto/
fcrypt.c
54 __be32 sched[ROUNDS]; member
223 #define F_ENCRYPT(R, L, sched) \
226 u.l = sched ^ R; \
242 F_ENCRYPT(X.r, X.l, ctx->sched[0x0]); in fcrypt_encrypt()
243 F_ENCRYPT(X.l, X.r, ctx->sched[0x1]); in fcrypt_encrypt()
244 F_ENCRYPT(X.r, X.l, ctx->sched[0x2]); in fcrypt_encrypt()
245 F_ENCRYPT(X.l, X.r, ctx->sched[0x3]); in fcrypt_encrypt()
246 F_ENCRYPT(X.r, X.l, ctx->sched[0x4]); in fcrypt_encrypt()
247 F_ENCRYPT(X.l, X.r, ctx->sched[0x5]); in fcrypt_encrypt()
248 F_ENCRYPT(X.r, X.l, ctx->sched[ in fcrypt_encrypt()
[all...]
/kernel/linux/linux-5.10/arch/x86/events/
core.c
23 #include <linux/sched/mm.h>
24 #include <linux/sched/clock.h>
767 static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints, in perf_sched_init() argument
772 memset(sched, 0, sizeof(*sched)); in perf_sched_init()
773 sched->max_events = num; in perf_sched_init()
774 sched->max_weight = wmax; in perf_sched_init()
775 sched->max_gp = gpmax; in perf_sched_init()
776 sched->constraints = constraints; in perf_sched_init()
783 sched in perf_sched_init()
788 perf_sched_save_state(struct perf_sched *sched) perf_sched_save_state() argument
797 perf_sched_restore_state(struct perf_sched *sched) perf_sched_restore_state() argument
819 __perf_sched_find_counter(struct perf_sched *sched) __perf_sched_find_counter() argument
874 perf_sched_find_counter(struct perf_sched *sched) perf_sched_find_counter() argument
888 perf_sched_next_event(struct perf_sched *sched) perf_sched_next_event() argument
919 struct perf_sched sched; perf_assign_events() local
[all...]
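
perf_assign_events() above schedules perf events onto hardware counters: constraints are bitmasks of permitted counters, events are visited in weight order, and perf_sched_save_state()/perf_sched_restore_state() implement backtracking when a greedy placement dead-ends. A toy restatement of the core search (weight ordering omitted):

    #include <stdbool.h>
    #include <stdint.h>

    /* Place event i on any counter its mask allows that is still free;
     * recurse, and undo the choice (backtrack) if a later event fails. */
    static bool assign(const uint64_t *masks, int n, int ncounters,
                       uint64_t used, int *out, int i)
    {
            int c;

            if (i == n)
                    return true;
            for (c = 0; c < ncounters; c++) {
                    uint64_t bit = 1ULL << c;

                    if ((masks[i] & bit) && !(used & bit)) {
                            out[i] = c;
                            if (assign(masks, n, ncounters,
                                       used | bit, out, i + 1))
                                    return true;
                    }
            }
            return false;
    }
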
