
Searched refs:rq (Results 1 - 25 of 1267) sorted by relevance


/kernel/linux/linux-6.6/drivers/gpu/drm/i915/
i915_request.c
115 struct i915_request *rq = to_request(fence); in i915_fence_release() local
117 GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT && in i915_fence_release()
118 rq->guc_prio != GUC_PRIO_FINI); in i915_fence_release()
120 i915_request_free_capture_list(fetch_and_zero(&rq->capture_list)); in i915_fence_release()
121 if (rq->batch_res) { in i915_fence_release()
122 i915_vma_resource_put(rq->batch_res); in i915_fence_release()
123 rq->batch_res = NULL; in i915_fence_release()
133 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
134 i915_sw_fence_fini(&rq->semaphore); in i915_fence_release()
140 * very careful in what rq in i915_fence_release()
192 __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk)) __notify_execute_cb() argument
205 __notify_execute_cb_irq(struct i915_request *rq) __notify_execute_cb_irq() argument
216 i915_request_notify_execute_cb_imm(struct i915_request *rq) i915_request_notify_execute_cb_imm() argument
221 __i915_request_fill(struct i915_request *rq, u8 val) __i915_request_fill() argument
245 i915_request_active_engine(struct i915_request *rq, struct intel_engine_cs **active) i915_request_active_engine() argument
277 __rq_init_watchdog(struct i915_request *rq) __rq_init_watchdog() argument
284 struct i915_request *rq = __rq_watchdog_expired() local
298 __rq_arm_watchdog(struct i915_request *rq) __rq_arm_watchdog() argument
317 __rq_cancel_watchdog(struct i915_request *rq) __rq_cancel_watchdog() argument
357 i915_request_retire(struct i915_request *rq) i915_request_retire() argument
419 i915_request_retire_upto(struct i915_request *rq) i915_request_retire_upto() argument
441 struct i915_request * const *port, *rq; __request_in_flight() local
502 __await_execution(struct i915_request *rq, struct i915_request *signal, gfp_t gfp) __await_execution() argument
553 __i915_request_skip(struct i915_request *rq) __i915_request_skip() argument
571 i915_request_set_error_once(struct i915_request *rq, int error) i915_request_set_error_once() argument
589 i915_request_mark_eio(struct i915_request *rq) i915_request_mark_eio() argument
762 i915_request_cancel(struct i915_request *rq, int error) i915_request_cancel() argument
811 struct i915_request *rq = container_of(fence, typeof(*rq), semaphore); semaphore_notify() local
827 struct i915_request *rq, *rn; retire_requests() local
839 struct i915_request *rq; request_alloc_slow() local
875 struct i915_request *rq = arg; __i915_request_ctor() local
898 struct i915_request *rq; __i915_request_create() local
1031 struct i915_request *rq; i915_request_create() local
1060 i915_request_await_start(struct i915_request *rq, struct i915_request *signal) i915_request_await_start() argument
1126 already_busywaiting(struct i915_request *rq) already_busywaiting() argument
1335 mark_external(struct i915_request *rq) mark_external() argument
1349 __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) __i915_request_await_external() argument
1359 i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) i915_request_await_external() argument
1384 is_parallel_rq(struct i915_request *rq) is_parallel_rq() argument
1389 request_to_parent(struct i915_request *rq) request_to_parent() argument
1404 i915_request_await_execution(struct i915_request *rq, struct dma_fence *fence) i915_request_await_execution() argument
1501 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) i915_request_await_dma_fence() argument
1569 i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps) i915_request_await_deps() argument
1621 i915_request_await_huc(struct i915_request *rq) i915_request_await_huc() argument
1636 __i915_request_ensure_parallel_ordering(struct i915_request *rq, struct intel_timeline *timeline) __i915_request_ensure_parallel_ordering() argument
1671 __i915_request_ensure_ordering(struct i915_request *rq, struct intel_timeline *timeline) __i915_request_ensure_ordering() argument
1720 __i915_request_add_to_timeline(struct i915_request *rq) __i915_request_add_to_timeline() argument
1787 __i915_request_commit(struct i915_request *rq) __i915_request_commit() argument
1817 __i915_request_queue_bh(struct i915_request *rq) __i915_request_queue_bh() argument
1823 __i915_request_queue(struct i915_request *rq, const struct i915_sched_attr *attr) __i915_request_queue() argument
1845 i915_request_add(struct i915_request *rq) i915_request_add() argument
1902 __i915_spin_request(struct i915_request * const rq, int state) __i915_spin_request() argument
1981 i915_request_wait_timeout(struct i915_request *rq, unsigned int flags, long timeout) i915_request_wait_timeout() argument
2120 i915_request_wait(struct i915_request *rq, unsigned int flags, long timeout) i915_request_wait() argument
2147 queue_status(const struct i915_request *rq) queue_status() argument
2158 run_status(const struct i915_request *rq) run_status() argument
2172 fence_status(const struct i915_request *rq) fence_status() argument
2183 i915_request_show(struct drm_printer *m, const struct i915_request *rq, const char *prefix, int indent) i915_request_show() argument
2235 engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq) engine_match_ring() argument
2242 match_ring(struct i915_request *rq) match_ring() argument
2262 i915_test_request_state(struct i915_request *rq) i915_test_request_state() argument
[all...]
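
The hits above trace i915's request lifecycle. The recurring move in i915_fence_release() is recovering the enclosing request from its embedded dma_fence via to_request(), which is a container_of() conversion. Below is a minimal userspace sketch of that embed-and-recover pattern; the struct names (fence, request) and the demo release flow are illustrative stand-ins, not the driver's actual types.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins for dma_fence / i915_request. */
struct fence {
    int refcount;
    void (*release)(struct fence *f);
};

struct request {
    struct fence base;   /* fence embedded first, as i915_request embeds its fence */
    int seqno;
};

/* container_of: recover the enclosing struct from a pointer to a member. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static struct request *to_request(struct fence *f)
{
    return container_of(f, struct request, base);
}

static void request_release(struct fence *f)
{
    struct request *rq = to_request(f);  /* same shape as i915_fence_release() */
    printf("releasing request seqno=%d\n", rq->seqno);
}

int main(void)
{
    struct request rq = { .base = { 1, request_release }, .seqno = 42 };
    rq.base.release(&rq.base);           /* the final fence put would trigger this */
    return 0;
}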
i915_request.h
64 #define RQ_TRACE(rq, fmt, ...) do { \
65 const struct i915_request *rq__ = (rq); \
276 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
378 void __i915_request_skip(struct i915_request *rq);
379 bool i915_request_set_error_once(struct i915_request *rq, int error);
380 struct i915_request *i915_request_mark_eio(struct i915_request *rq);
383 void __i915_request_queue(struct i915_request *rq,
385 void __i915_request_queue_bh(struct i915_request *rq);
387 bool i915_request_retire(struct i915_request *rq);
388 void i915_request_retire_upto(struct i915_request *rq);
400 i915_request_get(struct i915_request *rq) i915_request_get() argument
406 i915_request_get_rcu(struct i915_request *rq) i915_request_get_rcu() argument
412 i915_request_put(struct i915_request *rq) i915_request_put() argument
454 i915_request_signaled(const struct i915_request *rq) i915_request_signaled() argument
460 i915_request_is_active(const struct i915_request *rq) i915_request_is_active() argument
465 i915_request_in_priority_queue(const struct i915_request *rq) i915_request_in_priority_queue() argument
471 i915_request_has_initial_breadcrumb(const struct i915_request *rq) i915_request_has_initial_breadcrumb() argument
484 __hwsp_seqno(const struct i915_request *rq) __hwsp_seqno() argument
504 hwsp_seqno(const struct i915_request *rq) hwsp_seqno() argument
515 __i915_request_has_started(const struct i915_request *rq) __i915_request_has_started() argument
546 i915_request_started(const struct i915_request *rq) i915_request_started() argument
571 i915_request_is_running(const struct i915_request *rq) i915_request_is_running() argument
601 i915_request_is_ready(const struct i915_request *rq) i915_request_is_ready() argument
606 __i915_request_is_complete(const struct i915_request *rq) __i915_request_is_complete() argument
611 i915_request_completed(const struct i915_request *rq) i915_request_completed() argument
627 i915_request_mark_complete(struct i915_request *rq) i915_request_mark_complete() argument
633 i915_request_has_waitboost(const struct i915_request *rq) i915_request_has_waitboost() argument
638 i915_request_has_nopreempt(const struct i915_request *rq) i915_request_has_nopreempt() argument
644 i915_request_has_sentinel(const struct i915_request *rq) i915_request_has_sentinel() argument
649 i915_request_on_hold(const struct i915_request *rq) i915_request_on_hold() argument
654 i915_request_set_hold(struct i915_request *rq) i915_request_set_hold() argument
659 i915_request_clear_hold(struct i915_request *rq) i915_request_clear_hold() argument
665 i915_request_timeline(const struct i915_request *rq) i915_request_timeline() argument
674 i915_request_gem_context(const struct i915_request *rq) i915_request_gem_context() argument
681 i915_request_active_timeline(const struct i915_request *rq) i915_request_active_timeline() argument
693 i915_request_active_seqno(const struct i915_request *rq) i915_request_active_seqno() argument
[all...]
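
The header hits are mostly inline predicates (i915_request_completed(), i915_request_started()) that compare the request's seqno against the seqno the hardware has written to the status page (hwsp_seqno). Those comparisons must survive u32 wraparound; below is a minimal sketch of the wrap-safe idiom, with stand-in function names and the i915-specific plumbing elided.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Wrap-safe "has seq1 passed seq2?" test: correct across u32 wraparound
 * as long as the two values stay within 2^31 of each other. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) >= 0;
}

static bool request_completed(uint32_t hwsp_seqno, uint32_t rq_seqno)
{
    return seqno_passed(hwsp_seqno, rq_seqno);
}

int main(void)
{
    /* Near the wrap point, a naive "hwsp >= rq" test would give the wrong answer. */
    printf("%d\n", request_completed(5, 0xfffffff0u));  /* 1: already passed */
    printf("%d\n", request_completed(0xfffffff0u, 5));  /* 0: not yet */
    return 0;
}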
/kernel/linux/linux-5.10/kernel/sched/
pelt.h
7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
11 int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
13 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg() argument
15 return READ_ONCE(rq->avg_thermal.load_avg); in thermal_load_avg()
19 update_thermal_load_avg(u64 now, struct rq *r argument
24 thermal_load_avg(struct rq *rq) thermal_load_avg() argument
34 update_irq_load_avg(struct rq *rq, u64 running) update_irq_load_avg() argument
76 update_rq_clock_pelt(struct rq *rq, s64 delta) update_rq_clock_pelt() argument
115 update_idle_rq_clock_pelt(struct rq *rq) update_idle_rq_clock_pelt() argument
135 rq_clock_pelt(struct rq *rq) rq_clock_pelt() argument
168 update_rt_rq_load_avg(u64 now, struct rq *rq, int running) update_rt_rq_load_avg() argument
174 update_dl_rq_load_avg(u64 now, struct rq *rq, int running) update_dl_rq_load_avg() argument
180 update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) update_thermal_load_avg() argument
185 thermal_load_avg(struct rq *rq) thermal_load_avg() argument
191 update_irq_load_avg(struct rq *rq, u64 running) update_irq_load_avg() argument
196 rq_clock_pelt(struct rq *rq) rq_clock_pelt() argument
202 update_rq_clock_pelt(struct rq *rq, s64 delta) update_rq_clock_pelt() argument
205 update_idle_rq_clock_pelt(struct rq *rq) update_idle_rq_clock_pelt() argument
[all...]
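
pelt.h declares the PELT (per-entity load tracking) updaters for the rt, dl, thermal, and irq signals on a runqueue. The underlying idea is a geometrically decaying average: each elapsed period multiplies the accumulated signal by a constant y with y^32 = 1/2, so recent activity dominates. A simplified floating-point sketch of that decay follows; the kernel uses 32-bit fixed point with a precomputed lookup table, so the constant and loop here are purely illustrative.

#include <stdio.h>

/* Simplified PELT-style signal: each period, the running sum decays by y
 * (with y^32 = 1/2), and time spent running contributes fresh weight. */
static double decay(double val, int periods)
{
    double y = 0.978572;  /* approx 2^(-1/32); the kernel uses a fixed-point table */
    while (periods--)
        val *= y;
    return val;
}

int main(void)
{
    double load = 0.0;

    /* Run flat out for 64 periods: the signal saturates toward its ceiling. */
    for (int t = 0; t < 64; t++)
        load = decay(load, 1) + 1.0;
    printf("after running: %.1f\n", load);

    /* Idle for 64 periods: two half-lives, so the signal drops to a quarter. */
    for (int t = 0; t < 64; t++)
        load = decay(load, 1);
    printf("after idling:  %.1f\n", load);
    return 0;
}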
stats.h
9 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
11 if (rq) { in rq_sched_info_arrive()
12 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
13 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
21 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
23 if (rq) in rq_sched_info_depart()
24 rq->rq_cpu_time += delta; in rq_sched_info_depart()
28 rq_sched_info_dequeued(struct rq *r argument
44 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) rq_sched_info_arrive() argument
45 rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) rq_sched_info_dequeued() argument
46 rq_sched_info_depart(struct rq *rq, unsigned long long delta) rq_sched_info_depart() argument
122 struct rq *rq; psi_ttwu_dequeue() local
147 psi_task_tick(struct rq *rq) psi_task_tick() argument
162 psi_task_tick(struct rq *rq) psi_task_tick() argument
177 sched_info_dequeued(struct rq *rq, struct task_struct *t) sched_info_dequeued() argument
196 sched_info_arrive(struct rq *rq, struct task_struct *t) sched_info_arrive() argument
215 sched_info_queued(struct rq *rq, struct task_struct *t) sched_info_queued() argument
231 sched_info_depart(struct rq *rq, struct task_struct *t) sched_info_depart() argument
247 __sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) __sched_info_switch() argument
262 sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) sched_info_switch() argument
[all...]
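
stats.h's rq_sched_info_arrive()/rq_sched_info_depart() maintain per-runqueue schedstats: run_delay accumulates time tasks sat runnable but waiting, pcount counts arrivals on the CPU, and rq_cpu_time accumulates execution time. A toy userspace model of that bookkeeping is below; field and function names are shortened, and the kernel does this under the rq lock.

#include <stdio.h>

/* Userspace model of rq->rq_sched_info plus rq_cpu_time. */
struct sched_info {
    unsigned long long run_delay;  /* ns spent runnable but waiting */
    unsigned long long pcount;     /* times a task reached the CPU */
    unsigned long long cpu_time;   /* ns spent executing */
};

static void info_arrive(struct sched_info *si, unsigned long long waited)
{
    si->run_delay += waited;
    si->pcount++;
}

static void info_depart(struct sched_info *si, unsigned long long ran)
{
    si->cpu_time += ran;
}

int main(void)
{
    struct sched_info si = { 0 };
    info_arrive(&si, 300);   /* waited 300ns on the runqueue */
    info_depart(&si, 1000);  /* then ran for 1000ns */
    printf("delay=%llu count=%llu cpu=%llu\n",
           si.run_delay, si.pcount, si.cpu_time);
    return 0;
}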
deadline.c
30 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq()
32 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
38 struct rq *rq = task_rq(p); in dl_rq_of_se() local
40 return &rq->dl; in dl_rq_of_se()
112 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
221 struct rq *rq; in dl_change_utilization() local
228 rq = task_rq(p); in dl_change_utilization()
230 sub_running_bw(&p->dl, &rq in dl_change_utilization()
305 struct rq *rq = rq_of_dl_rq(dl_rq); task_non_contending() local
443 dl_overloaded(struct rq *rq) dl_overloaded() argument
448 dl_set_overload(struct rq *rq) dl_set_overload() argument
464 dl_clear_overload(struct rq *rq) dl_clear_overload() argument
510 enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) enqueue_pushable_dl_task() argument
540 dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) dequeue_pushable_dl_task() argument
561 has_pushable_dl_tasks(struct rq *rq) has_pushable_dl_tasks() argument
568 need_pull_dl_task(struct rq *rq, struct task_struct *prev) need_pull_dl_task() argument
579 deadline_queue_push_tasks(struct rq *rq) deadline_queue_push_tasks() argument
587 deadline_queue_pull_task(struct rq *rq) deadline_queue_pull_task() argument
594 dl_task_offline_migration(struct rq *rq, struct task_struct *p) dl_task_offline_migration() argument
667 enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) enqueue_pushable_dl_task() argument
672 dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) dequeue_pushable_dl_task() argument
686 need_pull_dl_task(struct rq *rq, struct task_struct *prev) need_pull_dl_task() argument
691 pull_dl_task(struct rq *rq) pull_dl_task() argument
695 deadline_queue_push_tasks(struct rq *rq) deadline_queue_push_tasks() argument
699 deadline_queue_pull_task(struct rq *rq) deadline_queue_pull_task() argument
723 struct rq *rq = rq_of_dl_rq(dl_rq); setup_new_dl_entity() local
766 struct rq *rq = rq_of_dl_rq(dl_rq); replenish_dl_entity() local
886 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq) update_dl_revised_wakeup() argument
950 struct rq *rq = rq_of_dl_rq(dl_rq); update_dl_entity() local
986 struct rq *rq = task_rq(p); start_dl_timer() local
1047 struct rq *rq; dl_task_timer() local
1176 struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se)); dl_check_constrained_dl() local
1215 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se) grub_reclaim() argument
1241 update_curr_dl(struct rq *rq) update_curr_dl() argument
1351 struct rq *rq; inactive_task_timer() local
1398 struct rq *rq = rq_of_dl_rq(dl_rq); inc_dl_deadline() local
1409 struct rq *rq = rq_of_dl_rq(dl_rq); dec_dl_deadline() local
1535 enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) enqueue_task_dl() argument
1616 __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) __dequeue_task_dl() argument
1622 dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) dequeue_task_dl() argument
1655 yield_task_dl(struct rq *rq) yield_task_dl() argument
1684 struct rq *rq; select_task_rq_dl() local
1732 struct rq *rq; migrate_task_rq_dl() local
1762 check_preempt_equal_dl(struct rq *rq, struct task_struct *p) check_preempt_equal_dl() argument
1783 balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf) balance_dl() argument
1805 check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags) check_preempt_curr_dl() argument
1825 start_hrtick_dl(struct rq *rq, struct task_struct *p) start_hrtick_dl() argument
1830 start_hrtick_dl(struct rq *rq, struct task_struct *p) start_hrtick_dl() argument
1835 set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) set_next_task_dl() argument
1849 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); set_next_task_dl() local
1864 pick_next_task_dl(struct rq *rq) pick_next_task_dl() argument
1880 put_prev_task_dl(struct rq *rq, struct task_struct *p) put_prev_task_dl() argument
1884 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); put_prev_task_dl() local
1897 task_tick_dl(struct rq *rq, struct task_struct *p, int queued) task_tick_dl() argument
1901 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); task_tick_dl() local
1925 pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) pick_dl_task() argument
1937 pick_earliest_pushable_dl_task(struct rq *rq, int cpu) pick_earliest_pushable_dl_task() argument
2048 pick_next_pushable_dl_task(struct rq *rq) pick_next_pushable_dl_task() argument
2069 find_lock_later_rq(struct task_struct *task, struct rq *rq) find_lock_later_rq() argument
2136 push_dl_task(struct rq *rq) push_dl_task() argument
2217 push_dl_tasks(struct rq *rq) push_dl_tasks() argument
2309 task_woken_dl(struct rq *rq, struct task_struct *p) task_woken_dl() argument
2325 struct rq *rq; set_cpus_allowed_dl() local
2355 rq_online_dl(struct rq *rq) rq_online_dl() argument
2366 rq_offline_dl(struct rq *rq) rq_offline_dl() argument
2387 struct rq *rq; dl_add_task_root_domain() local
2416 switched_from_dl(struct rq *rq, struct task_struct *p) switched_from_dl() argument
2470 switched_to_dl(struct rq *rq, struct task_struct *p) switched_to_dl() argument
2498 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); switched_to_dl() local
2506 prio_changed_dl(struct rq *rq, struct task_struct *p, int oldprio) prio_changed_dl() argument
[all...]
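
deadline.c implements SCHED_DEADLINE, and most of the hits above feed one decision: run the task with the earliest absolute deadline (EDF), using wrap-safe 64-bit comparisons in the style of dl_time_before(). A toy sketch of that selection rule follows; the kernel keeps runnable and pushable tasks in rbtrees and caches the leftmost element, so the linear scan here is for illustration only.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Wrap-safe deadline ordering, in the style of the kernel's dl_time_before(). */
static bool dl_time_before(uint64_t a, uint64_t b)
{
    return (int64_t)(a - b) < 0;
}

struct dl_task {
    const char *name;
    uint64_t deadline;  /* absolute deadline in ns */
};

/* EDF: among runnable deadline tasks, pick the earliest absolute deadline. */
static struct dl_task *pick_edf(struct dl_task *t, int n)
{
    struct dl_task *best = &t[0];
    for (int i = 1; i < n; i++)
        if (dl_time_before(t[i].deadline, best->deadline))
            best = &t[i];
    return best;
}

int main(void)
{
    struct dl_task tasks[] = {
        { "audio", 2000000 }, { "video", 1600000 }, { "ctrl", 5000000 },
    };
    printf("next: %s\n", pick_edf(tasks, 3)->name);  /* video */
    return 0;
}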
sched.h
87 struct rq;
146 extern void calc_global_load_tick(struct rq *this_rq);
147 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
153 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
341 /* nests inside the rq lock: */
674 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
744 /* Nests inside the rq lock: */
750 struct rq *r member
999 struct rq { global() struct
1207 cpu_of(struct rq *rq) cpu_of() argument
1220 update_idle_core(struct rq *rq) update_idle_core() argument
1227 update_idle_core(struct rq *rq) update_idle_core() argument
1240 __rq_clock_broken(struct rq *rq) __rq_clock_broken() argument
1272 assert_clock_updated(struct rq *rq) assert_clock_updated() argument
1281 rq_clock(struct rq *rq) rq_clock() argument
1289 rq_clock_task(struct rq *rq) rq_clock_task() argument
1310 rq_clock_thermal(struct rq *rq) rq_clock_thermal() argument
1315 rq_clock_skip_update(struct rq *rq) rq_clock_skip_update() argument
1325 rq_clock_cancel_skipupdate(struct rq *rq) rq_clock_cancel_skipupdate() argument
1354 rq_pin_lock(struct rq *rq, struct rq_flags *rf) rq_pin_lock() argument
1364 rq_unpin_lock(struct rq *rq, struct rq_flags *rf) rq_unpin_lock() argument
1374 rq_repin_lock(struct rq *rq, struct rq_flags *rf) rq_repin_lock() argument
1470 struct rq *rq; global() variable
1524 queue_balance_callback(struct rq *rq, struct callback_head *head, void (*func)(struct rq *rq)) queue_balance_callback() argument
1831 task_current(struct rq *rq, struct task_struct *p) task_current() argument
1836 task_running(struct rq *rq, struct task_struct *p) task_running() argument
1986 put_prev_task(struct rq *rq, struct task_struct *prev) put_prev_task() argument
1992 set_next_task(struct rq *rq, struct task_struct *next) set_next_task() argument
2017 sched_stop_runnable(struct rq *rq) sched_stop_runnable() argument
2022 sched_dl_runnable(struct rq *rq) sched_dl_runnable() argument
2027 sched_rt_runnable(struct rq *rq) sched_rt_runnable() argument
2032 sched_fair_runnable(struct rq *rq) sched_fair_runnable() argument
2051 idle_set_state(struct rq *rq, struct cpuidle_state *idle_state) idle_set_state() argument
2057 idle_get_state(struct rq *rq) idle_get_state() argument
2064 idle_set_state(struct rq *rq, struct cpuidle_state *idle_state) idle_set_state() argument
2069 idle_get_state(struct rq *rq) idle_get_state() argument
2117 sched_update_tick_dependency(struct rq *rq) sched_update_tick_dependency() argument
2131 sched_update_tick_dependency(struct rq *rq) sched_update_tick_dependency() argument
2134 add_nr_running(struct rq *rq, unsigned count) add_nr_running() argument
2153 sub_nr_running(struct rq *rq, unsigned count) sub_nr_running() argument
2179 hrtick_enabled(struct rq *rq) hrtick_enabled() argument
2192 hrtick_enabled(struct rq *rq) hrtick_enabled() argument
2465 nohz_balance_exit_idle(struct rq *rq) nohz_balance_exit_idle() argument
2479 struct rq *rq = cpu_rq(i); __dl_update() local
2550 cpufreq_update_util(struct rq *rq, unsigned int flags) cpufreq_update_util() argument
2569 cpufreq_update_util(struct rq *rq, unsigned int flags) cpufreq_update_util() argument
2575 uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id) uclamp_rq_get() argument
2581 uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value) uclamp_rq_set() argument
2587 uclamp_rq_is_idle(struct rq *rq) uclamp_rq_is_idle() argument
2610 uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p) uclamp_rq_util_with() argument
2673 uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p) uclamp_rq_util_with() argument
2689 uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id) uclamp_rq_get() argument
2698 uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value) uclamp_rq_set() argument
2703 uclamp_rq_is_idle(struct rq *rq) uclamp_rq_is_idle() argument
2768 cpu_bw_dl(struct rq *rq) cpu_bw_dl() argument
2773 cpu_util_dl(struct rq *rq) cpu_util_dl() argument
2778 cpu_util_cfs(struct rq *rq) cpu_util_cfs() argument
2790 cpu_util_rt(struct rq *rq) cpu_util_rt() argument
2804 cpu_util_irq(struct rq *rq) cpu_util_irq() argument
2819 cpu_util_irq(struct rq *rq) cpu_util_irq() argument
2856 membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm) membarrier_switch_mm() argument
2872 membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm) membarrier_switch_mm() argument
2932 walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta) walt_fixup_cum_window_demand() argument
2942 struct rq *rq = cpu_rq(src_cpu); same_freq_domain() local
2955 struct rq *rq = cpu_rq(cpu); is_reserved() local
2962 struct rq *rq = cpu_rq(cpu); mark_reserved() local
2969 struct rq *rq = cpu_rq(cpu); clear_reserved() local
3108 struct rq *rq = cpu_rq(cpu); cpu_util_freq_walt() local
3125 walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta) walt_fixup_cum_window_demand() argument
[all...]
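
sched.h is where struct rq itself lives, one instance per CPU, along with counter maintenance such as add_nr_running()/sub_nr_running(). A toy model of that per-CPU layout and bookkeeping is below; the struct is reduced to one counter, locking is omitted, and cpu_rq() is reimplemented over a plain array.

#include <stdio.h>

#define NR_CPUS 4

/* Toy per-CPU runqueue: the kernel's struct rq is far larger, but the
 * nr_running helpers are just counter maintenance under the rq lock. */
struct rq {
    int cpu;
    unsigned int nr_running;
};

static struct rq runqueues[NR_CPUS];

static struct rq *cpu_rq(int cpu) { return &runqueues[cpu]; }

static void add_nr_running(struct rq *rq, unsigned int count)
{
    rq->nr_running += count;
    /* the kernel also re-evaluates the nohz tick dependency here */
}

static void sub_nr_running(struct rq *rq, unsigned int count)
{
    rq->nr_running -= count;
}

int main(void)
{
    for (int i = 0; i < NR_CPUS; i++)
        runqueues[i].cpu = i;
    add_nr_running(cpu_rq(1), 2);
    sub_nr_running(cpu_rq(1), 1);
    printf("cpu1 nr_running=%u\n", cpu_rq(1)->nr_running);  /* 1 */
    return 0;
}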
rt.c
132 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
134 return rt_rq->rq; in rq_of_rt_rq()
142 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
146 return rt_rq->rq; in rq_of_rt_se()
171 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
175 rt_rq->rq = rq; in init_tg_rt_entry()
185 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
243 static inline struct rq *rq_of_rt_r
257 struct rq *rq = rq_of_rt_se(rt_se); rt_rq_of_se() local
274 need_pull_rt_task(struct rq *rq, struct task_struct *prev) need_pull_rt_task() argument
284 rt_overloaded(struct rq *rq) rt_overloaded() argument
289 rt_set_overload(struct rq *rq) rt_set_overload() argument
308 rt_clear_overload(struct rq *rq) rt_clear_overload() argument
365 has_pushable_tasks(struct rq *rq) has_pushable_tasks() argument
376 rt_queue_push_tasks(struct rq *rq) rt_queue_push_tasks() argument
384 rt_queue_pull_task(struct rq *rq) rt_queue_pull_task() argument
389 enqueue_pushable_task(struct rq *rq, struct task_struct *p) enqueue_pushable_task() argument
400 dequeue_pushable_task(struct rq *rq, struct task_struct *p) dequeue_pushable_task() argument
415 enqueue_pushable_task(struct rq *rq, struct task_struct *p) enqueue_pushable_task() argument
419 dequeue_pushable_task(struct rq *rq, struct task_struct *p) dequeue_pushable_task() argument
433 need_pull_rt_task(struct rq *rq, struct task_struct *prev) need_pull_rt_task() argument
442 rt_queue_push_tasks(struct rq *rq) rt_queue_push_tasks() argument
543 struct rq *rq = rq_of_rt_rq(rt_rq); sched_rt_rq_enqueue() local
644 struct rq *rq = rq_of_rt_rq(rt_rq); sched_rt_rq_enqueue() local
745 __disable_runtime(struct rq *rq) __disable_runtime() argument
827 __enable_runtime(struct rq *rq) __enable_runtime() argument
886 struct rq *rq = rq_of_rt_rq(rt_rq); do_sched_rt_period_timer() local
1019 update_curr_rt(struct rq *rq) update_curr_rt() argument
1066 struct rq *rq = rq_of_rt_rq(rt_rq); dequeue_top_rt_rq() local
1083 struct rq *rq = rq_of_rt_rq(rt_rq); enqueue_top_rt_rq() local
1107 struct rq *rq = rq_of_rt_rq(rt_rq); inc_rt_prio_smp() local
1123 struct rq *rq = rq_of_rt_rq(rt_rq); dec_rt_prio_smp() local
1373 struct rq *rq = rq_of_rt_se(rt_se); enqueue_rt_entity() local
1383 struct rq *rq = rq_of_rt_se(rt_se); dequeue_rt_entity() local
1400 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) enqueue_task_rt() argument
1414 dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) dequeue_task_rt() argument
1443 requeue_task_rt(struct rq *rq, struct task_struct *p, int head) requeue_task_rt() argument
1454 yield_task_rt(struct rq *rq) yield_task_rt() argument
1466 struct rq *rq; select_task_rq_rt() local
1540 check_preempt_equal_prio(struct rq *rq, struct task_struct *p) check_preempt_equal_prio() argument
1567 balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf) balance_rt() argument
1588 check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) check_preempt_curr_rt() argument
1613 set_next_task_rt(struct rq *rq, struct task_struct *p, bool first) set_next_task_rt() argument
1652 _pick_next_task_rt(struct rq *rq) _pick_next_task_rt() argument
1667 pick_next_task_rt(struct rq *rq) pick_next_task_rt() argument
1679 put_prev_task_rt(struct rq *rq, struct task_struct *p) put_prev_task_rt() argument
1683 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); put_prev_task_rt() local
1698 pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) pick_rt_task() argument
1711 pick_highest_pushable_task(struct rq *rq, int cpu) pick_highest_pushable_task() argument
1993 pick_next_pushable_task(struct rq *rq) pick_next_pushable_task() argument
2014 find_lock_lowest_rq(struct task_struct *task, struct rq *rq) find_lock_lowest_rq() argument
2071 push_rt_task(struct rq *rq) push_rt_task() argument
2151 push_rt_tasks(struct rq *rq) push_rt_tasks() argument
2258 tell_cpu_to_push(struct rq *rq) tell_cpu_to_push() argument
2296 struct rq *rq; rto_push_irq_work_func() local
2430 task_woken_rt(struct rq *rq, struct task_struct *p) task_woken_rt() argument
2444 rq_online_rt(struct rq *rq) rq_online_rt() argument
2455 rq_offline_rt(struct rq *rq) rq_offline_rt() argument
2469 switched_from_rt(struct rq *rq, struct task_struct *p) switched_from_rt() argument
2501 switched_to_rt(struct rq *rq, struct task_struct *p) switched_to_rt() argument
2508 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0); switched_to_rt() local
2532 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) prio_changed_rt() argument
2569 watchdog(struct rq *rq, struct task_struct *p) watchdog() argument
2593 watchdog(struct rq *rq, struct task_struct *p) watchdog() argument
2604 task_tick_rt(struct rq *rq, struct task_struct *p, int queued) task_tick_rt() argument
2609 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); task_tick_rt() local
2675 check_for_migration_rt(struct rq *rq, struct task_struct *p) check_for_migration_rt() argument
2720 rq, &rq->rt_active_balance_work); check_for_migration_rt() local
2725 get_rr_interval_rt(struct rq *rq, struct task_struct *task) get_rr_interval_rt() argument
[all...]
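
rt.c schedules fixed-priority tasks: each runqueue keeps a per-priority FIFO array plus a bitmap of non-empty priorities, and picking the next task reduces to a find-first-set-bit scan (what _pick_next_task_rt() does via sched_find_first_bit()). A toy sketch of that structure follows, with single-element queues and POSIX ffs() standing in for the kernel's bit helpers.

#include <stdio.h>
#include <string.h>
#include <strings.h>   /* ffs() */

#define MAX_RT_PRIO 100

/* Toy RT priority array: one queue head per priority plus a bitmap of
 * non-empty priorities; lower number means higher priority. */
struct rt_prio_array {
    unsigned int bitmap[(MAX_RT_PRIO + 31) / 32];
    const char *head[MAX_RT_PRIO];   /* first queued task per priority */
};

static void enqueue(struct rt_prio_array *a, int prio, const char *task)
{
    if (!a->head[prio])
        a->head[prio] = task;
    a->bitmap[prio / 32] |= 1u << (prio % 32);
}

/* Find the first set bit, i.e. the highest-priority non-empty queue. */
static const char *pick_next(struct rt_prio_array *a)
{
    for (int w = 0; w < (MAX_RT_PRIO + 31) / 32; w++) {
        int bit = ffs(a->bitmap[w]);
        if (bit)
            return a->head[w * 32 + bit - 1];
    }
    return NULL;
}

int main(void)
{
    struct rt_prio_array a;
    memset(&a, 0, sizeof(a));
    enqueue(&a, 50, "mid");
    enqueue(&a, 10, "high");
    printf("next: %s\n", pick_next(&a));   /* high */
    return 0;
}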
stop_task.c
21 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
23 return sched_stop_runnable(rq); in balance_stop()
28 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_stop() argument
33 static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) in set_next_task_stop() argument
35 stop->se.exec_start = rq_clock_task(rq); in set_next_task_stop()
38 static struct task_struct *pick_next_task_stop(struct rq *rq) in pick_next_task_stop() argument
48 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) enqueue_task_stop() argument
55 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) dequeue_task_stop() argument
61 yield_task_stop(struct rq *rq) yield_task_stop() argument
66 put_prev_task_stop(struct rq *rq, struct task_struct *prev) put_prev_task_stop() argument
93 task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) task_tick_stop() argument
97 switched_to_stop(struct rq *rq, struct task_struct *p) switched_to_stop() argument
103 prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio) prio_changed_stop() argument
108 update_curr_stop(struct rq *rq) update_curr_stop() argument
[all...]
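
stop_task.c is the highest scheduling class; its pick simply returns rq->stop when the stop task is runnable, which is why balance_stop() only needs sched_stop_runnable(). The sketch below models the class-chain walk that makes this work: classes are consulted from highest to lowest, and the first one to return a task wins. All names and the two-class table are illustrative.

#include <stdio.h>
#include <stddef.h>

struct rq;
struct sched_class {
    const char *name;
    const char *(*pick)(struct rq *rq);
};

struct rq {
    int stop_runnable;
};

/* The stop class returns its one task whenever it is runnable. */
static const char *pick_stop(struct rq *rq)
{
    return rq->stop_runnable ? "migration/N (stop task)" : NULL;
}

static const char *pick_fair(struct rq *rq)
{
    (void)rq;
    return "some CFS task";
}

int main(void)
{
    const struct sched_class classes[] = {
        { "stop", pick_stop }, { "fair", pick_fair },
    };
    struct rq rq = { .stop_runnable = 1 };

    /* Walk classes from highest to lowest, as pick_next_task() does. */
    for (size_t i = 0; i < 2; i++) {
        const char *t = classes[i].pick(&rq);
        if (t) {
            printf("picked by %s class: %s\n", classes[i].name, t);
            break;
        }
    }
    return 0;
}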
/kernel/linux/linux-6.6/kernel/sched/
pelt.h
7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
11 int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
13 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg() argument
15 return READ_ONCE(rq->avg_thermal.load_avg); in thermal_load_avg()
19 update_thermal_load_avg(u64 now, struct rq *r argument
24 thermal_load_avg(struct rq *rq) thermal_load_avg() argument
34 update_irq_load_avg(struct rq *rq, u64 running) update_irq_load_avg() argument
64 rq_clock_pelt(struct rq *rq) rq_clock_pelt() argument
73 _update_idle_rq_clock_pelt(struct rq *rq) _update_idle_rq_clock_pelt() argument
95 update_rq_clock_pelt(struct rq *rq, s64 delta) update_rq_clock_pelt() argument
133 update_idle_rq_clock_pelt(struct rq *rq) update_idle_rq_clock_pelt() argument
193 update_rt_rq_load_avg(u64 now, struct rq *rq, int running) update_rt_rq_load_avg() argument
199 update_dl_rq_load_avg(u64 now, struct rq *rq, int running) update_dl_rq_load_avg() argument
205 update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) update_thermal_load_avg() argument
210 thermal_load_avg(struct rq *rq) thermal_load_avg() argument
216 update_irq_load_avg(struct rq *rq, u64 running) update_irq_load_avg() argument
221 rq_clock_pelt(struct rq *rq) rq_clock_pelt() argument
227 update_rq_clock_pelt(struct rq *rq, s64 delta) update_rq_clock_pelt() argument
230 update_idle_rq_clock_pelt(struct rq *rq) update_idle_rq_clock_pelt() argument
[all...]
sched.h
105 struct rq;
164 extern void calc_global_load_tick(struct rq *this_rq);
165 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
171 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
364 /* nests inside the rq lock: */
709 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
790 /* Nests inside the rq lock: */
796 struct rq *r member
1066 struct rq { global() struct
1322 cpu_of(struct rq *rq) cpu_of() argument
1356 sched_core_enabled(struct rq *rq) sched_core_enabled() argument
1370 rq_lockp(struct rq *rq) rq_lockp() argument
1378 __rq_lockp(struct rq *rq) __rq_lockp() argument
1396 sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) sched_cpu_cookie_match() argument
1405 sched_core_cookie_match(struct rq *rq, struct task_struct *p) sched_core_cookie_match() argument
1428 sched_group_cookie_match(struct rq *rq, struct task_struct *p, struct sched_group *group) sched_group_cookie_match() argument
1458 sched_core_enabled(struct rq *rq) sched_core_enabled() argument
1468 rq_lockp(struct rq *rq) rq_lockp() argument
1473 __rq_lockp(struct rq *rq) __rq_lockp() argument
1478 sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) sched_cpu_cookie_match() argument
1483 sched_core_cookie_match(struct rq *rq, struct task_struct *p) sched_core_cookie_match() argument
1488 sched_group_cookie_match(struct rq *rq, struct task_struct *p, struct sched_group *group) sched_group_cookie_match() argument
1496 lockdep_assert_rq_held(struct rq *rq) lockdep_assert_rq_held() argument
1505 raw_spin_rq_lock(struct rq *rq) raw_spin_rq_lock() argument
1510 raw_spin_rq_lock_irq(struct rq *rq) raw_spin_rq_lock_irq() argument
1516 raw_spin_rq_unlock_irq(struct rq *rq) raw_spin_rq_unlock_irq() argument
1522 _raw_spin_rq_lock_irqsave(struct rq *rq) _raw_spin_rq_lock_irqsave() argument
1530 raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags) raw_spin_rq_unlock_irqrestore() argument
1544 update_idle_core(struct rq *rq) update_idle_core() argument
1551 update_idle_core(struct rq *rq) update_idle_core() argument
1590 struct rq *rq = task_rq(p); cfs_rq_of() local
1631 assert_clock_updated(struct rq *rq) assert_clock_updated() argument
1640 rq_clock(struct rq *rq) rq_clock() argument
1648 rq_clock_task(struct rq *rq) rq_clock_task() argument
1669 rq_clock_thermal(struct rq *rq) rq_clock_thermal() argument
1674 rq_clock_skip_update(struct rq *rq) rq_clock_skip_update() argument
1684 rq_clock_cancel_skipupdate(struct rq *rq) rq_clock_cancel_skipupdate() argument
1699 rq_clock_start_loop_update(struct rq *rq) rq_clock_start_loop_update() argument
1706 rq_clock_stop_loop_update(struct rq *rq) rq_clock_stop_loop_update() argument
1737 rq_pin_lock(struct rq *rq, struct rq_flags *rf) rq_pin_lock() argument
1750 rq_unpin_lock(struct rq *rq, struct rq_flags *rf) rq_unpin_lock() argument
1760 rq_repin_lock(struct rq *rq, struct rq_flags *rf) rq_repin_lock() argument
1783 raw_spin_rq_unlock(rq); global() variable
1792 raw_spin_rq_unlock(rq); global() variable
1808 raw_spin_rq_lock_irq(rq); global() variable
1816 raw_spin_rq_lock(rq); global() variable
1824 raw_spin_rq_lock(rq); global() variable
1841 raw_spin_rq_unlock_irq(rq); global() variable
1849 raw_spin_rq_unlock(rq); global() variable
1871 struct rq *rq; global() variable
1927 queue_balance_callback(struct rq *rq, struct balance_callback *head, void (*func)(struct rq *rq)) queue_balance_callback() argument
2116 sched_core_account_forceidle(struct rq *rq) sched_core_account_forceidle() argument
2124 sched_core_tick(struct rq *rq) sched_core_tick() argument
2132 sched_core_account_forceidle(struct rq *rq) sched_core_account_forceidle() argument
2134 sched_core_tick(struct rq *rq) sched_core_tick() argument
2283 task_current(struct rq *rq, struct task_struct *p) task_current() argument
2288 task_on_cpu(struct rq *rq, struct task_struct *p) task_on_cpu() argument
2456 put_prev_task(struct rq *rq, struct task_struct *prev) put_prev_task() argument
2462 set_next_task(struct rq *rq, struct task_struct *next) set_next_task() argument
2501 sched_stop_runnable(struct rq *rq) sched_stop_runnable() argument
2506 sched_dl_runnable(struct rq *rq) sched_dl_runnable() argument
2511 sched_rt_runnable(struct rq *rq) sched_rt_runnable() argument
2516 sched_fair_runnable(struct rq *rq) sched_fair_runnable() argument
2537 get_push_task(struct rq *rq) get_push_task() argument
2561 idle_set_state(struct rq *rq, struct cpuidle_state *idle_state) idle_set_state() argument
2567 idle_get_state(struct rq *rq) idle_get_state() argument
2574 idle_set_state(struct rq *rq, struct cpuidle_state *idle_state) idle_set_state() argument
2579 idle_get_state(struct rq *rq) idle_get_state() argument
2627 sched_update_tick_dependency(struct rq *rq) sched_update_tick_dependency() argument
2641 sched_update_tick_dependency(struct rq *rq) sched_update_tick_dependency() argument
2644 add_nr_running(struct rq *rq, unsigned count) add_nr_running() argument
2663 sub_nr_running(struct rq *rq, unsigned count) sub_nr_running() argument
2710 hrtick_enabled(struct rq *rq) hrtick_enabled() argument
2717 hrtick_enabled_fair(struct rq *rq) hrtick_enabled_fair() argument
2724 hrtick_enabled_dl(struct rq *rq) hrtick_enabled_dl() argument
2735 hrtick_enabled_fair(struct rq *rq) hrtick_enabled_fair() argument
2740 hrtick_enabled_dl(struct rq *rq) hrtick_enabled_dl() argument
2745 hrtick_enabled(struct rq *rq) hrtick_enabled() argument
3069 nohz_balance_exit_idle(struct rq *rq) nohz_balance_exit_idle() argument
3133 cpufreq_update_util(struct rq *rq, unsigned int flags) cpufreq_update_util() argument
3152 cpufreq_update_util(struct rq *rq, unsigned int flags) cpufreq_update_util() argument
3203 cpu_bw_dl(struct rq *rq) cpu_bw_dl() argument
3208 cpu_util_dl(struct rq *rq) cpu_util_dl() argument
3217 cpu_util_rt(struct rq *rq) cpu_util_rt() argument
3226 uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id) uclamp_rq_get() argument
3232 uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value) uclamp_rq_set() argument
3238 uclamp_rq_is_idle(struct rq *rq) uclamp_rq_is_idle() argument
3261 uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p) uclamp_rq_util_with() argument
3302 uclamp_rq_is_capped(struct rq *rq) uclamp_rq_is_capped() argument
3339 uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p) uclamp_rq_util_with() argument
3350 uclamp_rq_is_capped(struct rq *rq) uclamp_rq_is_capped() argument
3357 uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id) uclamp_rq_get() argument
3366 uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value) uclamp_rq_set() argument
3371 uclamp_rq_is_idle(struct rq *rq) uclamp_rq_is_idle() argument
3378 cpu_util_irq(struct rq *rq) cpu_util_irq() argument
3393 cpu_util_irq(struct rq *rq) cpu_util_irq() argument
3430 membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm) membarrier_switch_mm() argument
3446 membarrier_switch_mm(struct rq *rq, struct mm_struct *prev_mm, struct mm_struct *next_mm) membarrier_switch_mm() argument
3595 mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm) mm_cid_snapshot_time() argument
3603 __mm_cid_get(struct rq *rq, struct mm_struct *mm) __mm_cid_get() argument
3655 mm_cid_get(struct rq *rq, struct mm_struct *mm) mm_cid_get() argument
3677 switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) switch_mm_cid() argument
3723 switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) switch_mm_cid() argument
3726 task_tick_mm_cid(struct rq *rq, struct task_struct *curr) task_tick_mm_cid() argument
3764 walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta) walt_fixup_cum_window_demand() argument
3774 struct rq *rq = cpu_rq(src_cpu); same_freq_domain() local
3787 struct rq *rq = cpu_rq(cpu); is_reserved() local
3794 struct rq *rq = cpu_rq(cpu); mark_reserved() local
3801 struct rq *rq = cpu_rq(cpu); clear_reserved() local
3940 struct rq *rq = cpu_rq(cpu); cpu_util_freq_walt() local
3957 walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta) walt_fixup_cum_window_demand() argument
[all...]
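
New in the 6.6 sched.h hits is core scheduling: sched_core_cookie_match() and friends decide whether a task may run on an SMT sibling given what the core is already running, by comparing per-task cookies. Below is a toy sketch of just the matching rule (an idle sibling matches anything; otherwise cookies must be equal); the real check also walks all siblings of the core and handles the cookie lifetime.

#include <stdio.h>
#include <stdbool.h>

struct task {
    const char *comm;
    unsigned long core_cookie;   /* 0 means no cookie assigned */
};

/* May task p share an SMT core with the task currently running there? */
static bool cookie_match(const struct task *running, const struct task *p)
{
    if (!running)                /* sibling idle: anyone may run */
        return true;
    return running->core_cookie == p->core_cookie;
}

int main(void)
{
    struct task trusted = { "trusted", 0x1 };
    struct task other   = { "other",   0x2 };
    struct task sibling = { "sibling", 0x1 };

    printf("%d\n", cookie_match(&trusted, &sibling)); /* 1: same cookie, may share */
    printf("%d\n", cookie_match(&trusted, &other));   /* 0: must not share the core */
    printf("%d\n", cookie_match(NULL, &other));       /* 1: core is idle */
    return 0;
}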
deadline.c
63 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq()
65 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
71 struct rq *rq = task_rq(p); in dl_rq_of_se() local
73 return &rq->dl; in dl_rq_of_se()
142 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
178 struct rq *rq = cpu_rq(i); in __dl_update() local
180 rq->dl.extra_bw += bw; in __dl_update()
314 struct rq *r in dl_change_utilization() local
398 struct rq *rq = rq_of_dl_rq(dl_rq); task_non_contending() local
527 dl_overloaded(struct rq *rq) dl_overloaded() argument
532 dl_set_overload(struct rq *rq) dl_set_overload() argument
548 dl_clear_overload(struct rq *rq) dl_clear_overload() argument
602 enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) enqueue_pushable_dl_task() argument
615 dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) dequeue_pushable_dl_task() argument
631 has_pushable_dl_tasks(struct rq *rq) has_pushable_dl_tasks() argument
638 need_pull_dl_task(struct rq *rq, struct task_struct *prev) need_pull_dl_task() argument
649 deadline_queue_push_tasks(struct rq *rq) deadline_queue_push_tasks() argument
657 deadline_queue_pull_task(struct rq *rq) deadline_queue_pull_task() argument
664 dl_task_offline_migration(struct rq *rq, struct task_struct *p) dl_task_offline_migration() argument
737 enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) enqueue_pushable_dl_task() argument
742 dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) dequeue_pushable_dl_task() argument
756 deadline_queue_push_tasks(struct rq *rq) deadline_queue_push_tasks() argument
760 deadline_queue_pull_task(struct rq *rq) deadline_queue_pull_task() argument
769 replenish_dl_new_period(struct sched_dl_entity *dl_se, struct rq *rq) replenish_dl_new_period() argument
792 struct rq *rq = rq_of_dl_rq(dl_rq); setup_new_dl_entity() local
834 struct rq *rq = rq_of_dl_rq(dl_rq); replenish_dl_entity() local
951 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq) update_dl_revised_wakeup() argument
1015 struct rq *rq = rq_of_dl_rq(dl_rq); update_dl_entity() local
1050 struct rq *rq = task_rq(p); start_dl_timer() local
1111 struct rq *rq; dl_task_timer() local
1240 struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se)); dl_check_constrained_dl() local
1275 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se) grub_reclaim() argument
1299 update_curr_dl(struct rq *rq) update_curr_dl() argument
1407 struct rq *rq; inactive_task_timer() local
1457 struct rq *rq = rq_of_dl_rq(dl_rq); inc_dl_deadline() local
1470 struct rq *rq = rq_of_dl_rq(dl_rq); dec_dl_deadline() local
1664 enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) enqueue_task_dl() argument
1748 __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) __dequeue_task_dl() argument
1755 dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) dequeue_task_dl() argument
1788 yield_task_dl(struct rq *rq) yield_task_dl() argument
1810 dl_task_is_earliest_deadline(struct task_struct *p, struct rq *rq) dl_task_is_earliest_deadline() argument
1825 struct rq *rq; select_task_rq_dl() local
1872 struct rq *rq; migrate_task_rq_dl() local
1902 check_preempt_equal_dl(struct rq *rq, struct task_struct *p) check_preempt_equal_dl() argument
1923 balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf) balance_dl() argument
1945 check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags) check_preempt_curr_dl() argument
1965 start_hrtick_dl(struct rq *rq, struct task_struct *p) start_hrtick_dl() argument
1970 start_hrtick_dl(struct rq *rq, struct task_struct *p) start_hrtick_dl() argument
1975 set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) set_next_task_dl() argument
1994 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); set_next_task_dl() local
2009 pick_task_dl(struct rq *rq) pick_task_dl() argument
2025 pick_next_task_dl(struct rq *rq) pick_next_task_dl() argument
2036 put_prev_task_dl(struct rq *rq, struct task_struct *p) put_prev_task_dl() argument
2046 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); put_prev_task_dl() local
2059 task_tick_dl(struct rq *rq, struct task_struct *p, int queued) task_tick_dl() argument
2063 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); task_tick_dl() local
2087 pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) pick_dl_task() argument
2099 pick_earliest_pushable_dl_task(struct rq *rq, int cpu) pick_earliest_pushable_dl_task() argument
2213 find_lock_later_rq(struct task_struct *task, struct rq *rq) find_lock_later_rq() argument
2267 pick_next_pushable_dl_task(struct rq *rq) pick_next_pushable_dl_task() argument
2291 push_dl_task(struct rq *rq) push_dl_task() argument
2369 push_dl_tasks(struct rq *rq) push_dl_tasks() argument
2472 task_woken_dl(struct rq *rq, struct task_struct *p) task_woken_dl() argument
2488 struct rq *rq; set_cpus_allowed_dl() local
2518 rq_online_dl(struct rq *rq) rq_online_dl() argument
2529 rq_offline_dl(struct rq *rq) rq_offline_dl() argument
2550 struct rq *rq; dl_add_task_root_domain() local
2582 switched_from_dl(struct rq *rq, struct task_struct *p) switched_from_dl() argument
2636 switched_to_dl(struct rq *rq, struct task_struct *p) switched_to_dl() argument
2664 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); switched_to_dl() local
2672 prio_changed_dl(struct rq *rq, struct task_struct *p, int oldprio) prio_changed_dl() argument
[all...]
stats.h
13 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
15 if (rq) { in rq_sched_info_arrive()
16 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
17 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
25 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
27 if (rq) in rq_sched_info_depart()
28 rq->rq_cpu_time += delta; in rq_sched_info_depart()
32 rq_sched_info_dequeue(struct rq *r argument
72 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) rq_sched_info_arrive() argument
73 rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) rq_sched_info_dequeue() argument
74 rq_sched_info_depart(struct rq *rq, unsigned long long delta) rq_sched_info_depart() argument
170 struct rq *rq; psi_ttwu_dequeue() local
205 sched_info_dequeue(struct rq *rq, struct task_struct *t) sched_info_dequeue() argument
224 sched_info_arrive(struct rq *rq, struct task_struct *t) sched_info_arrive() argument
246 sched_info_enqueue(struct rq *rq, struct task_struct *t) sched_info_enqueue() argument
260 sched_info_depart(struct rq *rq, struct task_struct *t) sched_info_depart() argument
276 sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) sched_info_switch() argument
[all...]
stop_task.c
20 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
22 return sched_stop_runnable(rq); in balance_stop()
27 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_stop() argument
32 static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) in set_next_task_stop() argument
34 stop->se.exec_start = rq_clock_task(rq); in set_next_task_stop()
37 static struct task_struct *pick_task_stop(struct rq *rq) in pick_task_stop() argument
45 pick_next_task_stop(struct rq *rq) pick_next_task_stop() argument
56 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) enqueue_task_stop() argument
63 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) dequeue_task_stop() argument
69 yield_task_stop(struct rq *rq) yield_task_stop() argument
74 put_prev_task_stop(struct rq *rq, struct task_struct *prev) put_prev_task_stop() argument
98 task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) task_tick_stop() argument
102 switched_to_stop(struct rq *rq, struct task_struct *p) switched_to_stop() argument
108 prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio) prio_changed_stop() argument
113 update_curr_stop(struct rq *rq) update_curr_stop() argument
[all...]
/kernel/linux/linux-5.10/drivers/net/ethernet/cisco/enic/
vnic_rq.c
31 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
34 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
38 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL); in vnic_rq_alloc_bufs()
39 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
44 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
47 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
48 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
50 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
53 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
61 rq in vnic_rq_alloc_bufs()
66 vnic_rq_free(struct vnic_rq *rq) vnic_rq_free() argument
85 vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, unsigned int desc_count, unsigned int desc_size) vnic_rq_alloc() argument
114 vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) vnic_rq_init_start() argument
138 vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) vnic_rq_init() argument
146 vnic_rq_error_status(struct vnic_rq *rq) vnic_rq_error_status() argument
151 vnic_rq_enable(struct vnic_rq *rq) vnic_rq_enable() argument
156 vnic_rq_disable(struct vnic_rq *rq) vnic_rq_disable() argument
184 vnic_rq_clean(struct vnic_rq *rq, void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)) vnic_rq_clean() argument
[all...]
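
vnic_rq_alloc_bufs() builds the enic receive ring: buffer descriptors are allocated in fixed-size blocks (VNIC_RQ_BUF_BLK_SZ), then chained through buf->next, with the last buffer wrapping back to bufs[0] so the ring is circular. Below is a toy userspace reconstruction of that linking logic with a small block size, no DMA setup, and no error handling; the kernel version also records each buffer's descriptor address in the ring memory.

#include <stdio.h>
#include <stdlib.h>

#define BLK_SZ 4   /* buffers per allocation block (toy value) */

struct rq_buf {
    unsigned int index;
    struct rq_buf *next;
};

/* Allocate count buffers in BLK_SZ-sized blocks and link them in a ring. */
static struct rq_buf *alloc_ring(unsigned int count)
{
    unsigned int blks = (count + BLK_SZ - 1) / BLK_SZ;
    struct rq_buf **blocks = calloc(blks, sizeof(*blocks));
    struct rq_buf *buf, *head;
    unsigned int i, j;

    for (i = 0; i < blks; i++)
        blocks[i] = calloc(BLK_SZ, sizeof(*buf));

    for (i = 0; i < blks; i++) {
        buf = blocks[i];
        for (j = 0; j < BLK_SZ; j++, buf++) {
            buf->index = i * BLK_SZ + j;
            if (buf->index == count - 1)   /* last buffer: wrap to head */
                buf->next = blocks[0];
            else if (j == BLK_SZ - 1)      /* end of a block: hop to next block */
                buf->next = blocks[i + 1];
            else                           /* within a block: step forward */
                buf->next = buf + 1;
        }
    }
    head = blocks[0];
    free(blocks);   /* frees the pointer array only; buffers stay live (toy leak) */
    return head;
}

int main(void)
{
    struct rq_buf *b = alloc_ring(8);   /* keep count a multiple of BLK_SZ */
    for (int n = 0; n < 10; n++, b = b->next)
        printf("%u ", b->index);        /* 0..7, then wraps to 0 1 */
    printf("\n");
    return 0;
}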
/kernel/linux/linux-6.6/drivers/net/ethernet/cisco/enic/
vnic_rq.c
18 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
21 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
25 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL); in vnic_rq_alloc_bufs()
26 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
31 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
34 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
35 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
37 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
40 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
48 rq in vnic_rq_alloc_bufs()
53 vnic_rq_free(struct vnic_rq *rq) vnic_rq_free() argument
72 vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, unsigned int desc_count, unsigned int desc_size) vnic_rq_alloc() argument
101 vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) vnic_rq_init_start() argument
125 vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) vnic_rq_init() argument
133 vnic_rq_error_status(struct vnic_rq *rq) vnic_rq_error_status() argument
138 vnic_rq_enable(struct vnic_rq *rq) vnic_rq_enable() argument
143 vnic_rq_disable(struct vnic_rq *rq) vnic_rq_disable() argument
171 vnic_rq_clean(struct vnic_rq *rq, void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)) vnic_rq_clean() argument
[all...]
/kernel/linux/linux-5.10/drivers/scsi/fnic/
vnic_rq.c
27 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
30 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
34 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs()
35 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
42 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
45 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
46 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
48 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
51 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
59 rq in vnic_rq_alloc_bufs()
65 vnic_rq_free(struct vnic_rq *rq) vnic_rq_free() argument
82 vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, unsigned int desc_count, unsigned int desc_size) vnic_rq_alloc() argument
111 vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) vnic_rq_init() argument
137 vnic_rq_error_status(struct vnic_rq *rq) vnic_rq_error_status() argument
142 vnic_rq_enable(struct vnic_rq *rq) vnic_rq_enable() argument
147 vnic_rq_disable(struct vnic_rq *rq) vnic_rq_disable() argument
165 vnic_rq_clean(struct vnic_rq *rq, void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)) vnic_rq_clean() argument
[all...]
/kernel/linux/linux-6.6/drivers/scsi/fnic/
vnic_rq.c
15 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
18 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
22 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs()
23 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
30 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
33 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
34 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
36 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
39 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
47 rq in vnic_rq_alloc_bufs()
53 vnic_rq_free(struct vnic_rq *rq) vnic_rq_free() argument
70 vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, unsigned int desc_count, unsigned int desc_size) vnic_rq_alloc() argument
99 vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) vnic_rq_init() argument
125 vnic_rq_error_status(struct vnic_rq *rq) vnic_rq_error_status() argument
130 vnic_rq_enable(struct vnic_rq *rq) vnic_rq_enable() argument
135 vnic_rq_disable(struct vnic_rq *rq) vnic_rq_disable() argument
153 vnic_rq_clean(struct vnic_rq *rq, void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)) vnic_rq_clean() argument
[all...]
/kernel/linux/linux-6.6/include/linux/
blk-mq.h
119 * rq sectors used for blk stats. It has the same value
120 * with blk_rq_sectors(rq), except that it never be zeroed
198 static inline bool blk_rq_is_passthrough(struct request *rq) in blk_rq_is_passthrough() argument
200 return blk_op_is_passthrough(rq->cmd_flags); in blk_rq_is_passthrough()
208 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
210 #define rq_dma_dir(rq) \
211 (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
213 #define rq_list_add(listptr, rq) do { \
214 (rq)
259 rq_list_move(struct request **src, struct request **dst, struct request *rq, struct request *prev) rq_list_move() argument
527 struct request *rq; global() member
786 blk_mq_rq_state(struct request *rq) blk_mq_rq_state() argument
791 blk_mq_request_started(struct request *rq) blk_mq_request_started() argument
796 blk_mq_request_completed(struct request *rq) blk_mq_request_completed() argument
808 blk_mq_set_request_complete(struct request *rq) blk_mq_set_request_complete() argument
817 blk_mq_complete_request_direct(struct request *rq, void (*complete)(struct request *rq)) blk_mq_complete_request_direct() argument
833 blk_mq_need_time_stamp(struct request *rq) blk_mq_need_time_stamp() argument
838 blk_mq_is_reserved_rq(struct request *rq) blk_mq_is_reserved_rq() argument
937 blk_mq_rq_to_pdu(struct request *rq) blk_mq_rq_to_pdu() argument
949 blk_mq_cleanup_rq(struct request *rq) blk_mq_cleanup_rq() argument
955 blk_rq_bio_prep(struct request *rq, struct bio *bio, unsigned int nr_segs) blk_rq_bio_prep() argument
967 rq_is_sync(struct request *rq) rq_is_sync() argument
1031 blk_rq_pos(const struct request *rq) blk_rq_pos() argument
1036 blk_rq_bytes(const struct request *rq) blk_rq_bytes() argument
1041 blk_rq_cur_bytes(const struct request *rq) blk_rq_cur_bytes() argument
1050 blk_rq_sectors(const struct request *rq) blk_rq_sectors() argument
1055 blk_rq_cur_sectors(const struct request *rq) blk_rq_cur_sectors() argument
1060 blk_rq_stats_sectors(const struct request *rq) blk_rq_stats_sectors() argument
1071 blk_rq_payload_bytes(struct request *rq) blk_rq_payload_bytes() argument
1082 req_bvec(struct request *rq) req_bvec() argument
1089 blk_rq_count_bios(struct request *rq) blk_rq_count_bios() argument
1121 blk_rq_nr_phys_segments(struct request *rq) blk_rq_nr_phys_segments() argument
1132 blk_rq_nr_discard_segments(struct request *rq) blk_rq_nr_discard_segments() argument
1139 blk_rq_map_sg(struct request_queue *q, struct request *rq, struct scatterlist *sglist) blk_rq_map_sg() argument
1149 blk_rq_zone_no(struct request *rq) blk_rq_zone_no() argument
1154 blk_rq_zone_is_seq(struct request *rq) blk_rq_zone_is_seq() argument
1165 blk_rq_is_seq_zoned_write(struct request *rq) blk_rq_is_seq_zoned_write() argument
1176 blk_req_zone_write_lock(struct request *rq) blk_req_zone_write_lock() argument
1182 blk_req_zone_write_unlock(struct request *rq) blk_req_zone_write_unlock() argument
1188 blk_req_zone_is_write_locked(struct request *rq) blk_req_zone_is_write_locked() argument
1194 blk_req_can_dispatch_to_zone(struct request *rq) blk_req_can_dispatch_to_zone() argument
1201 blk_rq_is_seq_zoned_write(struct request *rq) blk_rq_is_seq_zoned_write() argument
1206 blk_req_needs_zone_write_lock(struct request *rq) blk_req_needs_zone_write_lock() argument
1211 blk_req_zone_write_lock(struct request *rq) blk_req_zone_write_lock() argument
1215 blk_req_zone_write_unlock(struct request *rq) blk_req_zone_write_unlock() argument
1218 blk_req_zone_is_write_locked(struct request *rq) blk_req_zone_is_write_locked() argument
1223 blk_req_can_dispatch_to_zone(struct request *rq) blk_req_can_dispatch_to_zone() argument
[all...]
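
blk-mq.h's accessors derive a request's geometry from byte counts: blk_rq_sectors() is essentially blk_rq_bytes() shifted down by the 512-byte sector shift. A toy model of those helpers is below, with struct request reduced to a start position plus a byte count; the kernel derives the byte count from the request's internal length field and keeps it current as the request partially completes.

#include <stdio.h>

#define SECTOR_SHIFT 9   /* 512-byte sectors, as the block layer assumes */

/* Toy request: start sector plus total bytes remaining. */
struct request {
    unsigned long long pos;   /* start sector (cf. blk_rq_pos()) */
    unsigned int bytes;       /* bytes left (cf. blk_rq_bytes()) */
};

/* Sector count derived from the byte count, as blk_rq_sectors() does. */
static unsigned int rq_sectors(const struct request *rq)
{
    return rq->bytes >> SECTOR_SHIFT;
}

int main(void)
{
    struct request rq = { .pos = 2048, .bytes = 4096 };
    printf("starts at sector %llu, spans %u sectors\n",
           rq.pos, rq_sectors(&rq));   /* 8 sectors */
    return 0;
}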
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/
i915_request.c
48 void (*hook)(struct i915_request *rq, struct dma_fence *signal);
112 struct i915_request *rq = to_request(fence); in i915_fence_release() local
121 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
122 i915_sw_fence_fini(&rq->semaphore); in i915_fence_release()
128 * very careful in what rq->engine we poke. The virtual engine is in i915_fence_release()
129 * referenced via the rq->context and we released that ref during in i915_fence_release()
137 * not be unsubmitted again, so rq->engine and rq->execution_mask in i915_fence_release()
138 * at this point is stable. rq->execution_mask will be a single in i915_fence_release()
142 * power-of-two we assume that rq in i915_fence_release()
191 __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk)) __notify_execute_cb() argument
204 __notify_execute_cb_irq(struct i915_request *rq) __notify_execute_cb_irq() argument
215 __notify_execute_cb_imm(struct i915_request *rq) __notify_execute_cb_imm() argument
233 __i915_request_fill(struct i915_request *rq, u8 val) __i915_request_fill() argument
246 remove_from_engine(struct i915_request *rq) remove_from_engine() argument
276 i915_request_retire(struct i915_request *rq) i915_request_retire() argument
339 i915_request_retire_upto(struct i915_request *rq) i915_request_retire_upto() argument
361 struct i915_request * const *port, *rq; __request_in_flight() local
422 __await_execution(struct i915_request *rq, struct i915_request *signal, void (*hook)(struct i915_request *rq, struct dma_fence *signal), gfp_t gfp) __await_execution() argument
484 __i915_request_skip(struct i915_request *rq) __i915_request_skip() argument
500 i915_request_set_error_once(struct i915_request *rq, int error) i915_request_set_error_once() argument
708 struct i915_request *rq = container_of(fence, typeof(*rq), semaphore); semaphore_notify() local
724 struct i915_request *rq, *rn; retire_requests() local
736 struct i915_request *rq; request_alloc_slow() local
772 struct i915_request *rq = arg; __i915_request_ctor() local
788 struct i915_request *rq; __i915_request_create() local
917 struct i915_request *rq; i915_request_create() local
946 i915_request_await_start(struct i915_request *rq, struct i915_request *signal) i915_request_await_start() argument
1006 already_busywaiting(struct i915_request *rq) already_busywaiting() argument
1138 __i915_request_await_execution(struct i915_request *to, struct i915_request *from, void (*hook)(struct i915_request *rq, struct dma_fence *signal)) __i915_request_await_execution() argument
1210 mark_external(struct i915_request *rq) mark_external() argument
1224 __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) __i915_request_await_external() argument
1234 i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) i915_request_await_external() argument
1260 i915_request_await_execution(struct i915_request *rq, struct dma_fence *fence, void (*hook)(struct i915_request *rq, struct dma_fence *signal)) i915_request_await_execution() argument
1356 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) i915_request_await_dma_fence() argument
1476 __i915_request_add_to_timeline(struct i915_request *rq) __i915_request_add_to_timeline() argument
1546 __i915_request_commit(struct i915_request *rq) __i915_request_commit() argument
1576 __i915_request_queue(struct i915_request *rq, const struct i915_sched_attr *attr) __i915_request_queue() argument
1596 i915_request_add(struct i915_request *rq) i915_request_add() argument
1653 __i915_spin_request(struct i915_request * const rq, int state) __i915_spin_request() argument
1728 i915_request_wait(struct i915_request *rq, unsigned int flags, long timeout) i915_request_wait() argument
[all...]
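
Read top to bottom, these hits trace the request lifecycle: __i915_request_create()/i915_request_create() allocate, __i915_request_commit()/i915_request_add() publish, and i915_request_retire()/retire_requests() reclaim. A hedged sketch of the usual create/add pairing; the intel_context *ce is assumed to be held by the caller:

/* Hedged sketch of the create/add pattern named in the hits above. */
struct i915_request *rq;

rq = i915_request_create(ce);   /* allocate on ce's timeline */
if (IS_ERR(rq))
	return PTR_ERR(rq);

/* ... emit commands, order against fences via i915_request_await_*() ... */

i915_request_add(rq);           /* commit to the timeline and submit */
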
i915_request.h
53 #define RQ_TRACE(rq, fmt, ...) do { \
54 const struct i915_request *rq__ = (rq); \
235 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
311 void i915_request_set_error_once(struct i915_request *rq, int error);
312 void __i915_request_skip(struct i915_request *rq);
315 void __i915_request_queue(struct i915_request *rq,
318 bool i915_request_retire(struct i915_request *rq);
319 void i915_request_retire_upto(struct i915_request *rq);
331 i915_request_get(struct i915_request *rq) in i915_request_get() argument
333 return to_request(dma_fence_get(&rq->fence)); in i915_request_get()
337 i915_request_get_rcu(struct i915_request *rq) i915_request_get_rcu() argument
343 i915_request_put(struct i915_request *rq) i915_request_put() argument
374 i915_request_signaled(const struct i915_request *rq) i915_request_signaled() argument
380 i915_request_is_active(const struct i915_request *rq) i915_request_is_active() argument
385 i915_request_in_priority_queue(const struct i915_request *rq) i915_request_in_priority_queue() argument
391 i915_request_has_initial_breadcrumb(const struct i915_request *rq) i915_request_has_initial_breadcrumb() argument
404 __hwsp_seqno(const struct i915_request *rq) __hwsp_seqno() argument
424 hwsp_seqno(const struct i915_request *rq) hwsp_seqno() argument
435 __i915_request_has_started(const struct i915_request *rq) __i915_request_has_started() argument
466 i915_request_started(const struct i915_request *rq) i915_request_started() argument
491 i915_request_is_running(const struct i915_request *rq) i915_request_is_running() argument
521 i915_request_is_ready(const struct i915_request *rq) i915_request_is_ready() argument
526 __i915_request_is_complete(const struct i915_request *rq) __i915_request_is_complete() argument
531 i915_request_completed(const struct i915_request *rq) i915_request_completed() argument
547 i915_request_mark_complete(struct i915_request *rq) i915_request_mark_complete() argument
553 i915_request_has_waitboost(const struct i915_request *rq) i915_request_has_waitboost() argument
558 i915_request_has_nopreempt(const struct i915_request *rq) i915_request_has_nopreempt() argument
564 i915_request_has_sentinel(const struct i915_request *rq) i915_request_has_sentinel() argument
569 i915_request_on_hold(const struct i915_request *rq) i915_request_on_hold() argument
574 i915_request_set_hold(struct i915_request *rq) i915_request_set_hold() argument
579 i915_request_clear_hold(struct i915_request *rq) i915_request_clear_hold() argument
585 i915_request_timeline(const struct i915_request *rq) i915_request_timeline() argument
593 i915_request_gem_context(const struct i915_request *rq) i915_request_gem_context() argument
600 i915_request_active_timeline(const struct i915_request *rq) i915_request_active_timeline() argument
[all...]
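
These header inlines form the reference-counting and progress-query surface: i915_request_get()/i915_request_put() wrap the embedded dma_fence refcount, and i915_request_completed() tests the breadcrumb. A hedged sketch of pinning a request across a blocking wait; prev is an assumed pointer the caller already tracks:

/* Hedged sketch: take a reference, wait, then balance it. */
struct i915_request *rq = i915_request_get(prev);

if (!i915_request_completed(rq))
	i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);

i915_request_put(rq);   /* balances i915_request_get() */
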
/kernel/linux/linux-5.10/fs/erofs/
decompressor.c
25 int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
27 int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
31 static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, in z_erofs_lz4_prepare_destpages() argument
35 PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; in z_erofs_lz4_prepare_destpages()
44 struct page *const page = rq->out[i]; in z_erofs_lz4_prepare_destpages()
54 availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES]; in z_erofs_lz4_prepare_destpages()
85 rq->out[i] = victim; in z_erofs_lz4_prepare_destpages()
90 static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq, in generic_copy_inplace_data() argument
97 struct page **in = rq->in; in generic_copy_inplace_data()
117 z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out) z_erofs_lz4_decompress() argument
232 z_erofs_decompress_generic(struct z_erofs_decompress_req *rq, struct list_head *pagepool) z_erofs_decompress_generic() argument
303 z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq, struct list_head *pagepool) z_erofs_shifted_transform() argument
342 z_erofs_decompress(struct z_erofs_decompress_req *rq, struct list_head *pagepool) z_erofs_decompress() argument
[all...]
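
The erofs hits center on z_erofs_decompress(), which dispatches a z_erofs_decompress_req through the per-algorithm ops table (prepare_destpages/decompress) shown at the top of the file. A hedged sketch of building such a request; only the in/out/pageofs_out/outputsize fields visible in the fragments are used, and the page vectors are assumed to be prepared by the caller:

/* Hedged sketch: one compressed cluster through z_erofs_decompress(). */
LIST_HEAD(pagepool);
struct z_erofs_decompress_req req = {
	.in          = compressed_pages,    /* assumed caller-built vector */
	.out         = decompressed_pages,  /* assumed caller-built vector */
	.pageofs_out = 0,                   /* output starts page-aligned */
	.outputsize  = 4 * PAGE_SIZE,
	/* remaining fields (sb, inputsize, alg, ...) per this tree's
	 * internal.h are omitted here and assumed filled likewise */
};
int err = z_erofs_decompress(&req, &pagepool);
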
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/
gen8_engine_cs.c
13 int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen8_emit_flush_rcs() argument
42 if (GRAPHICS_VER(rq->i915) == 9) in gen8_emit_flush_rcs()
46 if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0)) in gen8_emit_flush_rcs()
58 cs = intel_ring_begin(rq, len); in gen8_emit_flush_rcs()
74 intel_ring_advance(rq, cs); in gen8_emit_flush_rcs()
79 int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode) in gen8_emit_flush_xcs() argument
83 cs = intel_ring_begin(rq, 4); in gen8_emit_flush_xcs()
99 if (rq->engine->class == VIDEO_DECODE_CLASS) in gen8_emit_flush_xcs()
107 intel_ring_advance(rq, cs); in gen8_emit_flush_xcs()
112 gen11_emit_flush_rcs(struct i915_request *rq, u32 mode) gen11_emit_flush_rcs() argument
226 mtl_dummy_pipe_control(struct i915_request *rq) mtl_dummy_pipe_control() argument
247 gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) gen12_emit_flush_rcs() argument
364 gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) gen12_emit_flush_xcs() argument
424 hwsp_offset(const struct i915_request *rq) hwsp_offset() argument
436 gen8_emit_init_breadcrumb(struct i915_request *rq) gen8_emit_init_breadcrumb() argument
483 __xehp_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags, u32 arb) __xehp_emit_bb_start() argument
524 xehp_emit_bb_start_noarb(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags) xehp_emit_bb_start_noarb() argument
531 xehp_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags) xehp_emit_bb_start() argument
538 gen8_emit_bb_start_noarb(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags) gen8_emit_bb_start_noarb() argument
574 gen8_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, const unsigned int flags) gen8_emit_bb_start() argument
602 assert_request_valid(struct i915_request *rq) assert_request_valid() argument
615 gen8_emit_wa_tail(struct i915_request *rq, u32 *cs) gen8_emit_wa_tail() argument
628 emit_preempt_busywait(struct i915_request *rq, u32 *cs) emit_preempt_busywait() argument
644 gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs) gen8_emit_fini_breadcrumb_tail() argument
659 emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs) emit_xcs_breadcrumb() argument
664 gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs) gen8_emit_fini_breadcrumb_xcs() argument
669 gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) gen8_emit_fini_breadcrumb_rcs() argument
689 gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) gen11_emit_fini_breadcrumb_rcs() argument
729 gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs) gen12_emit_preempt_busywait() argument
746 ccs_semaphore_offset(struct i915_request *rq) ccs_semaphore_offset() argument
753 ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs) ccs_emit_wa_busywait() argument
782 gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs) gen12_emit_fini_breadcrumb_tail() argument
801 gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs) gen12_emit_fini_breadcrumb_xcs() argument
808 gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) gen12_emit_fini_breadcrumb_rcs() argument
[all...]
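
Every emitter in this file follows one reserve/write/commit pattern: intel_ring_begin() reserves dwords on the request's ring (returning an ERR_PTR on failure), the caller writes exactly that many, and intel_ring_advance() commits them. A minimal hedged sketch; the helper name is hypothetical:

/* Hedged sketch of the ring-emission pattern used throughout above. */
static int emit_two_noops(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);   /* reserve 2 dwords */
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);     /* commit the reserved space */
	return 0;
}
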
gen6_engine_cs.c
55 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) in gen6_emit_post_sync_nonzero_flush() argument
58 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_post_sync_nonzero_flush()
62 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
72 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
74 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
84 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
89 int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen6_emit_flush_rcs() argument
92 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_flush_rcs()
98 ret = gen6_emit_post_sync_nonzero_flush(rq); in gen6_emit_flush_rcs()
130 cs = intel_ring_begin(rq, 4); in gen6_emit_flush_rcs()
143 gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs) gen6_emit_breadcrumb_rcs() argument
178 mi_flush_dw(struct i915_request *rq, u32 flags) mi_flush_dw() argument
214 gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags) gen6_flush_dw() argument
219 gen6_emit_flush_xcs(struct i915_request *rq, u32 mode) gen6_emit_flush_xcs() argument
224 gen6_emit_flush_vcs(struct i915_request *rq, u32 mode) gen6_emit_flush_vcs() argument
229 gen6_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) gen6_emit_bb_start() argument
251 hsw_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) hsw_emit_bb_start() argument
272 gen7_stall_cs(struct i915_request *rq) gen7_stall_cs() argument
289 gen7_emit_flush_rcs(struct i915_request *rq, u32 mode) gen7_emit_flush_rcs() argument
353 gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs) gen7_emit_breadcrumb_rcs() argument
375 gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs) gen6_emit_breadcrumb_xcs() argument
393 gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs) gen7_emit_breadcrumb_xcs() argument
[all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/
gen6_engine_cs.c
54 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) in gen6_emit_post_sync_nonzero_flush() argument
57 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_post_sync_nonzero_flush()
61 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
71 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
73 cs = intel_ring_begin(rq, 6); in gen6_emit_post_sync_nonzero_flush()
83 intel_ring_advance(rq, cs); in gen6_emit_post_sync_nonzero_flush()
88 int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode) in gen6_emit_flush_rcs() argument
91 intel_gt_scratch_offset(rq->engine->gt, in gen6_emit_flush_rcs()
97 ret = gen6_emit_post_sync_nonzero_flush(rq); in gen6_emit_flush_rcs()
129 cs = intel_ring_begin(rq, 4); in gen6_emit_flush_rcs()
142 gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs) gen6_emit_breadcrumb_rcs() argument
177 mi_flush_dw(struct i915_request *rq, u32 flags) mi_flush_dw() argument
213 gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags) gen6_flush_dw() argument
218 gen6_emit_flush_xcs(struct i915_request *rq, u32 mode) gen6_emit_flush_xcs() argument
223 gen6_emit_flush_vcs(struct i915_request *rq, u32 mode) gen6_emit_flush_vcs() argument
228 gen6_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) gen6_emit_bb_start() argument
250 hsw_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) hsw_emit_bb_start() argument
271 gen7_stall_cs(struct i915_request *rq) gen7_stall_cs() argument
288 gen7_emit_flush_rcs(struct i915_request *rq, u32 mode) gen7_emit_flush_rcs() argument
352 gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs) gen7_emit_breadcrumb_rcs() argument
374 gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs) gen6_emit_breadcrumb_xcs() argument
392 gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs) gen7_emit_breadcrumb_xcs() argument
[all...]
/kernel/linux/linux-6.6/block/
blk-mq.c
42 #include "blk-rq-qos.h"
48 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
49 static void blk_mq_request_bypass_insert(struct request *rq,
92 static bool blk_mq_check_inflight(struct request *rq, void *priv) in blk_mq_check_inflight() argument
96 if (rq->part && blk_do_io_stat(rq) && in blk_mq_check_inflight()
97 (!mi->part->bd_partno || rq->part == mi->part) && in blk_mq_check_inflight()
98 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) in blk_mq_check_inflight()
99 mi->inflight[rq_data_dir(rq)]++; in blk_mq_check_inflight()
315 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
333 blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns) blk_mq_rq_time_init() argument
354 struct request *rq = tags->static_rqs[tag]; blk_mq_rq_ctx_init() local
410 struct request *rq; __blk_mq_alloc_requests_batch() local
440 struct request *rq; __blk_mq_alloc_requests() local
530 struct request *rq; blk_mq_rq_cache_fill() local
548 struct request *rq; blk_mq_alloc_cached_request() local
581 struct request *rq; blk_mq_alloc_request() local
621 struct request *rq; blk_mq_alloc_request_hctx() local
685 blk_mq_finish_request(struct request *rq) blk_mq_finish_request() argument
700 __blk_mq_free_request(struct request *rq) __blk_mq_free_request() argument
722 blk_mq_free_request(struct request *rq) blk_mq_free_request() argument
741 struct request *rq; blk_mq_free_plug_rqs() local
747 blk_dump_rq_flags(struct request *rq, char *msg) blk_dump_rq_flags() argument
761 req_bio_endio(struct request *rq, struct bio *bio, unsigned int nbytes, blk_status_t error) req_bio_endio() argument
1028 __blk_mq_end_request_acct(struct request *rq, u64 now) __blk_mq_end_request_acct() argument
1037 __blk_mq_end_request(struct request *rq, blk_status_t error) __blk_mq_end_request() argument
1054 blk_mq_end_request(struct request *rq, blk_status_t error) blk_mq_end_request() argument
1084 struct request *rq; blk_mq_end_request_batch() local
1133 struct request *rq, *next; blk_complete_reqs() local
1155 blk_mq_complete_need_ipi(struct request *rq) blk_mq_complete_need_ipi() argument
1181 blk_mq_complete_send_ipi(struct request *rq) blk_mq_complete_send_ipi() argument
1190 blk_mq_raise_softirq(struct request *rq) blk_mq_raise_softirq() argument
1201 blk_mq_complete_request_remote(struct request *rq) blk_mq_complete_request_remote() argument
1235 blk_mq_complete_request(struct request *rq) blk_mq_complete_request() argument
1250 blk_mq_start_request(struct request *rq) blk_mq_start_request() argument
1289 blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) blk_add_rq_to_plug() argument
1328 blk_execute_rq_nowait(struct request *rq, bool at_head) blk_execute_rq_nowait() argument
1357 blk_end_sync_rq(struct request *rq, blk_status_t ret) blk_end_sync_rq() argument
1366 blk_rq_is_poll(struct request *rq) blk_rq_is_poll() argument
1376 blk_rq_poll_completion(struct request *rq, struct completion *wait) blk_rq_poll_completion() argument
1394 blk_execute_rq(struct request *rq, bool at_head) blk_execute_rq() argument
1432 __blk_mq_requeue_request(struct request *rq) __blk_mq_requeue_request() argument
1447 blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) blk_mq_requeue_request() argument
1472 struct request *rq; blk_mq_requeue_work() local
1519 blk_is_flush_data_rq(struct request *rq) blk_is_flush_data_rq() argument
1524 blk_mq_rq_inflight(struct request *rq, void *priv) blk_mq_rq_inflight() argument
1578 blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired) blk_mq_req_expired() argument
1598 blk_mq_put_rq_ref(struct request *rq) blk_mq_put_rq_ref() argument
1608 blk_mq_check_expired(struct request *rq, void *priv) blk_mq_check_expired() argument
1626 blk_mq_handle_expired(struct request *rq, void *priv) blk_mq_handle_expired() argument
1730 struct request *rq; global() member
1768 __blk_mq_alloc_driver_tag(struct request *rq) __blk_mq_alloc_driver_tag() argument
1792 __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq) __blk_mq_get_driver_tag() argument
1833 blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, struct request *rq) blk_mq_mark_tag_wait() argument
1946 blk_mq_handle_dev_resource(struct request *rq, struct list_head *list) blk_mq_handle_dev_resource() argument
1953 blk_mq_handle_zone_resource(struct request *rq, struct list_head *zone_list) blk_mq_handle_zone_resource() argument
1972 blk_mq_prep_dispatch_rq(struct request *rq, bool need_budget) blk_mq_prep_dispatch_rq() argument
2013 struct request *rq; blk_mq_release_budgets() local
2048 struct request *rq; blk_mq_dispatch_rq_list() local
2479 blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags) blk_mq_request_bypass_insert() argument
2495 struct request *rq; blk_mq_insert_requests() local
2528 blk_mq_insert_request(struct request *rq, blk_insert_t flags) blk_mq_insert_request() argument
2590 blk_mq_bio_to_request(struct request *rq, struct bio *bio, unsigned int nr_segs) blk_mq_bio_to_request() argument
2608 __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, bool last) __blk_mq_issue_directly() argument
2641 blk_mq_get_budget_and_tag(struct request *rq) blk_mq_get_budget_and_tag() argument
2666 blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq) blk_mq_try_issue_directly() argument
2697 blk_mq_request_issue_directly(struct request *rq, bool last) blk_mq_request_issue_directly() argument
2714 struct request *rq; blk_mq_plug_issue_direct() local
2769 struct request *rq = rq_list_pop(&plug->mq_list); blk_mq_dispatch_plug_list() local
2806 struct request *rq; blk_mq_flush_plug_list() local
2861 struct request *rq = list_first_entry(list, struct request, blk_mq_try_issue_list_directly() local
2909 struct request *rq; blk_mq_get_new_requests() local
2932 blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug, struct bio *bio) blk_mq_can_use_cached_rq() argument
2987 struct request *rq = NULL; blk_mq_submit_bio() local
3069 blk_insert_cloned_request(struct request *rq) blk_insert_cloned_request() argument
3135 blk_rq_unprep_clone(struct request *rq) blk_rq_unprep_clone() argument
3164 blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, int (*bio_ctr)(struct bio *, struct bio *, void *), void *data) blk_rq_prep_clone() argument
3221 blk_steal_bios(struct bio_list *list, struct request *rq) blk_steal_bios() argument
3263 struct request *rq = drv_tags->rqs[i]; blk_mq_clear_rq_mapping() local
3301 struct request *rq = tags->static_rqs[i]; blk_mq_free_rqs() local
3398 blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, int node) blk_mq_init_request() argument
3471 struct request *rq = p; blk_mq_alloc_rqs() local
3495 blk_mq_has_request(struct request *rq, void *data) blk_mq_has_request() argument
4868 blk_rq_poll(struct request *rq, struct io_comp_batch *iob, unsigned int poll_flags) blk_rq_poll() argument
4886 blk_mq_rq_cpu(struct request *rq) blk_mq_rq_cpu() argument
[all...]
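
The blk-mq.c hits span the whole request path: allocation (blk_mq_alloc_request()), dispatch (blk_mq_dispatch_rq_list(), blk_mq_try_issue_directly()), completion (blk_mq_complete_request(), blk_mq_end_request()), and synchronous execution (blk_execute_rq()). A hedged sketch of a synchronous passthrough round trip; the queue q and the opcode choice are assumptions:

/* Hedged sketch: allocate a request, execute it synchronously, free it. */
struct request *rq;
blk_status_t status;

rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
	return PTR_ERR(rq);

/* ... fill the driver payload via blk_mq_rq_to_pdu(rq) ... */

status = blk_execute_rq(rq, false);   /* insert at tail and wait */
blk_mq_free_request(rq);
return blk_status_to_errno(status);
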
