
Searched refs:scheduled (Results 1 - 25 of 49) sorted by relevance


/kernel/linux/linux-5.10/drivers/gpu/drm/scheduler/
sched_fence.c
53 int ret = dma_fence_signal(&fence->scheduled); in drm_sched_fence_scheduled()
56 DMA_FENCE_TRACE(&fence->scheduled, in drm_sched_fence_scheduled()
59 DMA_FENCE_TRACE(&fence->scheduled, in drm_sched_fence_scheduled()
122 * Drop the extra reference from the scheduled fence to the base fence.
128 dma_fence_put(&fence->scheduled); in drm_sched_fence_release_finished()
146 return container_of(f, struct drm_sched_fence, scheduled); in to_drm_sched_fence()
170 dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled, in drm_sched_fence_create()
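
The 5.10 sched_fence.c hits above show the core drm_sched_fence layout: one allocation embeds two dma_fences, "scheduled" (signaled when the job is picked for the hardware ring) and "finished" (signaled on completion), and to_drm_sched_fence() recovers the wrapper via container_of(). A minimal userspace sketch of that embed-and-recover pattern, assuming illustrative stand-in types rather than the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    struct stage { int signaled; };

    struct sched_fence {
        struct stage scheduled;   /* signaled when the job starts on hardware */
        struct stage finished;    /* signaled when the job completes */
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* mirrors to_drm_sched_fence(): recover the wrapper from its member */
    static struct sched_fence *to_sched_fence(struct stage *f)
    {
        return container_of(f, struct sched_fence, scheduled);
    }

    int main(void)
    {
        struct sched_fence fence = { {0}, {0} };
        fence.scheduled.signaled = 1;   /* like drm_sched_fence_scheduled() */
        printf("wrapper recovered: %d\n",
               to_sched_fence(&fence.scheduled) == &fence);
        return 0;
    }

The dma_fence_put(&fence->scheduled) hit reflects the quoted comment: the finished fence holds an extra reference on the scheduled half that must be dropped on release.
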
sched_main.c
28 * into software queues which are then scheduled on a hardware run queue.
41 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
44 * The jobs in an entity are always scheduled in the order that they were pushed.
327 * jobs from it will not be scheduled further
348 if (bad->s_fence->scheduled.context == in drm_sched_increase_karma()
513 guilty_context = s_job->s_fence->scheduled.context; in drm_sched_resubmit_jobs()
516 if (found_guilty && s_job->s_fence->scheduled.context == guilty_context) in drm_sched_resubmit_jobs()
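
Both drm_sched_increase_karma() and drm_sched_resubmit_jobs() identify a hung entity by the context ID stored in s_fence->scheduled: once a bad job is found, every later pending job sharing that fence context is treated as guilty too. A hedged sketch of that scan, with invented job fields standing in for the kernel's structures:

    #include <stdbool.h>
    #include <stdio.h>

    /* invented job record; 'context' plays the role of dma_fence.context */
    struct job { unsigned long long context; bool bad; bool skipped; };

    static void resubmit_jobs(struct job *jobs, int n)
    {
        bool found_guilty = false;
        unsigned long long guilty_context = 0;

        for (int i = 0; i < n; i++) {
            /* the first bad job establishes the guilty fence context... */
            if (jobs[i].bad && !found_guilty) {
                found_guilty = true;
                guilty_context = jobs[i].context;
            }
            /* ...and every job sharing it is skipped, not resubmitted */
            if (found_guilty && jobs[i].context == guilty_context)
                jobs[i].skipped = true;
        }
    }

    int main(void)
    {
        struct job jobs[] = { {1, false, false},
                              {2, true,  false},
                              {2, false, false} };

        resubmit_jobs(jobs, 3);
        for (int i = 0; i < 3; i++)
            printf("job %d skipped=%d\n", i, jobs[i].skipped);
        return 0;
    }
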
/kernel/linux/linux-6.6/net/mptcp/
sched.c
119 bool scheduled) in mptcp_subflow_set_scheduled()
121 WRITE_ONCE(subflow->scheduled, scheduled); in mptcp_subflow_set_scheduled()
143 if (READ_ONCE(subflow->scheduled)) in mptcp_sched_get_send()
165 if (READ_ONCE(subflow->scheduled)) in mptcp_sched_get_retrans()
118 mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow, bool scheduled) mptcp_subflow_set_scheduled() argument
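
The MPTCP hits pair WRITE_ONCE() in mptcp_subflow_set_scheduled() with READ_ONCE() checks on the send and retransmit paths: a lockless flag accessed only through marked loads and stores. A rough userspace analog using C11 relaxed atomics (the struct and function names below are mine, not MPTCP's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct subflow { atomic_bool scheduled; };   /* invented stand-in */

    /* analog of mptcp_subflow_set_scheduled(): a marked, lockless write */
    static void set_scheduled(struct subflow *sf, bool val)
    {
        atomic_store_explicit(&sf->scheduled, val, memory_order_relaxed);
    }

    /* analog of the READ_ONCE() checks in mptcp_sched_get_send() and
     * mptcp_sched_get_retrans() */
    static bool is_scheduled(struct subflow *sf)
    {
        return atomic_load_explicit(&sf->scheduled, memory_order_relaxed);
    }

    int main(void)
    {
        struct subflow sf;

        atomic_init(&sf.scheduled, false);
        set_scheduled(&sf, true);    /* the scheduler marks its pick */
        if (is_scheduled(&sf))       /* the send path honors the mark */
            printf("subflow selected for transmit\n");
        return 0;
    }
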
/kernel/linux/linux-6.6/drivers/gpu/drm/scheduler/
sched_fence.c
68 /* Set the parent before signaling the scheduled fence, such that, in drm_sched_fence_scheduled()
70 * been scheduled (which is the case for drivers delegating waits in drm_sched_fence_scheduled()
77 dma_fence_signal(&fence->scheduled); in drm_sched_fence_scheduled()
143 * Drop the extra reference from the scheduled fence to the base fence.
149 dma_fence_put(&fence->scheduled); in drm_sched_fence_release_finished()
199 return container_of(f, struct drm_sched_fence, scheduled); in to_drm_sched_fence()
230 dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled, in drm_sched_fence_init()
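
Compared with 5.10, the 6.6 sched_fence.c hits stress ordering: the parent (hardware) fence must be published before the scheduled fence signals, so a waiter that delegates to the parent never observes "scheduled" without one. A small sketch of that publish-then-signal ordering with release/acquire atomics; all names here are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fence { int id; };

    static struct fence hw_fence = { 1 };
    static _Atomic(struct fence *) parent;   /* the hardware fence       */
    static atomic_bool scheduled;            /* the scheduled-stage flag */

    /* publish the parent, then signal with release ordering, so any
     * observer of 'scheduled' is guaranteed to also see the parent */
    static void signal_scheduled(void)
    {
        atomic_store_explicit(&parent, &hw_fence, memory_order_relaxed);
        atomic_store_explicit(&scheduled, true, memory_order_release);
    }

    /* a waiter that delegates to the parent once the job is scheduled */
    static struct fence *wait_target(void)
    {
        if (atomic_load_explicit(&scheduled, memory_order_acquire))
            return atomic_load_explicit(&parent, memory_order_relaxed);
        return NULL;   /* not scheduled yet: wait on the scheduled fence */
    }

    int main(void)
    {
        signal_scheduled();
        printf("waiting on %s\n",
               wait_target() == &hw_fence ? "parent" : "scheduled fence");
        return 0;
    }
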
sched_entity.c
144 * drm_sched_entity_error - return error of last scheduled job
147 * Opportunistically return the error of the last scheduled job. Result can
187 if (s_fence && f == &s_fence->scheduled) { in drm_sched_entity_kill_jobs_cb()
188 /* The dependencies array had a reference on the scheduled in drm_sched_entity_kill_jobs_cb()
197 * had on the scheduled fence. in drm_sched_entity_kill_jobs_cb()
199 dma_fence_put(&s_fence->scheduled); in drm_sched_entity_kill_jobs_cb()
394 * Fence is a scheduled/finished fence from a job in drm_sched_entity_add_dependency_cb()
408 * it to be scheduled in drm_sched_entity_add_dependency_cb()
410 fence = dma_fence_get(&s_fence->scheduled); in drm_sched_entity_add_dependency_cb()
417 /* Ignore it when it is already scheduled */ in drm_sched_entity_add_dependency_cb()
[all...]
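
The sched_entity.c hits show two uses of the scheduled fence: drm_sched_entity_kill_jobs_cb() drops the extra reference the dependencies array held on it, and drm_sched_entity_add_dependency_cb() downgrades a same-scheduler dependency from the finished fence to the scheduled fence, since ring ordering makes the stronger wait unnecessary. A sketch of that downgrade decision, with stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    struct fence { bool signaled; };
    struct sched_fence { struct fence scheduled, finished; int sched_id; };

    /* if the dependency runs on our own scheduler, waiting for it to be
     * *scheduled* is enough; ring ordering serializes execution after that */
    static struct fence *pick_dependency(struct sched_fence *dep, int my_sched)
    {
        if (dep->sched_id == my_sched)
            return &dep->scheduled;   /* weaker, earlier-signaling wait */
        return &dep->finished;        /* foreign scheduler: full wait   */
    }

    int main(void)
    {
        struct sched_fence dep = { {false}, {false}, 7 };

        printf("same scheduler waits on: %s\n",
               pick_dependency(&dep, 7) == &dep.scheduled ? "scheduled"
                                                          : "finished");
        return 0;
    }
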
sched_main.c
28 * into software queues which are then scheduled on a hardware run queue.
41 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
44 * The jobs in an entity are always scheduled in the order that they were pushed.
588 guilty_context = s_job->s_fence->scheduled.context; in drm_sched_resubmit_jobs()
591 if (found_guilty && s_job->s_fence->scheduled.context == guilty_context) in drm_sched_resubmit_jobs()
926 /* make the scheduled timestamp more accurate */ in drm_sched_get_cleanup_job()
931 next->s_fence->scheduled.timestamp = in drm_sched_get_cleanup_job()
1173 * jobs from it will not be scheduled further
1195 if (bad->s_fence->scheduled.context == in drm_sched_increase_karma()
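
New in the 6.6 sched_main.c hits is the timestamp fixup in drm_sched_get_cleanup_job(): when a finished job is reaped, the next job's scheduled timestamp is backdated to the moment the previous job completed, which is when the hardware really started it. Sketched with invented fields:

    #include <stdio.h>
    #include <time.h>

    struct job { struct timespec scheduled_ts, finished_ts; };   /* invented */

    /* the next job really started when the previous one left the ring */
    static void fix_scheduled_timestamp(const struct job *done, struct job *next)
    {
        next->scheduled_ts = done->finished_ts;
    }

    int main(void)
    {
        struct job done = { {0, 0}, {5, 0} };
        struct job next = { {0, 0}, {0, 0} };

        fix_scheduled_timestamp(&done, &next);
        printf("next job scheduled at t=%lds\n", (long)next.scheduled_ts.tv_sec);
        return 0;
    }
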
/kernel/linux/linux-5.10/arch/s390/pci/
pci_irq.c
147 atomic_t scheduled; member
153 atomic_t *scheduled = data; in zpci_handle_remote_irq() local
157 } while (atomic_dec_return(scheduled)); in zpci_handle_remote_irq()
179 if (atomic_inc_return(&cpu_data->scheduled) > 1) in zpci_handle_fallback_irq()
183 cpu_data->csd.info = &cpu_data->scheduled; in zpci_handle_fallback_irq()
/kernel/linux/linux-6.6/arch/s390/pci/
pci_irq.c
179 atomic_t scheduled; member
185 atomic_t *scheduled = data; in zpci_handle_remote_irq() local
189 } while (atomic_dec_return(scheduled)); in zpci_handle_remote_irq()
212 if (atomic_inc_return(&cpu_data->scheduled) > 1) in zpci_handle_fallback_irq()
215 INIT_CSD(&cpu_data->csd, zpci_handle_remote_irq, &cpu_data->scheduled); in zpci_handle_fallback_irq()
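
In both s390 versions, zpci_handle_fallback_irq() coalesces IPIs with an atomic counter: only the 0 -> 1 transition of "scheduled" dispatches the CSD, and zpci_handle_remote_irq() drains until atomic_dec_return() reaches zero, absorbing any requests that raced in. A userspace model of the same counter protocol (function names are mine):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int scheduled;   /* counts pending fallback interrupts */

    /* drain side, like zpci_handle_remote_irq(): keep processing until the
     * counter hits zero, so racing requests are handled by this run */
    static void handle_remote(void)
    {
        do {
            printf("processing one batch\n");
        } while (atomic_fetch_sub(&scheduled, 1) - 1);   /* atomic_dec_return */
    }

    /* request side, like zpci_handle_fallback_irq(): only the 0 -> 1
     * transition dispatches work; later callers piggyback on it */
    static void fallback_irq(void)
    {
        if (atomic_fetch_add(&scheduled, 1) + 1 > 1)     /* atomic_inc_return */
            return;
        handle_remote();   /* in the kernel: an IPI scheduled via the CSD */
    }

    int main(void)
    {
        fallback_irq();
        fallback_irq();
        return 0;
    }
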
/kernel/linux/linux-5.10/fs/btrfs/
reada.c
61 int scheduled; member
110 re->scheduled = 0; in __readahead_hook()
721 if (re->scheduled || list_empty(&re->extctl)) { in reada_start_machine_dev()
726 re->scheduled = 1; in reada_start_machine_dev()
886 pr_debug(" re: logical %llu size %u empty %d scheduled %d", in dump_devs()
888 list_empty(&re->extctl), re->scheduled); in dump_devs()
915 if (!re->scheduled) { in dump_devs()
919 pr_debug("re: logical %llu size %u list empty %d scheduled %d", in dump_devs()
921 list_empty(&re->extctl), re->scheduled); in dump_devs()
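
The btrfs readahead hits use "scheduled" as a plain int guarded by a lock: reada_start_machine_dev() skips an extent that is already scheduled or has no pending work, and sets the flag before dispatch; __readahead_hook() clears it afterwards. A minimal sketch of that check-and-set under a mutex, with invented field names:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* invented stand-in for struct reada_extent */
    struct extent {
        pthread_mutex_t lock;
        bool scheduled;   /* already handed to a device? */
        bool has_work;    /* stands in for !list_empty(&re->extctl) */
    };

    /* mirrors the guard in reada_start_machine_dev(): dispatch at most once */
    static bool try_dispatch(struct extent *re)
    {
        bool start = false;

        pthread_mutex_lock(&re->lock);
        if (!re->scheduled && re->has_work) {
            re->scheduled = true;   /* claimed; the hook clears it later */
            start = true;
        }
        pthread_mutex_unlock(&re->lock);
        return start;
    }

    int main(void)
    {
        struct extent re = { PTHREAD_MUTEX_INITIALIZER, false, true };

        printf("first try: %d\n", try_dispatch(&re));    /* 1: dispatched */
        printf("second try: %d\n", try_dispatch(&re));   /* 0: already scheduled */
        return 0;
    }
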
/kernel/linux/linux-5.10/net/sctp/
stream_sched_prio.c
64 /* Look into scheduled priorities first, as they are sorted and in sctp_sched_prio_get_head()
65 * we can find it fast IF it's scheduled. in sctp_sched_prio_get_head()
105 bool scheduled = false; in sctp_sched_prio_unsched() local
111 scheduled = true; in sctp_sched_prio_unsched()
127 return scheduled; in sctp_sched_prio_unsched()
137 /* Nothing to do if already scheduled */ in sctp_sched_prio_sched()
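
The SCTP priority scheduler keeps the flag implicit in list membership: sctp_sched_prio_unsched() reports whether the stream was actually queued, and the sched side bails out early when it already is. A compact model of that pair, with a bool standing in for the kernel's list_head test:

    #include <stdbool.h>
    #include <stdio.h>

    struct stream { bool on_list; };   /* stands in for list membership */

    /* like sctp_sched_prio_unsched(): report whether it was queued at all */
    static bool unsched(struct stream *s)
    {
        bool scheduled = s->on_list;

        s->on_list = false;
        return scheduled;
    }

    /* like sctp_sched_prio_sched(): nothing to do if already scheduled */
    static void sched(struct stream *s)
    {
        if (s->on_list)
            return;
        s->on_list = true;
    }

    int main(void)
    {
        struct stream s = { false };

        sched(&s);
        printf("was scheduled: %d\n", unsched(&s));   /* 1 */
        printf("was scheduled: %d\n", unsched(&s));   /* 0 */
        return 0;
    }
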
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
196 struct dma_fence *fence = &leader->base.s_fence->scheduled; in amdgpu_job_set_gang_leader()
216 if (job->gang_submit != &job->base.s_fence->scheduled) in amdgpu_job_free()
327 /* Signal all jobs not yet scheduled */ in amdgpu_job_stop_all_jobs_on_sched()
335 dma_fence_signal(&s_fence->scheduled); in amdgpu_job_stop_all_jobs_on_sched()
343 /* Signal all jobs already scheduled to HW */ in amdgpu_job_stop_all_jobs_on_sched()
amdgpu_sync.c
297 * when they are scheduled. in amdgpu_sync_peek_fence()
300 if (dma_fence_is_signaled(&s_fence->scheduled)) in amdgpu_sync_peek_fence()
303 return &s_fence->scheduled; in amdgpu_sync_peek_fence()
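
amdgpu_sync_peek_fence() applies the scheduled/finished split from the consumer side: a dependency whose scheduled half has already signaled needs no further wait before a context switch, so the walk returns the first scheduled fence that has not signaled yet. A sketch under those assumptions (types are stand-ins):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct fence { bool signaled; };
    struct job_fence { struct fence scheduled, finished; };

    /* walk the dependencies; a job whose scheduled half already signaled
     * needs no wait, so return the first that has not (or NULL if none) */
    static struct fence *peek_unscheduled(struct job_fence *deps, int n)
    {
        for (int i = 0; i < n; i++) {
            if (deps[i].scheduled.signaled)
                continue;
            return &deps[i].scheduled;
        }
        return NULL;
    }

    int main(void)
    {
        struct job_fence deps[2] = { { {true},  {false} },
                                     { {false}, {false} } };

        printf("still need a wait: %d\n", peek_unscheduled(deps, 2) != NULL);
        return 0;
    }
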
/kernel/linux/linux-6.6/net/sctp/
stream_sched_prio.c
64 /* Look into scheduled priorities first, as they are sorted and in sctp_sched_prio_get_head()
65 * we can find it fast IF it's scheduled. in sctp_sched_prio_get_head()
105 bool scheduled = false; in sctp_sched_prio_unsched() local
111 scheduled = true; in sctp_sched_prio_unsched()
127 return scheduled; in sctp_sched_prio_unsched()
137 /* Nothing to do if already scheduled */ in sctp_sched_prio_sched()
/kernel/linux/linux-5.10/kernel/
workqueue_internal.h
34 struct list_head scheduled; /* L: scheduled works */ member
workqueue.c
314 /* CPU where unbound work was last round robin scheduled from this CPU */
1054 * @work: start of series of works to be scheduled
1059 * be scheduled starts at @work and includes any consecutive work with
1063 * the last scheduled work. This allows move_linked_works() to be
1086 * multiple works to the scheduled queue, the next position in move_linked_works()
1125 * the release work item is scheduled on a per-cpu workqueue. To in put_pwq()
1676 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1708 * zero, @work is guaranteed to be scheduled immediately regardless of its
1839 INIT_LIST_HEAD(&worker->scheduled); in alloc_worker()
1987 WARN_ON(!list_empty(&worker->scheduled)) || in destroy_worker()
2478 struct list_head *scheduled = &rescuer->scheduled; rescuer_thread() local
[all...]
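
In the workqueue hits, every worker (including the rescuer) owns a ->scheduled list, and move_linked_works() splices a work item plus any consecutive linked follow-ups onto it in one motion. A toy flat-array version of that splice; the kernel of course uses intrusive list_heads and the WORK_STRUCT_LINKED bit:

    #include <stdio.h>

    struct work { int linked; };   /* stands in for WORK_STRUCT_LINKED */

    /* toy move_linked_works(): move 'start' plus consecutive linked works
     * onto a worker's scheduled list; return where scanning should resume */
    static int move_linked_works(const struct work *queue, int n, int start,
                                 int *scheduled, int *count)
    {
        int i = start;

        for (;;) {
            scheduled[(*count)++] = i;          /* splice onto ->scheduled */
            if (i >= n - 1 || !queue[i].linked) /* series ends here */
                break;
            i++;
        }
        return i + 1;
    }

    int main(void)
    {
        struct work queue[] = { {1}, {1}, {0}, {0} };  /* 0..2 form a series */
        int scheduled[4], count = 0;

        int next = move_linked_works(queue, 4, 0, scheduled, &count);
        printf("moved %d works, resume at %d\n", count, next);  /* 3, 3 */
        return 0;
    }
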
/kernel/linux/linux-6.6/kernel/
workqueue_internal.h
42 struct list_head scheduled; /* L: scheduled works */ member
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/
amdgpu_sync.c
306 * when they are scheduled. in amdgpu_sync_peek_fence()
309 if (dma_fence_is_signaled(&s_fence->scheduled)) in amdgpu_sync_peek_fence()
312 return &s_fence->scheduled; in amdgpu_sync_peek_fence()
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/
rc.h
53 u8 *prev_ack, bool *scheduled);
/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/
rc.h
53 u8 *prev_ack, bool *scheduled);
/kernel/linux/linux-5.10/kernel/time/
posix-cpu-timers.c
1161 p->posix_cputimers_work.scheduled = false; in clear_posix_cputimers_work()
1174 * Note: All operations on tsk->posix_cputimer_work.scheduled happen either
1181 return tsk->posix_cputimers_work.scheduled; in posix_cpu_timers_work_scheduled()
1186 if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled)) in __run_posix_cpu_timers()
1190 tsk->posix_cputimers_work.scheduled = true; in __run_posix_cpu_timers()
1205 tsk->posix_cputimers_work.scheduled = false; in posix_cpu_timers_enable_work()
1226 tsk->posix_cputimers_work.scheduled = false; in posix_cpu_timers_enable_work()
1378 * work is already scheduled there is no point to do anything here. in run_posix_cpu_timers()
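
The posix-cpu-timers flag makes deferring expiry idempotent: __run_posix_cpu_timers() arms the task work only when "scheduled" is still false, and the handler clears it before processing so the next expiry can re-arm. A stripped-down model of that handshake; locking and task_work_add() are elided, and the hits note the real accesses are serialized by the task itself or sighand lock:

    #include <stdbool.h>
    #include <stdio.h>

    struct task { bool scheduled; };   /* posix_cputimers_work.scheduled */

    /* interrupt side: arm the deferred work at most once */
    static void irq_expiry(struct task *t)
    {
        if (t->scheduled)
            return;                /* work already queued; nothing to do */
        t->scheduled = true;       /* kernel follows with task_work_add() */
        printf("task work armed\n");
    }

    /* task-work side: clear first so a later expiry can re-arm */
    static void work_handler(struct task *t)
    {
        t->scheduled = false;
        printf("expired timers processed\n");
    }

    int main(void)
    {
        struct task t = { false };

        irq_expiry(&t);     /* arms */
        irq_expiry(&t);     /* no-op, already scheduled */
        work_handler(&t);   /* processes and re-enables arming */
        return 0;
    }
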
/kernel/linux/linux-6.6/kernel/time/
posix-cpu-timers.c
1225 p->posix_cputimers_work.scheduled = false; in clear_posix_cputimers_work()
1238 * Note: All operations on tsk->posix_cputimer_work.scheduled happen either
1245 return tsk->posix_cputimers_work.scheduled; in posix_cpu_timers_work_scheduled()
1250 if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled)) in __run_posix_cpu_timers()
1254 tsk->posix_cputimers_work.scheduled = true; in __run_posix_cpu_timers()
1269 tsk->posix_cputimers_work.scheduled = false; in posix_cpu_timers_enable_work()
1290 tsk->posix_cputimers_work.scheduled = false; in posix_cpu_timers_enable_work()
1442 * work is already scheduled there is no point to do anything here. in run_posix_cpu_timers()
/kernel/linux/linux-5.10/drivers/scsi/libsas/
sas_scsi_host.c
422 int scheduled = 0, tries = 100; in sas_queue_reset() local
432 while (!scheduled && tries--) { in sas_queue_reset()
436 scheduled = 1; in sas_queue_reset()
449 if (scheduled) in sas_queue_reset()
768 /* check if any new eh work was scheduled during the last run */ in sas_scsi_recover_host()
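
sas_queue_reset() wraps its flag in a bounded retry: up to 100 attempts to claim the reset before giving up, so a racing error handler cannot livelock the caller. The shape of that loop, with a hypothetical try_claim() standing in for the test the driver performs under its lock:

    #include <stdbool.h>
    #include <stdio.h>

    /* hypothetical stand-in for the locked test the driver really performs */
    static bool try_claim(void)
    {
        return true;
    }

    /* shape of sas_queue_reset(): bounded retries, report success/failure */
    static int queue_reset(void)
    {
        int scheduled = 0, tries = 100;

        while (!scheduled && tries--) {
            if (try_claim())
                scheduled = 1;   /* reset queued exactly once */
        }
        return scheduled ? 0 : -1;
    }

    int main(void)
    {
        printf("queue_reset: %d\n", queue_reset());
        return 0;
    }
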
/kernel/linux/linux-5.10/include/linux/
posix-timers.h
134 * @work: The task work to be scheduled
136 * @scheduled: @work has been scheduled already, no further processing
141 unsigned int scheduled; member
/kernel/linux/linux-5.10/include/drm/
gpu_scheduler.h
55 * @rq: runqueue on which this entity is currently scheduled.
57 * Jobs from this entity can be scheduled on any scheduler
67 * The &drm_sched_fence.scheduled uses the
75 * @last_scheduled: points to the finished fence of the last scheduled job.
107 * struct drm_sched_rq - queue of entities to be scheduled.
111 * @entities: list of the entities to be scheduled.
112 * @current_entity: the entity which is to be scheduled.
130 * @scheduled: this fence is what will be signaled by the scheduler
131 * when the job is scheduled.
133 struct dma_fence scheduled; member
[all...]
/kernel/linux/linux-6.6/include/linux/
posix-timers.h
140 * @work: The task work to be scheduled
142 * @scheduled: @work has been scheduled already, no further processing
147 unsigned int scheduled; member

Completed in 23 milliseconds
