Lines Matching defs:task

25 /* Check if task is idle, i.e. not running, not scheduled in
28 * a qp reference to cover the gap from now until the task finishes.
29 * state will move out of busy if task returns a non-zero value
32 * over the task.
33 * Context: caller should hold task->lock.
36 static bool __reserve_if_idle(struct rxe_task *task)
38 WARN_ON(rxe_read(task->qp) <= 0);
40 if (task->state == TASK_STATE_IDLE) {
41 rxe_get(task->qp);
42 task->state = TASK_STATE_BUSY;
43 task->num_sched++;
47 if (task->state == TASK_STATE_BUSY)
48 task->state = TASK_STATE_ARMED;
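
The TASK_STATE_* values used throughout this file form a small state machine. A minimal sketch of the enum, inferred from the states referenced in this listing (the authoritative definition lives in rxe_task.h):

/* sketch of the task state machine implied by the states above;
 * see rxe_task.h for the real declaration
 */
enum {
	TASK_STATE_IDLE		= 0,	/* not running and not scheduled */
	TASK_STATE_BUSY		= 1,	/* reserved by __reserve_if_idle */
	TASK_STATE_ARMED	= 2,	/* busy, with another pass requested */
	TASK_STATE_DRAINING	= 3,	/* finish the current pass, then stop */
	TASK_STATE_DRAINED	= 4,	/* stopped but can be re-enabled */
	TASK_STATE_INVALID	= 5,	/* torn down; must never run again */
};
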
53 /* check if task is idle or drained and not currently
57 * Context: caller should hold task->lock.
60 static bool __is_done(struct rxe_task *task)
62 if (work_pending(&task->work))
65 if (task->state == TASK_STATE_IDLE ||
66 task->state == TASK_STATE_DRAINED) {
74 static bool is_done(struct rxe_task *task)
79 spin_lock_irqsave(&task->lock, flags);
80 done = __is_done(task);
81 spin_unlock_irqrestore(&task->lock, flags);
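
Every helper above takes task->lock and operates on a handful of fields. A sketch of struct rxe_task reconstructed from the accesses in this file (see rxe_task.h for the real declaration):

struct rxe_task {
	struct work_struct	work;		/* queued on rxe_wq by rxe_sched_task */
	int			state;		/* one of the TASK_STATE_* values */
	spinlock_t		lock;		/* serializes state transitions */
	struct rxe_qp		*qp;		/* qp that owns this task */
	int			(*func)(struct rxe_qp *qp); /* requester/completer/responder */
	int			ret;		/* last return value from func */
	long			num_sched;	/* successful reservations */
	long			num_done;	/* completed passes */
};
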
90 * schedules the task. They must call __reserve_if_idle to
91 * move the task to busy before calling or scheduling.
92 * The task can also be moved to drained or invalid
99 * The number of times the task can be run is limited by
100 * max iterations so one task cannot hold the CPU forever.
101 * If the limit is hit and work remains, the task is rescheduled.
103 static void do_task(struct rxe_task *task)
111 WARN_ON(rxe_read(task->qp) <= 0);
113 spin_lock_irqsave(&task->lock, flags);
114 if (task->state >= TASK_STATE_DRAINED) {
115 rxe_put(task->qp);
116 task->num_done++;
117 spin_unlock_irqrestore(&task->lock, flags);
120 spin_unlock_irqrestore(&task->lock, flags);
127 ret = task->func(task->qp);
130 spin_lock_irqsave(&task->lock, flags);
132 * yield the CPU and reschedule the task
135 task->state = TASK_STATE_IDLE;
140 switch (task->state) {
142 task->state = TASK_STATE_IDLE;
145 /* someone tried to schedule the task while we
149 task->state = TASK_STATE_BUSY;
154 task->state = TASK_STATE_DRAINED;
159 rxe_dbg_qp(task->qp, "unexpected task state = %d",
160 task->state);
161 task->state = TASK_STATE_IDLE;
166 task->num_done++;
167 if (WARN_ON(task->num_done != task->num_sched))
169 task->qp,
171 task->num_sched, task->num_done);
173 spin_unlock_irqrestore(&task->lock, flags);
176 task->ret = ret;
179 rxe_sched_task(task);
181 rxe_put(task->qp);
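
do_task() is entered either directly from rxe_run_task() or from the work queue once rxe_sched_task() has queued task->work. The work-queue side goes through a thin wrapper along these lines (the real handler is the do_work registered with INIT_WORK below):

static void do_work(struct work_struct *work)
{
	/* recover the rxe_task that embeds this work_struct and run it */
	do_task(container_of(work, struct rxe_task, work));
}
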
190 int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
195 task->qp = qp;
196 task->func = func;
197 task->state = TASK_STATE_IDLE;
198 spin_lock_init(&task->lock);
199 INIT_WORK(&task->work, do_work);
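
For context, rxe_init_task() is called once per handler when the qp is created. A hedged usage sketch, roughly following rxe_qp.c (the task field names on struct rxe_qp vary between kernel versions):

	err = rxe_init_task(&qp->req.task, qp, rxe_requester);
	if (err)
		return err;
	err = rxe_init_task(&qp->comp.task, qp, rxe_completer);
	if (err)
		return err;
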
207 * task is moved to invalid and returns. The qp cleanup
208 * code then calls the task functions directly without
209 * using the task struct to drain any late-arriving packets
212 void rxe_cleanup_task(struct rxe_task *task)
216 spin_lock_irqsave(&task->lock, flags);
217 if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
218 task->state = TASK_STATE_DRAINING;
220 task->state = TASK_STATE_INVALID;
221 spin_unlock_irqrestore(&task->lock, flags);
224 spin_unlock_irqrestore(&task->lock, flags);
226 /* now the task cannot be scheduled or run; just wait
229 while (!is_done(task))
232 spin_lock_irqsave(&task->lock, flags);
233 task->state = TASK_STATE_INVALID;
234 spin_unlock_irqrestore(&task->lock, flags);
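
As the comment above notes, once rxe_cleanup_task() has moved the task to TASK_STATE_INVALID the qp cleanup path stops using the task struct and calls the handler directly to drain anything that arrived late. A hypothetical sketch of that drain, relying only on the convention stated earlier that the handler returns non-zero once no work remains (the field name and the loop are illustrative, not taken from this listing):

	rxe_cleanup_task(&qp->resp.task);
	/* the task can no longer be scheduled; drain late-arriving
	 * packets by invoking the handler directly until it reports
	 * that no work remains
	 */
	while (rxe_responder(qp) == 0)
		;
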
237 /* run the task inline if it is currently idle
240 void rxe_run_task(struct rxe_task *task)
245 WARN_ON(rxe_read(task->qp) <= 0);
247 spin_lock_irqsave(&task->lock, flags);
248 run = __reserve_if_idle(task);
249 spin_unlock_irqrestore(&task->lock, flags);
252 do_task(task);
255 /* schedule the task to run later as a work queue entry.
259 void rxe_sched_task(struct rxe_task *task)
263 WARN_ON(rxe_read(task->qp) <= 0);
265 spin_lock_irqsave(&task->lock, flags);
266 if (__reserve_if_idle(task))
267 queue_work(rxe_wq, &task->work);
268 spin_unlock_irqrestore(&task->lock, flags);
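
rxe_sched_task() queues onto rxe_wq, a driver-wide workqueue also managed in this file. A sketch of its allocation and teardown, assuming the usual alloc_workqueue()/destroy_workqueue() pairing used here:

static struct workqueue_struct *rxe_wq;

int rxe_alloc_wq(void)
{
	/* unbound workqueue shared by all rxe tasks */
	rxe_wq = alloc_workqueue("rxe_wq", WQ_UNBOUND, WQ_MAX_ACTIVE);
	if (!rxe_wq)
		return -ENOMEM;

	return 0;
}

void rxe_destroy_wq(void)
{
	destroy_workqueue(rxe_wq);
}
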
275 void rxe_disable_task(struct rxe_task *task)
279 WARN_ON(rxe_read(task->qp) <= 0);
281 spin_lock_irqsave(&task->lock, flags);
282 if (!__is_done(task) && task->state < TASK_STATE_DRAINED) {
283 task->state = TASK_STATE_DRAINING;
285 task->state = TASK_STATE_DRAINED;
286 spin_unlock_irqrestore(&task->lock, flags);
289 spin_unlock_irqrestore(&task->lock, flags);
291 while (!is_done(task))
294 spin_lock_irqsave(&task->lock, flags);
295 task->state = TASK_STATE_DRAINED;
296 spin_unlock_irqrestore(&task->lock, flags);
299 void rxe_enable_task(struct rxe_task *task)
303 WARN_ON(rxe_read(task->qp) <= 0);
305 spin_lock_irqsave(&task->lock, flags);
306 if (task->state == TASK_STATE_INVALID) {
307 spin_unlock_irqrestore(&task->lock, flags);
311 task->state = TASK_STATE_IDLE;
312 spin_unlock_irqrestore(&task->lock, flags);
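
Taken together, rxe_disable_task()/rxe_enable_task() give callers a way to quiesce a handler without destroying it. A hypothetical usage pattern (the surrounding qp-modify context is illustrative, not taken from this listing):

	/* stop the requester from running while the qp is reconfigured */
	rxe_disable_task(&qp->req.task);

	/* ... safely update qp state here ... */

	/* allow the task to be run or scheduled again */
	rxe_enable_task(&qp->req.task);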