Lines matching defs:task

875 struct task_struct *task;
899 struct task_struct *task;
1082 struct task_struct *task,
1204 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
1210 if (task && head->task != task)
1237 static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
1242 if (task && head->task != task)
1407 atomic_inc(&req->task->io_uring->inflight_tracked);
1477 struct io_uring_task *tctx = req->task->io_uring;
1495 if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
1736 static inline void io_put_task(struct task_struct *task, int nr)
1738 struct io_uring_task *tctx = task->io_uring;
1740 if (likely(task == current)) {
1746 put_task_struct_many(task, nr);
1768 static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
1770 struct io_uring_task *tctx = task->io_uring;
1776 put_task_struct_many(task, refs);
1788 * If we're in ring overflow flush mode, or in task cancel mode,
1863 io_put_task(req->task, 1);
2043 io_put_task(req->task, 1);
2227 /* relaxed read is enough as only the task itself sets ->in_idle */
2234 struct task_struct *tsk = req->task;
2295 /* req->task == current here, checking PF_EXITING is safe */
2296 if (likely(!(req->task->flags & PF_EXITING)))
2341 struct task_struct *task;
2350 rb->task = NULL;
2358 if (rb->task)
2359 io_put_task(rb->task, rb->task_refs);
2368 if (req->task != rb->task) {
2369 if (rb->task)
2370 io_put_task(rb->task, rb->task_refs);
2371 rb->task = req->task;
2723 * not in the original thread group (or in task context).
2725 if (!same_thread_group(req->task, current) || !in_task())
2894 * in sq thread task context or in io worker task context. If
2895 * current task context is sq thread, we don't need to check
5484 /* req->task == current here, checking PF_EXITING is safe */
5485 if (unlikely(req->task->flags & PF_EXITING))
6388 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
6390 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
6436 struct io_uring_task *tctx = node->task->io_uring;
6735 struct io_uring_task *tctx = req->task->io_uring;
7005 if (!(req->task->flags & PF_EXITING))
7178 req->task = current;
7627 * the task, and the next invocation will do it.
7671 * can take into account that the task is waiting for IO - turns out
7876 /* As we may drop ->uring_lock, other task may have started quiesce */
8143 /* fall through for EPERM case, setup new sqd/task */
8636 struct task_struct *task)
8657 data.task = task;
8667 static int io_uring_alloc_task_context(struct task_struct *task,
8683 tctx->io_wq = io_init_wq_offload(ctx, task);
8695 task->io_uring = tctx;
9538 * a task that needs to finish task_work to make this loop
9540 * cause a stuck task backtrace, and hence a potential panic
9560 /* don't spin on a single task if cancellation failed */
9562 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
9565 wake_up_process(node->task);
9650 struct task_struct *task;
9659 return io_match_task_safe(req, cancel->task, cancel->all);
9663 struct task_struct *task, bool cancel_all)
9670 if (io_match_task_safe(de->req, task, cancel_all)) {
9696 struct io_uring_task *tctx = node->task->io_uring;
9713 struct task_struct *task,
9716 struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
9717 struct io_uring_task *tctx = task ? task->io_uring : NULL;
9723 if (!task) {
9728 * it's fine as the task is in exit/exec.
9745 ret |= io_cancel_defer_files(ctx, task, cancel_all);
9746 ret |= io_poll_remove_all(ctx, task, cancel_all);
9747 ret |= io_kill_timeouts(ctx, task, cancel_all);
9748 if (task)
9782 node->task = current;
9800 * Note that this task has used io_uring. We use it for cancelation purposes.
9812 * Remove this io_uring_file -> task mapping.
9825 WARN_ON_ONCE(current != node->task);
9865 * Find any io_uring ctx that this task has registered or done IO on, and cancel
9895 /* sqpoll task will cancel all its requests */
10246 req->task->task_works != NULL);
10851 /* that's it for SQPOLL, only the SQPOLL task creates requests */
10857 struct io_uring_task *tctx = node->task->io_uring;
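The matches above cluster around one idea: each request records the task_struct that submitted it (req->task), and completions hand those task references back in batches. The io_put_task() fragments show the shape of it: when the completing thread is the owning task (task == current), the drop is folded into a plain per-task counter; only a foreign thread pays the atomic put_task_struct_many() cost. Below is a minimal userspace analogue of that batching pattern, offered as a sketch only. None of the names (task_ctx, put_task_refs, flush_cached_refs) are kernel API; they are hypothetical stand-ins for the pattern the listing suggests.

/*
 * Hypothetical userspace analogue of the pattern in the io_put_task()
 * and rb->task matches above. Nothing here is kernel API: task_ctx,
 * put_task_refs() and flush_cached_refs() are illustrative names only.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

struct task_ctx {
	atomic_int refs;	/* shared reference count */
	thrd_t owner;		/* thread that owns this context */
	int cached_refs;	/* drops batched by the owner, no atomics */
};

/* Drop @nr references; the owning thread just counts them locally. */
static void put_task_refs(struct task_ctx *t, int nr)
{
	if (thrd_equal(thrd_current(), t->owner))
		t->cached_refs += nr;		/* fast path: plain add */
	else
		atomic_fetch_sub(&t->refs, nr);	/* slow path: atomic */
}

/* The owner settles its batched drops with a single atomic op. */
static void flush_cached_refs(struct task_ctx *t)
{
	if (t->cached_refs) {
		atomic_fetch_sub(&t->refs, t->cached_refs);
		t->cached_refs = 0;
	}
}

int main(void)
{
	struct task_ctx t = { .owner = thrd_current() };

	atomic_store(&t.refs, 100);
	for (int i = 0; i < 10; i++)
		put_task_refs(&t, 1);	/* ten drops, zero atomic ops */
	flush_cached_refs(&t);		/* one atomic op covers all ten */

	printf("refs now %d\n", atomic_load(&t.refs));	/* prints 90 */
	return 0;
}

The same trade-off is visible in the request-batch matches (rb->task, rb->task_refs): requests with the same owner accumulate a single count, so the batch pays the reference drop once rather than once per request.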