Lines Matching defs:work

887 	struct io_wq_work		work;
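
The member above is the thread running through most of the matches that follow: each io_uring request (struct io_kiocb) embeds an io_wq_work item, so a callback that is handed only the work pointer can get back to its request with container_of(). As a rough userspace illustration of that idiom (the struct and function names below are invented for the sketch, not taken from the kernel):

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's container_of() macro. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct wq_work {                   /* plays the role of io_wq_work */
        struct wq_work *next;
        unsigned int flags;
    };

    struct request {                   /* plays the role of io_kiocb */
        int opcode;
        struct wq_work work;           /* embedded, as on line 887 */
    };

    /* A work callback only sees the work item, yet can recover the
     * request that embeds it, exactly like the container_of() calls
     * in the matches below. */
    static void handle(struct wq_work *work)
    {
        struct request *req = container_of(work, struct request, work);

        printf("opcode=%d flags=%u\n", req->opcode, work->flags);
    }

    int main(void)
    {
        struct request r = { .opcode = 7, .work = { .flags = 1 } };

        handle(&r.work);
        return 0;
    }
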
1100 static void io_rsrc_put_work(struct work_struct *work);
1283 static void io_fallback_req_func(struct work_struct *work)
1285 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
1286 fallback_work.work);
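
io_fallback_req_func (1283) recovers the ring context from a work_struct that is itself nested inside a delayed-work member, hence the two-level designator fallback_work.work. container_of() handles that because offsetof() accepts a full member path. A minimal sketch of the same trick, with invented type names standing in for the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };

    struct delayed_work {              /* wraps a plain work item */
        struct work_struct work;
        long timeout;
    };

    struct ring_ctx {                  /* plays the role of io_ring_ctx */
        int id;
        struct delayed_work fallback_work;
    };

    /* The callback receives the inner work_struct, and container_of()
     * steps over the nested member path fallback_work.work in one go,
     * as on lines 1285-1286 (and again for rsrc_put_work.work at 8362). */
    static void fallback_fn(struct work_struct *work)
    {
        struct ring_ctx *ctx =
            container_of(work, struct ring_ctx, fallback_work.work);

        printf("ctx id=%d\n", ctx->id);
    }

    int main(void)
    {
        struct ring_ctx ctx = { .id = 42 };

        fallback_fn(&ctx.fallback_work.work);
        return 0;
    }
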
1442 req->work.list.next = NULL;
1443 req->work.flags = 0;
1445 req->work.flags |= IO_WQ_WORK_CONCURRENT;
1449 io_wq_hash_work(&req->work, file_inode(req->file));
1452 req->work.flags |= IO_WQ_WORK_UNBOUND;
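
Lines 1442-1452 appear to come from the async prep step: the embedded work item is reset, optionally flagged as concurrent, hashed against the file's inode so work touching the same regular file is serialized, or marked unbound when it may block for a long time. A simplified sketch of that classification, with flag names and fields invented for the example (the kernel consults the opcode table and request flags instead):

    #include <stddef.h>

    enum {
        WQ_WORK_CONCURRENT = 1 << 0,   /* stands in for IO_WQ_WORK_CONCURRENT */
        WQ_WORK_UNBOUND    = 1 << 1,   /* stands in for IO_WQ_WORK_UNBOUND */
    };

    struct wq_work {
        struct wq_work *next;
        unsigned int flags;
        unsigned long hash_key;        /* 0 means "not hashed" */
    };

    struct request {
        int force_async;
        int is_regular_file;
        unsigned long inode_id;        /* identity of the backing file */
        struct wq_work work;
    };

    /* Reset and classify the work item before punting it to a worker. */
    static void prep_async_work(struct request *req)
    {
        req->work.next = NULL;
        req->work.flags = 0;
        req->work.hash_key = 0;

        if (req->force_async)
            req->work.flags |= WQ_WORK_CONCURRENT;

        if (req->is_regular_file)
            req->work.hash_key = req->inode_id;    /* serialize per file */
        else
            req->work.flags |= WQ_WORK_UNBOUND;    /* may block for long */
    }

    int main(void)
    {
        struct request req = { .is_regular_file = 1, .inode_id = 0xabc };

        prep_async_work(&req);
        return (int)req.work.flags;    /* 0: hashed, not unbound */
    }
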
1485 /* init ->work of the whole link before punting */
1491 * canceled. That will make io-wq go through the usual work cancel
1496 req->work.flags |= IO_WQ_WORK_CANCEL;
1498 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1499 &req->work, req->flags);
1500 io_wq_enqueue(tctx->io_wq, &req->work);
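
Lines 1485-1500 look like the enqueue side: the ->work of every request in a link chain is initialized before the head is punted, a request whose task is dying gets pre-marked IO_WQ_WORK_CANCEL so io-wq simply runs its usual cancel path on it, and the work is finally handed over with io_wq_enqueue(). A condensed sketch of that ordering over a toy FIFO (queue and helper names are invented; only the ordering mirrors the listing):

    #include <stddef.h>

    enum { WQ_WORK_CANCEL = 1 << 2 };  /* stands in for IO_WQ_WORK_CANCEL */

    struct wq_work { struct wq_work *next; unsigned int flags; };

    struct request {
        struct request *link;          /* next request in the chain */
        struct wq_work work;
        int task_is_dying;
    };

    struct work_queue { struct wq_work *head, *tail; };

    static void prep_async_work(struct request *req)
    {
        req->work.next = NULL;
        req->work.flags = 0;
    }

    /* "init ->work of the whole link before punting" (line 1485): every
     * element is prepped up front so workers never see a half-set-up chain. */
    static void prep_async_link(struct request *req)
    {
        for (; req; req = req->link)
            prep_async_work(req);
    }

    static void wq_enqueue(struct work_queue *wq, struct wq_work *work)
    {
        if (wq->tail)
            wq->tail->next = work;
        else
            wq->head = work;
        wq->tail = work;
    }

    static void queue_async_work(struct work_queue *wq, struct request *req)
    {
        prep_async_link(req);
        if (req->task_is_dying)        /* let the cancel machinery reap it */
            req->work.flags |= WQ_WORK_CANCEL;
        wq_enqueue(wq, &req->work);
    }

    int main(void)
    {
        struct work_queue wq = { 0 };
        struct request tail = { 0 };
        struct request head = { .link = &tail };

        queue_async_work(&wq, &head);
        return wq.head == &head.work ? 0 : 1;
    }
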
2482 * notify work that needs processing.
2715 * If ref is dying, we might be running poll reap from the exit work.
6349 static bool io_cancel_cb(struct io_wq_work *work, void *data)
6351 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6871 static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
6873 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6876 return req ? &req->work : NULL;
6879 static void io_wq_submit_work(struct io_wq_work *work)
6881 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6895 if (work->flags & IO_WQ_WORK_CANCEL)
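
io_wq_free_work (6871) and io_wq_submit_work (6879) appear to be the two per-item hooks handed to io-wq: submit runs the request, bailing out early when IO_WQ_WORK_CANCEL was set (6895), while free drops the finished request and returns the next linked work or NULL (6876) so a worker can continue a chain without going back through the queue. A hedged sketch of how a worker loop might drive two such hooks (all names here are illustrative, not io-wq's internal API):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    enum { WQ_WORK_CANCEL = 1 << 2 };

    struct wq_work { struct wq_work *next; unsigned int flags; };

    struct request {
        int opcode;
        struct request *link;          /* next request in the chain */
        struct wq_work work;
    };

    /* "submit" hook: run the request, or fail it fast if it was already
     * marked canceled (compare line 6895 above). */
    static void submit_work(struct wq_work *work)
    {
        struct request *req = container_of(work, struct request, work);

        if (work->flags & WQ_WORK_CANCEL) {
            printf("req %d: canceled\n", req->opcode);
            return;
        }
        printf("req %d: executed\n", req->opcode);
    }

    /* "free" hook: return the next link's work item, or NULL when the
     * chain is done; the kernel version also drops the finished request
     * (compare line 6876). */
    static struct wq_work *free_work(struct wq_work *work)
    {
        struct request *req = container_of(work, struct request, work);
        struct request *next = req->link;

        return next ? &next->work : NULL;
    }

    int main(void)
    {
        struct request second = { .opcode = 2 };
        struct request first  = { .opcode = 1, .link = &second };
        struct wq_work *work = &first.work;

        while (work) {                 /* what a worker loop boils down to */
            submit_work(work);
            work = free_work(work);
        }
        return 0;
    }
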
7028 * race with the completion of the linked work.
8357 static void io_rsrc_put_work(struct work_struct *work)
8362 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
9478 struct io_tctx_exit *work;
9480 work = container_of(cb, struct io_tctx_exit, task_work);
9485 * work cancelation off the exec path.
9488 io_uring_del_tctx_node((unsigned long)work->ctx);
9489 complete(&work->completion);
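
The io_tctx_exit block (9478-9489) shows the same recovery trick applied to a task-work callback head: the handler gets a generic callback pointer, finds the exit descriptor that embeds it, tears down the per-task node, and finally completes a completion so whoever queued the exit work can stop waiting. A compact single-threaded analogue, with a plain flag standing in for the kernel's struct completion and all names invented for the sketch:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct callback_head {             /* generic task-work hook */
        struct callback_head *next;
        void (*func)(struct callback_head *);
    };

    struct tctx_exit {                 /* plays the role of io_tctx_exit */
        struct callback_head task_work;
        int *ctx_node;                 /* resource to tear down */
        int done;                      /* stand-in for struct completion */
    };

    /* Runs in the target task's context: recover the descriptor from the
     * callback head (line 9480), drop the node (9488), then let the
     * waiter go (9489). */
    static void tctx_exit_cb(struct callback_head *cb)
    {
        struct tctx_exit *work = container_of(cb, struct tctx_exit, task_work);

        *work->ctx_node = 0;           /* "delete the tctx node" */
        work->done = 1;                /* "complete(&work->completion)" */
    }

    int main(void)
    {
        int node = 1;
        struct tctx_exit work = {
            .task_work = { .func = tctx_exit_cb },
            .ctx_node = &node,
        };

        work.task_work.func(&work.task_work);   /* the task work being run */
        printf("node=%d done=%d\n", node, work.done);
        return 0;
    }
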
9492 static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
9494 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
9499 static void io_ring_exit_work(struct work_struct *work)
9501 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
9551 * completion_lock, see io_req_task_submit(). Apart from other work,
9654 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
9656 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
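
The three cancel helpers in this listing (io_cancel_cb at 6349, io_cancel_ctx_cb at 9492, io_cancel_task_cb at 9654) share one shape: a predicate handed each pending work item plus an opaque data pointer, deciding whether that item belongs to the request, ring, or task being canceled. A sketch of that matcher pattern over a simple work list (queue and helper names are invented; only the callback signature mirrors the matches):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct wq_work { struct wq_work *next; unsigned int flags; };

    struct request {
        int ring_id;                   /* which ring the request belongs to */
        struct wq_work work;
    };

    typedef bool (*work_match_fn)(struct wq_work *work, void *data);

    /* Matcher in the style of io_cancel_ctx_cb: select everything that
     * belongs to one ring context. */
    static bool match_ring(struct wq_work *work, void *data)
    {
        struct request *req = container_of(work, struct request, work);

        return req->ring_id == *(int *)data;
    }

    /* Count pending items the matcher selects; a real work queue would pull
     * them off the list and run its cancel path instead. */
    static int cancel_matching(struct wq_work *head, work_match_fn fn, void *data)
    {
        int n = 0;

        for (; head; head = head->next)
            if (fn(head, data))
                n++;
        return n;
    }

    int main(void)
    {
        struct request a = { .ring_id = 1 };
        struct request b = { .ring_id = 2 };
        int target = 1;

        a.work.next = &b.work;
        printf("matched %d\n", cancel_matching(&a.work, match_ring, &target));
        return 0;
    }
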
10136 * work, which can reduce cpu usage and uring_lock contention.