Lines Matching defs:data

35  * from data shared between the kernel and application. This is done both
37 * data that the application could potentially modify, it remains stable.
120 * This data is shared with the application through the mmap at offsets
184 * ordered with any other data.
331 /* const or read-mostly hot data */
345 /* submission data */
428 /* slow path rsrc auxiliary data, used by update/register */
859 /* use only after cleaning per-op data, see io_clean_op() */
863 /* opcode allocated if it needs to store data for async defer */
927 /* size of async data needed, if any */
3515 * queue a task_work based retry of the operation, attempting to copy the data
6056 struct io_timeout_data *data = container_of(timer,
6058 struct io_kiocb *req = data->req;
6111 static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
6113 switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
6157 struct io_timeout_data *data;
6163 data = req->async_data;
6165 hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
6166 data->timer.function = io_timeout_fn;
6167 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
6244 struct io_timeout_data *data;
6270 data = req->async_data;
6271 data->req = req;
6272 data->flags = flags;
6274 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
6278 data->mode = io_translate_timeout_mode(flags);
6279 hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
6297 struct io_timeout_data *data = req->async_data;
6338 data->timer.function = io_timeout_fn;
6339 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
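
Taken together, the timeout matches above (io_timeout_fn, io_timeout_get_clock(), and the hrtimer_init()/hrtimer_start() pairs) suggest how a timeout request arms its timer from the per-opcode async data. The following is an illustrative sketch, not the file's verbatim code: io_timeout_arm() is a hypothetical helper name, and struct io_timeout_data is assumed to carry only the fields visible in the matches (req, flags, ts, mode, timer).

/*
 * Sketch: arm the timeout using the async data allocated for the request.
 * The clock is chosen from the IORING_TIMEOUT_CLOCK_MASK bits in ->flags
 * via io_timeout_get_clock(), as shown in the matched switch statement.
 */
static void io_timeout_arm(struct io_kiocb *req)
{
	struct io_timeout_data *data = req->async_data;

	data->req = req;
	data->mode = io_translate_timeout_mode(data->flags);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
	data->timer.function = io_timeout_fn;	/* runs when the timer expires */
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
}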
6349 static bool io_cancel_cb(struct io_wq_work *work, void *data)
6352 struct io_cancel_data *cd = data;
6360 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
6367 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
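
The io_cancel_cb()/io_wq_cancel_cb() pair shows the usual io-wq cancellation pattern: a struct io_cancel_data built on the stack is passed through the opaque void *data argument, and the callback decides per work item whether it matches. A plausible reconstruction of the matcher, assuming the request is recovered from the work item with the conventional container_of() idiom:

static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	/* assumed layout: the io_wq_work is embedded in the request */
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	/* match on the owning ring and the user_data cookie from the SQE */
	return req->ctx == cd->ctx && req->user_data == cd->user_data;
}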
6473 up.data = req->rsrc_update.arg;
7016 struct io_timeout_data *data = container_of(timer,
7018 struct io_kiocb *prev, *req = data->req;
7054 struct io_timeout_data *data = req->async_data;
7056 data->timer.function = io_link_timeout_fn;
7057 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
7058 data->mode);
7341 * write new data to them.
7513 static int io_sq_thread(void *data)
7515 struct io_sq_data *sqd = data;
7872 static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx)
7877 if (data->quiesce)
7880 data->quiesce = true;
7885 io_rsrc_node_switch(ctx, data);
7888 if (atomic_dec_and_test(&data->refs))
7892 ret = wait_for_completion_interruptible(&data->done);
7895 if (atomic_read(&data->refs) > 0) {
7906 atomic_inc(&data->refs);
7907 /* wait for all works potentially completing data->done */
7909 reinit_completion(&data->done);
7914 data->quiesce = false;
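
The io_rsrc_ref_quiesce() matches outline a refcount-plus-completion quiesce: mark the data as quiescing, switch to a fresh rsrc node, drop the initial reference, and sleep on data->done until all outstanding references are gone, restoring the reference and re-initialising the completion whenever the wait is interrupted. A condensed reconstruction assuming that structure (uring_lock handling, work flushing, and error propagation omitted):

static int io_rsrc_quiesce_sketch(struct io_rsrc_data *data,
				  struct io_ring_ctx *ctx)
{
	if (data->quiesce)
		return -ENXIO;			/* someone else is already quiescing */

	data->quiesce = true;
	do {
		io_rsrc_node_switch(ctx, data);	/* retire the current rsrc node */
		if (atomic_dec_and_test(&data->refs))
			break;			/* no references left to wait for */
		if (!wait_for_completion_interruptible(&data->done))
			break;			/* woken by the final put */
		/* interrupted: restore our ref, rearm the completion, retry */
		atomic_inc(&data->refs);
		reinit_completion(&data->done);
	} while (1);
	data->quiesce = false;
	return 0;
}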
7919 static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
7924 return &data->tags[table_idx][off];
7927 static void io_rsrc_data_free(struct io_rsrc_data *data)
7929 size_t size = data->nr * sizeof(data->tags[0][0]);
7931 if (data->tags)
7932 io_free_page_table((void **)data->tags, size);
7933 kfree(data);
7940 struct io_rsrc_data *data;
7944 data = kzalloc(sizeof(*data), GFP_KERNEL);
7945 if (!data)
7947 data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
7948 if (!data->tags) {
7949 kfree(data);
7953 data->nr = nr;
7954 data->ctx = ctx;
7955 data->do_put = do_put;
7959 u64 *tag_slot = io_get_tag_slot(data, i);
7967 atomic_set(&data->refs, 1);
7968 init_completion(&data->done);
7969 *pdata = data;
7972 io_rsrc_data_free(data);
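
io_get_tag_slot() and io_rsrc_data_alloc() indicate that the per-resource tags are kept in a two-level page table: data->tags is a u64 ** filled by io_alloc_page_table(), so a tag index splits into a table index and an offset within one page-sized array. A sketch of that indexing, with the constants shown as assumptions for 4 KiB pages (512 u64 tags per table); the real values are derived from PAGE_SIZE:

/* assumed for illustration: ilog2(PAGE_SIZE / sizeof(u64)) with 4 KiB pages */
#define IO_RSRC_TAG_TABLE_SHIFT	9
#define IO_RSRC_TAG_TABLE_MASK	((1U << IO_RSRC_TAG_TABLE_SHIFT) - 1)

static u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
{
	unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;	/* slot inside one table page */
	unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;	/* which table page */

	return &data->tags[table_idx][off];
}

The matching free path sizes the table as data->nr * sizeof(data->tags[0][0]) (line 7929), consistent with nr counting the total number of u64 tag slots.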
8457 static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
8460 u64 *tag_slot = io_get_tag_slot(data, idx);
8567 __s32 __user *fds = u64_to_user_ptr(up->data);
8568 struct io_rsrc_data *data = ctx->file_data;
8600 err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
8625 *io_get_tag_slot(data, i) = tag;
8631 io_rsrc_node_switch(ctx, data);
8639 struct io_wq_data data;
8656 data.hash = hash;
8657 data.task = task;
8658 data.free_work = io_wq_free_work;
8659 data.do_work = io_wq_submit_work;
8664 return io_wq_create(concurrency, &data);
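
The io_wq_data block shows the descriptor handed to io_wq_create(): shared hashing state, the task the workers act on behalf of, and the two work callbacks. A hedged sketch of that setup; the helper name and the concurrency bound are illustrative assumptions, and hash allocation is omitted:

static struct io_wq *io_init_wq_sketch(struct io_ring_ctx *ctx,
				       struct task_struct *task,
				       struct io_wq_hash *hash)
{
	struct io_wq_data data;
	unsigned int concurrency;

	data.hash = hash;			/* shared state for hashed (serialised) work */
	data.task = task;			/* workers run on behalf of this task */
	data.free_work = io_wq_free_work;	/* called after a work item completes */
	data.do_work = io_wq_submit_work;	/* executes one work item */

	/* assumption: cap worker concurrency by queue depth or CPU count */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);
}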
9184 struct io_rsrc_data *data;
9195 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
9200 io_rsrc_data_free(data);
9211 if (!iov.iov_base && *io_get_tag_slot(data, i)) {
9224 ctx->buf_data = data;
9237 struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
9360 static void io_wait_rsrc_data(struct io_rsrc_data *data)
9362 if (data && !atomic_dec_and_test(&data->refs))
9363 wait_for_completion(&data->done);
9492 static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
9496 return req->ctx == data;
9654 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
9657 struct io_task_cancel *cancel = data;
10734 return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
10737 return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),