Lines matching refs:sqe. Every match below is a source line that references the io_uring submission queue entry (sqe); the leading numbers are line numbers in the searched file (apparently fs/io_uring.c, where these functions lived before the 5.19 move to io_uring/).

2959 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2971 kiocb->ki_pos = READ_ONCE(sqe->off);
2974 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2987 ioprio = READ_ONCE(sqe->ioprio);
3012 req->buf_index = READ_ONCE(sqe->buf_index);
3027 req->rw.addr = READ_ONCE(sqe->addr);
3028 req->rw.len = READ_ONCE(sqe->len);
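
Every field above is fetched with READ_ONCE(). The SQE sits in memory shared with the application (see the comment around line 7347 below), so each field must be loaded exactly once: a compiler that re-read a field after validation could see a value userspace changed in the meantime. A simplified sketch of what the macro boils down to for scalar types; the real kernel macro adds type checking, and typeof is a GCC/Clang extension:

    #include <stdio.h>

    /* Simplified sketch of the kernel's READ_ONCE() for scalars:
     * the volatile cast forces exactly one load, so the compiler
     * cannot quietly re-read a userspace-shared field later. */
    #define READ_ONCE_SKETCH(x) (*(const volatile typeof(x) *)&(x))

    int main(void)
    {
        unsigned long long off = 4096;         /* stands in for sqe->off */
        unsigned long long snap = READ_ONCE_SKETCH(off);

        printf("snapshot: %llu\n", snap);
        return 0;
    }
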
3503 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3507 return io_prep_rw(req, sqe, READ);
3729 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3733 return io_prep_rw(req, sqe, WRITE);
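
Taken together, these loads define the ABI for read/write submissions: the file offset in sqe->off, the buffer in sqe->addr, its size in sqe->len, plus ioprio, rw_flags and buf_index. A minimal sketch of the userspace side using the public liburing helpers (error handling trimmed for brevity):

    #include <fcntl.h>
    #include <liburing.h>
    #include <stdio.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        char buf[4096];
        int fd = open("/etc/hostname", O_RDONLY);

        io_uring_queue_init(8, &ring, 0);
        sqe = io_uring_get_sqe(&ring);
        /* Fills sqe->addr, sqe->len and sqe->off, the fields that
         * io_prep_rw() reads back out above. */
        io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);
        printf("read returned %d\n", cqe->res);  /* bytes or -errno */
        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
    }
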
3842 const struct io_uring_sqe *sqe)
3849 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
3854 ren->old_dfd = READ_ONCE(sqe->fd);
3855 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3856 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3857 ren->new_dfd = READ_ONCE(sqe->len);
3858 ren->flags = READ_ONCE(sqe->rename_flags);
3893 const struct io_uring_sqe *sqe)
3900 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
3901 sqe->splice_fd_in)
3906 un->dfd = READ_ONCE(sqe->fd);
3908 un->flags = READ_ONCE(sqe->unlink_flags);
3912 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3942 const struct io_uring_sqe *sqe)
3947 if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3948 sqe->buf_index || sqe->splice_fd_in))
3951 req->shutdown.how = READ_ONCE(sqe->len);
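
Nearly every prep helper in this listing opens with the same guard: any sqe field the opcode does not consume must be zero, otherwise the request fails with -EINVAL. That keeps unused fields free to gain meaning in later kernels without silently changing the behaviour of old binaries. A tiny userspace model of the pattern; the fake_sqe struct and function names are hypothetical, for illustration only:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical, trimmed stand-in for struct io_uring_sqe; the
     * field names mirror the ones tested in the checks above. */
    struct fake_sqe {
        unsigned long  addr;
        unsigned int   len;
        unsigned short ioprio;
        unsigned short buf_index;
        int            splice_fd_in;
    };

    /* Model of the guard: any field this opcode does not consume
     * must be zero, or the request is rejected outright. */
    static int fake_shutdown_prep(const struct fake_sqe *sqe)
    {
        if (sqe->ioprio || sqe->addr || sqe->buf_index || sqe->splice_fd_in)
            return -EINVAL;
        return 0;              /* only sqe->len (the "how") is used */
    }

    int main(void)
    {
        struct fake_sqe good = { .len = 2 };              /* SHUT_RDWR */
        struct fake_sqe bad  = { .len = 2, .ioprio = 1 }; /* stray field */

        printf("good=%d bad=%d\n",
               fake_shutdown_prep(&good), fake_shutdown_prep(&bad));
        return 0;
    }
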
3982 const struct io_uring_sqe *sqe)
3990 sp->len = READ_ONCE(sqe->len);
3991 sp->flags = READ_ONCE(sqe->splice_flags);
3994 sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
3999 const struct io_uring_sqe *sqe)
4001 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
4003 return __io_splice_prep(req, sqe);
4036 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4040 sp->off_in = READ_ONCE(sqe->splice_off_in);
4041 sp->off_out = READ_ONCE(sqe->off);
4042 return __io_splice_prep(req, sqe);
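
The splice loads show the two offsets riding in different sqe fields: off_in comes from sqe->splice_off_in and off_out from sqe->off (lines 4040-4041), while tee rejects both because it never seeks. A sketch with liburing's helper, which hides that asymmetry; it assumes an already-initialized ring, and queue_splice is an illustrative name:

    #include <liburing.h>

    /* Sketch: queue a splice from fd_in to fd_out. The helper stores
     * off_in in sqe->splice_off_in and off_out in sqe->off, matching
     * the READ_ONCE() loads in io_splice_prep(). An offset of -1
     * means "use the file's current position". */
    static int queue_splice(struct io_uring *ring, int fd_in, int fd_out,
                            unsigned int nbytes)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
            return -1;                 /* submission queue full */
        io_uring_prep_splice(sqe, fd_in, -1, fd_out, -1, nbytes, 0);
        return io_uring_submit(ring);
    }
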
4093 static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4099 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4100 sqe->splice_fd_in))
4103 req->sync.flags = READ_ONCE(sqe->fsync_flags);
4107 req->sync.off = READ_ONCE(sqe->off);
4108 req->sync.len = READ_ONCE(sqe->len);
4131 const struct io_uring_sqe *sqe)
4133 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
4134 sqe->splice_fd_in)
4139 req->sync.off = READ_ONCE(sqe->off);
4140 req->sync.len = READ_ONCE(sqe->addr);
4141 req->sync.mode = READ_ONCE(sqe->len);
4162 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4169 if (unlikely(sqe->ioprio || sqe->buf_index))
4178 req->open.dfd = READ_ONCE(sqe->fd);
4179 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
4187 req->open.file_slot = READ_ONCE(sqe->file_index);
4196 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4198 u64 mode = READ_ONCE(sqe->len);
4199 u64 flags = READ_ONCE(sqe->open_flags);
4202 return __io_openat_prep(req, sqe);
4205 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4211 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4212 len = READ_ONCE(sqe->len);
4221 return __io_openat_prep(req, sqe);
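
For plain openat the open mode travels in sqe->len and the flags in sqe->open_flags (lines 4198-4199); openat2 instead passes a struct open_how pointer in sqe->addr2 with its size in sqe->len. A sketch of the openat side, assuming an initialized ring (queue_openat is an illustrative name):

    #include <fcntl.h>
    #include <liburing.h>

    /* Sketch: queue an openat. liburing packs flags into
     * sqe->open_flags and mode into sqe->len, which is exactly
     * where io_openat_prep() reads them back out. */
    static int queue_openat(struct io_uring *ring, const char *path)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
            return -1;
        io_uring_prep_openat(sqe, AT_FDCWD, path, O_RDONLY, 0);
        return io_uring_submit(ring);
    }
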
4298 const struct io_uring_sqe *sqe)
4303 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
4304 sqe->splice_fd_in)
4307 tmp = READ_ONCE(sqe->fd);
4313 p->bgid = READ_ONCE(sqe->buf_group);
4370 const struct io_uring_sqe *sqe)
4376 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
4379 tmp = READ_ONCE(sqe->fd);
4383 p->addr = READ_ONCE(sqe->addr);
4384 p->len = READ_ONCE(sqe->len);
4396 p->bgid = READ_ONCE(sqe->buf_group);
4397 tmp = READ_ONCE(sqe->off);
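
io_provide_buffers_prep() is a good example of aggressive field reuse: the buffer count arrives in sqe->fd, the group ID in sqe->buf_group and the starting buffer ID in sqe->off. A liburing sketch, assuming an initialized ring (queue_provide_buffers is an illustrative name):

    #include <liburing.h>
    #include <stdlib.h>

    /* Sketch: hand the kernel a group of 8 provided buffers of 4 KiB
     * each. nr lands in sqe->fd, bgid in sqe->buf_group and the
     * starting bid in sqe->off, matching the loads above. */
    static int queue_provide_buffers(struct io_uring *ring)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        void *mem = malloc(8 * 4096);

        if (!sqe || !mem)
            return -1;
        io_uring_prep_provide_buffers(sqe, mem, 4096, 8,
                                      /*bgid=*/1, /*bid=*/0);
        return io_uring_submit(ring);
    }
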
4462 const struct io_uring_sqe *sqe)
4465 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
4470 req->epoll.epfd = READ_ONCE(sqe->fd);
4471 req->epoll.op = READ_ONCE(sqe->len);
4472 req->epoll.fd = READ_ONCE(sqe->off);
4477 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4508 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4511 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
4516 req->madvise.addr = READ_ONCE(sqe->addr);
4517 req->madvise.len = READ_ONCE(sqe->len);
4518 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4544 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4546 if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
4551 req->fadvise.offset = READ_ONCE(sqe->off);
4552 req->fadvise.len = READ_ONCE(sqe->len);
4553 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4580 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4584 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
4589 req->statx.dfd = READ_ONCE(sqe->fd);
4590 req->statx.mask = READ_ONCE(sqe->len);
4591 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
4592 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4593 req->statx.flags = READ_ONCE(sqe->statx_flags);
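
statx spreads its five arguments across sqe->fd, sqe->len (the mask), sqe->addr (the filename), sqe->addr2 (the output buffer) and sqe->statx_flags. A sketch, assuming an initialized ring (queue_statx is an illustrative name):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <liburing.h>
    #include <sys/stat.h>

    /* Sketch: queue a statx asking only for the file size. */
    static int queue_statx(struct io_uring *ring, const char *path,
                           struct statx *stx)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
            return -1;
        io_uring_prep_statx(sqe, AT_FDCWD, path, 0, STATX_SIZE, stx);
        return io_uring_submit(ring);
    }
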
4615 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4619 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4620 sqe->rw_flags || sqe->buf_index)
4625 req->close.fd = READ_ONCE(sqe->fd);
4626 req->close.file_slot = READ_ONCE(sqe->file_index);
4684 static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4690 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4691 sqe->splice_fd_in))
4694 req->sync.off = READ_ONCE(sqe->off);
4695 req->sync.len = READ_ONCE(sqe->len);
4696 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
4774 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4780 if (unlikely(sqe->addr2 || sqe->file_index))
4782 if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
4785 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4786 sr->len = READ_ONCE(sqe->len);
4787 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
5024 static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5030 if (unlikely(sqe->addr2 || sqe->file_index))
5032 if (unlikely(sqe->addr2 || sqe->file_index || sqe->ioprio))
5035 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
5036 sr->len = READ_ONCE(sqe->len);
5037 sr->bgid = READ_ONCE(sqe->buf_group);
5038 sr->msg_flags = READ_ONCE(sqe->msg_flags);
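
sendmsg and recvmsg share nearly identical prep; recvmsg additionally records a buffer-group ID (line 5037) so it can draw from provided buffers. The doubled guards at 4780/4782 and 5030/5032 look like two variants left by conditional compilation or a stable backport rather than intentional double-checking. A recvmsg sketch, assuming an initialized ring (queue_recvmsg is an illustrative name):

    #include <liburing.h>
    #include <sys/socket.h>

    /* Sketch: queue a recvmsg. The msghdr pointer travels in
     * sqe->addr and the flags in sqe->msg_flags, per the loads in
     * io_recvmsg_prep() above. */
    static int queue_recvmsg(struct io_uring *ring, int sockfd,
                             struct msghdr *msg)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
            return -1;
        io_uring_prep_recvmsg(sqe, sockfd, msg, 0);
        return io_uring_submit(ring);
    }
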
5191 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5197 if (sqe->ioprio || sqe->len || sqe->buf_index)
5200 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5201 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
5202 accept->flags = READ_ONCE(sqe->accept_flags);
5205 accept->file_slot = READ_ONCE(sqe->file_index);
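
accept carries the sockaddr pointer in sqe->addr, the addrlen pointer in sqe->addr2 and accept4()-style flags in sqe->accept_flags; sqe->file_index (line 5205) only matters when accepting directly into a fixed-file slot. A sketch, assuming an initialized ring and a listening socket (queue_accept is an illustrative name):

    #include <liburing.h>
    #include <sys/socket.h>

    /* Sketch: queue an accept on listen_fd. */
    static int queue_accept(struct io_uring *ring, int listen_fd,
                            struct sockaddr *addr, socklen_t *addrlen)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
            return -1;
        io_uring_prep_accept(sqe, listen_fd, addr, addrlen, SOCK_CLOEXEC);
        return io_uring_submit(ring);
    }
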
5262 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5268 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
5269 sqe->splice_fd_in)
5272 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
5273 conn->addr_len = READ_ONCE(sqe->addr2);
5326 static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5923 static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
5928 events = READ_ONCE(sqe->poll32_events);
5938 const struct io_uring_sqe *sqe)
5945 if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
5947 flags = READ_ONCE(sqe->len);
5955 upd->old_user_data = READ_ONCE(sqe->addr);
5959 upd->new_user_data = READ_ONCE(sqe->off);
5963 upd->events = io_poll_parse_events(sqe, flags);
5964 else if (sqe->poll32_events)
5970 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5977 if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)
5979 flags = READ_ONCE(sqe->len);
5984 poll->events = io_poll_parse_events(sqe, flags);
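
Both poll prep paths funnel the event mask through io_poll_parse_events(), which reads sqe->poll32_events (line 5928). A sketch of arming a single-shot poll, assuming an initialized ring (queue_poll is an illustrative name):

    #include <liburing.h>
    #include <poll.h>

    /* Sketch: queue a single-shot POLLIN poll; the mask ends up in
     * sqe->poll32_events for io_poll_parse_events() to read back. */
    static int queue_poll(struct io_uring *ring, int fd)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
            return -1;
        io_uring_prep_poll_add(sqe, fd, POLLIN);
        return io_uring_submit(ring);
    }
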
6172 const struct io_uring_sqe *sqe)
6180 if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
6184 tr->addr = READ_ONCE(sqe->addr);
6185 tr->flags = READ_ONCE(sqe->timeout_flags);
6193 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
6241 static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6246 u32 off = READ_ONCE(sqe->off);
6250 if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
6251 sqe->splice_fd_in)
6255 flags = READ_ONCE(sqe->timeout_flags);
6274 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
6304 * sqe->off holds how many events that need to occur for this
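
For timeouts, the timespec pointer rides in sqe->addr (copied in via get_timespec64() at 6274) and, as the comment at 6304 says, sqe->off is the number of completions to wait for before the timeout fires. A sketch of a pure 2-second timeout, assuming an initialized ring (queue_timeout is an illustrative name):

    #include <liburing.h>

    /* Sketch: fire a CQE after 2 seconds. count=0 means a pure
     * timer, not "after N other completions". */
    static int queue_timeout(struct io_uring *ring)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        static struct __kernel_timespec ts = { .tv_sec = 2 };

        if (!sqe)
            return -1;
        io_uring_prep_timeout(sqe, &ts, /*count=*/0, /*flags=*/0);
        return io_uring_submit(ring);
    }
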
6407 const struct io_uring_sqe *sqe)
6413 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
6414 sqe->splice_fd_in)
6417 req->cancel.addr = READ_ONCE(sqe->addr);
6451 const struct io_uring_sqe *sqe)
6455 if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
6458 req->rsrc_update.offset = READ_ONCE(sqe->off);
6459 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
6462 req->rsrc_update.arg = READ_ONCE(sqe->addr);
6490 static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
6498 return io_read_prep(req, sqe);
6502 return io_write_prep(req, sqe);
6504 return io_poll_add_prep(req, sqe);
6506 return io_poll_update_prep(req, sqe);
6508 return io_fsync_prep(req, sqe);
6510 return io_sfr_prep(req, sqe);
6513 return io_sendmsg_prep(req, sqe);
6516 return io_recvmsg_prep(req, sqe);
6518 return io_connect_prep(req, sqe);
6520 return io_timeout_prep(req, sqe, false);
6522 return io_timeout_remove_prep(req, sqe);
6524 return io_async_cancel_prep(req, sqe);
6526 return io_timeout_prep(req, sqe, true);
6528 return io_accept_prep(req, sqe);
6530 return io_fallocate_prep(req, sqe);
6532 return io_openat_prep(req, sqe);
6534 return io_close_prep(req, sqe);
6536 return io_rsrc_update_prep(req, sqe);
6538 return io_statx_prep(req, sqe);
6540 return io_fadvise_prep(req, sqe);
6542 return io_madvise_prep(req, sqe);
6544 return io_openat2_prep(req, sqe);
6546 return io_epoll_ctl_prep(req, sqe);
6548 return io_splice_prep(req, sqe);
6550 return io_provide_buffers_prep(req, sqe);
6552 return io_remove_buffers_prep(req, sqe);
6554 return io_tee_prep(req, sqe);
6556 return io_shutdown_prep(req, sqe);
6558 return io_renameat_prep(req, sqe);
6560 return io_unlinkat_prep(req, sqe);
7164 const struct io_uring_sqe *sqe)
7172 req->opcode = READ_ONCE(sqe->opcode);
7174 req->flags = sqe_flags = READ_ONCE(sqe->flags);
7175 req->user_data = READ_ONCE(sqe->user_data);
7194 personality = READ_ONCE(sqe->personality);
7215 req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
7227 const struct io_uring_sqe *sqe)
7233 ret = io_init_req(ctx, req, sqe);
7259 ret = io_req_prep(req, sqe);
7264 /* don't need @sqe from now on */
7272 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
7347 * Fetch an sqe, if one is available. Note this returns a pointer to memory
7350 * being a good citizen. If members of the sqe are validated and then later
7391 const struct io_uring_sqe *sqe;
7400 sqe = io_get_sqe(ctx);
7401 if (unlikely(!sqe)) {
7407 if (io_submit_sqe(ctx, req, sqe))
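
The tail of the listing is the submit loop: io_get_sqe() can fail when no entry is available, and once io_submit_sqe() has consumed the entry the sqe pointer must not be touched again (the comment at 7264 makes that explicit). A sketch of the same discipline from userspace: liburing's io_uring_get_sqe() returns NULL once the SQ ring is full, so the caller submits what is queued and retries:

    #include <liburing.h>
    #include <stdio.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;

        io_uring_queue_init(8, &ring, 0);
        for (int i = 0; i < 12; i++) {
            /* With an 8-entry SQ ring, the 9th get_sqe returns NULL;
             * submitting drains the ring and frees the slots. */
            while (!(sqe = io_uring_get_sqe(&ring)))
                io_uring_submit(&ring);
            io_uring_prep_nop(sqe);
        }
        io_uring_submit(&ring);
        printf("queued 12 nops through an 8-entry ring\n");
        io_uring_queue_exit(&ring);
        return 0;
    }
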