
Searched refs:sqe (Results 1 - 25 of 115) sorted by relevance


/kernel/linux/linux-5.10/tools/io_uring/
liburing.h
   98: static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
  100:     sqe->user_data = (unsigned long) data;
  108: static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd, ...
  112:     memset(sqe, 0, sizeof(*sqe));
  113:     sqe->opcode = op;
  114:     sqe->fd = fd;
  115:     sqe->off = offset;
  116:     sqe->addr = (unsigned long) addr;
  117:     sqe ... (line truncated)
  120: io_uring_prep_readv(struct io_uring_sqe *sqe, int fd, const struct iovec *iovecs, unsigned nr_vecs, off_t offset)
  127: io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd, void *buf, unsigned nbytes, off_t offset)
  134: io_uring_prep_writev(struct io_uring_sqe *sqe, int fd, const struct iovec *iovecs, unsigned nr_vecs, off_t offset)
  141: io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd, const void *buf, unsigned nbytes, off_t offset)
  148: io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd, unsigned poll_mask)
  160: io_uring_prep_poll_remove(struct io_uring_sqe *sqe, void *user_data)
  168: io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd, unsigned fsync_flags)
  177: io_uring_prep_nop(struct io_uring_sqe *sqe)
  ... (more matches in this file)
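The helpers above are the in-tree mini copy of liburing: each prep call just fills fields of a zeroed struct io_uring_sqe. A minimal round trip using the stand-alone liburing library, which ships the same helpers; the file name and buffer size are arbitrary for illustration:

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
    struct io_uring ring;
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;
    char buf[4096];
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
    int fd = open("/etc/hostname", O_RDONLY);

    if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
        return 1;

    sqe = io_uring_get_sqe(&ring);            /* grab a free SQE */
    io_uring_prep_readv(sqe, fd, &iov, 1, 0); /* fills opcode/fd/addr/off */
    io_uring_sqe_set_data(sqe, buf);          /* round-trips via user_data */

    io_uring_submit(&ring);                   /* hand the SQE to the kernel */
    io_uring_wait_cqe(&ring, &cqe);           /* block for the completion */
    printf("read %d bytes into %p\n", cqe->res, io_uring_cqe_get_data(cqe));
    io_uring_cqe_seen(&ring, cqe);
    io_uring_queue_exit(&ring);
    return 0;
}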
io_uring-cp.c
   71:     struct io_uring_sqe *sqe;   (in queue_prepped)
   73:     sqe = io_uring_get_sqe(ring);
   74:     assert(sqe);
   77:     io_uring_prep_readv(sqe, infd, &data->iov, 1, data->offset);
   79:     io_uring_prep_writev(sqe, outfd, &data->iov, 1, data->offset);
   81:     io_uring_sqe_set_data(sqe, data);
   86:     struct io_uring_sqe *sqe;   (in queue_read)
   93:     sqe = io_uring_get_sqe(ring);
   94:     if (!sqe) {
  106:     io_uring_prep_readv(sqe, inf... (line truncated)
  ... (more matches in this file)
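queue_read() above hints at the one failure mode of io_uring_get_sqe(): it returns NULL once the SQ ring is full. A small hedged sketch of the usual recovery, assuming liburing:

#include <liburing.h>

/* Sketch: submit what is already queued to free SQ slots, then retry. */
static struct io_uring_sqe *get_sqe_retry(struct io_uring *ring)
{
    struct io_uring_sqe *sqe;

    while (!(sqe = io_uring_get_sqe(ring)))
        io_uring_submit(ring);  /* drain queued SQEs to make room */
    return sqe;
}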
io_uring-bench.c
  145:     struct io_uring_sqe *sqe = &s->sqes[index];   (in init_io)
  151:     sqe->opcode = IORING_OP_NOP;
  172:     sqe->flags = IOSQE_FIXED_FILE;
  173:     sqe->fd = f->fixed_fd;
  175:     sqe->flags = 0;
  176:     sqe->fd = f->real_fd;
  179:     sqe->opcode = IORING_OP_READ_FIXED;
  180:     sqe->addr = (unsigned long) s->iovecs[index].iov_base;
  181:     sqe->len = BS;
  182:     sqe ... (line truncated)
  ... (more matches in this file)
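The benchmark fills SQEs by hand, pairing IOSQE_FIXED_FILE with IORING_OP_READ_FIXED against pre-registered resources. A sketch of the same idea through liburing's registration API; the one-shot setup and single-buffer layout are illustrative, not the benchmark's code:

#include <liburing.h>

static int queue_fixed_read(struct io_uring *ring, int fd, void *buf, unsigned len)
{
    struct iovec iov = { .iov_base = buf, .iov_len = len };
    struct io_uring_sqe *sqe;

    /* One-time setup: pin the buffer and the fd into ring-local tables. */
    if (io_uring_register_files(ring, &fd, 1) < 0 ||
        io_uring_register_buffers(ring, &iov, 1) < 0)
        return -1;

    sqe = io_uring_get_sqe(ring);
    /* With IOSQE_FIXED_FILE the fd argument is an index into the
     * registered file table; the last 0 is the registered buffer index. */
    io_uring_prep_read_fixed(sqe, 0, buf, len, 0, 0);
    sqe->flags |= IOSQE_FIXED_FILE;
    return io_uring_submit(ring);
}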
/kernel/linux/linux-6.6/io_uring/
fs.c
   50: int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   55:     if (sqe->buf_index || sqe->splice_fd_in)
   60:     ren->old_dfd = READ_ONCE(sqe->fd);
   61:     oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
   62:     newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
   63:     ren->new_dfd = READ_ONCE(sqe->len);
   64:     ren->flags = READ_ONCE(sqe->rename_flags);
  104: int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  109:     if (sqe ... (line truncated)
  154: io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  198: io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  241: io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  ... (more matches in this file)
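io_renameat_prep() shows how one SQE carries two paths: fd holds the old dirfd, addr/addr2 the old and new path pointers, len the new dirfd. Recent liburing wraps exactly this packing; a hedged userspace sketch:

#include <liburing.h>
#include <fcntl.h>      /* AT_FDCWD */

static int queue_rename(struct io_uring *ring, const char *from, const char *to)
{
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

    if (!sqe)
        return -1;
    /* (olddfd, oldpath, newdfd, newpath, flags), mirroring renameat2(2) */
    io_uring_prep_renameat(sqe, AT_FDCWD, from, AT_FDCWD, to, 0);
    return io_uring_submit(ring);
}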
sync.c
   25: int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   29:     if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
   32:     sync->off = READ_ONCE(sqe->off);
   33:     sync->len = READ_ONCE(sqe->len);
   34:     sync->flags = READ_ONCE(sqe->sync_range_flags);
   53: int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   57:     if (unlikely(sqe->addr || sqe ... (line truncated)
   85: io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  ... (more matches in this file)
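io_sfr_prep() and io_fsync_prep() consume only off, len and a flags field and reject everything else. The matching submissions from userspace, sketched with liburing:

#include <liburing.h>

static void queue_flushes(struct io_uring *ring, int fd)
{
    struct io_uring_sqe *sqe;

    sqe = io_uring_get_sqe(ring);
    io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC); /* like fdatasync(2) */

    sqe = io_uring_get_sqe(ring);
    /* flush only bytes [0, 4096), like sync_file_range(2) */
    io_uring_prep_sync_file_range(sqe, fd, 4096, 0, 0);

    io_uring_submit(ring);
}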
advise.c
   31: int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   36:     if (sqe->buf_index || sqe->off || sqe->splice_fd_in)
   39:     ma->addr = READ_ONCE(sqe->addr);
   40:     ma->len = READ_ONCE(sqe->len);
   41:     ma->advice = READ_ONCE(sqe->fadvise_advice);
   77: int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   81:     if (sqe->buf_index || sqe ... (line truncated)
  ... (more matches in this file)
openclose.c
   45: static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   51:     if (unlikely(sqe->buf_index))
   60:     open->dfd = READ_ONCE(sqe->fd);
   61:     fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
   69:     open->file_slot = READ_ONCE(sqe->file_index);
   80: int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   83:     u64 mode = READ_ONCE(sqe->len);
   84:     u64 flags = READ_ONCE(sqe->open_flags);
   87:     return __io_openat_prep(req, sqe);
   90: int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  202: io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  ... (more matches in this file)
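__io_openat_prep() pulls dfd, the path pointer, mode and open_flags back out of the SQE, and the completion result is the new descriptor. A minimal liburing sketch:

#include <liburing.h>
#include <fcntl.h>

static void queue_open(struct io_uring *ring, const char *path)
{
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

    io_uring_prep_openat(sqe, AT_FDCWD, path, O_RDONLY, 0);
    io_uring_submit(ring);
    /* on completion, cqe->res is the new fd (or -errno) */
}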
xattr.c
   45:     const struct io_uring_sqe *sqe)   (in __io_getxattr_prep)
   56:     name = u64_to_user_ptr(READ_ONCE(sqe->addr));
   57:     ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));
   58:     ix->ctx.size = READ_ONCE(sqe->len);
   59:     ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
   82: int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   84:     return __io_getxattr_prep(req, sqe);
   87: int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   93:     ret = __io_getxattr_prep(req, sqe);
   97:     path = u64_to_user_ptr(READ_ONCE(sqe ... (line truncated)
   44: __io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  148: __io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  180: io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  201: io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  ... (more matches in this file)
splice.c
   27:     const struct io_uring_sqe *sqe)   (in __io_splice_prep)
   32:     sp->len = READ_ONCE(sqe->len);
   33:     sp->flags = READ_ONCE(sqe->splice_flags);
   36:     sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
   41: int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   43:     if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
   45:     return __io_splice_prep(req, sqe);
   79: int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   83:     sp->off_in = READ_ONCE(sqe ... (line truncated)
   26: __io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  ... (more matches in this file)
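io_splice_prep() reads both offsets plus splice_fd_in, so a single SQE can describe a full splice(2). A hedged liburing sketch moving bytes between two pipes (offsets of -1 mean "no offset", as pipes require):

#include <liburing.h>

static void queue_splice(struct io_uring *ring, int pipe_in, int pipe_out,
                         unsigned int len)
{
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

    /* (fd_in, off_in, fd_out, off_out, nbytes, splice_flags) */
    io_uring_prep_splice(sqe, pipe_in, -1, pipe_out, -1, len, 0);
    io_uring_submit(ring);
}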
statx.c
   23: int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   28:     if (sqe->buf_index || sqe->splice_fd_in)
   33:     sx->dfd = READ_ONCE(sqe->fd);
   34:     sx->mask = READ_ONCE(sqe->len);
   35:     path = u64_to_user_ptr(READ_ONCE(sqe->addr));
   36:     sx->buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
   37:     sx->flags = READ_ONCE(sqe->statx_flags);
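io_statx_prep() maps fd to dfd, addr to the path, addr2 to the output buffer and len to the request mask. Sketched from userspace with liburing (STATX_SIZE chosen arbitrarily):

#define _GNU_SOURCE     /* struct statx / STATX_* in glibc */
#include <liburing.h>
#include <fcntl.h>
#include <sys/stat.h>

static void queue_statx(struct io_uring *ring, const char *path,
                        struct statx *stx)
{
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

    io_uring_prep_statx(sqe, AT_FDCWD, path, 0, STATX_SIZE, stx);
    io_uring_submit(ring);
    /* *stx is filled by the time the CQE is reaped */
}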
epoll.c
   24: int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   28:     if (sqe->buf_index || sqe->splice_fd_in)
   31:     epoll->epfd = READ_ONCE(sqe->fd);
   32:     epoll->op = READ_ONCE(sqe->len);
   33:     epoll->fd = READ_ONCE(sqe->off);
   38:     ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
net.h
   34: int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   39: int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   46: int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   52: int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   55: int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   59: int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
   64: int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
uring_cmd.c
   81:     memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));   (in io_uring_cmd_prep_async)
   82:     ioucmd->sqe = req->async_data;
   86: int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   90:     if (sqe->__pad1)
   93:     ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
  101:     req->buf_index = READ_ONCE(sqe->buf_index);
  108:     ioucmd->sqe = sqe;
  109:     ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
  178:     switch (cmd->sqe ... (line truncated)   (in io_uring_cmd_sock)
  ... (more matches in this file)
net.c
   94: int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
   98:     if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
   99:                  sqe->buf_index || sqe->splice_fd_in))
  102:     shutdown->how = READ_ONCE(sqe->len);
  371: int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  376:     if (READ_ONCE(sqe->__pad3[0]))
  378:     sr->addr = u64_to_user_ptr(READ_ONCE(sqe ... (line truncated)
  615: io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 1035: io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 1347: io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 1439: io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 1501: io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  ... (more matches in this file)
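io_shutdown_prep() is the strictest of these preps: every field except fd and len (which carries the "how" argument) must be zero. From userspace, assuming liburing's wrapper:

#include <liburing.h>
#include <sys/socket.h>

static void queue_shutdown(struct io_uring *ring, int sockfd)
{
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

    io_uring_prep_shutdown(sqe, sockfd, SHUT_WR); /* len carries SHUT_WR */
    io_uring_submit(ring);
}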
fdinfo.c
   76:      * we may get imprecise sqe and cqe info if uring is actively running   (in io_uring_show_fdinfo)
   93:     struct io_uring_sqe *sqe;
  101:         sqe = &ctx->sq_sqes[sq_idx << sq_shift];
  105:                sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd,
  106:                sqe->flags, (unsigned long long) sqe->off,
  107:                (unsigned long long) sqe->addr, sqe->rw_flags,
  108:                sqe ... (line truncated)
  ... (more matches in this file)
/kernel/linux/linux-5.10/drivers/infiniband/sw/siw/
siw_qp.c
  275:     wqe->sqe.flags = 0;   (in siw_qp_mpa_rts)
  276:     wqe->sqe.num_sge = 1;
  277:     wqe->sqe.sge[0].length = 0;
  278:     wqe->sqe.sge[0].laddr = 0;
  279:     wqe->sqe.sge[0].lkey = 0;
  284:     wqe->sqe.rkey = 1;
  285:     wqe->sqe.raddr = 0;
  289:     wqe->sqe.opcode = SIW_OP_WRITE;
  293:     wqe->sqe.opcode = SIW_OP_READ;
  300:     siw_read_to_orq(rreq, &wqe->sqe);
  869: siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
  883:     struct siw_sqe *sqe;   (in siw_activate_tx_from_sq)
 1060: siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes, enum siw_wc_status status)
 1190:     struct siw_sqe *sqe;   (in siw_sq_flush)
  ... (more matches in this file)
siw_qp_tx.c
   43:     struct siw_sge *sge = &wqe->sqe.sge[0];   (in siw_try_1seg)
   46:     if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
   53:     memcpy(paddr, &wqe->sqe.sge[1], bytes);
  137:     c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);   (in siw_qp_prepare_tx)
  139:         cpu_to_be64(wqe->sqe.sge[0].laddr);
  140:     c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);
  141:     c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr);
  142:     c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length);
  185:     c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);
  197:     c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe ... (line truncated)
  929: siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
  ... (more matches in this file)
siw_verbs.c
  640:     struct siw_sqe *sqe)   (in siw_copy_inline_sgl)
  643:     void *kbuf = &sqe->sge[1];
  646:     sqe->sge[0].laddr = (uintptr_t)kbuf;
  647:     sqe->sge[0].lkey = 0;
  665:     sqe->sge[0].length = bytes > 0 ? bytes : 0;
  666:     sqe->num_sge = bytes > 0 ? 1 : 0;
  678:     struct siw_sqe sqe = {};   (in siw_sq_flush_wr)
  682:         sqe.opcode = SIW_OP_WRITE;
  685:         sqe.opcode = SIW_OP_READ;
  688:         sqe ... (line truncated)
  639: siw_copy_inline_sgl(const struct ib_send_wr *core_wr, struct siw_sqe *sqe)
  815:     struct siw_sqe *sqe = &qp->sendq[idx];   (in siw_post_send)
  ... (more matches in this file)
/kernel/linux/linux-6.6/drivers/infiniband/sw/siw/
siw_qp.c
  278:     wqe->sqe.flags = 0;   (in siw_qp_mpa_rts)
  279:     wqe->sqe.num_sge = 1;
  280:     wqe->sqe.sge[0].length = 0;
  281:     wqe->sqe.sge[0].laddr = 0;
  282:     wqe->sqe.sge[0].lkey = 0;
  287:     wqe->sqe.rkey = 1;
  288:     wqe->sqe.raddr = 0;
  292:     wqe->sqe.opcode = SIW_OP_WRITE;
  296:     wqe->sqe.opcode = SIW_OP_READ;
  303:     siw_read_to_orq(rreq, &wqe->sqe);
  872: siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
  886:     struct siw_sqe *sqe;   (in siw_activate_tx_from_sq)
 1063: siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes, enum siw_wc_status status)
 1193:     struct siw_sqe *sqe;   (in siw_sq_flush)
  ... (more matches in this file)
siw_qp_tx.c
   43:     struct siw_sge *sge = &wqe->sqe.sge[0];   (in siw_try_1seg)
   46:     if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
   53:     memcpy(paddr, &wqe->sqe.sge[1], bytes);
  136:     c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);   (in siw_qp_prepare_tx)
  138:         cpu_to_be64(wqe->sqe.sge[0].laddr);
  139:     c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);
  140:     c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr);
  141:     c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length);
  184:     c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);
  196:     c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe ... (line truncated)
  938: siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
  ... (more matches in this file)
siw_verbs.c
  644:     struct siw_sqe *sqe)   (in siw_copy_inline_sgl)
  647:     void *kbuf = &sqe->sge[1];
  650:     sqe->sge[0].laddr = (uintptr_t)kbuf;
  651:     sqe->sge[0].lkey = 0;
  669:     sqe->sge[0].length = max(bytes, 0);
  670:     sqe->num_sge = bytes > 0 ? 1 : 0;
  682:     struct siw_sqe sqe = {};   (in siw_sq_flush_wr)
  686:         sqe.opcode = SIW_OP_WRITE;
  689:         sqe.opcode = SIW_OP_READ;
  692:         sqe ... (line truncated)
  643: siw_copy_inline_sgl(const struct ib_send_wr *core_wr, struct siw_sqe *sqe)
  819:     struct siw_sqe *sqe = &qp->sendq[idx];   (in siw_post_send)
  ... (more matches in this file)
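siw_copy_inline_sgl() above is the software-RDMA provider's half of an inline send: the payload is copied into sge[1] of the kernel SQE itself. What triggers it from userspace is an ibverbs post carrying IBV_SEND_INLINE; a hedged sketch (queue-pair setup omitted):

#include <infiniband/verbs.h>
#include <stdint.h>
#include <string.h>

static int post_inline_send(struct ibv_qp *qp, const char *msg)
{
    struct ibv_sge sge = {
        .addr   = (uintptr_t)msg,
        .length = (uint32_t)strlen(msg),
        .lkey   = 0,            /* ignored for inline data */
    };
    struct ibv_send_wr wr = {
        .wr_id      = 1,
        .sg_list    = &sge,
        .num_sge    = 1,
        .opcode     = IBV_WR_SEND,
        /* INLINE: provider copies the bytes at post time, so the buffer
         * may be reused immediately after ibv_post_send() returns */
        .send_flags = IBV_SEND_INLINE | IBV_SEND_SIGNALED,
    }, *bad = NULL;

    return ibv_post_send(qp, &wr, &bad);
}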
/kernel/linux/linux-6.6/drivers/crypto/hisilicon/zip/
zip_crypto.c
  101:     void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
  102:     void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
  103:     void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
  104:     void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
  105:     void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
  106:     void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
  107:     u32 (*get_tag)(struct hisi_zip_sqe *sqe);
  108:     u32 (*get_status)(struct hisi_zip_sqe *sqe);
  109:     u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
  263: static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
  271: hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
  281: hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
  290: hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
  299: hisi_zip_fill_tag_v1(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
  304: hisi_zip_fill_tag_v2(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
  309: hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
  318: hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe, u8 req_type, struct hisi_zip_req *req)
  386: hisi_zip_get_tag_v1(struct hisi_zip_sqe *sqe)
  391: hisi_zip_get_tag_v2(struct hisi_zip_sqe *sqe)
  396: hisi_zip_get_status(struct hisi_zip_sqe *sqe)
  401: hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
  413:     struct hisi_zip_sqe *sqe = data;   (in hisi_zip_acomp_cb)
  ... (more matches in this file)
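The struct of function pointers at lines 101-109 is how one request path serves both the v1 and v2 hardware SQE layouts: tag fill/extract differ per generation, so they are dispatched through the table. A stripped-down, self-contained illustration of that pattern; all names here are hypothetical, not the driver's:

#include <stdio.h>

/* Hypothetical stand-ins for two hardware generations that keep the
 * tag in different words of the SQE. */
struct sqe_ops {
    void (*fill_tag)(unsigned int *sqe_words, unsigned int tag);
    unsigned int (*get_tag)(const unsigned int *sqe_words);
};

static void fill_tag_v1(unsigned int *w, unsigned int tag) { w[0] = tag; }
static unsigned int get_tag_v1(const unsigned int *w)      { return w[0]; }
static void fill_tag_v2(unsigned int *w, unsigned int tag) { w[1] = tag; }
static unsigned int get_tag_v2(const unsigned int *w)      { return w[1]; }

static const struct sqe_ops ops_v1 = { fill_tag_v1, get_tag_v1 };
static const struct sqe_ops ops_v2 = { fill_tag_v2, get_tag_v2 };

int main(void)
{
    unsigned int sqe[4] = {0};
    const struct sqe_ops *ops = &ops_v2;  /* chosen once, per device version */

    ops->fill_tag(sqe, 42);               /* submit path */
    printf("tag=%u\n", ops->get_tag(sqe)); /* completion callback path */
    return 0;
}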
/kernel/linux/linux-6.6/tools/testing/selftests/net/
io_uring_zerocopy_tx.c
  271: static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd, ...
  274:     memset(sqe, 0, sizeof(*sqe));
  275:     sqe->opcode = (__u8) IORING_OP_SEND;
  276:     sqe->fd = sockfd;
  277:     sqe->addr = (unsigned long) buf;
  278:     sqe->len = len;
  279:     sqe->msg_flags = (__u32) flags;
  282: static inline void io_uring_prep_sendzc(struct io_uring_sqe *sqe, int sockfd, ...
  286:     io_uring_prep_send(sqe, sockf... (line truncated)
  360:     struct io_uring_sqe *sqe;   (in do_tx)
  ... (more matches in this file)
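The selftest rolls its own prep helpers because IORING_OP_SEND_ZC postdates the in-tree mini liburing. Current liburing ships a wrapper; a hedged sketch (a zero-copy send posts a second, notification CQE flagged IORING_CQE_F_NOTIF once the kernel is done with the buffer, so the buffer must stay live until then):

#include <liburing.h>

static void queue_send_zc(struct io_uring *ring, int sockfd,
                          const void *buf, size_t len)
{
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

    /* (sockfd, buf, len, msg flags, zero-copy flags) */
    io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
    io_uring_submit(ring);
}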
/kernel/linux/linux-5.10/drivers/crypto/hisilicon/zip/
zip_crypto.c
  122: static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
  126:     val = (sqe->dw9) & ~HZIP_BUF_TYPE_M;
  128:     sqe->dw9 = val;
  131: static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
  133:     sqe->tag = tag;
  136: static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
  140:     memset(sqe, 0, sizeof(struct hisi_zip_sqe));
  142:     sqe->input_data_length = slen - sskip;
  143:     sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip);
  144:     sqe ... (line truncated)
  352:     struct hisi_zip_sqe *sqe = data;   (in hisi_zip_acomp_cb)
  ... (more matches in this file)
/kernel/linux/linux-5.10/io_uring/
io_uring.c
 2959: static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, ...
 2971:     kiocb->ki_pos = READ_ONCE(sqe->off);
 2974:     ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
 2987:     ioprio = READ_ONCE(sqe->ioprio);
 3012:     req->buf_index = READ_ONCE(sqe->buf_index);
 3027:     req->rw.addr = READ_ONCE(sqe->addr);
 3028:     req->rw.len = READ_ONCE(sqe->len);
 3503: static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 3507:     return io_prep_rw(req, sqe, READ);
 3729: static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 3841: io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 3892: io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 3941: io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 3981: __io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 3998: io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4036: io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4093: io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4130: io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4162: __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4196: io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4205: io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4297: io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4369: io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4461: io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4508: io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4544: io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4580: io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4615: io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4684: io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 4774: io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 5024: io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 5191: io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 5262: io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 5923: io_poll_parse_events(const struct io_uring_sqe *sqe, unsigned int flags)
 5937: io_poll_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 5970: io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 6171: io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 6241: io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, bool is_timeout_link)
 6406: io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 6450: io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 6490: io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 7391:     const struct io_uring_sqe *sqe;
  ... (more matches in this file)
