Lines Matching defs:iocb in fs/aio.c (each entry is prefixed with its line number in that file)
172 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
193 * NOTE! Each of the iocb union members has the file pointer
216 * If the aio_resfd field of the userspace iocb is not zero,
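
The fragments at lines 193 and 216 describe the aio_kiocb layout and the optional eventfd completion path. Below is a minimal userspace sketch of driving that path: it is my own illustration, not part of the source, calls the raw syscalls directly (glibc has no wrappers), uses an arbitrary file path, and trims error handling.

/* Sketch: submit one read and wait for completion through an eventfd. */
#include <linux/aio_abi.h>	/* struct iocb, IOCB_CMD_*, IOCB_FLAG_RESFD */
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	aio_context_t ctx = 0;
	char buf[4096];
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;
	uint64_t ticks;
	int efd = eventfd(0, 0);
	int fd = open("/etc/hostname", O_RDONLY);	/* illustrative path */

	if (efd < 0 || fd < 0 || syscall(__NR_io_setup, 8, &ctx) < 0)
		return 1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes     = fd;
	cb.aio_buf        = (uintptr_t)buf;
	cb.aio_nbytes     = sizeof(buf);
	cb.aio_offset     = 0;
	/* A non-zero aio_resfd plus IOCB_FLAG_RESFD asks the kernel to signal
	 * this eventfd on completion (the ki_eventfd path noted above). */
	cb.aio_flags = IOCB_FLAG_RESFD;
	cb.aio_resfd = efd;

	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
		return 1;

	read(efd, &ticks, sizeof(ticks));	/* blocks until completion is signalled */
	syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL);
	printf("res=%lld\n", (long long)ev.res);

	syscall(__NR_io_destroy, ctx);
	return 0;
}
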
566 void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
576 if (!(iocb->ki_flags & IOCB_AIO_RW))
579 req = container_of(iocb, struct aio_kiocb, rw);
1091 static inline void iocb_destroy(struct aio_kiocb *iocb)
1093 if (iocb->ki_eventfd)
1094 eventfd_ctx_put(iocb->ki_eventfd);
1095 if (iocb->ki_filp)
1096 fput(iocb->ki_filp);
1097 percpu_ref_put(&iocb->ki_ctx->reqs);
1098 kmem_cache_free(kiocb_cachep, iocb);
1102 * Called when the io request on the given iocb is complete.
1104 static void aio_complete(struct aio_kiocb *iocb)
1106 struct kioctx *ctx = iocb->ki_ctx;
1128 *event = iocb->ki_res;
1133 pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
1134 (void __user *)(unsigned long)iocb->ki_res.obj,
1135 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
1155 pr_debug("added to ring %p at [%u]\n", iocb, tail);
1162 if (iocb->ki_eventfd)
1163 eventfd_signal(iocb->ki_eventfd, 1);
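
The ki_res values copied into the ring by aio_complete() at line 1128 reach userspace unchanged as struct io_event. A small sketch of reading them back (my own illustration; drain_events is a hypothetical helper name):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/aio_abi.h>

/* Sketch: each io_event mirrors one ki_res (data, obj, res, res2). */
static void drain_events(aio_context_t ctx)
{
	struct io_event evs[8];
	long i, n = syscall(__NR_io_getevents, ctx, 1, 8, evs, NULL);

	for (i = 0; i < n; i++)
		printf("data=%#llx obj=%#llx res=%lld res2=%lld\n",
		       (unsigned long long)evs[i].data,
		       (unsigned long long)evs[i].obj,
		       (long long)evs[i].res, (long long)evs[i].res2);
}
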
1177 static inline void iocb_put(struct aio_kiocb *iocb)
1179 if (refcount_dec_and_test(&iocb->ki_refcnt)) {
1180 aio_complete(iocb);
1181 iocb_destroy(iocb);
1427 static void aio_remove_iocb(struct aio_kiocb *iocb)
1429 struct kioctx *ctx = iocb->ki_ctx;
1433 list_del(&iocb->ki_list);
1439 struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
1441 if (!list_empty_careful(&iocb->ki_list))
1442 aio_remove_iocb(iocb);
1456 iocb->ki_res.res = res;
1457 iocb->ki_res.res2 = res2;
1458 iocb_put(iocb);
1461 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
1467 req->ki_pos = iocb->aio_offset;
1469 if (iocb->aio_flags & IOCB_FLAG_RESFD)
1472 if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
1478 ret = ioprio_check_cap(iocb->aio_reqprio);
1484 req->ki_ioprio = iocb->aio_reqprio;
1488 ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
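
aio_prep_rw() is also where the per-request I/O priority (IOCB_FLAG_IOPRIO, aio_reqprio) and the RWF_* bits in aio_rw_flags are validated. A hedged userspace sketch of filling those fields, assuming the IOPRIO_* encoding from the <linux/ioprio.h> uapi header (older trees use the same bit layout without that header) and RWF_NOWAIT from <linux/fs.h>:

#include <linux/aio_abi.h>
#include <linux/ioprio.h>	/* assumption: recent uapi header */
#include <linux/fs.h>		/* RWF_NOWAIT, RWF_HIPRI, ... */

/* Sketch only: the priority is checked by ioprio_check_cap(), the rw flags
 * by kiocb_set_rw_flags(), both seen in aio_prep_rw() above. */
static void fill_prio_and_flags(struct iocb *cb)
{
	cb->aio_flags   |= IOCB_FLAG_IOPRIO;
	cb->aio_reqprio  = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
	cb->aio_rw_flags = RWF_NOWAIT;
}
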
1496 static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
1500 void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
1501 size_t len = iocb->aio_nbytes;
1532 static int aio_read(struct kiocb *req, const struct iocb *iocb,
1540 ret = aio_prep_rw(req, iocb);
1550 ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
1560 static int aio_write(struct kiocb *req, const struct iocb *iocb,
1568 ret = aio_prep_rw(req, iocb);
1578 ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
1603 struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
1604 const struct cred *old_cred = override_creds(iocb->fsync.creds);
1606 iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
1608 put_cred(iocb->fsync.creds);
1609 iocb_put(iocb);
1612 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1615 if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
1616 iocb->aio_rw_flags))
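
aio_fsync() at line 1612 rejects any request that sets aio_buf, aio_offset, aio_nbytes or aio_rw_flags, so only the opcode and the file descriptor matter. A minimal sketch (prep_async_fsync is a hypothetical helper, not part of the source):

#include <string.h>
#include <linux/aio_abi.h>

/* Sketch: build an async fsync/fdatasync request; all other fields stay zero. */
static void prep_async_fsync(struct iocb *cb, int fd, int datasync)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_lio_opcode = datasync ? IOCB_CMD_FDSYNC : IOCB_CMD_FSYNC;
	cb->aio_fildes = fd;
}
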
1635 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1637 iocb_put(iocb);
1688 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1690 struct kioctx *ctx = iocb->ki_ctx;
1697 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1723 list_del_init(&iocb->ki_list);
1724 iocb->ki_res.res = mangle_poll(mask);
1727 iocb_put(iocb);
1731 static int aio_poll_cancel(struct kiocb *iocb)
1733 struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
1752 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1774 spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1775 struct kioctx *ctx = iocb->ki_ctx;
1778 list_del(&iocb->ki_list);
1779 iocb->ki_res.res = mangle_poll(mask);
1780 if (iocb->ki_eventfd && eventfd_signal_count()) {
1781 iocb = NULL;
1786 if (iocb)
1787 iocb_put(iocb);
1832 struct aio_kiocb *iocb;
1851 pt->iocb->poll.head = head;
1852 add_wait_queue(head, &pt->iocb->poll.wait);
1855 static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1864 if ((u16)iocb->aio_buf != iocb->aio_buf)
1867 if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
1871 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1880 apt.iocb = aiocb;
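
For aio_poll() the requested event mask travels in aio_buf and must fit in 16 bits (the check at line 1864), while aio_offset, aio_nbytes and aio_rw_flags must be zero. A minimal sketch, assuming the usual poll(2) constants (prep_async_poll is a hypothetical helper):

#include <string.h>
#include <poll.h>
#include <linux/aio_abi.h>

/* Sketch: one-shot poll request; the kernel adds EPOLLERR | EPOLLHUP itself
 * and reports the resulting mask in io_event.res. */
static void prep_async_poll(struct iocb *cb, int fd)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_lio_opcode = IOCB_CMD_POLL;
	cb->aio_fildes = fd;
	cb->aio_buf = POLLIN;	/* requested events, low 16 bits only */
}
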
1930 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1931 struct iocb __user *user_iocb, struct aio_kiocb *req,
1934 req->ki_filp = fget(iocb->aio_fildes);
1938 if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1946 eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
1959 req->ki_res.data = iocb->aio_data;
1963 switch (iocb->aio_lio_opcode) {
1965 return aio_read(&req->rw, iocb, false, compat);
1967 return aio_write(&req->rw, iocb, false, compat);
1969 return aio_read(&req->rw, iocb, true, compat);
1971 return aio_write(&req->rw, iocb, true, compat);
1973 return aio_fsync(&req->fsync, iocb, false);
1975 return aio_fsync(&req->fsync, iocb, true);
1977 return aio_poll(req, iocb);
1979 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
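
The opcode switch above also covers the vectored variants (the calls with vectored set to true), for which aio_buf points at a struct iovec array and aio_nbytes is the number of entries rather than a byte count. A hedged sketch (prep_async_preadv is a hypothetical helper):

#include <string.h>
#include <stdint.h>
#include <sys/uio.h>
#include <linux/aio_abi.h>

/* Sketch: IOCB_CMD_PREADV, where aio_nbytes is the iovec count, not bytes. */
static void prep_async_preadv(struct iocb *cb, int fd,
			      struct iovec *iov, unsigned nr, long long off)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_lio_opcode = IOCB_CMD_PREADV;
	cb->aio_fildes = fd;
	cb->aio_buf    = (uintptr_t)iov;
	cb->aio_nbytes = nr;
	cb->aio_offset = off;
}
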
1984 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1988 struct iocb iocb;
1991 if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
1995 if (unlikely(iocb.aio_reserved2)) {
2002 (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
2003 (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
2004 ((ssize_t)iocb.aio_nbytes < 0)
2014 err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
2034 * specified by ctx_id is invalid, if nr is < 0, if the iocb at
2036 * is invalid for the file descriptor in the iocb. May fail with
2039 * iocb is invalid. May fail with -EAGAIN if insufficient resources
2044 struct iocb __user * __user *, iocbpp)
2066 struct iocb __user *user_iocb;
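
The error cases documented at lines 2034-2039 apply when nothing could be queued; otherwise io_submit() returns the number of iocbs it accepted, which may be fewer than nr. A small sketch of checking that (submit_all is a hypothetical helper):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/aio_abi.h>

/* Sketch: a short count means the later iocbs were not queued and may be
 * resubmitted; a negative return means not even the first one was. */
static long submit_all(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
	long done = syscall(__NR_io_submit, ctx, nr, iocbpp);

	if (done < 0)
		perror("io_submit");
	else if (done < nr)
		fprintf(stderr, "queued %ld of %ld\n", done, nr);
	return done;
}
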
2128 * Attempts to cancel an iocb previously passed to io_submit. If
2134 * invalid. May fail with -EAGAIN if the iocb specified was not
2137 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
2144 u64 obj = (u64)(unsigned long)iocb;
2146 if (unlikely(get_user(key, &iocb->aio_key)))
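
io_cancel() matches the request by the userspace iocb pointer (the obj value taken at line 2144) and re-reads aio_key from it, so the caller must pass the same, still-valid struct it gave to io_submit. A minimal sketch (cancel_one is a hypothetical helper):

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/aio_abi.h>

/* Sketch: ask the kernel to cancel a previously submitted request. */
static int cancel_one(aio_context_t ctx, struct iocb *cb)
{
	struct io_event ev;	/* result slot required by the ABI */

	return syscall(__NR_io_cancel, ctx, cb, &ev);
}
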