Lines matching references to ret in the io_uring read/write code (io_uring/rw.c)

80 int ret;
100 ret = ioprio_check_cap(ioprio);
101 if (ret)
102 return ret;
118 ret = io_iov_buffer_select_prep(req);
119 if (ret)
120 return ret;
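The matches at lines 100-102 and 118-120 show the prep path's early-return convention: each helper returns 0 on success or a negative errno, and the caller propagates the first failure immediately. Below is a minimal standalone sketch of that convention; check_ioprio(), prep_buffer_select() and prep_rw() are hypothetical stand-ins, not the kernel helpers.

#include <errno.h>

/* Hypothetical stand-ins for helpers like ioprio_check_cap() and
 * io_iov_buffer_select_prep(): return 0 on success, negative errno on failure. */
static int check_ioprio(int ioprio)      { return ioprio >= 0 ? 0 : -EINVAL; }
static int prep_buffer_select(void *req) { return req ? 0 : -EINVAL; }

static int prep_rw(void *req, int ioprio)
{
        int ret;

        ret = check_ioprio(ioprio);
        if (ret)
                return ret;             /* propagate the first negative errno */

        ret = prep_buffer_select(req);
        if (ret)
                return ret;

        return 0;                       /* request fully prepared */
}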
133 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
135 switch (ret) {
147 ret = -EINTR;
150 kiocb->ki_complete(kiocb, ret);
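Lines 133-150 outline io_rw_done(): a switch on ret decides whether to invoke the kiocb completion callback, with one branch remapping the result to -EINTR first. The sketch below shows that shape in a compilable form; the fake_kiocb type, the -EIOCBQUEUED case and the choice of -ERESTARTSYS as the remapped code are assumptions reconstructed around the visible fragments, and the two #define values are placeholders because those codes are kernel-internal.

#include <errno.h>
#include <sys/types.h>

/* Kernel-internal status codes, not in userspace <errno.h>; the numeric
 * values here are placeholders for this sketch only. */
#define EIOCBQUEUED     529
#define ERESTARTSYS     512

struct fake_kiocb {
        void (*ki_complete)(struct fake_kiocb *kiocb, ssize_t ret);
};

static void fake_rw_done(struct fake_kiocb *kiocb, ssize_t ret)
{
        switch (ret) {
        case -EIOCBQUEUED:
                /* queued for asynchronous completion: the callback fires later */
                break;
        case -ERESTARTSYS:
                /* a syscall restart cannot be honoured at this point, so the
                 * request is failed with -EINTR instead (assumption: other
                 * -ERESTART* codes would be treated the same way) */
                ret = -EINTR;
                /* fall through */
        default:
                kiocb->ki_complete(kiocb, ret);
        }
}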
336 static int kiocb_done(struct io_kiocb *req, ssize_t ret,
340 unsigned final_ret = io_fixup_rw_res(req, ret);
342 if (ret >= 0 && req->flags & REQ_F_CUR_POS)
344 if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
345 if (!__io_complete_rw_common(req, ret)) {
356 io_rw_done(&rw->kiocb, ret);
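The kiocb_done() matches at lines 336-356 show a completion helper that fixes up the result, touches the file position only when the request succeeded and carries REQ_F_CUR_POS, and then either completes inline or falls back to io_rw_done(). A rough sketch of that decision flow follows; fake_req and the flag value are placeholders, and because the actual position update is not visible in the matches, the += below is only illustrative.

#include <stdbool.h>
#include <sys/types.h>

#define REQ_F_CUR_POS   (1u << 0)       /* placeholder bit for this sketch */

struct fake_req {
        unsigned int    flags;
        off_t           pos;            /* file position owned by the request */
};

/* Advance the position only on success, then report whether the completion
 * can be posted inline or must go through the kiocb callback path. */
static bool fake_kiocb_done(struct fake_req *req, ssize_t ret, bool can_complete_inline)
{
        if (ret >= 0 && (req->flags & REQ_F_CUR_POS))
                req->pos += ret;        /* illustrative: a non-negative ret is the byte count */

        return ret >= 0 && can_complete_inline;
}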
379 ssize_t ret;
382 ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
383 if (ret)
384 return ERR_PTR(ret);
401 ret = import_ubuf(ddir, buf, sqe_len, iter);
402 if (ret)
403 return ERR_PTR(ret);
408 ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
410 if (unlikely(ret < 0))
411 return ERR_PTR(ret);
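The import matches at lines 379-411 show the other error-reporting style used here: a helper fills ret with a negative errno and the caller converts it into an error pointer with ERR_PTR(), so a single pointer return can carry either the imported buffer description or a failure. A userspace sketch of that convention, with err_ptr(), is_err() and import_buffer() as hypothetical stand-ins:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers: a small negative
 * errno is folded into an invalid pointer value. */
#define MAX_ERRNO 4095
static void *err_ptr(long err)       { return (void *)err; }
static int   is_err(const void *ptr) { return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO; }

/* Same shape as the matches above: whichever import strategy runs (fixed
 * buffer, single user buffer, or iovec array), a nonzero ret becomes an
 * error pointer. */
static void *import_buffer(void *buf, size_t len, int vectored)
{
        int ret;

        if (!vectored)
                ret = (buf && len) ? 0 : -EFAULT;       /* stand-in for import_ubuf() */
        else
                ret = buf ? 0 : -EFAULT;                /* stand-in for __import_iovec() */

        if (ret)
                return err_ptr(ret);
        return buf;
}

A caller would test the returned pointer with is_err() before dereferencing it.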
440 ssize_t ret = 0;
478 if (!ret)
479 ret = nr;
482 ret += nr;
495 return ret;
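The matches at lines 440-495 show a result being accumulated across multiple transfers: the first transfer seeds ret and later ones are added to it, so an error is reported only when nothing has been transferred yet. The standalone loop below mirrors that accumulation pattern with plain pread(); read_segments() is a hypothetical name, not a kernel function.

#include <errno.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

/* The first failed or short transfer ends the loop; an error is returned
 * only if no bytes were moved at all, otherwise ret holds the running total. */
static ssize_t read_segments(int fd, const struct iovec *iov, int cnt, off_t pos)
{
        ssize_t ret = 0;

        for (int i = 0; i < cnt; i++) {
                ssize_t nr = pread(fd, iov[i].iov_base, iov[i].iov_len, pos);

                if (nr < 0) {
                        if (!ret)
                                ret = -errno;   /* nothing transferred yet: report the error */
                        break;
                }
                if (!ret)
                        ret = nr;               /* first segment: start the total */
                else
                        ret += nr;              /* later segments: accumulate */
                pos += nr;
                if ((size_t)nr != iov[i].iov_len)
                        break;                  /* short transfer: stop early */
        }
        return ret;
}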
550 int ret;
556 ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
557 if (unlikely(ret < 0))
558 return ret;
675 int ret;
684 ret = kiocb_set_rw_flags(kiocb, rw->flags);
685 if (unlikely(ret))
686 return ret;
723 ssize_t ret, ret2;
727 ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
728 if (unlikely(ret < 0))
729 return ret;
739 ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
740 if (unlikely(ret < 0))
741 return ret;
752 ret = io_rw_init_file(req, FMODE_READ);
753 if (unlikely(ret)) {
755 return ret;
762 ret = io_setup_async_rw(req, iovec, s, true);
763 return ret ?: -EAGAIN;
773 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
774 if (unlikely(ret)) {
776 return ret;
779 ret = io_iter_do_read(rw, &s->iter);
781 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
792 ret = 0;
793 } else if (ret == -EIOCBQUEUED) {
797 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
813 ret = ret > 0 ? ret : ret2;
830 iov_iter_advance(&s->iter, ret);
833 io->bytes_done += ret;
849 ret = io_iter_do_read(rw, &s->iter);
850 if (ret == -EIOCBQUEUED)
855 } while (ret > 0);
860 return kiocb_done(req, ret, issue_flags);
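The io_read() matches at lines 723-860 trace how a single ret value steers the whole read: an import or setup failure returns immediately, -EAGAIN in nonblocking mode punts the request to async handling (lines 762-763), -EIOCBQUEUED means the completion will arrive later, and a short read feeds the retry loop at lines 830-855 that advances the iterator and keeps going while progress is made. The userspace sketch below captures only the -EAGAIN punt; queue_blocking_retry() is a hypothetical callback, not an io_uring API.

#include <errno.h>
#include <stdbool.h>
#include <sys/types.h>
#include <unistd.h>

/* Try a nonblocking read first; if it cannot make progress, stash the
 * request for a context that is allowed to block and report -EAGAIN,
 * echoing the "return ret ?: -EAGAIN" pattern at line 763. */
static ssize_t read_attempt(int fd, void *buf, size_t len, bool force_nonblock,
                            int (*queue_blocking_retry)(int fd))
{
        ssize_t ret = read(fd, buf, len);

        if (ret < 0)
                ret = -errno;

        if (ret == -EAGAIN && force_nonblock) {
                int err = queue_blocking_retry(fd);

                return err ? err : -EAGAIN;     /* setup error wins, else retry later */
        }
        return ret;     /* bytes read, 0 at EOF, or a terminal -errno */
}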
870 ssize_t ret, ret2;
874 ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
875 if (unlikely(ret < 0))
876 return ret;
884 ret = io_rw_init_file(req, FMODE_WRITE);
885 if (unlikely(ret)) {
887 return ret;
910 ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
911 if (unlikely(ret)) {
913 return ret;
958 ret = io_setup_async_rw(req, iovec, s, true);
966 return ret ? ret : -EAGAIN;
969 ret = kiocb_done(req, ret2, issue_flags);
973 ret = io_setup_async_rw(req, iovec, s, false);
974 if (!ret) {
979 return ret;
984 return ret;
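The io_write() matches at lines 870-984 keep two result variables: ret2 holds the outcome of the write itself (it is what reaches kiocb_done() at line 969), while ret tracks setup and fallback bookkeeping such as io_setup_async_rw() at lines 958-973, with the same "return ret ? ret : -EAGAIN" convention as the read side. A small sketch of that two-variable split; save_for_retry() is a hypothetical helper, not an io_uring function.

#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

/* ret2 carries the I/O result; ret carries any error from preserving the
 * request for a later blocking retry. */
static ssize_t write_attempt(int fd, const void *buf, size_t len,
                             int (*save_for_retry)(const void *buf, size_t len))
{
        ssize_t ret, ret2;

        ret2 = write(fd, buf, len);
        if (ret2 < 0)
                ret2 = -errno;

        if (ret2 == -EAGAIN) {
                ret = save_for_retry(buf, len);
                return ret ? ret : -EAGAIN;     /* setup error wins, else retry later */
        }
        return ret2;    /* bytes written or a terminal -errno */
}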
1012 int ret;
1026 ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
1031 ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
1033 if (unlikely(ret < 0))
1034 return ret;
1035 else if (ret)
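The tail of the listing (lines 1012-1035) is the polled-completion side: a request is polled through one of two per-file callbacks, uring_cmd_iopoll() for uring commands or the file's iopoll() hook for regular reads and writes, and afterwards a negative ret is an error while a positive ret is treated as a found completion. The dispatch below sketches that split with a hypothetical ops table; poll_ops and poll_one() are not io_uring names.

/* Hypothetical per-file callbacks standing in for uring_cmd_iopoll() and
 * iopoll(): both return <0 on error, 0 for "not done yet", >0 when the
 * request has completed. */
struct poll_ops {
        int (*cmd_iopoll)(void *req, unsigned int flags);
        int (*rw_iopoll)(void *req, unsigned int flags);
};

static int poll_one(const struct poll_ops *ops, void *req, int is_cmd,
                    unsigned int flags, int *nr_found)
{
        int ret;

        if (is_cmd)
                ret = ops->cmd_iopoll(req, flags);
        else
                ret = ops->rw_iopoll(req, flags);

        if (ret < 0)
                return ret;     /* polling itself failed */
        else if (ret)
                (*nr_found)++;  /* this request completed */
        return 0;
}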