Lines Matching defs:ret

1721 bool ret = true;
1727 ret = __io_cqring_overflow_flush(ctx, false);
1732 return ret;
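
The two fragments above (lines 1721-1732) are the locked wrapper around __io_cqring_overflow_flush(). A condensed reconstruction of that wrapper, assuming the mainline fs/io_uring.c context; the check_cq_overflow bit and the IOPOLL locking rule are not visible in the listing and are assumptions:

    static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
    {
        bool ret = true;

        if (test_bit(0, &ctx->check_cq_overflow)) {
            /* IOPOLL rings serialize CQ posting on uring_lock, not completion_lock */
            if (ctx->flags & IORING_SETUP_IOPOLL)
                mutex_lock(&ctx->uring_lock);
            ret = __io_cqring_overflow_flush(ctx, false);
            if (ctx->flags & IORING_SETUP_IOPOLL)
                mutex_unlock(&ctx->uring_lock);
        }
        return ret;
    }
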
1987 int ret, i;
1994 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
2001 if (unlikely(ret <= 0)) {
2005 ret = 1;
2008 for (i = 0; i < ret; i++)
2010 state->free_reqs = ret;
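
Lines 1987-2010 are the request-allocation slow path: one bulk slab allocation refills the submit-state cache, with a single-object fallback if the bulk call returns nothing, and ret doubles as the number of requests obtained. A sketch of that path; state->reqs and io_preinit_req() are not in the listing and are assumed from mainline:

    /* slow path: refill the submit-state request cache with one bulk call */
    ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
                                state->reqs);
    /*
     * Bulk allocation is all-or-nothing; if it fails, fall back to a
     * single allocation so forward progress is still possible.
     */
    if (unlikely(ret <= 0)) {
        state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
        if (!state->reqs[0])
            return NULL;
        ret = 1;
    }
    for (i = 0; i < ret; i++)
        io_preinit_req(state->reqs[i], ctx);    /* assumed init helper */
    state->free_reqs = ret;
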
2302 static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2304 req->result = ret;
2561 int ret;
2575 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2576 if (unlikely(ret < 0))
2577 return ret;
2578 else if (ret)
2627 int ret = 0;
2667 ret = io_do_iopoll(ctx, &nr_events, min);
2670 ret = -EINTR;
2673 } while (!ret && nr_events < min && !need_resched());
2676 return ret;
2966 int ret;
2974 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2975 if (unlikely(ret))
2976 return ret;
2989 ret = ioprio_check_cap(ioprio);
2990 if (ret)
2991 return ret;
3032 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
3034 switch (ret) {
3046 ret = -EINTR;
3049 kiocb->ki_complete(kiocb, ret, 0);
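
Lines 3032-3049 are io_rw_done(), which collapses the -ERESTART* family into -EINTR before invoking ->ki_complete(): earlier SQEs from the same submission may already be in flight, so the syscall cannot simply be restarted. Reconstructed from the listed lines; the exact set of case labels is an assumption based on mainline:

    static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
    {
        switch (ret) {
        case -EIOCBQUEUED:
            /* async completion will call ->ki_complete() later */
            break;
        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
        case -ERESTARTNOHAND:
        case -ERESTART_RESTARTBLOCK:
            /* can't restart the syscall; fail this IO with EINTR instead */
            ret = -EINTR;
            fallthrough;
        default:
            kiocb->ki_complete(kiocb, ret, 0);
        }
    }
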
3070 static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
3077 if (ret >= 0 && (kiocb->ki_complete == io_complete_rw)) {
3078 if (!__io_complete_rw_common(req, ret)) {
3085 io_fixup_rw_res(req, ret),
3089 io_rw_done(kiocb, ret);
3100 ret = io_fixup_rw_res(req, ret);
3104 __io_req_complete(req, issue_flags, ret, cflags);
3107 __io_req_complete(req, issue_flags, ret, cflags);
3322 ssize_t ret;
3341 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3343 return ret;
3347 ret = io_iov_buffer_select(req, *iovec, needs_lock);
3348 if (!ret)
3351 return ret;
3371 ssize_t ret = 0;
3406 if (!ret)
3407 ret = nr;
3410 ret += nr;
3423 return ret;
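
Lines 3371-3423 come from the per-segment fallback used for files without ->read_iter/->write_iter: each segment is issued with the plain file op, ret accumulates the bytes transferred, and an error is only propagated if nothing has been transferred yet. A minimal sketch of that accumulation; do_one_segment() and seg_len are hypothetical stand-ins for the per-segment call and its requested length:

    ssize_t ret = 0;

    while (iov_iter_count(iter)) {
        ssize_t nr, seg_len;

        /* issue one segment via the non-vectored ->read()/->write() op */
        nr = do_one_segment(req, iter, &seg_len);   /* hypothetical helper */
        if (nr < 0) {
            if (!ret)
                ret = nr;   /* no progress yet: report the error itself */
            break;          /* otherwise return the short byte count */
        }
        ret += nr;
        if (nr != seg_len)
            break;          /* short transfer, stop early */
    }
    return ret;
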
3486 int ret;
3491 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
3492 if (unlikely(ret < 0))
3493 return ret;
3605 ssize_t ret, ret2;
3619 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3620 if (ret < 0)
3621 return ret;
3635 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3636 return ret ?: -EAGAIN;
3641 ret = rw_verify_area(READ, req->file, ppos, req->result);
3642 if (unlikely(ret)) {
3644 return ret;
3647 ret = io_iter_do_read(req, iter);
3649 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
3657 ret = 0;
3658 } else if (ret == -EIOCBQUEUED) {
3660 } else if (ret <= 0 || ret == req->result || !force_nonblock ||
3694 iov_iter_advance(iter, ret);
3697 rw->bytes_done += ret;
3713 ret = io_iter_do_read(req, iter);
3714 if (ret == -EIOCBQUEUED)
3719 } while (ret > 0);
3721 kiocb_done(kiocb, ret, issue_flags);
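
Lines 3605-3721 are the buffered-read path: a nonblocking attempt first, and on -EAGAIN or a partial read the request switches to async state, records what was already read in rw->bytes_done, and retries. A heavily condensed sketch of that control flow; io_rw_should_retry() and the page-waitqueue arming it implies are assumptions beyond the listed lines:

    ret = io_iter_do_read(req, iter);

    if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
        ret = 0;                /* nonblocking attempt blocked; retry below */
    } else if (ret == -EIOCBQUEUED) {
        return 0;               /* completion arrives via ->ki_complete() */
    } else if (ret <= 0 || ret == req->result || !force_nonblock) {
        goto done;              /* read it all, failed, or blocking attempt */
    }

    /* keep per-request iterator state so a retry resumes where it left off */
    ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
    if (ret2)
        return ret2;
    rw = req->async_data;
    iter = &rw->iter;

    do {
        iov_iter_advance(iter, ret);
        rw->bytes_done += ret;          /* remember partial progress */
        if (!io_rw_should_retry(req))   /* assumed helper */
            return -EAGAIN;             /* punt to io-wq instead */
        ret = io_iter_do_read(req, iter);
        if (ret == -EIOCBQUEUED)
            return 0;
    } while (ret > 0);
done:
    kiocb_done(kiocb, ret, issue_flags);
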
3744 ssize_t ret, ret2;
3753 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3754 if (ret < 0)
3755 return ret;
3778 ret = rw_verify_area(WRITE, req->file, ppos, req->result);
3779 if (unlikely(ret))
3826 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
3827 if (!ret) {
3832 return ret;
3838 return ret;
3877 int ret;
3882 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3886 if (ret < 0)
3888 io_req_complete(req, ret);
3924 int ret;
3930 ret = do_rmdir(un->dfd, un->filename);
3932 ret = do_unlinkat(un->dfd, un->filename);
3935 if (ret < 0)
3937 io_req_complete(req, ret);
3962 int ret;
3967 sock = sock_from_file(req->file, &ret);
3969 return ret;
3971 ret = __sys_shutdown_sock(sock, req->shutdown.how);
3972 if (ret < 0)
3974 io_req_complete(req, ret);
4012 long ret = 0;
4020 ret = -EBADF;
4025 ret = do_tee(in, out, sp->len, flags);
4030 if (ret != sp->len)
4032 io_req_complete(req, ret);
4052 long ret = 0;
4060 ret = -EBADF;
4068 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
4073 if (ret != sp->len)
4075 io_req_complete(req, ret);
4115 int ret;
4121 ret = vfs_fsync_range(req->file, req->sync.off,
4124 if (ret < 0)
4126 io_req_complete(req, ret);
4147 int ret;
4152 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
4154 if (ret < 0)
4158 io_req_complete(req, ret);
4165 int ret;
4182 ret = PTR_ERR(req->open.filename);
4184 return ret;
4209 int ret;
4216 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
4218 if (ret)
4219 return ret;
4230 int ret;
4232 ret = build_open_flags(&req->open.how, &op);
4233 if (ret)
4251 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
4252 if (ret < 0)
4264 put_unused_fd(ret);
4266 ret = PTR_ERR(file);
4268 if (ret == -EAGAIN &&
4279 fd_install(ret, file);
4281 ret = io_install_fixed_file(req, file, issue_flags,
4286 if (ret < 0)
4288 __io_req_complete(req, issue_flags, ret, 0);
4349 int ret = 0;
4356 ret = -ENOENT;
4359 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
4360 if (ret < 0)
4364 __io_req_complete(req, issue_flags, ret, 0);
4437 int ret = 0;
4446 ret = io_add_buffers(p, &head);
4447 if (ret >= 0 && !list) {
4448 ret = xa_insert(&ctx->io_buffers, p->bgid, head,
4450 if (ret < 0)
4453 if (ret < 0)
4456 __io_req_complete(req, issue_flags, ret, 0);
4492 int ret;
4495 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4496 if (force_nonblock && ret == -EAGAIN)
4499 if (ret < 0)
4501 __io_req_complete(req, issue_flags, ret, 0);
4529 int ret;
4534 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
4535 if (ret < 0)
4537 io_req_complete(req, ret);
4560 int ret;
4573 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4574 if (ret < 0)
4576 __io_req_complete(req, issue_flags, ret, 0);
4601 int ret;
4606 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4609 if (ret < 0)
4611 io_req_complete(req, ret);
4639 int ret = -EBADF;
4642 ret = io_close_fixed(req, issue_flags);
4665 ret = __close_fd_get_file(close->fd, &file);
4667 if (ret < 0) {
4668 if (ret == -ENOENT)
4669 ret = -EBADF;
4674 ret = filp_close(file, current->files);
4676 if (ret < 0)
4680 __io_req_complete(req, issue_flags, ret, 0);
4702 int ret;
4708 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
4710 if (ret < 0)
4712 io_req_complete(req, ret);
4753 int ret;
4757 ret = sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
4761 return ret;
4766 int ret;
4768 ret = io_sendmsg_copy_hdr(req, req->async_data);
4769 if (!ret)
4771 return ret;
4806 int ret;
4808 sock = sock_from_file(req->file, &ret);
4810 return ret;
4814 ret = io_sendmsg_copy_hdr(req, &iomsg);
4815 if (ret)
4816 return ret;
4828 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
4830 if (ret < min_ret) {
4831 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
4833 if (ret == -ERESTARTSYS)
4834 ret = -EINTR;
4835 if (ret > 0 && io_net_retry(sock, flags)) {
4836 sr->done_io += ret;
4846 if (ret >= 0)
4847 ret += sr->done_io;
4849 ret = sr->done_io;
4850 __io_req_complete(req, issue_flags, ret, 0);
4862 int ret;
4864 sock = sock_from_file(req->file, &ret);
4866 return ret;
4868 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4869 if (unlikely(ret))
4870 return ret;
4884 ret = sock_sendmsg(sock, &msg);
4885 if (ret < min_ret) {
4886 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
4888 if (ret == -ERESTARTSYS)
4889 ret = -EINTR;
4890 if (ret > 0 && io_net_retry(sock, flags)) {
4891 sr->len -= ret;
4892 sr->buf += ret;
4893 sr->done_io += ret;
4899 if (ret >= 0)
4900 ret += sr->done_io;
4902 ret = sr->done_io;
4903 __io_req_complete(req, issue_flags, ret, 0);
4913 int ret;
4915 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4917 if (ret)
4918 return ret;
4929 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
4932 if (ret > 0)
4933 ret = 0;
4936 return ret;
4947 int ret;
4949 ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
4951 if (ret)
4952 return ret;
4970 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
4973 if (ret < 0)
4974 return ret;
5016 int ret;
5018 ret = io_recvmsg_copy_hdr(req, req->async_data);
5019 if (!ret)
5021 return ret;
5058 int ret, cflags = 0;
5061 sock = sock_from_file(req->file, &ret);
5063 return ret;
5067 ret = io_recvmsg_copy_hdr(req, &iomsg);
5068 if (ret)
5069 return ret;
5089 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
5091 if (ret < min_ret) {
5092 if (ret == -EAGAIN && force_nonblock)
5094 if (ret == -ERESTARTSYS)
5095 ret = -EINTR;
5096 if (ret > 0 && io_net_retry(sock, flags)) {
5099 sr->done_io += ret;
5114 if (ret >= 0)
5115 ret += sr->done_io;
5117 ret = sr->done_io;
5118 __io_req_complete(req, issue_flags, ret, cflags);
5132 int ret, cflags = 0;
5135 sock = sock_from_file(req->file, &ret);
5137 return ret;
5146 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
5147 if (unlikely(ret))
5163 ret = sock_recvmsg(sock, &msg, flags);
5164 if (ret < min_ret) {
5165 if (ret == -EAGAIN && force_nonblock)
5167 if (ret == -ERESTARTSYS)
5168 ret = -EINTR;
5169 if (ret > 0 && io_net_retry(sock, flags)) {
5170 sr->len -= ret;
5171 sr->buf += ret;
5172 sr->done_io += ret;
5183 if (ret >= 0)
5184 ret += sr->done_io;
5186 ret = sr->done_io;
5187 __io_req_complete(req, issue_flags, ret, cflags);
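
The send/recv fragments between lines 4806 and 5187 all follow one convention for short transfers on a socket: if some bytes moved and the socket is still retryable, the progress is stashed in sr->done_io, the buffer pointer and length are advanced, and -EAGAIN is returned so the request is retried; only the final completion folds done_io back into the CQE result. A sketch of the pattern as it appears in io_send(); io_net_retry() is taken from the listing, while req_set_fail() and the surrounding variables are assumed context:

    ret = sock_sendmsg(sock, &msg);
    if (ret < min_ret) {
        if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
            return -EAGAIN;             /* retried when the socket is ready */
        if (ret == -ERESTARTSYS)
            ret = -EINTR;
        if (ret > 0 && io_net_retry(sock, flags)) {
            /* partial transfer: remember progress and retry the remainder */
            sr->len -= ret;
            sr->buf += ret;
            sr->done_io += ret;
            return -EAGAIN;
        }
        req_set_fail(req);              /* assumed failure marking */
    }
    /* fold previously transferred bytes back into the final CQE result */
    if (ret >= 0)
        ret += sr->done_io;
    else if (sr->done_io)
        ret = sr->done_io;
    __io_req_complete(req, issue_flags, ret, 0);
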
5222 int ret, fd;
5235 ret = PTR_ERR(file);
5238 if (ret == -EAGAIN && force_nonblock)
5240 if (ret == -ERESTARTSYS)
5241 ret = -EINTR;
5245 ret = fd;
5247 ret = io_install_fixed_file(req, file, issue_flags,
5250 __io_req_complete(req, issue_flags, ret, 0);
5281 int ret;
5287 ret = move_addr_to_kernel(req->connect.addr,
5290 if (ret)
5297 ret = __sys_connect_file(req->file, &io->address,
5299 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
5303 ret = -ENOMEM;
5309 if (ret == -ERESTARTSYS)
5310 ret = -EINTR;
5312 if (ret < 0)
5314 __io_req_complete(req, issue_flags, ret, 0);
5553 int ret;
5555 ret = io_poll_check_events(req);
5556 if (ret > 0)
5559 if (!ret) {
5562 req->result = ret;
5576 int ret;
5578 ret = io_poll_check_events(req);
5579 if (ret > 0)
5588 if (!ret)
5591 io_req_complete_failed(req, ret);
5810 int ret;
5846 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
5847 if (ret || ipt.error)
5848 return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
5992 int ret;
5996 ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
5997 if (!ret && ipt.error)
5999 ret = ret ?: ipt.error;
6000 if (ret)
6001 __io_req_complete(req, issue_flags, ret, 0);
6009 int ret2, ret = 0;
6017 ret = preq ? -EALREADY : -ENOENT;
6040 if (ret < 0)
6043 io_req_complete(req, ret);
6216 int ret;
6221 ret = io_timeout_cancel(ctx, tr->addr);
6229 ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
6231 ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
6235 if (ret < 0)
6237 io_req_complete_post(req, ret, 0);
6362 int ret = 0;
6370 ret = 0;
6373 ret = -EALREADY;
6376 ret = -ENOENT;
6380 return ret;
6386 int ret;
6390 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
6391 if (ret != -ENOENT)
6392 return ret;
6396 ret = io_timeout_cancel(ctx, sqe_addr);
6398 if (ret != -ENOENT)
6400 ret = io_poll_cancel(ctx, sqe_addr, false);
6403 return ret;
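
Lines 6386-6403 are the cancellation fall-through: try the io-wq async cancel first, then timeouts, then poll, treating -ENOENT as "not found here, try the next mechanism". Reconstructed roughly as in mainline; the completion_lock shown (and the extra timeout locking mainline takes) is an assumption beyond the listed lines:

    static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
    {
        struct io_ring_ctx *ctx = req->ctx;
        int ret;

        /* first: a request already queued to (or running on) io-wq */
        ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
        if (ret != -ENOENT)
            return ret;

        spin_lock(&ctx->completion_lock);
        /* next: armed timeouts, then poll requests parked on a waitqueue */
        ret = io_timeout_cancel(ctx, sqe_addr);
        if (ret != -ENOENT)
            goto out;
        ret = io_poll_cancel(ctx, sqe_addr, false);
    out:
        spin_unlock(&ctx->completion_lock);
        return ret;
    }
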
6426 int ret;
6428 ret = io_try_cancel_userdata(req, sqe_addr);
6429 if (ret != -ENOENT)
6434 ret = -ENOENT;
6438 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
6439 if (ret != -ENOENT)
6444 if (ret < 0)
6446 io_req_complete_post(req, ret, 0);
6470 int ret;
6480 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
6484 if (ret < 0)
6486 __io_req_complete(req, issue_flags, ret, 0);
6609 int ret;
6651 ret = io_req_prep_async(req);
6652 if (ret)
6657 ret = -ENOMEM;
6659 io_req_complete_failed(req, ret);
6749 int ret;
6756 ret = io_nop(req, issue_flags);
6761 ret = io_read(req, issue_flags);
6766 ret = io_write(req, issue_flags);
6769 ret = io_fsync(req, issue_flags);
6772 ret = io_poll_add(req, issue_flags);
6775 ret = io_poll_update(req, issue_flags);
6778 ret = io_sync_file_range(req, issue_flags);
6781 ret = io_sendmsg(req, issue_flags);
6784 ret = io_send(req, issue_flags);
6787 ret = io_recvmsg(req, issue_flags);
6790 ret = io_recv(req, issue_flags);
6793 ret = io_timeout(req, issue_flags);
6796 ret = io_timeout_remove(req, issue_flags);
6799 ret = io_accept(req, issue_flags);
6802 ret = io_connect(req, issue_flags);
6805 ret = io_async_cancel(req, issue_flags);
6808 ret = io_fallocate(req, issue_flags);
6811 ret = io_openat(req, issue_flags);
6814 ret = io_close(req, issue_flags);
6817 ret = io_files_update(req, issue_flags);
6820 ret = io_statx(req, issue_flags);
6823 ret = io_fadvise(req, issue_flags);
6826 ret = io_madvise(req, issue_flags);
6829 ret = io_openat2(req, issue_flags);
6832 ret = io_epoll_ctl(req, issue_flags);
6835 ret = io_splice(req, issue_flags);
6838 ret = io_provide_buffers(req, issue_flags);
6841 ret = io_remove_buffers(req, issue_flags);
6844 ret = io_tee(req, issue_flags);
6847 ret = io_shutdown(req, issue_flags);
6850 ret = io_renameat(req, issue_flags);
6853 ret = io_unlinkat(req, issue_flags);
6856 ret = -EINVAL;
6862 if (ret)
6863 return ret;
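
Lines 6749-6863 are the body of the per-opcode dispatcher: a single switch routes the request to its handler, and every handler's return value funnels through the same ret check. The shape, abbreviated to a few opcodes; the grouped case labels follow mainline io_issue_sqe() and are assumptions beyond the listed lines:

    switch (req->opcode) {
    case IORING_OP_NOP:
        ret = io_nop(req, issue_flags);
        break;
    case IORING_OP_READV:
    case IORING_OP_READ_FIXED:
    case IORING_OP_READ:
        ret = io_read(req, issue_flags);
        break;
    case IORING_OP_WRITEV:
    case IORING_OP_WRITE_FIXED:
    case IORING_OP_WRITE:
        ret = io_write(req, issue_flags);
        break;
    /* ... one case per remaining opcode, as in the lines listed above ... */
    default:
        ret = -EINVAL;
        break;
    }

    if (ret)
        return ret;
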
6883 int ret = 0;
6896 ret = -ECANCELED;
6898 if (!ret) {
6900 ret = io_issue_sqe(req, 0);
6906 if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL))
6922 if (ret)
6923 io_req_task_queue_fail(req, ret);
7002 int ret = -ENOENT;
7006 ret = io_try_cancel_userdata(req, prev->user_data);
7007 io_req_complete_post(req, ret ?: -ETIME, 0);
7070 int ret;
7073 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
7079 if (likely(!ret)) {
7093 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
7113 io_req_complete_failed(req, ret);
7128 int ret = io_req_prep_async(req);
7130 if (unlikely(ret))
7131 io_req_complete_failed(req, ret);
7169 int personality, ret = 0;
7219 ret = -EBADF;
7223 return ret;
7231 int ret;
7233 ret = io_init_req(ctx, req, sqe);
7234 if (unlikely(ret)) {
7244 * failure so that we can set the correct ret code for it.
7254 io_req_complete_failed(req, ret);
7255 return ret;
7257 req_fail_link_node(req, ret);
7259 ret = io_req_prep(req, sqe);
7260 if (unlikely(ret))
7280 ret = io_req_prep_async(req);
7281 if (unlikely(ret)) {
7282 req_fail_link_node(req, ret);
7451 int ret = 0;
7475 ret = io_submit_sqes(ctx, to_submit);
7484 return ret;
7542 int ret = __io_sq_thread(ctx, cap_entries);
7544 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
7659 int io_wait, ret;
7662 ret = io_run_task_work_sig();
7663 if (ret || io_should_wake(iowq))
7664 return ret;
7677 ret = 1;
7679 ret = -ETIME;
7681 return ret;
7695 int ret;
7716 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
7720 ret = set_user_sigmask(sig, sigsz);
7722 if (ret)
7723 return ret;
7737 ret = -EBUSY;
7742 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
7745 } while (ret > 0);
7747 restore_saved_sigmask_unless(ret == -EINTR);
7749 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
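
Lines 7659-7749 are the CQ wait path. io_cqring_wait_schedule() returns a small convention: positive means "keep waiting", zero means a wake condition was satisfied, and negative (-EINTR from a pending signal, -ETIME from the timeout) aborts the wait. The caller loops on that, restores the sigmask unless it is about to report -EINTR, and suppresses the error entirely if completions did arrive. A condensed sketch of the caller's loop, after the sigmask has been installed; cq_wait and the prepare/finish_wait pairing are assumptions beyond the listed lines:

    do {
        /* if overflowed CQEs cannot be flushed, don't wait for more */
        if (!io_cqring_overflow_flush(ctx)) {
            ret = -EBUSY;
            break;
        }
        prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq, TASK_INTERRUPTIBLE);
        ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
        finish_wait(&ctx->cq_wait, &iowq.wq);
        cond_resched();
    } while (ret > 0);

    restore_saved_sigmask_unless(ret == -EINTR);

    /* a non-empty CQ ring overrides any wait error with success */
    return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
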
7874 int ret;
7882 ret = io_rsrc_node_switch_start(ctx);
7883 if (ret)
7892 ret = wait_for_completion_interruptible(&data->done);
7893 if (!ret) {
7911 ret = io_run_task_work_sig();
7913 } while (ret >= 0);
7916 return ret;
7941 int ret = -ENOMEM;
7957 ret = -EFAULT;
7973 return ret;
8019 int ret;
8029 ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
8031 if (!ret)
8033 return ret;
8230 int ret = 0;
8237 ret = __io_sqe_files_scm(ctx, this_files, total);
8238 if (ret)
8244 if (!ret)
8255 return ret;
8380 int fd, ret;
8391 ret = io_rsrc_node_switch_start(ctx);
8392 if (ret)
8393 return ret;
8394 ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
8396 if (ret)
8397 return ret;
8399 ret = -ENOMEM;
8405 ret = -EFAULT;
8410 ret = -EINVAL;
8417 ret = -EBADF;
8435 ret = io_sqe_files_scm(ctx);
8436 if (ret) {
8438 return ret;
8442 return ret;
8454 return ret;
8481 int ret = -EBADF;
8486 ret = -ENXIO;
8489 ret = -EINVAL;
8499 ret = io_rsrc_node_switch_start(ctx);
8500 if (ret)
8504 ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
8506 if (ret)
8514 ret = 0;
8519 if (ret)
8521 return ret;
8530 int ret;
8533 ret = -ENXIO;
8536 ret = -EINVAL;
8539 ret = io_rsrc_node_switch_start(ctx);
8540 if (ret)
8545 ret = -EBADF;
8550 ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
8551 if (ret)
8556 ret = 0;
8559 return ret;
8671 int ret;
8677 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
8678 if (unlikely(ret)) {
8680 return ret;
8685 ret = PTR_ERR(tctx->io_wq);
8688 return ret;
8718 int ret;
8741 ret = PTR_ERR(sqd);
8755 ret = (attached && !sqd->thread) ? -ENXIO : 0;
8758 if (ret < 0)
8766 ret = -EINVAL;
8778 ret = PTR_ERR(tsk);
8783 ret = io_uring_alloc_task_context(tsk, ctx);
8785 if (ret)
8789 ret = -EINVAL;
8798 return ret;
8837 int ret;
8840 ret = __io_account_mem(ctx->user, nr_pages);
8841 if (ret)
8842 return ret;
8936 int ret;
8946 ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
8948 if (!ret)
8950 return ret;
9019 int i, ret;
9041 ret = io_account_mem(ctx, imu->acct_pages);
9042 if (ret)
9044 return ret;
9056 int ret, pret, nr_pages, i;
9069 ret = -ENOMEM;
9084 ret = 0;
9094 ret = -EINVAL;
9100 ret = -EOPNOTSUPP;
9105 ret = pret < 0 ? pret : -EFAULT;
9108 if (ret) {
9118 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
9119 if (ret) {
9141 ret = 0;
9143 if (ret)
9147 return ret;
9185 int i, ret;
9192 ret = io_rsrc_node_switch_start(ctx);
9193 if (ret)
9194 return ret;
9195 ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
9196 if (ret)
9197 return ret;
9198 ret = io_buffers_map_alloc(ctx, nr_args);
9199 if (ret) {
9201 return ret;
9205 ret = io_copy_iov(ctx, &iov, arg, i);
9206 if (ret)
9208 ret = io_buffer_validate(&iov);
9209 if (ret)
9212 ret = -EINVAL;
9216 ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
9218 if (ret)
9225 if (ret)
9229 return ret;
9305 int ret = PTR_ERR(ctx->cq_ev_fd);
9308 return ret;
9506 int ret;
9562 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
9563 if (WARN_ON_ONCE(ret))
9692 bool ret = false;
9705 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
9709 return ret;
9721 bool ret = false;
9724 ret |= io_uring_try_cancel_iowq(ctx);
9732 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
9740 ret = true;
9745 ret |= io_cancel_defer_files(ctx, task, cancel_all);
9746 ret |= io_poll_remove_all(ctx, task, cancel_all);
9747 ret |= io_kill_timeouts(ctx, task, cancel_all);
9749 ret |= io_run_task_work();
9750 if (!ret)
9760 int ret;
9763 ret = io_uring_alloc_task_context(current, ctx);
9764 if (unlikely(ret))
9765 return ret;
9772 ret = io_wq_max_workers(tctx->io_wq, limits);
9773 if (ret)
9774 return ret;
9784 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
9786 if (ret) {
9788 return ret;
10065 long ret;
10077 ret = -EOPNOTSUPP;
10081 ret = -ENXIO;
10086 ret = -EBADFD;
10095 ret = 0;
10100 ret = -EOWNERDEAD;
10106 ret = io_sqpoll_wait_sq(ctx);
10107 if (ret)
10112 ret = io_uring_add_tctx_node(ctx);
10113 if (unlikely(ret))
10126 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
10127 if (unlikely(ret))
10140 ret = io_iopoll_check(ctx, min_complete);
10142 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
10150 return submitted ? submitted : ret;
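
Lines 10065-10150 are the tail of the io_uring_enter() syscall: after submitting, an optional wait runs (IOPOLL check or CQ wait), and the syscall reports the number of submitted SQEs if any were submitted, otherwise whatever the wait path produced. A sketch of that tail; the IORING_ENTER_GETEVENTS test and the submitted/min_complete handling follow mainline and are assumptions beyond the listed lines:

    if (flags & IORING_ENTER_GETEVENTS) {
        const sigset_t __user *sig;
        struct __kernel_timespec __user *ts;

        ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
        if (unlikely(ret))
            goto out;

        min_complete = min(min_complete, ctx->cq_entries);

        /* pure IOPOLL rings reap completions by polling the block layer */
        if ((ctx->flags & IORING_SETUP_IOPOLL) &&
            !(ctx->flags & IORING_SETUP_SQPOLL))
            ret = io_iopoll_check(ctx, min_complete);
        else
            ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
    }
    out:
    /* a partial or full submission still counts as success */
    return submitted ? submitted : ret;
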
10321 int ret, fd;
10327 ret = io_uring_add_tctx_node(ctx);
10328 if (ret) {
10330 return ret;
10346 int ret;
10348 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
10350 if (ret)
10351 return ERR_PTR(ret);
10372 int ret;
10427 ret = io_allocate_scq_urings(ctx, p);
10428 if (ret)
10431 ret = io_sq_offload_create(ctx, p);
10432 if (ret)
10435 ret = io_rsrc_node_switch_start(ctx);
10436 if (ret)
10466 ret = -EFAULT;
10472 ret = PTR_ERR(file);
10480 ret = io_uring_install_fd(ctx, file);
10481 if (ret < 0) {
10484 return ret;
10487 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
10488 return ret;
10491 return ret;
10530 int i, ret;
10539 ret = -EFAULT;
10542 ret = -EINVAL;
10557 ret = 0;
10559 ret = -EFAULT;
10562 return ret;
10569 int ret;
10573 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
10575 if (ret < 0) {
10577 return ret;
10587 int i, ret;
10608 ret = 0;
10614 ret = -EINVAL;
10623 ret = -EINVAL;
10636 ret = -EINVAL;
10643 if (ret != 0)
10649 return ret;
10748 int ret;
10762 ret = compat_get_bitmap(cpumask_bits(new_mask),
10766 ret = copy_from_user(new_mask, arg, len);
10769 ret = copy_from_user(new_mask, arg, len);
10772 if (ret) {
10777 ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
10779 return ret;
10800 int i, ret;
10834 ret = -EINVAL;
10836 ret = io_wq_max_workers(tctx->io_wq, new_count);
10837 if (ret)
10873 return ret;
10902 long ret;
10915 ret = wait_for_completion_interruptible(&ctx->ref_comp);
10916 if (!ret)
10918 ret = io_run_task_work_sig();
10919 } while (ret >= 0);
10922 if (ret)
10924 return ret;
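
Lines 10902-10924 (and the similar resource quiesce around line 7892) show the quiesce idiom used before most registration opcodes: wait interruptibly for outstanding references to drain, and if interrupted, run pending task_work, which may itself be what is pinning the references, then retry until the completion fires or a fatal signal is pending. A sketch of the loop; ctx->ref_comp is taken from the listing, the rest of the function context is assumed:

    do {
        ret = wait_for_completion_interruptible(&ctx->ref_comp);
        if (!ret)
            break;      /* all references dropped, context is quiesced */
        /* interrupted: process task_work that may release refs, then retry */
        ret = io_run_task_work_sig();
    } while (ret >= 0);

    if (ret)
        /* bail out; the caller sees the signal error */
        return ret;
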
10932 int ret;
10949 ret = io_ctx_quiesce(ctx);
10950 if (ret)
10951 return ret;
10956 ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
10959 ret = -EINVAL;
10962 ret = io_sqe_buffers_unregister(ctx);
10965 ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
10968 ret = -EINVAL;
10971 ret = io_sqe_files_unregister(ctx);
10974 ret = io_register_files_update(ctx, arg, nr_args);
10978 ret = -EINVAL;
10981 ret = io_eventfd_register(ctx, arg);
10982 if (ret)
10990 ret = -EINVAL;
10993 ret = io_eventfd_unregister(ctx);
10996 ret = -EINVAL;
10999 ret = io_probe(ctx, arg, nr_args);
11002 ret = -EINVAL;
11005 ret = io_register_personality(ctx);
11008 ret = -EINVAL;
11011 ret = io_unregister_personality(ctx, nr_args);
11014 ret = -EINVAL;
11017 ret = io_register_enable_rings(ctx);
11020 ret = io_register_restrictions(ctx, arg, nr_args);
11023 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
11026 ret = io_register_rsrc_update(ctx, arg, nr_args,
11030 ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
11033 ret = io_register_rsrc_update(ctx, arg, nr_args,
11037 ret = -EINVAL;
11040 ret = io_register_iowq_aff(ctx, arg, nr_args);
11043 ret = -EINVAL;
11046 ret = io_unregister_iowq_aff(ctx);
11049 ret = -EINVAL;
11052 ret = io_register_iowq_max_workers(ctx, arg);
11055 ret = -EINVAL;
11064 return ret;
11071 long ret = -EBADF;
11081 ret = -EOPNOTSUPP;
11090 ret = __io_uring_register(ctx, opcode, arg, nr_args);
11093 ctx->cq_ev_fd != NULL, ret);
11096 return ret;