Lines Matching refs:sqd

283 	/* ctx's that are using this sqd */
1084 static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
7426 static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
7428 return READ_ONCE(sqd->state);
7487 static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
7492 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7494 sqd->sq_thread_idle = sq_thread_idle;
7497 static bool io_sqd_handle_event(struct io_sq_data *sqd)
7502 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
7504 mutex_unlock(&sqd->lock);
7508 mutex_lock(&sqd->lock);
7510 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
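
The three sqd helpers above (7426-7510) fit together roughly as below. This is a sketch assembled from the matched lines; the signal-delivery details (the ksignal local and the get_signal() call) do not mention sqd and are assumed from surrounding context.

static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
{
    /* non-zero iff SHOULD_PARK and/or SHOULD_STOP was set by another task */
    return READ_ONCE(sqd->state);
}

static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
{
    struct io_ring_ctx *ctx;
    unsigned sq_thread_idle = 0;

    /* the shared idle timeout is the maximum requested by any attached ctx */
    list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
        sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
    sqd->sq_thread_idle = sq_thread_idle;
}

static bool io_sqd_handle_event(struct io_sq_data *sqd)
{
    bool did_sig = false;
    struct ksignal ksig;

    if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
        signal_pending(current)) {
        /* drop sqd->lock so a parking task can take it and do its update */
        mutex_unlock(&sqd->lock);
        if (signal_pending(current))
            did_sig = get_signal(&ksig);    /* assumed from context */
        cond_resched();
        mutex_lock(&sqd->lock);
    }
    return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}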
7515 struct io_sq_data *sqd = data;
7521 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
7524 if (sqd->sq_cpu != -1)
7525 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
7530 mutex_lock(&sqd->lock);
7534 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
7535 if (io_sqd_handle_event(sqd))
7537 timeout = jiffies + sqd->sq_thread_idle;
7540 cap_entries = !list_is_singular(&sqd->ctx_list);
7541 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
7553 timeout = jiffies + sqd->sq_thread_idle;
7557 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
7558 if (!io_sqd_events_pending(sqd) && !current->task_works) {
7561 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
7576 mutex_unlock(&sqd->lock);
7578 mutex_lock(&sqd->lock);
7580 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7584 finish_wait(&sqd->wait, &wait);
7585 timeout = jiffies + sqd->sq_thread_idle;
7588 io_uring_cancel_generic(true, sqd);
7589 sqd->thread = NULL;
7590 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
7593 mutex_unlock(&sqd->lock);
7595 complete(&sqd->exited);
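
Pieced together, the io_sq_thread() matches (7515-7595) outline the SQPOLL worker loop roughly as below. Everything marked with "..." comments (per-ctx submission, the NEED_WAKEUP ring flagging, waking CQ waiters) is context that does not mention sqd and is only indicated, not reconstructed.

static int io_sq_thread(void *data)
{
    struct io_sq_data *sqd = data;
    struct io_ring_ctx *ctx;
    unsigned long timeout = 0;
    char buf[TASK_COMM_LEN];
    DEFINE_WAIT(wait);

    snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
    set_task_comm(current, buf);

    if (sqd->sq_cpu != -1)
        set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));

    mutex_lock(&sqd->lock);
    while (1) {
        bool cap_entries, sqt_spin = false;

        if (io_sqd_events_pending(sqd) || signal_pending(current)) {
            if (io_sqd_handle_event(sqd))
                break;              /* SHOULD_STOP or fatal signal */
            timeout = jiffies + sqd->sq_thread_idle;
        }

        /* submit for each attached ring; cap the batch when rings share */
        cap_entries = !list_is_singular(&sqd->ctx_list);
        list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
            /* ... __io_sq_thread(ctx, cap_entries), may set sqt_spin ... */
        }

        if (sqt_spin || !time_after(jiffies, timeout)) {
            cond_resched();
            if (sqt_spin)
                timeout = jiffies + sqd->sq_thread_idle;
            continue;
        }

        /* idle long enough: advertise NEED_WAKEUP and go to sleep */
        prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
        if (!io_sqd_events_pending(sqd) && !current->task_works) {
            list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
                /* ... set IORING_SQ_NEED_WAKEUP in the SQ ring flags ... */
            }
            mutex_unlock(&sqd->lock);
            schedule();
            mutex_lock(&sqd->lock);
            list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                /* ... clear IORING_SQ_NEED_WAKEUP again ... */;
        }
        finish_wait(&sqd->wait, &wait);
        timeout = jiffies + sqd->sq_thread_idle;
    }

    io_uring_cancel_generic(true, sqd);
    sqd->thread = NULL;
    list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
        /* ... wake CQ-ring waiters so they notice the thread is gone ... */;
    mutex_unlock(&sqd->lock);
    complete(&sqd->exited);
    do_exit(0);
}

Note that the thread holds sqd->lock the whole time except while sleeping idle or while parked inside io_sqd_handle_event(); that is what the park/unpark protocol below relies on.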
8036 static void io_sq_thread_unpark(struct io_sq_data *sqd)
8037 __releases(&sqd->lock)
8039 WARN_ON_ONCE(sqd->thread == current);
8045 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
8046 if (atomic_dec_return(&sqd->park_pending))
8047 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
8048 mutex_unlock(&sqd->lock);
8051 static void io_sq_thread_park(struct io_sq_data *sqd)
8052 __acquires(&sqd->lock)
8054 WARN_ON_ONCE(sqd->thread == current);
8056 atomic_inc(&sqd->park_pending);
8057 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
8058 mutex_lock(&sqd->lock);
8059 if (sqd->thread)
8060 wake_up_process(sqd->thread);
8063 static void io_sq_thread_stop(struct io_sq_data *sqd)
8065 WARN_ON_ONCE(sqd->thread == current);
8066 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
8068 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
8069 mutex_lock(&sqd->lock);
8070 if (sqd->thread)
8071 wake_up_process(sqd->thread);
8072 mutex_unlock(&sqd->lock);
8073 wait_for_completion(&sqd->exited);
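
The park/unpark/stop trio (8036-8073) is how every other task quiesces the SQPOLL thread before touching shared sqd state: bump park_pending, set SHOULD_PARK, wake the thread, and take sqd->lock, which the thread only gives up inside io_sqd_handle_event(). A minimal caller-side sketch (the helper name is illustrative, modeled on the io_sq_offload_create matches at 8751-8756):

/* park -> mutate shared sqd state under sqd->lock -> unpark */
static void example_attach_ctx(struct io_ring_ctx *ctx, struct io_sq_data *sqd)
{
    io_sq_thread_park(sqd);             /* thread quiesced, sqd->lock held */
    list_add(&ctx->sqd_list, &sqd->ctx_list);
    io_sqd_update_thread_idle(sqd);     /* re-derive max idle across ctxs */
    io_sq_thread_unpark(sqd);           /* releases sqd->lock */
}

The dec/re-set dance in io_sq_thread_unpark() (8045-8047) re-asserts SHOULD_PARK when park_pending is still non-zero, so concurrent parkers from different tasks do not lose the bit.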
8076 static void io_put_sq_data(struct io_sq_data *sqd)
8078 if (refcount_dec_and_test(&sqd->refs)) {
8079 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
8081 io_sq_thread_stop(sqd);
8082 kfree(sqd);
8088 struct io_sq_data *sqd = ctx->sq_data;
8090 if (sqd) {
8091 io_sq_thread_park(sqd);
8093 io_sqd_update_thread_idle(sqd);
8094 io_sq_thread_unpark(sqd);
8096 io_put_sq_data(sqd);
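
The per-ctx teardown at 8088-8096 follows the same pattern before dropping its reference. A sketch; the function name and the non-sqd lines (unlinking ctx->sqd_list, clearing ctx->sq_data) are assumed from context:

static void io_sq_thread_finish(struct io_ring_ctx *ctx)    /* name assumed */
{
    struct io_sq_data *sqd = ctx->sq_data;

    if (sqd) {
        io_sq_thread_park(sqd);
        list_del_init(&ctx->sqd_list);      /* assumed: detach this ctx */
        io_sqd_update_thread_idle(sqd);
        io_sq_thread_unpark(sqd);

        io_put_sq_data(sqd);                /* may stop + free, see 8076-8082 */
        ctx->sq_data = NULL;                /* assumed */
    }
}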
8104 struct io_sq_data *sqd;
8116 sqd = ctx_attach->sq_data;
8117 if (!sqd) {
8121 if (sqd->task_tgid != current->tgid) {
8126 refcount_inc(&sqd->refs);
8128 return sqd;
8134 struct io_sq_data *sqd;
8138 sqd = io_attach_sq_data(p);
8139 if (!IS_ERR(sqd)) {
8141 return sqd;
8143 /* fall through for EPERM case, setup new sqd/task */
8144 if (PTR_ERR(sqd) != -EPERM)
8145 return sqd;
8148 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
8149 if (!sqd)
8152 atomic_set(&sqd->park_pending, 0);
8153 refcount_set(&sqd->refs, 1);
8154 INIT_LIST_HEAD(&sqd->ctx_list);
8155 mutex_init(&sqd->lock);
8156 init_waitqueue_head(&sqd->wait);
8157 init_completion(&sqd->exited);
8158 return sqd;
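
The attach-or-allocate path (8104-8158) reads roughly as below. The ATTACH_WQ gate, the *attached out-parameter initialization and the -ENOMEM return do not mention sqd and are assumed from surrounding context; the out-parameter itself is confirmed by the caller at 8739.

static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
                                         bool *attached)
{
    struct io_sq_data *sqd;

    *attached = false;
    if (p->flags & IORING_SETUP_ATTACH_WQ) {        /* assumed */
        sqd = io_attach_sq_data(p);
        if (!IS_ERR(sqd)) {
            *attached = true;
            return sqd;
        }
        /* fall through for EPERM case, setup new sqd/task */
        if (PTR_ERR(sqd) != -EPERM)
            return sqd;
    }

    sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
    if (!sqd)
        return ERR_PTR(-ENOMEM);                    /* assumed */

    atomic_set(&sqd->park_pending, 0);
    refcount_set(&sqd->refs, 1);
    INIT_LIST_HEAD(&sqd->ctx_list);
    mutex_init(&sqd->lock);
    init_waitqueue_head(&sqd->wait);
    init_completion(&sqd->exited);
    return sqd;
}

io_attach_sq_data() rejects an sqd owned by a different thread group (8121), which is exactly the -EPERM case the fallthrough comment at 8143 allows to proceed with a freshly allocated sqd.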
8736 struct io_sq_data *sqd;
8739 sqd = io_get_sq_data(p, &attached);
8740 if (IS_ERR(sqd)) {
8741 ret = PTR_ERR(sqd);
8746 ctx->sq_data = sqd;
8751 io_sq_thread_park(sqd);
8752 list_add(&ctx->sqd_list, &sqd->ctx_list);
8753 io_sqd_update_thread_idle(sqd);
8755 ret = (attached && !sqd->thread) ? -ENXIO : 0;
8756 io_sq_thread_unpark(sqd);
8769 sqd->sq_cpu = cpu;
8771 sqd->sq_cpu = -1;
8774 sqd->task_pid = current->pid;
8775 sqd->task_tgid = current->tgid;
8776 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
8782 sqd->thread = tsk;
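
In the creating function (8736-8782; presumably io_sq_offload_create, though the name itself is not in the matches), the sqd is wired into the ctx, parked while the ctx joins the list, and a thread is created only if one was not inherited via attach. The relevant excerpt, with error labels, the attached-early-return and the CPU validity check assumed:

    sqd = io_get_sq_data(p, &attached);
    if (IS_ERR(sqd)) {
        ret = PTR_ERR(sqd);
        goto err;                               /* label assumed */
    }
    ctx->sq_data = sqd;

    io_sq_thread_park(sqd);
    list_add(&ctx->sqd_list, &sqd->ctx_list);
    io_sqd_update_thread_idle(sqd);
    /* attaching to an sqd whose thread already exited is an error */
    ret = (attached && !sqd->thread) ? -ENXIO : 0;
    io_sq_thread_unpark(sqd);
    if (ret < 0)
        goto err;
    if (attached)
        return 0;                               /* reuse the existing thread */

    if (p->flags & IORING_SETUP_SQ_AFF)
        sqd->sq_cpu = p->sq_thread_cpu;         /* online/range check elided */
    else
        sqd->sq_cpu = -1;

    sqd->task_pid = current->pid;
    sqd->task_tgid = current->tgid;
    tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
    if (IS_ERR(tsk)) {
        ret = PTR_ERR(tsk);
        goto err_sqpoll;                        /* label assumed */
    }

    sqd->thread = tsk;
    /* ... register tsk's io_uring task context, then wake it up ... */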
9517 struct io_sq_data *sqd = ctx->sq_data;
9520 io_sq_thread_park(sqd);
9521 tsk = sqd->thread;
9525 io_sq_thread_unpark(sqd);
9866 * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
9868 static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
9875 WARN_ON_ONCE(sqd && sqd->thread != current);
9890 if (!sqd) {
9902 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
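
The cancellation entry point (9866-9902) encodes the contract stated in the comment at 9866: sqd is non-NULL only when the SQPOLL thread itself is exiting (note the WARN at 9875). The branch reads roughly as follows; the task-context iteration in the !sqd case is assumed from surrounding context:

    if (!sqd) {
        /* plain task exit: walk the ctxs registered in this task's own
         * io_uring task context and cancel its requests ... */
    } else {
        /* SQPOLL thread exit: cancel on every ctx attached to the sqd */
        list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
            /* ... cancel all requests belonging to this ctx ... */;
    }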
10798 struct io_sq_data *sqd = NULL;
10809 sqd = ctx->sq_data;
10810 if (sqd) {
10812 * Observe the correct sqd->lock -> ctx->uring_lock
10816 refcount_inc(&sqd->refs);
10818 mutex_lock(&sqd->lock);
10820 if (sqd->thread)
10821 tctx = sqd->thread->io_uring;
10843 if (sqd) {
10844 mutex_unlock(&sqd->lock);
10845 io_put_sq_data(sqd);
10852 if (sqd)
10869 if (sqd) {
10870 mutex_unlock(&sqd->lock);
10871 io_put_sq_data(sqd);
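
The last group (10798-10871) shows the lock-ordering dance named by the comment fragment at 10812: sqd->lock must be taken before ctx->uring_lock, so the caller pins the sqd with an extra reference, drops uring_lock, and re-acquires both in the right order. A sketch of that branch; dropping and re-taking uring_lock is assumed from the comment and surrounding context:

    if (sqd) {
        /* enforce sqd->lock -> ctx->uring_lock ordering; the extra ref
         * keeps the sqd alive while uring_lock is dropped */
        refcount_inc(&sqd->refs);
        mutex_unlock(&ctx->uring_lock);         /* assumed */
        mutex_lock(&sqd->lock);
        mutex_lock(&ctx->uring_lock);           /* assumed */
        if (sqd->thread)
            tctx = sqd->thread->io_uring;
    }

    /* ... the registration work itself, done under both locks ... */

    if (sqd) {
        mutex_unlock(&sqd->lock);
        io_put_sq_data(sqd);
    }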