Lines Matching refs:sqd (cross-reference hits for the shared io_sq_data pointer; these appear to come from io_uring's SQPOLL support, io_uring/sqpoll.c)
27 void io_sq_thread_unpark(struct io_sq_data *sqd)
28 __releases(&sqd->lock)
30 WARN_ON_ONCE(sqd->thread == current);
36 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
37 if (atomic_dec_return(&sqd->park_pending))
38 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
39 mutex_unlock(&sqd->lock);
42 void io_sq_thread_park(struct io_sq_data *sqd)
43 __acquires(&sqd->lock)
45 WARN_ON_ONCE(sqd->thread == current);
47 atomic_inc(&sqd->park_pending);
48 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
49 mutex_lock(&sqd->lock);
50 if (sqd->thread)
51 wake_up_process(sqd->thread);
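io_sq_thread_park()/io_sq_thread_unpark() above form the handshake that lets another task temporarily quiesce the SQPOLL thread: the parker bumps park_pending, raises IO_SQ_THREAD_SHOULD_PARK, takes sqd->lock and wakes the thread, which drops the lock when it sees the flag, so the parker returns owning it; unpark clears the flag, re-raises it if other parkers are still queued, and releases the lock. A minimal userspace analogue of that handshake, with made-up names and pthreads/stdatomic standing in for the kernel primitives, might look like:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical sketch; struct and function names are invented. */
struct sq_data_demo {
	pthread_mutex_t lock;
	atomic_int      park_pending;   /* counts concurrent parkers */
	atomic_bool     should_park;    /* stands in for IO_SQ_THREAD_SHOULD_PARK */
};

/* Returns with sqd->lock held and the worker parked: the worker drops the
 * lock once it notices should_park and then blocks trying to retake it. */
static void demo_park(struct sq_data_demo *sqd)
{
	atomic_fetch_add(&sqd->park_pending, 1);
	atomic_store(&sqd->should_park, true);
	pthread_mutex_lock(&sqd->lock);
	/* the kernel also wake_up_process()es the worker here in case it sleeps */
}

/* Called with sqd->lock held. */
static void demo_unpark(struct sq_data_demo *sqd)
{
	atomic_store(&sqd->should_park, false);
	/* other parkers may still be pending; keep the flag raised for them */
	if (atomic_fetch_sub(&sqd->park_pending, 1) - 1)
		atomic_store(&sqd->should_park, true);
	pthread_mutex_unlock(&sqd->lock);
}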
54 void io_sq_thread_stop(struct io_sq_data *sqd)
56 WARN_ON_ONCE(sqd->thread == current);
57 WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
59 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
60 mutex_lock(&sqd->lock);
61 if (sqd->thread)
62 wake_up_process(sqd->thread);
63 mutex_unlock(&sqd->lock);
64 wait_for_completion(&sqd->exited);
67 void io_put_sq_data(struct io_sq_data *sqd)
69 if (refcount_dec_and_test(&sqd->refs)) {
70 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
72 io_sq_thread_stop(sqd);
73 kfree(sqd);
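io_sq_thread_stop() raises IO_SQ_THREAD_SHOULD_STOP, wakes the thread under sqd->lock, and then blocks on sqd->exited, which the SQPOLL thread completes on its way out (see complete(&sqd->exited) near the end of the listing); io_put_sq_data() only runs that teardown once the last reference is dropped. A rough userspace sketch of the same shutdown shape, with invented names and pthread_join() standing in for wait_for_completion():

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical sketch; field names mirror the kernel ones loosely. */
struct sq_data_demo {
	pthread_mutex_t lock;
	pthread_cond_t  wait;           /* ~ sqd->wait */
	pthread_t       thread;
	atomic_bool     should_stop;    /* ~ IO_SQ_THREAD_SHOULD_STOP */
	atomic_int      refs;
};

static void demo_stop(struct sq_data_demo *sqd)
{
	atomic_store(&sqd->should_stop, true);
	pthread_mutex_lock(&sqd->lock);
	pthread_cond_signal(&sqd->wait);  /* wake the worker if it is idling */
	pthread_mutex_unlock(&sqd->lock);
	pthread_join(sqd->thread, NULL);  /* ~ wait_for_completion(&sqd->exited) */
}

static void demo_put(struct sq_data_demo *sqd)
{
	/* the last reference stops the worker and frees the shared state */
	if (atomic_fetch_sub(&sqd->refs, 1) == 1) {
		demo_stop(sqd);
		free(sqd);
	}
}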
77 static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
82 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
84 sqd->sq_thread_idle = sq_thread_idle;
89 struct io_sq_data *sqd = ctx->sq_data;
91 if (sqd) {
92 io_sq_thread_park(sqd);
94 io_sqd_update_thread_idle(sqd);
95 io_sq_thread_unpark(sqd);
97 io_put_sq_data(sqd);
105 struct io_sq_data *sqd;
117 sqd = ctx_attach->sq_data;
118 if (!sqd) {
122 if (sqd->task_tgid != current->tgid) {
127 refcount_inc(&sqd->refs);
129 return sqd;
135 struct io_sq_data *sqd;
139 sqd = io_attach_sq_data(p);
140 if (!IS_ERR(sqd)) {
142 return sqd;
144 /* fall through for EPERM case, setup new sqd/task */
145 if (PTR_ERR(sqd) != -EPERM)
146 return sqd;
149 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
150 if (!sqd)
153 atomic_set(&sqd->park_pending, 0);
154 refcount_set(&sqd->refs, 1);
155 INIT_LIST_HEAD(&sqd->ctx_list);
156 mutex_init(&sqd->lock);
157 init_waitqueue_head(&sqd->wait);
158 init_completion(&sqd->exited);
159 return sqd;
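io_get_sq_data() first tries to attach to an existing sq_data; io_attach_sq_data() refuses with -EPERM when that sqd belongs to a different thread group (the task_tgid check above), and only that error falls through to allocating and initializing a fresh sqd, while any other error is returned as-is. Errors travel inside the returned pointer itself via the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention. A small self-contained userspace re-implementation of that convention, purely to illustrate the IS_ERR/PTR_ERR checks seen above (try_attach() is a made-up stand-in):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err)        { return (void *)err; }
static inline long PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* stand-in for io_attach_sq_data(): either a valid object or -EPERM */
static void *try_attach(int permitted)
{
	return permitted ? malloc(16) : ERR_PTR(-EPERM);
}

int main(void)
{
	void *sqd = try_attach(0);

	if (IS_ERR(sqd) && PTR_ERR(sqd) == -EPERM)
		printf("attach refused, would allocate a fresh sqd instead\n");
	else if (!IS_ERR(sqd))
		free(sqd);
	return 0;
}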
162 static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
164 return READ_ONCE(sqd->state);
205 static bool io_sqd_handle_event(struct io_sq_data *sqd)
210 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
212 mutex_unlock(&sqd->lock);
216 mutex_lock(&sqd->lock);
217 sqd->sq_cpu = raw_smp_processor_id();
219 return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
224 struct io_sq_data *sqd = data;
230 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
234 sqd->task_pid = current->pid;
236 if (sqd->sq_cpu != -1) {
237 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
240 sqd->sq_cpu = raw_smp_processor_id();
243 mutex_lock(&sqd->lock);
247 if (io_sqd_events_pending(sqd) || signal_pending(current)) {
248 if (io_sqd_handle_event(sqd))
250 timeout = jiffies + sqd->sq_thread_idle;
253 cap_entries = !list_is_singular(&sqd->ctx_list);
254 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
265 timeout = jiffies + sqd->sq_thread_idle;
267 mutex_unlock(&sqd->lock);
269 mutex_lock(&sqd->lock);
270 sqd->sq_cpu = raw_smp_processor_id();
275 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
276 if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
279 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
301 mutex_unlock(&sqd->lock);
303 mutex_lock(&sqd->lock);
304 sqd->sq_cpu = raw_smp_processor_id();
306 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
311 finish_wait(&sqd->wait, &wait);
312 timeout = jiffies + sqd->sq_thread_idle;
315 io_uring_cancel_generic(true, sqd);
316 sqd->thread = NULL;
317 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
320 mutex_unlock(&sqd->lock);
322 complete(&sqd->exited);
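io_sq_thread() itself is the poll loop: holding sqd->lock, it handles park/stop events, submits for every ctx on sqd->ctx_list (capping entries when more than one ring shares the thread), and pushes the idle deadline (sq_thread_idle) forward whenever it finds work; once the deadline passes with nothing pending it does prepare_to_wait() on sqd->wait and sleeps until woken, and on exit it clears sqd->thread and completes sqd->exited. A hedged userspace sketch of that busy-poll-then-sleep shape, with invented names and a stub do_work() in place of the real submission path:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

struct sq_worker_demo {
	pthread_mutex_t lock;
	pthread_cond_t  wait;           /* ~ sqd->wait */
	atomic_bool     should_stop;    /* ~ IO_SQ_THREAD_SHOULD_STOP */
	long long       idle_ns;        /* ~ sqd->sq_thread_idle */
};

static long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static bool do_work(void) { return false; }   /* stub: true if work was found */

static void *sq_worker(void *data)
{
	struct sq_worker_demo *w = data;
	long long timeout = now_ns() + w->idle_ns;

	pthread_mutex_lock(&w->lock);
	while (!atomic_load(&w->should_stop)) {
		bool worked = do_work();

		if (worked)
			timeout = now_ns() + w->idle_ns;  /* reset the idle deadline */
		if (worked || now_ns() < timeout)
			continue;                         /* keep busy-polling */

		/* idle for a full period: sleep until new work or a stop arrives */
		pthread_cond_wait(&w->wait, &w->lock);
		timeout = now_ns() + w->idle_ns;
	}
	pthread_mutex_unlock(&w->lock);
	return NULL;      /* the kernel thread complete()s sqd->exited here */
}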
364 struct io_sq_data *sqd;
371 sqd = io_get_sq_data(p, &attached);
372 if (IS_ERR(sqd)) {
373 ret = PTR_ERR(sqd);
378 ctx->sq_data = sqd;
383 io_sq_thread_park(sqd);
384 list_add(&ctx->sqd_list, &sqd->ctx_list);
385 io_sqd_update_thread_idle(sqd);
387 ret = (attached && !sqd->thread) ? -ENXIO : 0;
388 io_sq_thread_unpark(sqd);
401 sqd->sq_cpu = cpu;
403 sqd->sq_cpu = -1;
406 sqd->task_pid = current->pid;
407 sqd->task_tgid = current->tgid;
408 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
414 sqd->thread = tsk;
436 struct io_sq_data *sqd = ctx->sq_data;
439 if (sqd) {
440 io_sq_thread_park(sqd);
442 if (sqd->thread)
443 ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
444 io_sq_thread_unpark(sqd);
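The last two groups deal with CPU placement: io_sq_offload_create() stores either the requested CPU or -1 in sqd->sq_cpu, io_sq_thread() pins itself with set_cpus_allowed_ptr() when sq_cpu is not -1, and io_sqpoll_wq_cpu_affinity() parks the thread before applying a new mask to its io-wq so the update cannot race with teardown. A minimal userspace analogue of the sq_cpu convention, using pthread_setaffinity_np() and a hypothetical pin_worker() helper:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

/* Hypothetical sketch: -1 means "leave the worker unbound", anything else
 * pins it to that one CPU, mirroring
 * set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu)). */
static int pin_worker(pthread_t worker, int sq_cpu)
{
	cpu_set_t mask;

	if (sq_cpu < 0)
		return 0;               /* no affinity requested: run anywhere */

	CPU_ZERO(&mask);
	CPU_SET(sq_cpu, &mask);
	return pthread_setaffinity_np(worker, sizeof(mask), &mask);
}

In the kernel the SQPOLL thread applies its affinity to itself at startup, while the later io_sqpoll_wq_cpu_affinity() path changes affinity for the associated io-wq under io_sq_thread_park(), which is what keeps the update from racing with the thread exiting.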