Lines matching refs: uring_lock
324 /* inline/task_work completion list, under ->uring_lock */
347 struct mutex uring_lock;
368 * uring_lock, and updated through io_uring_register(2)
415 * ->iopoll_list is protected by the ctx->uring_lock for
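The struct fields at 324-415 give the basic convention: a single mutex (->uring_lock) guards the context's mutable state, including the inline completion list and the iopoll list, and registered state is only updated under it. A minimal sketch of that convention with hypothetical names (my_ctx, items, flags are illustrations, not io_ring_ctx fields); later sketches below reuse this struct:

#include <linux/mutex.h>
#include <linux/list.h>

struct my_ctx {
	struct mutex		lock;	/* plays the role of ->uring_lock */
	struct list_head	items;	/* only walked/modified with ->lock held */
	unsigned int		flags;	/* updated under ->lock, read-mostly elsewhere */
};

static void my_ctx_init(struct my_ctx *ctx)
{
	mutex_init(&ctx->lock);		/* mirrors mutex_init(&ctx->uring_lock) at 1346 */
	INIT_LIST_HEAD(&ctx->items);
	ctx->flags = 0;
}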
1132 mutex_lock(&ctx->uring_lock);
1298 mutex_unlock(&ctx->uring_lock);
1346 mutex_init(&ctx->uring_lock);
1724 /* iopoll syncs against uring_lock, not completion_lock */
1726 mutex_lock(&ctx->uring_lock);
1729 mutex_unlock(&ctx->uring_lock);
1979 * Because of that, io_alloc_req() should be called only under ->uring_lock
1983 __must_hold(&ctx->uring_lock)
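The hits at 1979-1983, together with the annotations at 3207, 4354, 4442 and 7067-7379, show the two ways the locking rule is documented: __must_hold() for sparse at the function signature and lockdep_assert_held() as a runtime check. A hedged sketch of that pattern, reusing the hypothetical struct my_ctx above; my_req and my_alloc_req are illustrative names, not the real io_alloc_req():

#include <linux/lockdep.h>

struct my_req {
	struct list_head	cache_node;
};

static struct my_req *my_alloc_req(struct my_ctx *ctx)
	__must_hold(&ctx->lock)
{
	struct my_req *req;

	/* the free cache below is ->lock protected, so insist on the lock */
	lockdep_assert_held(&ctx->lock);

	if (list_empty(&ctx->items))
		return NULL;
	req = list_first_entry(&ctx->items, struct my_req, cache_node);
	list_del(&req->cache_node);
	return req;
}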
2175 mutex_unlock(&ctx->uring_lock);
2212 locked = mutex_trylock(&ctx->uring_lock);
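The trylock at 2212 (and again at 10198, plus the "workqueue context doesn't hold uring_lock, grab it now" hit at 2854) is a best-effort pattern: a context that may or may not already hold, or be willing to sleep on, the lock tries to take it, remembers whether it succeeded, and only touches lock-protected state if it did. A sketch under the same hypothetical names:

static void my_flush(struct my_ctx *ctx)
{
	bool locked = mutex_trylock(&ctx->lock);

	/* work that is safe without the lock can happen here either way */

	if (locked) {
		/* ... touch ->lock protected state, e.g. ctx->items ... */
		mutex_unlock(&ctx->lock);
	}
}

The trylock keeps such contexts from blocking, or deadlocking, on a lock that a submitter may hold for a long stretch.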
2384 __must_hold(&ctx->uring_lock)
2601 mutex_lock(&ctx->uring_lock);
2616 mutex_unlock(&ctx->uring_lock);
2618 mutex_lock(&ctx->uring_lock);
2621 mutex_unlock(&ctx->uring_lock);
2634 mutex_lock(&ctx->uring_lock);
2648 * issued. That app will hold the uring_lock for the duration
2658 mutex_unlock(&ctx->uring_lock);
2660 mutex_lock(&ctx->uring_lock);
2675 mutex_unlock(&ctx->uring_lock);
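The cluster at 2601-2675 drops and retakes the lock inside a polling loop; the fragment at 2648 explains why: another task may be blocked on uring_lock and needs it to make progress. A generic sketch of that drop-and-reacquire-around-a-busy-wait shape, again using the hypothetical my_ctx (this shows the shape of the pattern, not the real io_iopoll_check()):

#include <linux/sched.h>

static void my_poll_for_items(struct my_ctx *ctx)
{
	mutex_lock(&ctx->lock);
	while (list_empty(&ctx->items)) {
		/* give lock waiters a chance to run, then retake the lock */
		mutex_unlock(&ctx->lock);
		cond_resched();
		mutex_lock(&ctx->lock);
	}
	/* ... consume ctx->items under ->lock ... */
	mutex_unlock(&ctx->lock);
}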
2854 /* workqueue context doesn't hold uring_lock, grab it now */
2856 mutex_lock(&ctx->uring_lock);
2902 mutex_unlock(&ctx->uring_lock);
3103 mutex_lock(&ctx->uring_lock);
3105 mutex_unlock(&ctx->uring_lock);
3181 mutex_unlock(&ctx->uring_lock);
3187 * "Normal" inline submissions always hold the uring_lock, since we
3193 mutex_lock(&ctx->uring_lock);
3207 lockdep_assert_held(&req->ctx->uring_lock);
4354 lockdep_assert_held(&ctx->uring_lock);
4442 lockdep_assert_held(&ctx->uring_lock);
7067 __must_hold(&req->ctx->uring_lock)
7118 __must_hold(&req->ctx->uring_lock)
7165 __must_hold(&ctx->uring_lock)
7228 __must_hold(&ctx->uring_lock)
7379 __must_hold(&ctx->uring_lock)
7465 mutex_lock(&ctx->uring_lock);
7476 mutex_unlock(&ctx->uring_lock);
7876 /* As we may drop ->uring_lock, other task may have started quiesce */
7890 mutex_unlock(&ctx->uring_lock);
7894 mutex_lock(&ctx->uring_lock);
7900 mutex_unlock(&ctx->uring_lock);
7912 mutex_lock(&ctx->uring_lock);
8025 * Quiesce may unlock ->uring_lock, and while it's not held
8642 mutex_lock(&ctx->uring_lock);
8647 mutex_unlock(&ctx->uring_lock);
8654 mutex_unlock(&ctx->uring_lock);
8942 * Quiesce may unlock ->uring_lock, and while it's not held
9348 mutex_lock(&ctx->uring_lock);
9357 mutex_unlock(&ctx->uring_lock);
9370 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
9374 mutex_lock(&ctx->uring_lock);
9381 mutex_unlock(&ctx->uring_lock);
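The hits at 7876-7912, 9370-9381 and 10909-10920 all have the same shape: the code is about to wait for something whose completer itself needs uring_lock, so holding the lock across the wait would deadlock. The __releases()/__acquires() annotations at 10929-10930 document exactly this kind of function. A hedged sketch (my_quiesce, done and the revalidation step are assumptions, not the real register-quiesce code):

#include <linux/completion.h>

static int my_quiesce(struct my_ctx *ctx, struct completion *done)
	__releases(&ctx->lock) __acquires(&ctx->lock)
{
	int ret;

	mutex_unlock(&ctx->lock);	/* the side signalling 'done' takes ->lock itself */
	ret = wait_for_completion_interruptible(done);
	mutex_lock(&ctx->lock);
	/*
	 * Anything read from ctx before the unlock may be stale now; the
	 * caller must revalidate (cf. "other task may have started quiesce"
	 * at 7876).
	 */
	return ret;
}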
9442 * lock(&ctx->uring_lock);
9444 * lock(&ctx->uring_lock);
9550 * and they are free to do so while still holding uring_lock or
9554 mutex_lock(&ctx->uring_lock);
9567 mutex_unlock(&ctx->uring_lock);
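One plausible reading of the teardown hits around 9550-9567: other users may still be inside a uring_lock critical section, and simply taking and releasing the mutex acts as a barrier that waits them out. A minimal sketch of that lock/unlock-as-barrier idiom, reusing the hypothetical my_ctx:

static void my_wait_out_lock_holders(struct my_ctx *ctx)
{
	mutex_lock(&ctx->lock);
	/* anyone who held ->lock before this point has now left their critical section */
	mutex_unlock(&ctx->lock);
}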
9574 mutex_lock(&ctx->uring_lock);
9576 mutex_unlock(&ctx->uring_lock);
9612 mutex_lock(&ctx->uring_lock);
9618 mutex_unlock(&ctx->uring_lock);
9694 mutex_lock(&ctx->uring_lock);
9699 * io_wq will stay alive while we hold uring_lock, because it's
9707 mutex_unlock(&ctx->uring_lock);
9791 mutex_lock(&ctx->uring_lock);
9793 mutex_unlock(&ctx->uring_lock);
9828 mutex_lock(&node->ctx->uring_lock);
9830 mutex_unlock(&node->ctx->uring_lock);
9850 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
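The fragments at 9694-9707 and 9828-9850 describe using uring_lock to keep an object alive: because it is only torn down with the lock held, any holder of the lock can dereference it without taking a reference. A sketch with hypothetical names (my_wq, my_owner), not the real io_wq handling:

struct my_wq;				/* opaque; stands in for the worker object */

struct my_owner {
	struct mutex	lock;
	struct my_wq	*wq;		/* cleared and freed only with ->lock held */
};

static bool my_wq_is_active(struct my_owner *owner)
{
	bool active;

	mutex_lock(&owner->lock);
	active = owner->wq != NULL;	/* safe: ->lock pins the object's lifetime */
	mutex_unlock(&owner->lock);
	return active;
}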
10115 mutex_lock(&ctx->uring_lock);
10117 mutex_unlock(&ctx->uring_lock);
10136 * work, which can reduce cpu usage and uring_lock contention.
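The fragment at 10136 mentions batching work to cut uring_lock contention. The usual shape is to collect items on a local, lock-free list and splice them in under a single acquisition; a sketch reusing the hypothetical my_ctx:

static void my_flush_batch(struct my_ctx *ctx, struct list_head *batch)
{
	if (list_empty(batch))
		return;

	mutex_lock(&ctx->lock);		/* one lock round-trip per batch, not per item */
	list_splice_tail_init(batch, &ctx->items);
	mutex_unlock(&ctx->lock);
}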
10198 has_lock = mutex_trylock(&ctx->uring_lock);
10250 mutex_unlock(&ctx->uring_lock);
10794 __must_hold(&ctx->uring_lock)
10812 * Observe the correct sqd->lock -> ctx->uring_lock
10813 * ordering. Fine to drop uring_lock here, we hold
10817 mutex_unlock(&ctx->uring_lock);
10819 mutex_lock(&ctx->uring_lock);
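The hits at 10812-10819 spell out a lock-ordering rule (sqd->lock before ctx->uring_lock), and the comment drawn at 9442-9444 reads like the classic two-CPU deadlock diagram that motivates such rules. When code holds the "later" lock but needs the "earlier" one, it drops, reorders, and retakes. A sketch with two stand-in mutexes (lock_a and lock_b are hypothetical):

static DEFINE_MUTEX(lock_a);	/* must always be taken before lock_b */
static DEFINE_MUTEX(lock_b);	/* plays the ctx->uring_lock role here */

static void my_take_both_ordered(void)
	__must_hold(&lock_b)
{
	/* taking lock_a while holding lock_b would invert the documented order */
	mutex_unlock(&lock_b);
	mutex_lock(&lock_a);
	mutex_lock(&lock_b);	/* back to holding both, in a -> b order */

	/* ... work needing both; lock_b-protected state must be revalidated ... */

	mutex_unlock(&lock_a);	/* caller continues to hold lock_b */
}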
10909 * uring_lock to make progress. If we hold it here across the drain
10913 mutex_unlock(&ctx->uring_lock);
10920 mutex_lock(&ctx->uring_lock);
10929 __releases(ctx->uring_lock)
10930 __acquires(ctx->uring_lock)
11089 mutex_lock(&ctx->uring_lock);
11091 mutex_unlock(&ctx->uring_lock);