Searched refs:uring_lock (Results 1 - 16 of 16) sorted by relevance
/kernel/linux/linux-6.6/io_uring/
tctx.c
    22   mutex_lock(&ctx->uring_lock);  in io_init_wq_offload()
    27   mutex_unlock(&ctx->uring_lock);  in io_init_wq_offload()
    34   mutex_unlock(&ctx->uring_lock);  in io_init_wq_offload()
   129   mutex_lock(&ctx->uring_lock);  in __io_uring_add_tctx_node()
   131   mutex_unlock(&ctx->uring_lock);  in __io_uring_add_tctx_node()
   169   mutex_lock(&node->ctx->uring_lock);  in io_uring_del_tctx_node()
   171   mutex_unlock(&node->ctx->uring_lock);  in io_uring_del_tctx_node()
   191   * uring_lock) to avoid race with io_uring_try_cancel_iowq().  in io_uring_clean_tctx()
   264   mutex_unlock(&ctx->uring_lock);  in io_ringfd_register()
   266   mutex_lock(&ctx->uring_lock);  in io_ringfd_register()
   [all...]
io_uring.h
   104   lockdep_assert_held(&ctx->uring_lock);  in io_lockdep_assert_cq_locked()
   212   lockdep_assert_held(&ctx->uring_lock);  in io_ring_submit_unlock()
   214   mutex_unlock(&ctx->uring_lock);  in io_ring_submit_unlock()
   221   * "Normal" inline submissions always hold the uring_lock, since we  in io_ring_submit_lock()
   227   mutex_lock(&ctx->uring_lock);  in io_ring_submit_lock()
   228   lockdep_assert_held(&ctx->uring_lock);  in io_ring_submit_lock()
   313   mutex_lock(&ctx->uring_lock);  in io_tw_lock()
   320   * Protected by ->uring_lock and can only be used either with
   324   __must_hold(&req->ctx->uring_lock)
   328   lockdep_assert_held(&req->ctx->uring_lock);
   [all...]
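The io_uring.h hits above are the conditional submit-lock helpers: inline submission already holds uring_lock, so io_ring_submit_lock()/io_ring_submit_unlock() only take the mutex when the caller runs unlocked (the io-wq path), and otherwise just assert ownership via lockdep. Below is a minimal user-space sketch of that pattern, with a pthread mutex and an invented F_UNLOCKED flag standing in for the kernel's issue_flags; it is not the kernel code itself.

    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for ctx->uring_lock and IO_URING_F_UNLOCKED. */
    #define F_UNLOCKED  (1u << 0)

    static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Take the lock only if the caller does not already hold it. */
    static void ring_submit_lock(unsigned issue_flags)
    {
        if (issue_flags & F_UNLOCKED)
            pthread_mutex_lock(&ring_lock);
        /* The kernel helper asserts lockdep_assert_held(&ctx->uring_lock) here. */
    }

    static void ring_submit_unlock(unsigned issue_flags)
    {
        if (issue_flags & F_UNLOCKED)
            pthread_mutex_unlock(&ring_lock);
    }

    static void submit_path(unsigned issue_flags)
    {
        ring_submit_lock(issue_flags);
        printf("touching state protected by the ring lock\n");
        ring_submit_unlock(issue_flags);
    }

    int main(void)
    {
        submit_path(F_UNLOCKED);          /* io-wq style: helper takes the lock      */

        pthread_mutex_lock(&ring_lock);   /* inline style: caller already holds it   */
        submit_path(0);
        pthread_mutex_unlock(&ring_lock);
        return 0;
    }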
io_uring.c
   259   mutex_lock(&ctx->uring_lock);  in io_fallback_req_func()
   265   mutex_unlock(&ctx->uring_lock);  in io_fallback_req_func()
   323   mutex_init(&ctx->uring_lock);  in io_ring_ctx_alloc()
   715   /* iopoll syncs against uring_lock, not completion_lock */  in io_cqring_do_overflow_flush()
   717   mutex_lock(&ctx->uring_lock);  in io_cqring_do_overflow_flush()
   720   mutex_unlock(&ctx->uring_lock);  in io_cqring_do_overflow_flush()
   890   __must_hold(&ctx->uring_lock)
   895   lockdep_assert_held(&ctx->uring_lock);
   946   lockdep_assert_held(&ctx->uring_lock);  in io_fill_cqe_req_aux()
  1028   mutex_lock(&ctx->uring_lock);  in io_req_complete_post()
   [all...]
msg_ring.c
    38   mutex_unlock(&octx->uring_lock);  in io_double_unlock_ctx()
    50   if (!mutex_trylock(&octx->uring_lock))  in io_double_lock_ctx()
    54   mutex_lock(&octx->uring_lock);  in io_double_lock_ctx()
   109   * holding the uring_lock for posting completions. Other ring  in io_msg_tw_complete()
   114   mutex_lock(&target_ctx->uring_lock);  in io_msg_tw_complete()
   118   mutex_unlock(&target_ctx->uring_lock);  in io_msg_tw_complete()
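msg_ring.c is the one place a request must take a second ring's uring_lock while its own side may already be locked; the io_double_lock_ctx() hit at line 50 shows it using mutex_trylock() on the non-blocking path so two rings messaging each other cannot deadlock. A rough user-space analogue of that trylock-and-back-off shape follows; the context struct and names are invented, not the kernel API.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Two hypothetical ring contexts, each with its own lock. */
    struct ring_ctx {
        pthread_mutex_t lock;
        int cq_entries;
    };

    /*
     * Try to lock the target ring while our own ring may already be locked.
     * On failure the non-blocking caller backs off and retries later instead
     * of sleeping, which is what avoids an ABBA deadlock between two rings.
     */
    static bool double_lock_ctx(struct ring_ctx *target, bool nonblock)
    {
        if (nonblock)
            return pthread_mutex_trylock(&target->lock) == 0;
        pthread_mutex_lock(&target->lock);
        return true;
    }

    static void double_unlock_ctx(struct ring_ctx *target)
    {
        pthread_mutex_unlock(&target->lock);
    }

    int main(void)
    {
        struct ring_ctx a = { .lock = PTHREAD_MUTEX_INITIALIZER, .cq_entries = 0 };
        struct ring_ctx b = { .lock = PTHREAD_MUTEX_INITIALIZER, .cq_entries = 0 };

        pthread_mutex_lock(&a.lock);              /* sender holds its own lock   */
        if (double_lock_ctx(&b, true)) {
            b.cq_entries++;                       /* post into the target ring   */
            double_unlock_ctx(&b);
        } else {
            puts("target busy, would defer and retry");
        }
        pthread_mutex_unlock(&a.lock);
        return 0;
    }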
cancel.c
   243   /* fixed must be grabbed every time since we drop the uring_lock */  in __io_sync_cancel()
   258   __must_hold(&ctx->uring_lock)
   320   mutex_unlock(&ctx->uring_lock);
   332   mutex_lock(&ctx->uring_lock);
   336   mutex_lock(&ctx->uring_lock);
rsrc.h
    87   lockdep_assert_held(&ctx->uring_lock);  in io_put_rsrc_node()
   112   lockdep_assert_held(&ctx->uring_lock);  in io_req_set_rsrc_node()
kbuf.c
    54   lockdep_assert_held(&ctx->uring_lock);  in io_buffer_get_list()
    65   * always under the ->uring_lock, but the RCU lookup from mmap does.  in io_buffer_add_list()
   111   * ctx->uring_lock. If we already hold this lock, add back to this  in __io_put_kbuf()
   129   lockdep_assert_held(&req->ctx->uring_lock);  in __io_put_kbuf()
   289   lockdep_assert_held(&ctx->uring_lock);  in __io_remove_buffers()
   417   * Completions that don't happen inline (eg not under uring_lock) will  in io_refill_buffer_cache()
   652   lockdep_assert_held(&ctx->uring_lock);  in io_register_pbuf_ring()
   717   lockdep_assert_held(&ctx->uring_lock);  in io_unregister_pbuf_ring()
notif.h
    31   __must_hold(&notif->ctx->uring_lock)
fdinfo.c
    77   * since we get cached_sq_head and cached_cq_tail without uring_lock  in io_uring_show_fdinfo()
   143   has_lock = mutex_trylock(&ctx->uring_lock);  in io_uring_show_fdinfo()
   199   mutex_unlock(&ctx->uring_lock);  in io_uring_show_fdinfo()
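fdinfo.c only ever trylocks uring_lock: the /proc fdinfo output is best-effort diagnostics, so when the ring is busy it prints what is safe to read unlocked instead of blocking the reader. A small sketch of the same pattern, assuming a pthread mutex and invented fields in place of the kernel's:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct ring_ctx {
        pthread_mutex_t lock;
        unsigned cached_sq_head;   /* readable without the lock (may be stale) */
        unsigned registered_files; /* only stable while the lock is held       */
    };

    /* Best-effort dump: never block just to print diagnostics. */
    static void show_fdinfo(struct ring_ctx *ctx)
    {
        bool has_lock = pthread_mutex_trylock(&ctx->lock) == 0;

        printf("SqHead: %u%s\n", ctx->cached_sq_head,
               has_lock ? "" : " (unlocked read, may be racy)");

        if (has_lock) {
            printf("UserFiles: %u\n", ctx->registered_files);
            pthread_mutex_unlock(&ctx->lock);
        }
    }

    int main(void)
    {
        struct ring_ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER,
                                .cached_sq_head = 42,
                                .registered_files = 8 };
        show_fdinfo(&ctx);
        return 0;
    }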
notif.c
    66   __must_hold(&ctx->uring_lock)
rsrc.c
   177   __must_hold(&node->ctx->uring_lock)
   224   /* As we may drop ->uring_lock, other task may have started quiesce */  in io_rsrc_ref_quiesce()
   249   mutex_unlock(&ctx->uring_lock);  in io_rsrc_ref_quiesce()
   253   mutex_lock(&ctx->uring_lock);  in io_rsrc_ref_quiesce()
   261   mutex_lock(&ctx->uring_lock);  in io_rsrc_ref_quiesce()
   479   lockdep_assert_held(&ctx->uring_lock);  in __io_register_rsrc_update()
   689   * Quiesce may unlock ->uring_lock, and while it's not held  in io_sqe_files_unregister()
   795   * Quiesce may unlock ->uring_lock, and while it's not held  in io_sqe_buffers_unregister()
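The rsrc.c hits show io_rsrc_ref_quiesce() dropping uring_lock while it waits for outstanding resource references to drain and retaking it afterwards; the comment at line 224 is the reason the state has to be rechecked, since another task may have begun its own quiesce while the lock was dropped. A condition-variable sketch of that unlock-wait-relock-recheck shape, with all names invented and no claim to match the kernel implementation:

    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical resource bookkeeping guarded by 'lock'. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  refs_done = PTHREAD_COND_INITIALIZER;
    static int outstanding_refs;
    static int quiesce_in_progress;

    /* Wait for all references to drain before replacing the resource table. */
    static int rsrc_ref_quiesce(void)
    {
        pthread_mutex_lock(&lock);
        while (outstanding_refs > 0) {
            /*
             * pthread_cond_wait() releases 'lock' while sleeping, just as the
             * kernel path drops ->uring_lock; another task may start its own
             * quiesce in that window, so recheck the state every iteration.
             */
            pthread_cond_wait(&refs_done, &lock);
            if (quiesce_in_progress) {
                pthread_mutex_unlock(&lock);
                return -1;              /* someone else got there first */
            }
        }
        quiesce_in_progress = 1;
        /* ... swap the resource table here, still holding the lock ... */
        quiesce_in_progress = 0;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        printf("quiesce: %d\n", rsrc_ref_quiesce());
        return 0;
    }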
filetable.c
    65   __must_hold(&req->ctx->uring_lock)
sqpoll.c
   183   mutex_lock(&ctx->uring_lock);  in __io_sq_thread()
   194   mutex_unlock(&ctx->uring_lock);  in __io_sq_thread()
poll.c
   146   lockdep_assert_held(&req->ctx->uring_lock);  in io_poll_req_insert_locked()
   157   * ->cancel_table_locked is protected by ->uring_lock in  in io_poll_tw_hash_eject()
   603   /* io-wq doesn't hold uring_lock */  in __io_arm_poll_handler()
   780   __must_hold(&ctx->uring_lock)
   962   * If sqpoll or single issuer, there is no contention for ->uring_lock  in io_poll_add()
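Many of the poll.c, rsrc.h, and kbuf.c hits are not lock or unlock calls at all but lockdep_assert_held() checks and __must_hold() annotations: a function with a *_locked suffix documents that its caller already owns uring_lock, and lockdep verifies that claim at runtime in debug builds. Outside the kernel the same discipline can be approximated by tracking the owner alongside the mutex, as in this sketch with invented helper names:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    /* A mutex that remembers its owner so callees can assert it is held. */
    struct checked_mutex {
        pthread_mutex_t mtx;
        pthread_t owner;
        int held;
    };

    static void cm_lock(struct checked_mutex *m)
    {
        pthread_mutex_lock(&m->mtx);
        m->owner = pthread_self();
        m->held = 1;
    }

    static void cm_unlock(struct checked_mutex *m)
    {
        m->held = 0;
        pthread_mutex_unlock(&m->mtx);
    }

    /* Rough analogue of lockdep_assert_held(&ctx->uring_lock). */
    static void cm_assert_held(struct checked_mutex *m)
    {
        assert(m->held && pthread_equal(m->owner, pthread_self()));
    }

    static struct checked_mutex ring_lock = { .mtx = PTHREAD_MUTEX_INITIALIZER };

    /* The "_locked" suffix mirrors the kernel convention: caller holds the lock. */
    static void poll_req_insert_locked(int req)
    {
        cm_assert_held(&ring_lock);
        printf("inserted req %d into the locked hash table\n", req);
    }

    int main(void)
    {
        cm_lock(&ring_lock);
        poll_req_insert_locked(1);
        cm_unlock(&ring_lock);
        return 0;
    }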
/kernel/linux/linux-5.10/io_uring/
io_uring.c
   324   /* inline/task_work completion list, under ->uring_lock */
   347   struct mutex uring_lock;  member
   368   * uring_lock, and updated through io_uring_register(2)
   415   * ->iopoll_list is protected by the ctx->uring_lock for
  1132   mutex_lock(&ctx->uring_lock);  in io_tw_lock()
  1298   mutex_unlock(&ctx->uring_lock);  in io_fallback_req_func()
  1346   mutex_init(&ctx->uring_lock);  in io_ring_ctx_alloc()
  1724   /* iopoll syncs against uring_lock, not completion_lock */  in io_cqring_overflow_flush()
  1726   mutex_lock(&ctx->uring_lock);  in io_cqring_overflow_flush()
  1729   mutex_unlock(&ctx->uring_lock);  in io_cqring_overflow_flush()
   [all...]
/kernel/linux/linux-6.6/include/linux/ |
io_uring_types.h
   168   /* inline/task_work completion list, under ->uring_lock */
   222   struct mutex uring_lock;  member
   242   * uring_lock, and updated through io_uring_register(2)
   261   * ->iopoll_list is protected by the ctx->uring_lock for
   330   /* deferred free list, protected by ->uring_lock */
   342   /* protected by ->uring_lock */
   385   /* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
   488   /* hashed into ->cancel_hash_locked, protected by ->uring_lock */
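The io_uring_types.h hits are struct io_ring_ctx fields whose comments name ->uring_lock as their protector (the deferred free list, the iopoll list, registered buffer state, and so on): the mutex is embedded in the context next to the data it guards, while CQ posting is covered by a separate completion_lock. A cut-down, user-space illustration of that layout, with invented field names rather than the real struct:

    #include <pthread.h>

    struct req;        /* stand-ins so the pointers below compile */
    struct buf_ring;

    /*
     * Cut-down illustration of the layout the comments above describe: the
     * mutex lives in the context, and each field's comment names the lock
     * that protects it. Field names here are invented, not the kernel's.
     */
    struct ring_ctx {
        pthread_mutex_t uring_lock;

        struct req *submit_free_list;    /* inline/task_work completion list, under uring_lock */
        struct buf_ring **buf_rings;     /* registered buffers, updated under uring_lock       */
        struct req *iopoll_list;         /* polled requests, also under uring_lock             */

        pthread_mutex_t completion_lock; /* CQ posting uses this one, not uring_lock           */
    };

    int main(void)
    {
        static struct ring_ctx ctx = {
            .uring_lock = PTHREAD_MUTEX_INITIALIZER,
            .completion_lock = PTHREAD_MUTEX_INITIALIZER,
        };
        (void)ctx;
        return 0;
    }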
Completed in 21 milliseconds