Searched refs: completion_lock (results 1 - 15 of 15), sorted by relevance
/kernel/linux/linux-6.6/io_uring/
timeout.c
    161   __must_hold(&req->ctx->completion_lock)
    193   __must_hold(&req->ctx->completion_lock)
    220   __must_hold(&req->ctx->completion_lock)
    288   __must_hold(&ctx->completion_lock)
    472   spin_lock(&ctx->completion_lock);  [in io_timeout_remove()]
    474   spin_unlock(&ctx->completion_lock);  [in io_timeout_remove()]
    595   * This is safe because ->completion_lock is held, and submissions  [in io_timeout()]
    596   * and completions are never mixed in the same ->completion_lock section.  [in io_timeout()]
    671   * completion_lock is needed for io_match_task(). Take it before  [in io_kill_timeouts()]
    674   spin_lock(&ctx->completion_lock);  [in io_kill_timeouts()]
    ...   (more matches in this file not shown)
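Note: a minimal sketch of the __must_hold()/spin_lock() idiom these timeout.c hits show, i.e. a helper that documents (and lets sparse check) that its caller already owns completion_lock, called from a function that takes the lock around a list walk. The demo_* names are invented placeholders, not io_uring's real types.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_ctx {
        spinlock_t completion_lock;
        struct list_head timeout_list;          /* only touched under completion_lock */
};

struct demo_timeout {
        struct list_head list;
};

/* Annotation mirrors the __must_hold() hits above: the caller must
 * already hold ctx->completion_lock when this runs. */
static void demo_kill_timeout(struct demo_ctx *ctx, struct demo_timeout *t)
        __must_hold(&ctx->completion_lock)
{
        list_del_init(&t->list);
}

static void demo_kill_timeouts(struct demo_ctx *ctx)
{
        struct demo_timeout *t, *tmp;

        spin_lock(&ctx->completion_lock);
        list_for_each_entry_safe(t, tmp, &ctx->timeout_list, list)
                demo_kill_timeout(ctx, t);
        spin_unlock(&ctx->completion_lock);
}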
io_uring.c
    327   spin_lock_init(&ctx->completion_lock);  [in io_ring_ctx_alloc()]
    374   spin_lock(&req->ctx->completion_lock);  [in io_clean_op()]
    376   spin_unlock(&req->ctx->completion_lock);  [in io_clean_op()]
    596   spin_lock(&ctx->completion_lock);  [in io_eventfd_flush_signal()]
    608   spin_unlock(&ctx->completion_lock);  [in io_eventfd_flush_signal()]
    622   spin_lock(&ctx->completion_lock);  [in __io_commit_cqring_flush()]
    624   spin_unlock(&ctx->completion_lock);  [in __io_commit_cqring_flush()]
    633   spin_lock(&ctx->completion_lock);  [in __io_cq_lock()]
    637   __acquires(ctx->completion_lock)
    639   spin_lock(&ctx->completion_lock);
    ...   (more matches in this file not shown)
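Note: a short sketch of the initialise-then-wrap pattern visible in these io_uring.c hits, assuming nothing beyond what the hits show: spin_lock_init() at context allocation, plus small lock/unlock wrappers carrying __acquires()/__releases() annotations. The demo_ring_ctx type and function names are made up for illustration.

#include <linux/spinlock.h>

struct demo_ring_ctx {
        spinlock_t completion_lock;
};

static void demo_ctx_init(struct demo_ring_ctx *ctx)
{
        spin_lock_init(&ctx->completion_lock);  /* cf. the io_ring_ctx_alloc() hit */
}

/* Sparse context annotations keep lock/unlock balance checkable. */
static void demo_cq_lock(struct demo_ring_ctx *ctx)
        __acquires(ctx->completion_lock)
{
        spin_lock(&ctx->completion_lock);
}

static void demo_cq_unlock(struct demo_ring_ctx *ctx)
        __releases(ctx->completion_lock)
{
        spin_unlock(&ctx->completion_lock);
}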
kbuf.c
    114   * ctx->completion_lock.  [in __io_put_kbuf()]
    125   spin_lock(&ctx->completion_lock);  [in __io_put_kbuf()]
    127   spin_unlock(&ctx->completion_lock);  [in __io_put_kbuf()]
    422   spin_lock(&ctx->completion_lock);  [in io_refill_buffer_cache()]
    426   spin_unlock(&ctx->completion_lock);  [in io_refill_buffer_cache()]
    429   spin_unlock(&ctx->completion_lock);  [in io_refill_buffer_cache()]
cancel.c
    122   spin_lock(&ctx->completion_lock);  [in io_try_cancel()]
    125   spin_unlock(&ctx->completion_lock);  [in io_try_cancel()]
fdinfo.c
    202   spin_lock(&ctx->completion_lock);  [in io_uring_show_fdinfo()]
    211   spin_unlock(&ctx->completion_lock);  [in io_uring_show_fdinfo()]
kbuf.h
    129   lockdep_assert_held(&req->ctx->completion_lock);  [in io_put_kbuf_comp()]
io_uring.h
    106   lockdep_assert_held(&ctx->completion_lock);  [in io_lockdep_assert_cq_locked()]
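Note: the kbuf.h and io_uring.h hits are lockdep assertions rather than lock acquisitions. A minimal sketch of that idiom, with a hypothetical demo_cq_ctx type standing in for the real context, looks like this; the real 6.6 helper has extra logic not shown here.

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct demo_cq_ctx {
        spinlock_t completion_lock;
};

/* With CONFIG_LOCKDEP enabled this splats if a caller reaches the
 * completion-queue path without holding completion_lock; otherwise it
 * compiles away to nothing. */
static inline void demo_assert_cq_locked(struct demo_cq_ctx *ctx)
{
        lockdep_assert_held(&ctx->completion_lock);
}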
/kernel/linux/linux-5.10/io_uring/ |
io_uring.c
    386   /* IRQ completion list, under ->completion_lock */
    410   spinlock_t completion_lock;  [member]
    1348  spin_lock_init(&ctx->completion_lock);  [in io_ring_ctx_alloc()]
    1506  __must_hold(&req->ctx->completion_lock)
    1524  lockdep_assert_held(&ctx->completion_lock);  [in io_queue_deferred()]
    1539  __must_hold(&ctx->completion_lock)
    1685  spin_lock(&ctx->completion_lock);  [in __io_cqring_overflow_flush()]
    1713  spin_unlock(&ctx->completion_lock);  [in __io_cqring_overflow_flush()]
    1724  /* iopoll syncs against uring_lock, not completion_lock */  [in io_cqring_overflow_flush()]
    1847  spin_lock(&ctx->completion_lock);  [in io_req_complete_post()]
    ...   (more matches in this file not shown)
/kernel/linux/linux-5.10/fs/ |
aio.c
    161   spinlock_t completion_lock;  [member]
    391   * happen under the ctx->completion_lock. That does not work with the  [in aio_migratepage()]
    437   /* Take completion_lock to prevent other writes to the ring buffer  [in aio_migratepage()]
    441   spin_lock_irqsave(&ctx->completion_lock, flags);  [in aio_migratepage()]
    445   spin_unlock_irqrestore(&ctx->completion_lock, flags);  [in aio_migratepage()]
    757   spin_lock_init(&ctx->completion_lock);  [in ioctx_alloc()]
    970   * called holding ctx->completion_lock.
    1003  spin_lock_irq(&ctx->completion_lock);  [in user_refill_reqs_available()]
    1013  * ctx->completion_lock. Even if head is invalid, the check  [in user_refill_reqs_available()]
    1024  spin_unlock_irq(&ctx->completion_lock);  [in user_refill_reqs_available()]
    ...   (more matches in this file not shown)
/kernel/linux/linux-6.6/fs/ |
aio.c
    160   spinlock_t completion_lock;  [member]
    417   * happen under the ctx->completion_lock. That does not work with the  [in aio_migrate_folio()]
    463   /* Take completion_lock to prevent other writes to the ring buffer  [in aio_migrate_folio()]
    467   spin_lock_irqsave(&ctx->completion_lock, flags);  [in aio_migrate_folio()]
    471   spin_unlock_irqrestore(&ctx->completion_lock, flags);  [in aio_migrate_folio()]
    776   spin_lock_init(&ctx->completion_lock);  [in ioctx_alloc()]
    986   * called holding ctx->completion_lock.
    1019  spin_lock_irq(&ctx->completion_lock);  [in user_refill_reqs_available()]
    1029  * ctx->completion_lock. Even if head is invalid, the check  [in user_refill_reqs_available()]
    1039  spin_unlock_irq(&ctx->completion_lock);  [in user_refill_reqs_available()]
    ...   (more matches in this file not shown)
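Note: unlike the io_uring hits above, both fs/aio.c versions take completion_lock with the IRQ-disabling variants (spin_lock_irqsave() / spin_lock_irq()), since the aio completion path may run from interrupt context. A minimal sketch of that pattern, assuming an invented demo_aio_ctx type rather than the real aio structures:

#include <linux/spinlock.h>

struct demo_aio_ctx {
        spinlock_t completion_lock;
        unsigned int tail;              /* ring tail, only written under the lock */
};

/* Process-context caller: disable local interrupts while holding the
 * lock so an in-IRQ completion on this CPU cannot deadlock against us. */
static void demo_update_tail(struct demo_aio_ctx *ctx, unsigned int new_tail)
{
        unsigned long flags;

        spin_lock_irqsave(&ctx->completion_lock, flags);
        ctx->tail = new_tail;
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
}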
/kernel/linux/linux-6.6/include/linux/ |
io_uring_types.h
    306   spinlock_t completion_lock;  [member]
    308   /* IRQ completion list, under ->completion_lock */
    371   /* protected by ->completion_lock */
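Note: the io_uring_types.h hits show the declaration side of the same lock. A trimmed sketch of how the lock and its "protected by" comments group the data it guards; the field names below are placeholders, not the real struct layout.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_uring_ctx {
        spinlock_t              completion_lock;

        /* IRQ completion list, under ->completion_lock */
        struct list_head        irq_completion_list;

        /* protected by ->completion_lock */
        struct list_head        defer_list;
};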
/kernel/linux/linux-5.10/drivers/scsi/megaraid/ |
megaraid_sas.h
    2361  spinlock_t completion_lock;  [member]
megaraid_sas_base.c
    2280  spin_lock_irqsave(&instance->completion_lock, flags);  [in megasas_complete_cmd_dpc()]
    2305  spin_unlock_irqrestore(&instance->completion_lock, flags);  [in megasas_complete_cmd_dpc()]
    7368  spin_lock_init(&instance->completion_lock);  [in megasas_init_ctrl_params()]
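Note: in the megaraid_sas hits the lock is set up once during controller parameter init and then taken with spin_lock_irqsave() inside the deferred completion handler. A rough sketch of that shape, with invented demo_* structures standing in for the driver's real ones (the actual DPC processes reply-queue entries, not a list like this):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_cmd {
        struct list_head list;
};

struct demo_instance {
        spinlock_t completion_lock;
        struct list_head completed_cmds;        /* filled by the interrupt handler */
};

static void demo_init_ctrl_params(struct demo_instance *inst)
{
        spin_lock_init(&inst->completion_lock);
        INIT_LIST_HEAD(&inst->completed_cmds);
}

/* Deferred-completion handler: drain finished commands with local IRQs
 * off, since the producer side runs from interrupt context. */
static void demo_complete_cmd_dpc(struct demo_instance *inst)
{
        struct demo_cmd *cmd, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&inst->completion_lock, flags);
        list_for_each_entry_safe(cmd, tmp, &inst->completed_cmds, list) {
                list_del_init(&cmd->list);
                /* hand the finished command back to the upper layer here */
        }
        spin_unlock_irqrestore(&inst->completion_lock, flags);
}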
/kernel/linux/linux-6.6/drivers/scsi/megaraid/ |
megaraid_sas.h
    2366  spinlock_t completion_lock;  [member]
megaraid_sas_base.c
    2290  spin_lock_irqsave(&instance->completion_lock, flags);  [in megasas_complete_cmd_dpc()]
    2315  spin_unlock_irqrestore(&instance->completion_lock, flags);  [in megasas_complete_cmd_dpc()]
    7426  spin_lock_init(&instance->completion_lock);  [in megasas_init_ctrl_params()]
Completed in 50 milliseconds