/kernel/linux/linux-5.10/drivers/infiniband/hw/i40iw/
i40iw_uk.c
     54  if (!qp->sq_ring.head)  in i40iw_nop_1()
     57  wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);  in i40iw_nop_1()
     62  peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;  in i40iw_nop_1()
     99  sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);  in i40iw_qp_post_wr()
    114  qp->initial_ring.head = qp->sq_ring.head;  in i40iw_qp_post_wr()
    125  qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);  in i40iw_qp_ring_push_db()
    149  *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);  in i40iw_qp_get_next_send_wqe()
    159  I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);  in i40iw_qp_get_next_send_wqe()
    164  *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);  in i40iw_qp_get_next_send_wqe()
    [all...]
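Every hit in i40iw_uk.c is a variation on one ring discipline: head points at the next free SQ slot, tail at the oldest outstanding one, and both wrap modulo size (see the peek_head computation at line 62). Below is a minimal sketch of what the GETCURRENT_HEAD/MOVE_HEAD macros amount to; the struct layout and return codes are simplified assumptions, not the driver's exact definitions.

#include <stdint.h>
#include <errno.h>

/* Simplified stand-in for struct i40iw_ring (assumption, not the
 * kernel's layout). */
struct i40iw_ring_sketch {
	uint32_t head;	/* next SQ slot software will fill */
	uint32_t tail;	/* oldest posted WQE not yet completed */
	uint32_t size;	/* total slots; indices wrap modulo size */
};

/* What I40IW_RING_GETCURRENT_HEAD reads: the slot for the next WQE. */
static inline uint32_t ring_current_head(const struct i40iw_ring_sketch *r)
{
	return r->head;
}

/* What I40IW_RING_MOVE_HEAD does in spirit: advance head one slot
 * unless that would collide with tail (ring full), cf. the
 * peek_head check at i40iw_uk.c line 62. */
static inline int ring_move_head(struct i40iw_ring_sketch *r)
{
	uint32_t peek_head = (r->head + 1) % r->size;

	if (peek_head == r->tail)
		return -ENOMEM;	/* no free slot */
	r->head = peek_head;
	return 0;
}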
i40iw_ctrl.c
    116  I40IW_RING_MOVE_TAIL(cqp->sq_ring);  in i40iw_cqp_poll_registers()
    482  I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);  in i40iw_sc_cqp_init()
    585  i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));  in i40iw_sc_cqp_post_sq()
    587  i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));  in i40iw_sc_cqp_post_sq()
    593  cqp->sq_ring.head,  in i40iw_sc_cqp_post_sq()
    594  cqp->sq_ring.tail,  in i40iw_sc_cqp_post_sq()
    595  cqp->sq_ring.size);  in i40iw_sc_cqp_post_sq()
    610  if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {  in i40iw_sc_cqp_get_next_send_wqe_idx()
    615  cqp->sq_ring.head,  in i40iw_sc_cqp_get_next_send_wqe_idx()
    616  cqp->sq_ring  in i40iw_sc_cqp_get_next_send_wqe_idx()
    [all...]
i40iw_user.h
    345  struct i40iw_ring sq_ring;  member
i40iw_utils.c
    463  pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);  in i40iw_cleanup_pending_cqp_op()
    464  wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);  in i40iw_cleanup_pending_cqp_op()
    469  wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);  in i40iw_cleanup_pending_cqp_op()
i40iw_puda.c
    211  *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);  in i40iw_puda_get_next_send_wqe()
    214  I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);  in i40iw_puda_get_next_send_wqe()
    346  I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);  in i40iw_puda_poll_completion()
    599  I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);  in i40iw_puda_qp_create()
i40iw_verbs.c
   1637  if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))  in i40iw_drain_sq()
   2391  if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))  in i40iw_poll_cq()
i40iw_type.h
    306  struct i40iw_ring sq_ring;  member
/kernel/linux/linux-6.6/drivers/infiniband/hw/irdma/
uk.c
     65  if (!qp->sq_ring.head)  in irdma_nop_1()
     68  wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);  in irdma_nop_1()
    100  wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;  in irdma_clr_wqes()
    128  sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);  in irdma_uk_qp_post_wr()
    143  qp->initial_ring.head = qp->sq_ring.head;  in irdma_uk_qp_post_wr()
    164  (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %  in irdma_qp_get_next_send_wqe()
    168  if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))  in irdma_qp_get_next_send_wqe()
    173  IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))  in irdma_qp_get_next_send_wqe()
    178  IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);  in irdma_qp_get_next_send_wqe()
    182  *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);  in irdma_qp_get_next_send_wqe()
    [all...]
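The irdma hits show the same ring, generalized: a single WQE may span several slots ("quanta"), so the free-space check (IRDMA_SQ_RING_FREE_QUANTA) and the head advance (IRDMA_RING_MOVE_HEAD_NOCHECK) are count-aware. A hedged sketch of that allocation step follows; the helper bodies are simplified assumptions, and the real free-quanta macro reserves additional headroom beyond the one-slot margin used here.

#include <stdint.h>
#include <errno.h>

/* Simplified stand-in for struct irdma_ring (assumption). */
struct irdma_ring_sketch {
	uint32_t head;
	uint32_t tail;
	uint32_t size;
};

/* Roughly IRDMA_RING_USED_QUANTA: slots between tail and head. */
static inline uint32_t ring_used_quanta(const struct irdma_ring_sketch *r)
{
	return (r->head + r->size - r->tail) % r->size;
}

/* Reserve 'quanta' consecutive slots for one WQE, as in the
 * irdma_qp_get_next_send_wqe() hits above. */
static int ring_alloc_quanta(struct irdma_ring_sketch *r, uint32_t quanta,
			     uint32_t *wqe_idx)
{
	uint32_t free_quanta = r->size - ring_used_quanta(r) - 1;

	if (quanta > free_quanta)
		return -ENOMEM;

	*wqe_idx = r->head;
	/* IRDMA_RING_MOVE_HEAD_NOCHECK: advance without re-checking;
	 * the capacity test above already guaranteed the space. */
	r->head = (r->head + quanta) % r->size;
	return 0;
}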
utils.c
    539  pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);  in irdma_cleanup_pending_cqp_op()
    540  wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);  in irdma_cleanup_pending_cqp_op()
    546  wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring);  in irdma_cleanup_pending_cqp_op()
   2454  struct irdma_ring *sq_ring = &qp->sq_ring;  in irdma_generate_flush_completions()  local
   2468  while (IRDMA_RING_MORE_WORK(*sq_ring)) {  in irdma_generate_flush_completions()
   2476  wqe_idx = sq_ring->tail;  in irdma_generate_flush_completions()
   2485  IRDMA_RING_SET_TAIL(*sq_ring,  in irdma_generate_flush_completions()
   2486  sq_ring->tail + qp->sq_wrtrk_array[sq_ring  in irdma_generate_flush_completions()
    [all...]
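The irdma_generate_flush_completions() hits (lines 2454-2486) outline the drain path: while the ring still has work, take the WQE at tail, emit a flushed completion for it, and retire it by bumping tail past the quanta recorded in sq_wrtrk_array. A sketch of that loop, reusing struct irdma_ring_sketch from the previous sketch; the tracking struct is a guessed minimal stand-in (the driver's entries carry more, such as the wrid and length).

#include <stdint.h>

/* Guessed minimal per-WQE tracking entry (assumption). */
struct wqe_trk_sketch {
	uint32_t quanta;	/* slots this WQE occupied */
};

/* Drain loop in the spirit of irdma_generate_flush_completions():
 * one flushed completion per outstanding WQE, tail advanced past
 * each WQE's quanta (cf. utils.c lines 2468-2486). */
static void flush_sq_sketch(struct irdma_ring_sketch *r,
			    const struct wqe_trk_sketch *wrtrk)
{
	while (r->tail != r->head) {	/* IRDMA_RING_MORE_WORK */
		uint32_t wqe_idx = r->tail;

		/* ... emit a FLUSH_ERR completion for wqe_idx here ... */

		/* IRDMA_RING_SET_TAIL, modulo the ring size */
		r->tail = (r->tail + wrtrk[wqe_idx].quanta) % r->size;
	}
}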
ctrl.c
    411  qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,  in irdma_sc_qp_init()
    414  "WQE: hw_sq_size[%04d] sq_ring.size[%04d]\n",  in irdma_sc_qp_init()
    415  qp->hw_sq_size, qp->qp_uk.sq_ring.size);  in irdma_sc_qp_init()
   2082  cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);  in irdma_sc_gather_stats()
   2763  IRDMA_RING_MOVE_TAIL(cqp->sq_ring);  in irdma_cqp_poll_registers()
   3123  IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);  in irdma_sc_cqp_init()
   3246  writel(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db);  in irdma_sc_cqp_post_sq()
   3250  cqp->sq_ring  in irdma_sc_cqp_post_sq()
    [all...]
puda.c
    196  *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);  in irdma_puda_get_next_send_wqe()
    199  IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);  in irdma_puda_get_next_send_wqe()
    399  IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);  in irdma_puda_poll_cmpl()
    687  IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);  in irdma_puda_qp_create()
user.h
    322  struct irdma_ring sq_ring;  member
hw.c
   2569  if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {  in irdma_hw_flush_wqes_callback()
   2632  if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {  in irdma_hw_flush_wqes()
   2670  if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))  in irdma_hw_flush_wqes()
type.h
    363  struct irdma_ring sq_ring;  member
/kernel/linux/linux-5.10/tools/io_uring/
io_uring-bench.c
     76  struct io_sq_ring sq_ring;  member
    196  struct io_sq_ring *ring = &s->sq_ring;  in prep_more_ios()
    281  struct io_sq_ring *ring = &s->sq_ring;  in submitter_fn()
    391  struct io_sq_ring *sring = &s->sq_ring;  in setup_ring()
    435  printf("sq_ring ptr = 0x%p\n", ptr);  in setup_ring()
    558  printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", DEPTH, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);  in main()
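io_uring-bench.c drives the kernel SQ ring directly rather than through liburing, which is why setup_ring() mmaps the ring itself and prints the pointer seen in the line-435 hit. A hedged sketch of that setup step, using only the raw io_uring_setup syscall and the documented IORING_OFF_SQ_RING mapping (constants and fields per linux/io_uring.h; error handling trimmed, and the DEPTH value is an assumption matching the tool's QD printout):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

#define DEPTH 128	/* assumed queue depth for this sketch */

static int setup_ring_sketch(void)
{
	struct io_uring_params p;
	void *ptr;
	int fd;

	memset(&p, 0, sizeof(p));
	fd = syscall(__NR_io_uring_setup, DEPTH, &p);
	if (fd < 0)
		return -1;

	/* One mapping covers the SQ ring header (head, tail,
	 * ring_mask, ring_entries) plus the sqe index array. */
	ptr = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(unsigned int),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		   fd, IORING_OFF_SQ_RING);
	if (ptr == MAP_FAILED)
		return -1;
	printf("sq_ring ptr = 0x%p\n", ptr);	/* cf. the line-435 hit */
	return fd;
}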
/kernel/linux/linux-6.6/tools/testing/selftests/x86/
lam.c
    104  struct io_uring_queue sq_ring;  member
    386  struct io_uring_queue *sring = &s->sq_ring;  in mmap_io_uring()
    429  s->sq_ring.queue.sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),  in mmap_io_uring()
    432  if (s->sq_ring.queue.sqes == MAP_FAILED) {  in mmap_io_uring()
    508  struct io_uring_queue *sring = &ring->sq_ring;  in handle_uring_sq()
    538  index = tail & *ring->sq_ring.ring_mask;  in handle_uring_sq()
    540  sqe = &ring->sq_ring.queue.sqes[index];  in handle_uring_sq()
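The handle_uring_sq() hits at lines 538-540 show the submission-side indexing: the producer claims the slot at tail & ring_mask, fills the SQE there, and only then publishes the new tail so the kernel never sees a half-written entry. A sketch of that step; the struct below is an abbreviated stand-in for lam.c's mapped ring pointers, not its actual io_uring_queue layout.

#include <string.h>
#include <linux/io_uring.h>

/* Abbreviated stand-in for the mapped SQ ring pointers (assumption). */
struct sq_sketch {
	unsigned int *tail;		/* mapped at sq_off.tail */
	unsigned int *ring_mask;	/* mapped at sq_off.ring_mask */
	unsigned int *array;		/* sqe index array */
	struct io_uring_sqe *sqes;	/* separate IORING_OFF_SQES mapping */
};

static void queue_one_nop(struct sq_sketch *s)
{
	unsigned int tail = *s->tail;
	unsigned int index = tail & *s->ring_mask;	/* cf. lam.c line 538 */
	struct io_uring_sqe *sqe = &s->sqes[index];	/* cf. lam.c line 540 */

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;
	s->array[index] = index;

	/* Publish with release semantics so the kernel cannot observe
	 * the new tail before the SQE contents. */
	__atomic_store_n(s->tail, tail + 1, __ATOMIC_RELEASE);
}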