Searched refs:q_depth (Results 1 - 25 of 58), sorted by relevance

/kernel/linux/linux-6.6/drivers/net/ethernet/microsoft/mana/
hw_channel.c
349 static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth, in mana_hwc_create_cq() argument
362 eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth); in mana_hwc_create_cq()
366 cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth); in mana_hwc_create_cq()
389 comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL); in mana_hwc_create_cq()
397 hwc_cq->queue_depth = q_depth; in mana_hwc_create_cq()
410 static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth, in mana_hwc_alloc_dma_buf() argument
424 dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL); in mana_hwc_alloc_dma_buf()
428 dma_buf->num_reqs = q_depth; in mana_hwc_alloc_dma_buf()
430 buf_size = PAGE_ALIGN(q_depth * max_msg_size); in mana_hwc_alloc_dma_buf()
442 for (i = 0; i < q_depth; in mana_hwc_alloc_dma_buf()
481 mana_hwc_create_wq(struct hw_channel_context *hwc, enum gdma_queue_type q_type, u16 q_depth, u32 max_msg_size, struct hwc_cq *hwc_cq, struct hwc_wq **hwc_wq_ptr) mana_hwc_create_wq() argument
585 mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth, u32 max_req_msg_size, u32 max_resp_msg_size) mana_hwc_test_channel() argument
615 mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth, u32 *max_req_msg_size, u32 *max_resp_msg_size) mana_hwc_establish_channel() argument
657 mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth, u32 max_req_msg_size, u32 max_resp_msg_size) mana_hwc_init_queues() argument
[all...]
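
The hw_channel.c hits above show the usual sizing pattern for the MANA hardware channel: queue byte sizes are rounded up to a power of two and a per-entry completion buffer array is allocated for q_depth slots. Below is a minimal user-space sketch of that pattern; the entry sizes and the roundup_pow_of_two() stand-in are assumptions for illustration, not the driver's definitions.

    /* Sketch: round queue byte sizes up to a power of two and allocate one
     * completion buffer pointer per queue entry.  Entry sizes are assumed
     * values; roundup_pow_of_two() is a portable stand-in for the kernel helper. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define EQE_SIZE 16   /* assumed event-queue entry size */
    #define CQE_SIZE 64   /* assumed completion-queue entry size */

    static uint32_t roundup_pow_of_two(uint32_t n)
    {
        uint32_t p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        uint16_t q_depth = 100;   /* requested number of entries */
        uint32_t eq_size = roundup_pow_of_two(EQE_SIZE * q_depth);
        uint32_t cq_size = roundup_pow_of_two(CQE_SIZE * q_depth);
        void **comp_buf = calloc(q_depth, sizeof(*comp_buf)); /* one slot per entry */

        if (!comp_buf)
            return 1;
        printf("eq_size=%u cq_size=%u depth=%u\n",
               (unsigned)eq_size, (unsigned)cq_size, (unsigned)q_depth);
        free(comp_buf);
        return 0;
    }
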
/kernel/linux/linux-5.10/drivers/net/ethernet/amazon/ena/
ena_eth_com.c
15 head_masked = io_cq->head & (io_cq->q_depth - 1); in ena_com_get_next_rx_cdesc()
41 tail_masked = io_sq->tail & (io_sq->q_depth - 1); in get_sq_desc_regular_queue()
56 dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1); in ena_com_write_bounce_buffer_to_dev()
82 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) in ena_com_write_bounce_buffer_to_dev()
210 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) in ena_com_sq_update_tail()
219 idx &= (io_cq->q_depth - 1); in ena_com_rx_cdesc_idx_to_ptr()
248 head_masked = io_cq->head & (io_cq->q_depth - 1); in ena_com_cdesc_rx_pkt_get()
516 u16 q_depth = io_cq->q_depth; in ena_com_rx_pkt() local
544 if (unlikely(ena_buf[i].req_id >= q_depth)) in ena_com_rx_pkt()
[all...]
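
The ena_eth_com.c hits all use the same idiom: head and tail are free-running counters, q_depth is a power of two, and the ring slot is obtained by masking with q_depth - 1. A minimal sketch of that idiom (not ENA code) follows.

    /* Sketch of the masking idiom: a free-running counter maps to a ring
     * slot via (counter & (q_depth - 1)); valid only for power-of-two depths. */
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t ring_slot(uint16_t counter, uint16_t q_depth)
    {
        return counter & (q_depth - 1);
    }

    int main(void)
    {
        uint16_t q_depth = 8;   /* power of two, as these rings require */

        /* Counters keep incrementing past q_depth; the mask folds them
         * back into slots 0..7 without an explicit wrap branch. */
        for (uint16_t c = 5; c < 12; c++)
            printf("counter %u -> slot %u\n", (unsigned)c,
                   (unsigned)ring_slot(c, q_depth));
        return 0;
    }
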
ena_eth_com.h
80 return io_sq->q_depth - 1 - cnt; in ena_com_free_q_entries()
176 need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH); in ena_com_update_dev_comp_head()
213 if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) in ena_com_cq_inc_head()
224 masked_head = io_cq->head & (io_cq->q_depth - 1); in ena_com_tx_comp_req_id_get()
242 if (unlikely(*req_id >= io_cq->q_depth)) { in ena_com_tx_comp_req_id_get()
ena_com.c
89 u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth); in ena_com_admin_init_sq()
111 u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth); in ena_com_admin_init_cq()
134 ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; in ena_com_admin_init_aenq()
144 aenq->head = aenq->q_depth; in ena_com_admin_init_aenq()
154 aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; in ena_com_admin_init_aenq()
180 if (unlikely(command_id >= admin_queue->q_depth)) { in get_comp_ctxt()
182 command_id, admin_queue->q_depth); in get_comp_ctxt()
215 queue_size_mask = admin_queue->q_depth - 1; in __ena_com_submit_admin_cmd()
221 if (cnt >= admin_queue->q_depth) { in __ena_com_submit_admin_cmd()
264 size_t size = admin_queue->q_depth * sizeo in ena_com_init_comp_ctxt()
[all...]
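
In the ena_com.c hits, q_depth bounds the admin queue's completion-context table: a command_id at or above q_depth is rejected before it is used as an index, and submission stops once the count of outstanding commands reaches q_depth. The sketch below mirrors only the bounds check; the names are illustrative, not the ENA API.

    /* Sketch of the admin-queue bounds check: the completion-context table
     * has q_depth entries, so an out-of-range command_id is refused before
     * it can be used as an index. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct comp_ctx { int occupied; };

    struct admin_queue {
        uint16_t q_depth;
        struct comp_ctx *comp_ctx;
    };

    static struct comp_ctx *get_comp_ctx(struct admin_queue *aq, uint16_t command_id)
    {
        if (command_id >= aq->q_depth) {
            fprintf(stderr, "command id %u out of range (depth %u)\n",
                    (unsigned)command_id, (unsigned)aq->q_depth);
            return NULL;   /* refuse to index out of bounds */
        }
        return &aq->comp_ctx[command_id];
    }

    int main(void)
    {
        struct admin_queue aq = { .q_depth = 32 };

        aq.comp_ctx = calloc(aq.q_depth, sizeof(*aq.comp_ctx));
        if (!aq.comp_ctx)
            return 1;
        printf("id 5:  %p\n", (void *)get_comp_ctx(&aq, 5));
        printf("id 40: %p\n", (void *)get_comp_ctx(&aq, 40));   /* NULL */
        free(aq.comp_ctx);
        return 0;
    }
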
ena_com.h
130 u16 q_depth; member
174 u16 q_depth; member
223 u16 q_depth; member
253 u16 q_depth; member
/kernel/linux/linux-6.6/drivers/net/ethernet/amazon/ena/
ena_eth_com.c
15 head_masked = io_cq->head & (io_cq->q_depth - 1); in ena_com_get_next_rx_cdesc()
41 tail_masked = io_sq->tail & (io_sq->q_depth - 1); in get_sq_desc_regular_queue()
56 dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1); in ena_com_write_bounce_buffer_to_dev()
84 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) in ena_com_write_bounce_buffer_to_dev()
221 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) in ena_com_sq_update_tail()
230 idx &= (io_cq->q_depth - 1); in ena_com_rx_cdesc_idx_to_ptr()
259 head_masked = io_cq->head & (io_cq->q_depth - 1); in ena_com_cdesc_rx_pkt_get()
545 u16 q_depth = io_cq->q_depth; in ena_com_rx_pkt() local
575 if (unlikely(ena_buf[i].req_id >= q_depth)) in ena_com_rx_pkt()
[all...]
ena_eth_com.h
84 return io_sq->q_depth - 1 - cnt; in ena_com_free_q_entries()
183 need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH); in ena_com_update_dev_comp_head()
221 if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) in ena_com_cq_inc_head()
232 masked_head = io_cq->head & (io_cq->q_depth - 1); in ena_com_tx_comp_req_id_get()
250 if (unlikely(*req_id >= io_cq->q_depth)) { in ena_com_tx_comp_req_id_get()
ena_com.c
91 u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth); in ena_com_admin_init_sq()
114 u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth); in ena_com_admin_init_cq()
137 ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; in ena_com_admin_init_aenq()
147 aenq->head = aenq->q_depth; in ena_com_admin_init_aenq()
157 aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; in ena_com_admin_init_aenq()
184 if (unlikely(command_id >= admin_queue->q_depth)) { in get_comp_ctxt()
187 command_id, admin_queue->q_depth); in get_comp_ctxt()
222 queue_size_mask = admin_queue->q_depth - 1; in __ena_com_submit_admin_cmd()
228 if (cnt >= admin_queue->q_depth) { in __ena_com_submit_admin_cmd()
273 size_t size = admin_queue->q_depth * sizeo in ena_com_init_comp_ctxt()
[all...]
ena_com.h
130 u16 q_depth; member
174 u16 q_depth; member
223 u16 q_depth; member
253 u16 q_depth; member
/kernel/linux/linux-5.10/drivers/net/ethernet/huawei/hinic/
hinic_hw_wq.c
34 #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
499 * @q_depth: number of wqebbs in WQ
505 u16 wqebb_size, u32 wq_page_size, u16 q_depth, in hinic_wq_allocate()
524 if (q_depth & (q_depth - 1)) { in hinic_wq_allocate()
525 dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); in hinic_wq_allocate()
548 wq->q_depth = q_depth; in hinic_wq_allocate()
565 atomic_set(&wq->delta, q_depth); in hinic_wq_allocate()
566 wq->mask = q_depth in hinic_wq_allocate()
504 hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, u16 wqebb_size, u32 wq_page_size, u16 q_depth, u16 max_wqe_size) hinic_wq_allocate() argument
600 hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, struct hinic_wq *wq, struct hinic_hwif *hwif, int cmdq_blocks, u16 wqebb_size, u32 wq_page_size, u16 q_depth, u16 max_wqe_size) hinic_wqs_cmdq_alloc() argument
[all...]
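
hinic_wq_allocate() rejects any q_depth that is not a power of two, because (q_depth & (q_depth - 1)) is non-zero exactly when more than one bit is set, and only a power-of-two depth yields a usable index mask of q_depth - 1. A small standalone check of the same expression:

    /* Sketch of the power-of-two check: only power-of-two depths pass and
     * can use q_depth - 1 as an index mask. */
    #include <stdint.h>
    #include <stdio.h>

    static int depth_is_pow2(uint16_t q_depth)
    {
        return q_depth != 0 && (q_depth & (q_depth - 1)) == 0;
    }

    int main(void)
    {
        uint16_t depths[] = { 256, 300, 1024, 0 };

        for (size_t i = 0; i < sizeof(depths) / sizeof(depths[0]); i++)
            printf("q_depth %u: %s\n", (unsigned)depths[i],
                   depth_is_pow2(depths[i]) ? "ok, mask = q_depth - 1"
                                            : "rejected, not a power of 2");
        return 0;
    }
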
hinic_hw_wq.h
30 u16 q_depth; member
80 u16 q_depth, u16 max_wqe_size);
91 u16 wqebb_size, u32 wq_page_size, u16 q_depth,
hinic_hw_qp.c
224 skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb); in alloc_sq_skb_arr()
252 skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb); in alloc_rq_skb_arr()
324 cqe_size = wq->q_depth * sizeof(*rq->cqe); in alloc_rq_cqe()
329 cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma); in alloc_rq_cqe()
334 for (i = 0; i < wq->q_depth; i++) { in alloc_rq_cqe()
367 for (i = 0; i < wq->q_depth; i++) in free_rq_cqe()
hinic_hw_cmdq.c
368 if (next_prod_idx >= wq->q_depth) { in cmdq_sync_cmd_direct_resp()
370 next_prod_idx -= wq->q_depth; in cmdq_sync_cmd_direct_resp()
447 if (next_prod_idx >= wq->q_depth) { in cmdq_set_arm_bit()
449 next_prod_idx -= wq->q_depth; in cmdq_set_arm_bit()
754 cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth)); in init_cmdq()
759 wq->q_depth)); in init_cmdq()
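
The hinic_hw_cmdq.c hits wrap the producer index differently: instead of masking, the index is advanced and q_depth is subtracted once it runs past the end of the ring. A user-space sketch of that wrap-by-subtraction step (illustrative names, not the hinic code):

    /* Sketch of wrap-by-subtraction: the producer index advances by the
     * number of entries a command needs and, when it runs past q_depth, is
     * wrapped by subtracting q_depth (no mask, so q_depth need not be a
     * power of two for this step alone). */
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t advance_prod_idx(uint16_t prod_idx, uint16_t num_entries,
                                     uint16_t q_depth, int *wrapped)
    {
        uint32_t next = (uint32_t)prod_idx + num_entries;

        *wrapped = 0;
        if (next >= q_depth) {   /* ran off the end of the ring */
            *wrapped = 1;
            next -= q_depth;
        }
        return (uint16_t)next;
    }

    int main(void)
    {
        int wrapped;
        uint16_t next = advance_prod_idx(254, 4, 256, &wrapped);

        printf("next_prod_idx=%u wrapped=%d\n", (unsigned)next, wrapped);
        return 0;
    }
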
/kernel/linux/linux-6.6/drivers/net/ethernet/huawei/hinic/
hinic_hw_wq.c
34 #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
498 * @q_depth: number of wqebbs in WQ
504 u16 wqebb_size, u32 wq_page_size, u16 q_depth, in hinic_wq_allocate()
523 if (q_depth & (q_depth - 1)) { in hinic_wq_allocate()
524 dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); in hinic_wq_allocate()
547 wq->q_depth = q_depth; in hinic_wq_allocate()
564 atomic_set(&wq->delta, q_depth); in hinic_wq_allocate()
565 wq->mask = q_depth in hinic_wq_allocate()
503 hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, u16 wqebb_size, u32 wq_page_size, u16 q_depth, u16 max_wqe_size) hinic_wq_allocate() argument
599 hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, struct hinic_wq *wq, struct hinic_hwif *hwif, int cmdq_blocks, u16 wqebb_size, u32 wq_page_size, u16 q_depth, u16 max_wqe_size) hinic_wqs_cmdq_alloc() argument
[all...]
hinic_hw_wq.h
30 u16 q_depth; member
80 u16 q_depth, u16 max_wqe_size);
91 u16 wqebb_size, u32 wq_page_size, u16 q_depth,
hinic_hw_qp.c
224 skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb); in alloc_sq_skb_arr()
252 skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb); in alloc_rq_skb_arr()
324 cqe_size = wq->q_depth * sizeof(*rq->cqe); in alloc_rq_cqe()
329 cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma); in alloc_rq_cqe()
334 for (i = 0; i < wq->q_depth; i++) { in alloc_rq_cqe()
367 for (i = 0; i < wq->q_depth; i++) in free_rq_cqe()
/kernel/linux/linux-5.10/drivers/net/ethernet/brocade/bna/
bnad.c
78 for (i = 0; i < ccb->q_depth; i++) { in bnad_cq_cleanup()
91 u32 q_depth, u32 index) in bnad_tx_buff_unmap()
114 BNA_QE_INDX_INC(index, q_depth); in bnad_tx_buff_unmap()
126 BNA_QE_INDX_INC(index, q_depth); in bnad_tx_buff_unmap()
143 for (i = 0; i < tcb->q_depth; i++) { in bnad_txq_cleanup()
147 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); in bnad_txq_cleanup()
162 u32 wis, unmap_wis, hw_cons, cons, q_depth; in bnad_txcmpl_process() local
174 q_depth = tcb->q_depth; in bnad_txcmpl_process()
176 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth); in bnad_txcmpl_process()
89 bnad_tx_buff_unmap(struct bnad *bnad, struct bnad_tx_unmap *unmap_q, u32 q_depth, u32 index) bnad_tx_buff_unmap() argument
341 u32 alloced, prod, q_depth; bnad_rxq_refill_page() local
415 u32 alloced, prod, q_depth, buff_sz; bnad_rxq_refill_skb() local
2920 u32 prod, q_depth, vect_id; bnad_start_xmit() local
[all...]
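
The bnad.c hits advance and compare ring indices through macros such as BNA_QE_INDX_INC and BNA_Q_INDEX_CHANGE. Assuming a power-of-two q_depth, the sketch below shows what macros of this kind typically expand to: a masked increment and a masked distance between two indices. These are stand-ins, not the bna macro definitions.

    /* Sketch of masked index macros: advance an index, and measure how far
     * one index trails another, modulo a power-of-two q_depth. */
    #include <stdint.h>
    #include <stdio.h>

    #define QE_INDX_INC(idx, q_depth)          ((idx) = ((idx) + 1) & ((q_depth) - 1))
    #define Q_INDEX_CHANGE(old, new, q_depth)  (((new) - (old)) & ((q_depth) - 1))

    int main(void)
    {
        uint32_t q_depth = 512, cons = 510, hw_cons = 3, i = 510;

        /* the hardware consumer moved from 510 to 3: five entries completed */
        printf("completed entries: %u\n", Q_INDEX_CHANGE(cons, hw_cons, q_depth));

        QE_INDX_INC(i, q_depth);
        QE_INDX_INC(i, q_depth);
        printf("510 incremented twice -> %u\n", i);   /* wraps to 0 */
        return 0;
    }
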
bna_types.h
422 u32 q_depth; member
550 u32 q_depth; member
566 int q_depth; member
614 u32 q_depth; member
/kernel/linux/linux-6.6/drivers/net/ethernet/brocade/bna/
bnad.c
78 for (i = 0; i < ccb->q_depth; i++) { in bnad_cq_cleanup()
91 u32 q_depth, u32 index) in bnad_tx_buff_unmap()
114 BNA_QE_INDX_INC(index, q_depth); in bnad_tx_buff_unmap()
126 BNA_QE_INDX_INC(index, q_depth); in bnad_tx_buff_unmap()
143 for (i = 0; i < tcb->q_depth; i++) { in bnad_txq_cleanup()
147 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); in bnad_txq_cleanup()
162 u32 wis, unmap_wis, hw_cons, cons, q_depth; in bnad_txcmpl_process() local
174 q_depth = tcb->q_depth; in bnad_txcmpl_process()
176 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth); in bnad_txcmpl_process()
89 bnad_tx_buff_unmap(struct bnad *bnad, struct bnad_tx_unmap *unmap_q, u32 q_depth, u32 index) bnad_tx_buff_unmap() argument
341 u32 alloced, prod, q_depth; bnad_rxq_refill_page() local
415 u32 alloced, prod, q_depth, buff_sz; bnad_rxq_refill_skb() local
2916 u32 prod, q_depth, vect_id; bnad_start_xmit() local
[all...]
bna_types.h
428 u32 q_depth; member
559 u32 q_depth; member
575 int q_depth; member
623 u32 q_depth; member
/kernel/linux/linux-5.10/drivers/nvme/host/
pci.c
33 #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
34 #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
124 u32 q_depth; member
199 u32 q_depth; member
477 if (next_tail == nvmeq->q_depth) in nvme_write_sq_db()
501 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_submit_cmd()
1030 if (tmp == nvmeq->q_depth) { in nvme_update_cq_head()
1154 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
1184 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
1454 int q_depth in nvme_cmb_qdepth() local
[all...]
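
The pci.c hits show two q_depth conventions side by side: the driver wraps its own sq_tail back to zero when it reaches q_depth (no power-of-two requirement), while the NVMe create-queue commands carry qsize as q_depth - 1, i.e. a zero-based value. A user-space illustration, not the nvme driver:

    /* Sketch of the two conventions: sq_tail wraps to 0 at q_depth, while
     * the qsize field reported to the controller is q_depth - 1. */
    #include <stdint.h>
    #include <stdio.h>

    struct queue {
        uint32_t q_depth;
        uint16_t sq_tail;
    };

    static void sq_advance_tail(struct queue *q)
    {
        if (++q->sq_tail == q->q_depth)   /* explicit wrap; depth need not be pow2 */
            q->sq_tail = 0;
    }

    int main(void)
    {
        struct queue q = { .q_depth = 1024, .sq_tail = 1023 };

        sq_advance_tail(&q);
        printf("sq_tail after wrap: %u\n", (unsigned)q.sq_tail);          /* 0 */
        printf("qsize field for create: %u\n", (unsigned)(q.q_depth - 1)); /* zero-based */
        return 0;
    }
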
/kernel/linux/linux-6.6/drivers/nvme/host/
pci.c
35 #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
36 #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
130 u32 q_depth; member
199 u32 q_depth; member
476 if (next_tail == nvmeq->q_depth) in nvme_write_sq_db()
493 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_sq_copy_cmd()
1050 if (tmp == nvmeq->q_depth) { in nvme_update_cq_head()
1172 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
1201 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
1471 int q_depth in nvme_cmb_qdepth() local
[all...]
/kernel/linux/linux-6.6/drivers/crypto/hisilicon/sec2/
sec_crypto.c
310 u16 q_depth = res->depth; in sec_alloc_civ_resource() local
313 res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), in sec_alloc_civ_resource()
318 for (i = 1; i < q_depth; i++) { in sec_alloc_civ_resource()
335 u16 q_depth = res->depth; in sec_alloc_aiv_resource() local
338 res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), in sec_alloc_aiv_resource()
343 for (i = 1; i < q_depth; i++) { in sec_alloc_aiv_resource()
360 u16 q_depth = res->depth; in sec_alloc_mac_resource() local
363 res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1, in sec_alloc_mac_resource()
368 for (i = 1; i < q_depth; i++) { in sec_alloc_mac_resource()
397 u16 q_depth in sec_alloc_pbuf_resource() local
488 u16 q_depth = qp_ctx->qp->sq_depth; sec_alloc_qp_ctx_resource() local
[all...]
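
In the sec_crypto.c hits, q_depth sizes one coherent DMA buffer per resource type (IVs, MACs, pbufs), which is then carved into one slice per request slot. The sketch below shows only the arithmetic; SEC_IV_SIZE, the layout, and calloc() standing in for dma_alloc_coherent() are assumptions for illustration.

    /* Sketch of per-queue buffer sizing: one buffer large enough for q_depth
     * IVs, with each request slot i pointing at its own slice. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SEC_IV_SIZE               16                            /* assumed */
    #define SEC_TOTAL_IV_SZ(q_depth)  (SEC_IV_SIZE * (size_t)(q_depth))

    int main(void)
    {
        uint16_t q_depth = 256;
        uint8_t *c_ivin = calloc(1, SEC_TOTAL_IV_SZ(q_depth));

        if (!c_ivin)
            return 1;
        for (uint16_t i = 1; i < q_depth; i++) {
            uint8_t *iv = c_ivin + (size_t)i * SEC_IV_SIZE;   /* per-request IV slice */
            iv[0] = 0;   /* touch the slot so the slice is actually used */
        }
        printf("total IV buffer: %zu bytes for depth %u\n",
               SEC_TOTAL_IV_SZ(q_depth), (unsigned)q_depth);
        free(c_ivin);
        return 0;
    }
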
/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/octeon_ep/
octep_ctrl_mbox.c
228 u32 pi, ci, r_sz, buf_sz, q_depth; in octep_ctrl_mbox_recv() local
240 q_depth = octep_ctrl_mbox_circq_depth(pi, ci, q->sz); in octep_ctrl_mbox_recv()
241 if (q_depth < mbox_hdr_sz) { in octep_ctrl_mbox_recv()
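
octep_ctrl_mbox_recv() first computes how many bytes sit between the producer index pi and the consumer index ci, modulo the queue size, and bails out if that is less than a mailbox header. circq_depth() below is one common formulation of that computation, not the octep helper itself.

    /* Sketch of the fill-level check: bytes queued between pi and ci, modulo
     * the queue size, must cover at least a message header before reading. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t circq_depth(uint32_t pi, uint32_t ci, uint32_t sz)
    {
        return (pi + sz - ci) % sz;   /* bytes currently queued */
    }

    int main(void)
    {
        uint32_t sz = 4096, pi = 16, ci = 4080, mbox_hdr_sz = 16;
        uint32_t q_depth = circq_depth(pi, ci, sz);

        if (q_depth < mbox_hdr_sz)
            printf("not enough data for a header (%u bytes queued)\n", q_depth);
        else
            printf("header available, %u bytes queued\n", q_depth);
        return 0;
    }
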
/kernel/linux/linux-5.10/drivers/block/rsxx/
cregs.c
133 card->creg_ctrl.q_depth--; in creg_kick_queue()
185 card->creg_ctrl.q_depth++; in creg_queue_cmd()
322 card->creg_ctrl.q_depth--; in creg_reset()
399 card->creg_ctrl.q_depth + 20000); in __issue_creg_rw()
706 card->creg_ctrl.q_depth++; in rsxx_eeh_save_issued_creg()
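
The rsxx/cregs.c hits use q_depth differently from the ring drivers above: it is a plain count of outstanding creg commands, incremented on queueing, decremented on issue or reset, and folded into a timeout (the q_depth + 20000 term visible in __issue_creg_rw()). A stand-in sketch of that bookkeeping, not the rsxx driver:

    /* Sketch of q_depth as an outstanding-command counter rather than a
     * ring size: bumped on queue, dropped on issue, used to scale a timeout. */
    #include <stdio.h>

    struct creg_ctrl { unsigned int q_depth; };

    int main(void)
    {
        struct creg_ctrl ctrl = { .q_depth = 0 };

        ctrl.q_depth++;   /* command queued */
        ctrl.q_depth++;   /* second command queued */
        ctrl.q_depth--;   /* one kicked to the hardware */

        /* wait longer when more work is already queued ahead of us;
         * the constant mirrors the hit above and is illustrative only */
        unsigned int timeout = ctrl.q_depth + 20000;

        printf("outstanding=%u timeout=%u\n", ctrl.q_depth, timeout);
        return 0;
    }
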
