/kernel/linux/linux-5.10/net/9p/
    trans_rdma.c
        59:   * @sq_depth: The depth of the Send Queue
        86:   int sq_depth;  [member]
        118:  * @sq_depth: The requested depth of the SQ. This really doesn't need
        126:  int sq_depth;  [member]
        157:  if (rdma->sq_depth != P9_RDMA_SQ_DEPTH)  [in p9_rdma_show_options()]
        158:  seq_printf(m, ",sq=%u", rdma->sq_depth);  [in p9_rdma_show_options()]
        183:  opts->sq_depth = P9_RDMA_SQ_DEPTH;  [in parse_opts()]
        218:  opts->sq_depth = option;  [in parse_opts()]
        234:  opts->rq_depth = max(opts->rq_depth, opts->sq_depth);  [in parse_opts()]
        582:  rdma->sq_depth  [in alloc_rdma()]
        [all ...]
/kernel/linux/linux-6.6/net/9p/
    trans_rdma.c
        56:   * @sq_depth: The depth of the Send Queue
        83:   int sq_depth;  [member]
        117:  * @sq_depth: The requested depth of the SQ. This really doesn't need
        125:  int sq_depth;  [member]
        156:  if (rdma->sq_depth != P9_RDMA_SQ_DEPTH)  [in p9_rdma_show_options()]
        157:  seq_printf(m, ",sq=%u", rdma->sq_depth);  [in p9_rdma_show_options()]
        182:  opts->sq_depth = P9_RDMA_SQ_DEPTH;  [in parse_opts()]
        217:  opts->sq_depth = option;  [in parse_opts()]
        233:  opts->rq_depth = max(opts->rq_depth, opts->sq_depth);  [in parse_opts()]
        581:  rdma->sq_depth  [in alloc_rdma()]
        [all ...]
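The parse_opts() matches above show the whole depth policy of the 9p RDMA transport: both depths start at compile-time defaults, a "sq=" mount option can override the SQ depth, and the RQ depth is then raised to at least the SQ depth so every outstanding request has a reply slot. A minimal standalone sketch of that logic follows; the default values are assumed for illustration, and the real code parses the options with the kernel's match_token() machinery rather than taking them as plain integers.

/*
 * Sketch of the depth handling in parse_opts() above. Defaults are
 * assumed values, not necessarily the transport's real ones.
 */
#include <stdio.h>

#define P9_RDMA_SQ_DEPTH 32   /* assumed default for illustration */
#define P9_RDMA_RQ_DEPTH 32   /* assumed default for illustration */

struct p9_rdma_opts {
	int sq_depth;  /* requested Send Queue depth */
	int rq_depth;  /* requested Receive Queue depth */
};

static void parse_depths(struct p9_rdma_opts *opts, int sq_opt, int rq_opt)
{
	/* Start from the compile-time defaults (line 182/183 above). */
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;

	if (sq_opt > 0)
		opts->sq_depth = sq_opt;   /* "sq=" option (line 217/218) */
	if (rq_opt > 0)
		opts->rq_depth = rq_opt;

	/*
	 * Each posted request expects a reply on the RQ, so the RQ must be
	 * at least as deep as the SQ (line 233/234 above).
	 */
	opts->rq_depth = opts->rq_depth > opts->sq_depth ?
			 opts->rq_depth : opts->sq_depth;
}

int main(void)
{
	struct p9_rdma_opts opts;

	parse_depths(&opts, 64, 0);  /* "sq=64" given, "rq=" omitted */
	printf("sq=%d rq=%d\n", opts.sq_depth, opts.rq_depth);  /* sq=64 rq=64 */
	return 0;
}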
/kernel/linux/linux-6.6/drivers/net/ethernet/fungible/funcore/
    fun_queue.h
        46:   u32 sq_depth;  [member]
        97:   if (++tail == funq->sq_depth)  [in funq_sq_post_tail()]
        123:  u32 sq_depth;  [member]
        133:  u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,

    fun_queue.c
        76:   u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,  [in fun_sq_create()]
        89:   if (sq_depth > fdev->q_depth)  [in fun_sq_create()]
        94:   wb_addr = dma_addr + (sq_depth << sqe_size_log2);  [in fun_sq_create()]
        101:  sq_depth - 1, dma_addr, 0,  [in fun_sq_create()]
        356:  funq->sq_cmds = fun_alloc_ring_mem(funq->fdev->dev, funq->sq_depth,  [in fun_alloc_sqes()]
        390:  fun_free_ring_mem(dev, funq->sq_depth, 1 << funq->sqe_size_log2,  [in fun_free_queue()]
        442:  funq->sq_depth = req->sq_depth;  [in fun_alloc_queue()]
        512:  funq->sqe_size_log2, funq->sq_depth,  [in fun_create_sq()]
        75:   fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid, u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr, u8 coal_nentries, u8 coal_usec, u32 irq_num, u32 scan_start_id, u32 scan_end_id, u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp)  [fun_sq_create() argument]
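The funq_sq_post_tail() hit above is the canonical ring-buffer idiom: the SQ is an array of sq_depth fixed-size entries, and the tail index wraps back to slot zero after the last one. A sketch with illustrative names (the struct and helper below are stand-ins, not the driver's types):

/*
 * Sketch of the tail-pointer wrap seen in funq_sq_post_tail() above: a
 * submission queue of sq_depth entries, each (1 << sqe_size_log2) bytes.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct sq_ring {
	void     *cmds;           /* sq_depth entries of (1 << sqe_size_log2) bytes */
	uint32_t  sq_depth;       /* number of SQ entries */
	uint32_t  sq_tail;        /* next free slot */
	uint8_t   sqe_size_log2;  /* log2 of entry size */
};

/* Copy one entry into the ring and advance the tail with wrap-around. */
static uint32_t sq_post_tail(struct sq_ring *sq, const void *sqe)
{
	uint32_t tail = sq->sq_tail;

	memcpy((char *)sq->cmds + ((size_t)tail << sq->sqe_size_log2),
	       sqe, (size_t)1 << sq->sqe_size_log2);

	if (++tail == sq->sq_depth)   /* wrap, as on line 97 above */
		tail = 0;
	sq->sq_tail = tail;
	return tail;                  /* new tail, written to the doorbell */
}

int main(void)
{
	struct sq_ring sq = { .sq_depth = 4, .sqe_size_log2 = 6 };
	uint8_t sqe[64] = { 0 };

	sq.cmds = calloc(sq.sq_depth, (size_t)1 << sq.sqe_size_log2);
	for (int i = 0; i < 5; i++)   /* the fifth post wraps back to slot 0 */
		sq_post_tail(&sq, sqe);
	free(sq.cmds);
	return 0;
}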
    fun_dev.c
        231:  .sq_depth = areq->sq_depth,  [in fun_enable_admin_queue()]
        234:  unsigned int ntags = areq->sq_depth - 1;  [in fun_enable_admin_queue()]
        241:  if (areq->sq_depth < AQA_MIN_QUEUE_SIZE ||  [in fun_enable_admin_queue()]
        242:  areq->sq_depth > AQA_MAX_QUEUE_SIZE ||  [in fun_enable_admin_queue()]
        269:  writel((funq->sq_depth - 1) << AQA_ASQS_SHIFT |  [in fun_enable_admin_queue()]
        401:  if (++funq->sq_tail == funq->sq_depth)  [in fun_submit_admin_cmd()]

    fun_dev.h
        93:   u16 sq_depth;  [member]
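fun_enable_admin_queue() first bounds-checks the requested admin SQ depth, then programs it into a device register zero-based, i.e. as sq_depth - 1 (the same 0's-based convention NVMe uses in its AQA register). A sketch of that encoding; the bounds and shift values here are placeholders, not the device's:

/*
 * Sketch of the admin-queue size programming visible on lines 241-269
 * above. Field positions and limits are assumed for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define AQA_MIN_QUEUE_SIZE 2U     /* assumed bound */
#define AQA_MAX_QUEUE_SIZE 4096U  /* assumed bound */
#define AQA_ASQS_SHIFT     0      /* admin SQ size field position (assumed) */
#define AQA_ACQS_SHIFT     16     /* admin CQ size field position (assumed) */

static int encode_aqa(uint32_t sq_depth, uint32_t cq_depth, uint32_t *reg)
{
	if (sq_depth < AQA_MIN_QUEUE_SIZE || sq_depth > AQA_MAX_QUEUE_SIZE ||
	    cq_depth < AQA_MIN_QUEUE_SIZE || cq_depth > AQA_MAX_QUEUE_SIZE)
		return -1;  /* reject out-of-range depths, as the driver does */

	/* Zero-based sizes packed into one 32-bit register value. */
	*reg = (sq_depth - 1) << AQA_ASQS_SHIFT |
	       (cq_depth - 1) << AQA_ACQS_SHIFT;
	return 0;
}

int main(void)
{
	uint32_t reg;

	if (!encode_aqa(32, 32, &reg))
		printf("AQA = 0x%08x\n", reg);  /* 0x001f001f */
	return 0;
}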
/kernel/linux/linux-6.6/drivers/net/ethernet/fungible/funeth/
    funeth.h
        77:   unsigned int sq_depth;  [member]
        115:  unsigned int sq_depth;  [member]

    funeth_main.c
        494:  xdpqs = alloc_xdpqs(netdev, qset->nxdpqs, qset->sq_depth,  [in fun_alloc_rings()]
        503:  err = alloc_txqs(netdev, txqs, qset->ntxqs, qset->sq_depth,  [in fun_alloc_rings()]
        843:  .sq_depth = fp->sq_depth,  [in funeth_open()]
        1085: xdpqs = alloc_xdpqs(dev, nqs, fp->sq_depth, 0, FUN_QSTATE_INIT_FULL);  [in fun_enter_xdp()]
        1644: .sq_depth = fp->sq_depth,  [in fun_change_num_queues()]
        1781: fp->sq_depth = min(SQ_DEPTH, fdev->q_depth);  [in fun_create_netdev()]
        2002: .sq_depth = ADMIN_SQ_DEPTH,  [in funeth_probe()]
    funeth_ethtool.c
        567:  ring->tx_pending = fp->sq_depth;  [in fun_get_ringparam()]
        593:  if (fp->sq_depth == ring->tx_pending &&  [in fun_set_ringparam()]
        601:  .sq_depth = ring->tx_pending  [in fun_set_ringparam()]
        609:  fp->sq_depth = ring->tx_pending;  [in fun_set_ringparam()]
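These ethtool hooks expose the SQ depth to user space as the TX ring size: fun_get_ringparam() reports the current depth as tx_pending, and fun_set_ringparam() skips the expensive queue reallocation when nothing changed. A simplified sketch of that flow, with stand-in types rather than the kernel's ethtool structures:

/*
 * Sketch of the get/set ring-size flow on lines 567-609 above.
 * Types and names are simplified stand-ins for the driver's.
 */
#include <stdbool.h>
#include <stdio.h>

struct fun_params { unsigned int sq_depth; unsigned int rq_depth; };
struct ringparam  { unsigned int tx_pending; unsigned int rx_pending; };

static void get_ringparam(const struct fun_params *fp, struct ringparam *ring)
{
	ring->tx_pending = fp->sq_depth;   /* current TX (send queue) depth */
	ring->rx_pending = fp->rq_depth;
}

/* Returns true if the queues must be torn down and reallocated. */
static bool set_ringparam(struct fun_params *fp, const struct ringparam *ring)
{
	if (fp->sq_depth == ring->tx_pending &&
	    fp->rq_depth == ring->rx_pending)
		return false;              /* no change, nothing to do */

	fp->sq_depth = ring->tx_pending;   /* new depth used on next alloc */
	fp->rq_depth = ring->rx_pending;
	return true;
}

int main(void)
{
	struct fun_params fp = { .sq_depth = 512, .rq_depth = 512 };
	struct ringparam ring;

	get_ringparam(&fp, &ring);
	ring.tx_pending = 1024;            /* user asks for a deeper TX ring */
	printf("realloc needed: %d\n", set_ringparam(&fp, &ring));  /* 1 */
	return 0;
}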
/kernel/linux/linux-6.6/include/uapi/misc/uacce/
    hisi_qm.h
        20:   * @sq_depth: The number of sqe
        26:   __u16 sq_depth;  [member]
/kernel/linux/linux-6.6/drivers/net/ethernet/huawei/hinic/
    hinic_hw_dev.c
        266:  * @sq_depth: sq depth
        270:  static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int sq_depth,  [set_hw_ioctxt() argument]
        289:  hw_ioctxt.sq_depth = ilog2(sq_depth);  [in set_hw_ioctxt()]
        433:  * @sq_depth: the send queue depth
        438:  int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)  [hinic_hwdev_ifup() argument]
        461:  func_to_io->sq_depth = sq_depth;  [in hinic_hwdev_ifup()]
        489:  err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);  [in hinic_hwdev_ifup()]
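Line 289 above shows a third depth encoding: where the Fungible driver programs depth - 1, HiNIC's hardware I/O context stores the depth as its base-2 logarithm, so a depth of 4096 is transmitted as 12. A minimal stand-in for the kernel's ilog2(), assuming power-of-two depths:

/*
 * Sketch of the log2 depth encoding in set_hw_ioctxt() above.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int my_ilog2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)   /* position of the highest set bit */
		r++;
	return r;
}

int main(void)
{
	uint16_t sq_depth = 4096;

	/* What hw_ioctxt.sq_depth receives (line 289 above). */
	printf("sq_depth=%u -> log2=%u\n", sq_depth, my_ilog2(sq_depth));
	return 0;
}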
    hinic_dev.h
        99:   u16 sq_depth;  [member]

    hinic_hw_io.h
        73:   u16 sq_depth;  [member]

    hinic_hw_dev.h
        288:  u16 sq_depth;  [member]
        627:  int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);

    hinic_hw_mbox.c
        1291: (hw_ctxt)->sq_depth >= HINIC_QUEUE_MIN_DEPTH && \
        1292: (hw_ctxt)->sq_depth <= HINIC_QUEUE_MAX_DEPTH && \
        1300: if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth &&  [in hw_ctxt_qps_param_valid()]
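The hw_ctxt_qps_param_valid() matches above sketch the mailbox-side sanity check: a queue-pair context is accepted either with both depths zero (no queues configured) or with every depth inside the supported range. A compact version of that pattern, with assumed bounds rather than HiNIC's:

/*
 * Sketch of the depth validation on lines 1291-1300 above.
 * Bounds are assumed values, not the driver's.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QUEUE_MIN_DEPTH 64U    /* assumed for illustration */
#define QUEUE_MAX_DEPTH 4096U  /* assumed for illustration */

struct hw_ctxt { uint16_t sq_depth; uint16_t rq_depth; };

static bool depth_in_range(uint16_t d)
{
	return d >= QUEUE_MIN_DEPTH && d <= QUEUE_MAX_DEPTH;
}

static bool qps_param_valid(const struct hw_ctxt *c)
{
	if (!c->sq_depth && !c->rq_depth)
		return true;   /* both unset is a legal "no queues" context */

	return depth_in_range(c->sq_depth) && depth_in_range(c->rq_depth);
}

int main(void)
{
	struct hw_ctxt ok  = { .sq_depth = 256, .rq_depth = 256 };
	struct hw_ctxt bad = { .sq_depth = 8,   .rq_depth = 256 };

	printf("%d %d\n", qps_param_valid(&ok), qps_param_valid(&bad));  /* 1 0 */
	return 0;
}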
/kernel/linux/linux-5.10/drivers/net/ethernet/huawei/hinic/
    hinic_hw_dev.c
        272:  * @sq_depth: sq depth
        276:  static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int sq_depth,  [set_hw_ioctxt() argument]
        295:  hw_ioctxt.sq_depth = ilog2(sq_depth);  [in set_hw_ioctxt()]
        439:  * @sq_depth: the send queue depth
        444:  int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)  [hinic_hwdev_ifup() argument]
        467:  func_to_io->sq_depth = sq_depth;  [in hinic_hwdev_ifup()]
        495:  err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);  [in hinic_hwdev_ifup()]

    hinic_dev.h
        95:   u16 sq_depth;  [member]

    hinic_hw_io.h
        73:   u16 sq_depth;  [member]

    hinic_hw_dev.h
        222:  u16 sq_depth;  [member]
        561:  int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);

    hinic_hw_mbox.c
        1296: (hw_ctxt)->sq_depth >= HINIC_QUEUE_MIN_DEPTH && \
        1297: (hw_ctxt)->sq_depth <= HINIC_QUEUE_MAX_DEPTH && \
        1305: if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth &&  [in hw_ctxt_qps_param_valid()]
/kernel/linux/linux-6.6/drivers/crypto/hisilicon/
    qm.c
        253:  #define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
        254:  ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
        1872: if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))  [in qm_get_avail_sqe()]
        1993: sqc->w8 = cpu_to_le16(qp->sq_depth - 1);  [in qm_sq_ctx_cfg()]
        1995: sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));  [in qm_sq_ctx_cfg()]
        2125: u16 sq_depth = qp->sq_depth;  [qp_stop_fail_cb() local]
        2126: u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth;  [in qp_stop_fail_cb()]
        2132: pos = (i + cur_head) % sq_depth;  [in qp_stop_fail_cb()]
        2645: u16 sq_depth, cq_depth;  [qm_alloc_uacce() local]
        2801: hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, u16 sq_depth, u16 cq_depth)  [hisi_qp_memory_init() argument]
        5332: u16 sq_depth, cq_depth;  [hisi_qp_alloc_memory() local]
        [all ...]
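Two arithmetic details from the qm.c matches deserve unpacking. QM_MK_SQC_DW3_V2 packs the zero-based depth and the log2 of the SQE size into one 32-bit context word, and qp_stop_fail_cb() recovers the ring head from the tail and the in-flight count with modular arithmetic. A standalone sketch (the shift value is assumed, not the driver's):

/*
 * Sketch of the SQC word packing (lines 253-254) and the head recovery
 * (line 2126) above. QM_SQ_SQE_SIZE_SHIFT is an assumed field position.
 */
#include <stdint.h>
#include <stdio.h>

#define QM_SQ_SQE_SIZE_SHIFT 12  /* assumed for illustration */

static uint32_t ilog2_u32(uint32_t v)
{
	uint32_t r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* depth - 1 in the low bits, log2(sqe_sz) above it, as in the macro. */
static uint32_t mk_sqc_dw3(uint32_t sqe_sz, uint32_t sq_depth)
{
	return (sq_depth - 1) | (ilog2_u32(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT);
}

int main(void)
{
	uint16_t sq_depth = 1024, cur_tail = 5, qp_used = 20;
	/* Head = tail minus in-flight entries, wrapped into [0, depth). */
	uint16_t cur_head = (cur_tail + sq_depth - qp_used) % sq_depth;

	printf("dw3=0x%08x head=%u\n", mk_sqc_dw3(128, sq_depth), cur_head);
	return 0;   /* dw3=0x000073ff head=1009 */
}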
    debugfs.c
        298:  u16 sq_depth = qm->qp_array->cq_depth;  [qm_sq_dump() local]
        304:  ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);  [in qm_sq_dump()]
        308:  sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);  [in qm_sq_dump()]
        313:  memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);  [in qm_sq_dump()]
/kernel/linux/linux-6.6/drivers/infiniband/hw/irdma/
    user.h
        300:  u32 *sq_depth, u8 *sq_shift);
        378:  u32 sq_depth;  [member]

/kernel/linux/linux-5.10/drivers/infiniband/hw/efa/
    efa_com_cmd.h
        23:   u32 sq_depth;  [member]

/kernel/linux/linux-6.6/drivers/infiniband/hw/efa/
    efa_com_cmd.h
        23:   u32 sq_depth;  [member]