Lines matching refs:rsrc (references to struct i40iw_puda_rsrc in the i40iw PUDA code)
50 *rsrc, bool initial);
68 * @rsrc: resource to use for buffer
70 struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)
73 struct list_head *list = &rsrc->bufpool;
76 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
79 rsrc->avail_buf_count--;
81 rsrc->stats_buf_alloc_fail++;
82 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
87 * i40iw_puda_ret_bufpool - return buffer to rsrc list
88 * @rsrc: resource to use for buffer
91 void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
96 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
97 list_add(&buf->list, &rsrc->bufpool);
98 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
99 rsrc->avail_buf_count++;
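The matches at 50-99 cover the buffer-pool helpers: i40iw_puda_get_bufpool() pulls a buffer off rsrc->bufpool under rsrc->bufpool_lock (counting stats_buf_alloc_fail when the pool is empty), and i40iw_puda_ret_bufpool() links it back and restores avail_buf_count. A minimal sketch of that get/put pair, reconstructed from the matched lines only; list-handling details beyond the fields named above are assumptions, not the driver's exact body:

/* Sketch: pool get/put under bufpool_lock, modeled on the matches above. */
static struct i40iw_puda_buf *puda_get_bufpool_sketch(struct i40iw_puda_rsrc *rsrc)
{
        struct i40iw_puda_buf *buf = NULL;
        struct list_head *list = &rsrc->bufpool;
        unsigned long flags;

        spin_lock_irqsave(&rsrc->bufpool_lock, flags);
        if (!list_empty(list)) {
                buf = list_first_entry(list, struct i40iw_puda_buf, list);
                list_del(&buf->list);                   /* buffer now owned by the caller */
                rsrc->avail_buf_count--;
        } else {
                rsrc->stats_buf_alloc_fail++;           /* pool exhausted */
        }
        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
        return buf;
}

static void puda_ret_bufpool_sketch(struct i40iw_puda_rsrc *rsrc,
                                    struct i40iw_puda_buf *buf)
{
        unsigned long flags;

        spin_lock_irqsave(&rsrc->bufpool_lock, flags);
        list_add(&buf->list, &rsrc->bufpool);           /* back onto the free pool */
        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
        rsrc->avail_buf_count++;                        /* as at line 99: bumped after the unlock */
}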
104 * @rsrc: resource ptr
109 static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
113 struct i40iw_sc_qp *qp = &rsrc->qp;
118 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
134 * @rsrc: resource to use for buffer
137 static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,
141 u32 invalid_cnt = rsrc->rxq_invalid_cnt;
145 buf = i40iw_puda_get_bufpool(rsrc);
148 i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,
150 rsrc->rx_wqe_idx =
151 ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
152 rsrc->rxq_invalid_cnt--;
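Lines 104-152 are the receive-side refill path: i40iw_puda_post_recvbuf() writes one buffer into the RQ at wqe_idx, and i40iw_puda_replenish_rq() repeats that for every invalidated slot while advancing rx_wqe_idx around the ring. A sketch of the replenish loop, assuming the loop runs rxq_invalid_cnt times as the matches suggest; the returned error code is an assumption:

static enum i40iw_status_code puda_replenish_rq_sketch(struct i40iw_puda_rsrc *rsrc,
                                                       bool initial)
{
        u32 i;
        u32 invalid_cnt = rsrc->rxq_invalid_cnt;
        struct i40iw_puda_buf *buf;

        for (i = 0; i < invalid_cnt; i++) {
                buf = i40iw_puda_get_bufpool(rsrc);
                if (!buf)
                        return I40IW_ERR_NO_MEMORY;     /* error code assumed; pool is empty */
                i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
                rsrc->rx_wqe_idx = (rsrc->rx_wqe_idx + 1) % rsrc->rq_size;
                rsrc->rxq_invalid_cnt--;
        }
        return 0;                                       /* 0 == success for i40iw_status_code */
}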
288 struct i40iw_puda_rsrc *rsrc;
294 rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
308 if (!qp || !rsrc) {
313 if (qp->qp_id != rsrc->qp_id) {
323 rsrc->stats_rcvd_pkt_err++;
325 i40iw_ilq_putback_rcvbuf(&rsrc->qp,
328 i40iw_puda_ret_bufpool(rsrc, buf);
329 i40iw_puda_replenish_rq(rsrc, false);
334 rsrc->stats_pkt_rcvd++;
335 rsrc->compl_rxwqe_idx = info.wqe_idx;
337 rsrc->receive(rsrc->vsi, buf);
339 i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
341 i40iw_puda_replenish_rq(rsrc, false);
347 rsrc->xmit_complete(rsrc->vsi, sqwrid);
348 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
349 rsrc->tx_wqe_avail_cnt++;
350 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
351 if (!list_empty(&rsrc->txpend))
352 i40iw_puda_send_buf(rsrc, NULL);
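The block at 288-352 is the CQ completion handler: it resolves the PUDA resource from the CQ type (ILQ vs IEQ on the vsi), sanity-checks the qp and qp_id, then splits into a receive path (error vs good packet) and a transmit-complete path. The branch structure, condensed from the matched lines; rq_cqe, rcv_err and is_ilq below are placeholders for checks whose exact form is not in the matches:

rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
if (!qp || !rsrc || qp->qp_id != rsrc->qp_id)
        return;                                 /* completion is not for this PUDA QP */

if (rq_cqe) {                                   /* receive completion */
        if (rcv_err) {
                rsrc->stats_rcvd_pkt_err++;
                if (is_ilq)
                        i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
                else {
                        i40iw_puda_ret_bufpool(rsrc, buf);
                        i40iw_puda_replenish_rq(rsrc, false);
                }
        } else {
                rsrc->stats_pkt_rcvd++;
                rsrc->compl_rxwqe_idx = info.wqe_idx;
                rsrc->receive(rsrc->vsi, buf);  /* ILQ/IEQ-specific callback */
                if (is_ilq)
                        i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
                else
                        i40iw_puda_replenish_rq(rsrc, false);
        }
} else {                                        /* SQ (transmit) completion */
        rsrc->xmit_complete(rsrc->vsi, sqwrid);
        spin_lock_irqsave(&rsrc->bufpool_lock, flags);
        rsrc->tx_wqe_avail_cnt++;               /* one SQ WQE credit returned */
        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
        if (!list_empty(&rsrc->txpend))
                i40iw_puda_send_buf(rsrc, NULL);        /* NULL: drain txpend only */
}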
419 * @rsrc: resource to use for buffer
422 void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
428 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
432 if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
433 list_add_tail(&buf->list, &rsrc->txpend);
434 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
435 rsrc->stats_sent_pkt_q++;
436 if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
437 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
441 rsrc->tx_wqe_avail_cnt--;
446 buf = i40iw_puda_get_listbuf(&rsrc->txpend);
457 info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);
459 ret = i40iw_puda_send(&rsrc->qp, &info);
461 rsrc->tx_wqe_avail_cnt++;
462 rsrc->stats_sent_pkt_q++;
463 list_add(&buf->list, &rsrc->txpend);
464 if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
465 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
468 rsrc->stats_pkt_sent++;
471 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
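Lines 419-471 are i40iw_puda_send_buf(): it either queues the buffer on rsrc->txpend (no SQ credit left, or a backlog already exists and FIFO order must be kept) or consumes a credit and posts it with i40iw_puda_send(); calling it with a NULL buf, as the completion handler does above, simply drains the pending list. The credit/backpressure flow, condensed; building the send info from the buffer is omitted and the locking granularity is simplified:

spin_lock_irqsave(&rsrc->bufpool_lock, flags);
if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
        list_add_tail(&buf->list, &rsrc->txpend);       /* queue behind the backlog */
        rsrc->stats_sent_pkt_q++;
        goto done;
}
rsrc->tx_wqe_avail_cnt--;
if (!buf) {                                     /* called from the completion path */
        buf = i40iw_puda_get_listbuf(&rsrc->txpend);
        if (!buf)
                goto done;
}
/* ... fill the send info (addresses, length, ...) from buf ... */
info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);
ret = i40iw_puda_send(&rsrc->qp, &info);
if (ret) {
        rsrc->tx_wqe_avail_cnt++;               /* give the credit back */
        rsrc->stats_sent_pkt_q++;
        list_add(&buf->list, &rsrc->txpend);    /* head of the list: retried first */
} else {
        rsrc->stats_pkt_sent++;
}
done:
spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);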
476 * @rsrc: qp's resource
478 static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
480 struct i40iw_sc_qp *qp = &rsrc->qp;
490 set_64bit_val(qp_ctx, 48, LS_64(rsrc->buf_size, I40IW_UDA_QPC_MAXFRAMESIZE));
495 LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
496 LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));
508 i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
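The i40iw_puda_qp_setctx() matches at 476-508 program the QP context: the frame-size limit comes from rsrc->buf_size, and the same rsrc->cq_id is written for both TXCQNUM and RXCQNUM, since a single PUDA CQ serves both directions. For readability, LS_64() is roughly a shift-and-mask into the named field; the definition below is sketched from memory of i40iw_d.h, so treat it as approximate rather than quoted:

/* Approximate shape of LS_64(); field##_SHIFT / field##_MASK are the
 * per-field constants defined alongside the context-word layout. */
#define LS_64(val, field)       (((u64)(val) << field ## _SHIFT) & (field ## _MASK))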
514 * @rsrc: resource for qp
550 * @rsrc: resource to use for buffer
552 static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
554 struct i40iw_sc_qp *qp = &rsrc->qp;
560 sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
561 rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
566 i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
569 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
573 mem = &rsrc->qpmem;
575 qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
576 qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
577 qp->pd = &rsrc->sc_pd;
579 qp->dev = rsrc->dev;
580 qp->back_qp = (void *)rsrc;
583 qp->vsi = rsrc->vsi;
585 ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
586 ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
591 ukqp->qp_id = rsrc->qp_id;
592 ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
593 ukqp->rq_wrid_array = rsrc->rq_wrid_array;
595 ukqp->qp_id = rsrc->qp_id;
596 ukqp->sq_size = rsrc->sq_size;
597 ukqp->rq_size = rsrc->rq_size;
612 i40iw_puda_qp_setctx(rsrc);
613 if (rsrc->dev->ceq_valid)
614 ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
616 ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
619 i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
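i40iw_puda_qp_create() (550-619) sizes the SQ and RQ in I40IW_QP_WQE_MIN_SIZE units, takes a single DMA allocation (rsrc->qpmem) that holds SQ, RQ and shadow area back to back, points the user-kernel QP at rsrc's pre-allocated wrtrk/wrid arrays, writes the context with i40iw_puda_qp_setctx(), and creates the QP through the CQP command path when dev->ceq_valid, or by building the WQE directly otherwise; on failure the DMA memory is freed. The carve-up implied by lines 585-586, with the shadow-area size, alignment and error code left as assumptions:

sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
t_size  = sq_size + rq_size + shadow_area_size;         /* shadow_area_size: assumed */

if (i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size, alignment))
        return I40IW_ERR_NO_MEMORY;                     /* error code assumed */

ukqp->sq_base     = rsrc->qpmem.va;                     /* SQ at the start of qpmem */
ukqp->rq_base     = &ukqp->sq_base[rsrc->sq_size];      /* RQ immediately after     */
ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;  /* shadow area after the RQ */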
626 * @rsrc: resource for cq
670 * @rsrc: resource for which cq to create
672 static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
674 struct i40iw_sc_dev *dev = rsrc->dev;
675 struct i40iw_sc_cq *cq = &rsrc->cq;
682 cq->vsi = rsrc->vsi;
683 cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
685 ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
690 mem = &rsrc->cqmem;
693 info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
695 info.shadow_read_threshold = rsrc->cq_size >> 2;
701 init_info->cq_size = rsrc->cq_size;
702 init_info->cq_id = rsrc->cq_id;
708 if (rsrc->dev->ceq_valid)
714 i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
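i40iw_puda_cq_create() (670-714) mirrors the QP path for the completion queue: cq_size entries of struct i40iw_cqe plus a shadow area go into rsrc->cqmem, the CQ type is chosen from rsrc->type, shadow_read_threshold is set to a quarter of the queue, and creation again uses the CQP command path when ceq_valid; cqmem is freed on failure. The sizing and setup, condensed; the shadow/padding term, alignment and error code are assumptions:

cqsize = rsrc->cq_size * sizeof(struct i40iw_cqe);
tsize  = cqsize + shadow_area_size;                     /* shadow_area_size: assumed */
if (i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize, alignment))
        return I40IW_ERR_NO_MEMORY;                     /* error code assumed */

info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
            I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
info.shadow_read_threshold = rsrc->cq_size >> 2;        /* a quarter of the queue */
init_info->cq_size = rsrc->cq_size;
init_info->cq_id   = rsrc->cq_id;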
720 * @rsrc: resource for which qp to free
722 static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
726 struct i40iw_sc_dev *dev = rsrc->dev;
728 if (rsrc->dev->ceq_valid) {
729 i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
733 ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
753 * @rsrc: resource for which cq to free
755 static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
759 struct i40iw_sc_dev *dev = rsrc->dev;
761 if (rsrc->dev->ceq_valid) {
762 i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
765 ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
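Teardown (720-765) mirrors creation: when dev->ceq_valid the destroy goes through the CQP command wrappers, otherwise the privileged ops tables are called directly. Sketch of that split for the CQ; the QP path at 728-733 is analogous, and the debug message wording is illustrative:

if (rsrc->dev->ceq_valid) {
        i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);               /* CQP command path */
        return;
}
ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);      /* direct/privileged path */
if (ret)
        i40iw_debug(dev, I40IW_DEBUG_PUDA,
                    "%s: error cq destroy\n", __func__);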
794 struct i40iw_puda_rsrc *rsrc;
801 rsrc = vsi->ilq;
805 rsrc = vsi->ieq;
814 switch (rsrc->completion) {
816 i40iw_free_hash_desc(rsrc->hash_desc);
820 i40iw_puda_free_qp(rsrc);
822 i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
826 i40iw_puda_free_cq(rsrc);
828 i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
831 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
835 buf = rsrc->alloclist;
840 rsrc->alloc_buf_count--;
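The resource teardown around 794-840 (i40iw_puda_dele_resources() in the driver) picks vsi->ilq or vsi->ieq, unwinds only what was actually built using rsrc->completion as the high-water mark, and finally walks rsrc->alloclist freeing every buffer while decrementing alloc_buf_count. The switch reads most naturally as falling through from the furthest stage reached; the fall-through arrangement below is inferred, not shown in the matches:

switch (rsrc->completion) {
case PUDA_HASH_CRC_COMPLETE:
        i40iw_free_hash_desc(rsrc->hash_desc);
        /* fall through */
case PUDA_QP_CREATED:
        i40iw_puda_free_qp(rsrc);
        i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
        /* fall through */
case PUDA_CQ_CREATED:
        i40iw_puda_free_cq(rsrc);
        i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
        break;
default:
        i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
        break;
}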
847 * @rsrc: resource for buffer allocation
850 static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
858 buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
860 rsrc->stats_buf_alloc_fail++;
863 i40iw_puda_ret_bufpool(rsrc, buf);
864 rsrc->alloc_buf_count++;
865 if (!rsrc->alloclist) {
866 rsrc->alloclist = buf;
868 nextbuf = rsrc->alloclist;
869 rsrc->alloclist = buf;
873 rsrc->avail_buf_count = rsrc->alloc_buf_count;
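i40iw_puda_allocbufs() (847-873) fills the pool at init time: each buffer of rsrc->buf_size goes straight into the pool via i40iw_puda_ret_bufpool() and is also pushed onto the rsrc->alloclist chain so teardown can find it no matter where it is queued later; avail_buf_count ends equal to alloc_buf_count. The loop, reconstructed from the matches; the alloclist link field name and error code are assumptions:

for (i = 0; i < count; i++) {
        buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
        if (!buf) {
                rsrc->stats_buf_alloc_fail++;
                return I40IW_ERR_NO_MEMORY;             /* error code assumed */
        }
        i40iw_puda_ret_bufpool(rsrc, buf);              /* immediately usable */
        rsrc->alloc_buf_count++;
        if (!rsrc->alloclist) {
                rsrc->alloclist = buf;                  /* first buffer starts the chain */
        } else {
                nextbuf = rsrc->alloclist;              /* push at the head of alloclist */
                rsrc->alloclist = buf;
                buf->next = nextbuf;                    /* "next" link: assumed field */
        }
}
rsrc->avail_buf_count = rsrc->alloc_buf_count;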
887 struct i40iw_puda_rsrc *rsrc;
911 rsrc = (struct i40iw_puda_rsrc *)vmem->va;
912 spin_lock_init(&rsrc->bufpool_lock);
916 rsrc->receive = info->receive;
917 rsrc->xmit_complete = info->xmit_complete;
922 rsrc->receive = i40iw_ieq_receive;
923 rsrc->xmit_complete = i40iw_ieq_tx_compl;
926 rsrc->type = info->type;
927 rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
928 rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
930 INIT_LIST_HEAD(&rsrc->bufpool);
931 INIT_LIST_HEAD(&rsrc->txpend);
933 rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
934 dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
935 rsrc->qp_id = info->qp_id;
936 rsrc->cq_id = info->cq_id;
937 rsrc->sq_size = info->sq_size;
938 rsrc->rq_size = info->rq_size;
939 rsrc->cq_size = info->rq_size + info->sq_size;
940 rsrc->buf_size = info->buf_size;
941 rsrc->dev = dev;
942 rsrc->vsi = vsi;
944 ret = i40iw_puda_cq_create(rsrc);
946 rsrc->completion = PUDA_CQ_CREATED;
947 ret = i40iw_puda_qp_create(rsrc);
954 rsrc->completion = PUDA_QP_CREATED;
956 ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
963 rsrc->rxq_invalid_cnt = info->rq_size;
964 ret = i40iw_puda_replenish_rq(rsrc, true);
969 if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
970 rsrc->check_crc = true;
971 rsrc->completion = PUDA_HASH_CRC_COMPLETE;
976 dev->ccq_ops->ccq_arm(&rsrc->cq);
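The creation path at 887-976 lays the rsrc struct, the SQ work-tracking array and the RQ wrid array into one virtual allocation, installs the caller's receive/xmit_complete callbacks (defaulting to i40iw_ieq_receive/i40iw_ieq_tx_compl for the IEQ), reserves one SQ WQE (tx_wqe_avail_cnt = sq_size - 1), sets cq_size = rq_size + sq_size, and then brings the pieces up in order, recording progress in rsrc->completion so i40iw_puda_dele_resources() can unwind a partial bring-up. The staged sequence, condensed; the error labels and the exact unwind call are assumptions:

ret = i40iw_puda_cq_create(rsrc);
if (ret)
        goto error;
rsrc->completion = PUDA_CQ_CREATED;

ret = i40iw_puda_qp_create(rsrc);
if (ret)
        goto error;
rsrc->completion = PUDA_QP_CREATED;

ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
if (ret)
        goto error;

rsrc->rxq_invalid_cnt = info->rq_size;          /* every RQ slot still needs a buffer */
ret = i40iw_puda_replenish_rq(rsrc, true);
if (ret)
        goto error;

if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {  /* CRC/hash setup, 0 == success */
        rsrc->check_crc = true;
        rsrc->completion = PUDA_HASH_CRC_COMPLETE;
}

dev->ccq_ops->ccq_arm(&rsrc->cq);               /* start taking completions */
return 0;

error:
i40iw_puda_dele_resources(vsi, info->type, false);      /* unwind; exact call assumed */
return ret;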