Lines Matching refs:rsrc
34 * @rsrc: resource to use for buffer
36 struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)
39 struct list_head *list = &rsrc->bufpool;
42 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
45 rsrc->avail_buf_count--;
46 buf->vsi = rsrc->vsi;
48 rsrc->stats_buf_alloc_fail++;
50 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
56 * irdma_puda_ret_bufpool - return buffer to rsrc list
57 * @rsrc: resource to use for buffer
60 void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
66 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
67 list_add(&buf->list, &rsrc->bufpool);
68 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
69 rsrc->avail_buf_count++;
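
The two fragments above implement a spinlock-protected free list of preallocated buffers. A minimal sketch of how the get/return pair could fit together; only the lines shown in the listing are verbatim, the empty-pool check and list removal between them are assumptions, and the struct definitions come from the driver's puda.h:

/* Sketch reconstructed from the matching lines; the non-matching lines
 * (empty check, list_first_entry/list_del) are assumptions, not source.
 */
struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)
{
	struct irdma_puda_buf *buf = NULL;
	struct list_head *list = &rsrc->bufpool;
	unsigned long flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	if (!list_empty(list)) {
		/* take the first free buffer and tag it with the owning vsi */
		buf = list_first_entry(list, struct irdma_puda_buf, list);
		list_del(&buf->list);
		rsrc->avail_buf_count--;
		buf->vsi = rsrc->vsi;
	} else {
		rsrc->stats_buf_alloc_fail++;
	}
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);

	return buf;
}

void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
			    struct irdma_puda_buf *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	list_add(&buf->list, &rsrc->bufpool);
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
	rsrc->avail_buf_count++;
}
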
74 * @rsrc: resource ptr
79 static void irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
83 struct irdma_sc_qp *qp = &rsrc->qp;
87 dma_sync_single_for_device(rsrc->dev->hw->device, buf->mem.pa,
113 * @rsrc: resource to use for buffer
116 static int irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
119 u32 invalid_cnt = rsrc->rxq_invalid_cnt;
123 buf = irdma_puda_get_bufpool(rsrc);
126 irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
127 rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
128 rsrc->rxq_invalid_cnt--;
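
Lines 116-128 refill the receive queue: one buffer is pulled from the pool and posted per invalidated RQ entry, advancing rx_wqe_idx modulo rq_size. A sketch of the loop those lines imply; the loop bounds and the -ENOBUFS return are assumptions:

static int irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
{
	u32 i;
	u32 invalid_cnt = rsrc->rxq_invalid_cnt;
	struct irdma_puda_buf *buf = NULL;

	for (i = 0; i < invalid_cnt; i++) {
		buf = irdma_puda_get_bufpool(rsrc);
		if (!buf)
			return -ENOBUFS;	/* assumed error code; pool empty */
		irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
		rsrc->rx_wqe_idx = (rsrc->rx_wqe_idx + 1) % rsrc->rq_size;
		rsrc->rxq_invalid_cnt--;
	}

	return 0;
}
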
331 struct irdma_puda_rsrc *rsrc;
336 rsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? cq->vsi->ilq :
351 if (!qp || !rsrc) {
356 if (qp->qp_id != rsrc->qp_id) {
371 rsrc->stats_rcvd_pkt_err++;
373 irdma_ilq_putback_rcvbuf(&rsrc->qp, buf,
376 irdma_puda_ret_bufpool(rsrc, buf);
377 irdma_puda_replenish_rq(rsrc, false);
382 rsrc->stats_pkt_rcvd++;
383 rsrc->compl_rxwqe_idx = info.wqe_idx;
385 rsrc->receive(rsrc->vsi, buf);
387 irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx);
389 irdma_puda_replenish_rq(rsrc, false);
400 rsrc->xmit_complete(rsrc->vsi, buf);
401 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
402 rsrc->tx_wqe_avail_cnt++;
403 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
404 if (!list_empty(&rsrc->txpend))
405 irdma_puda_send_buf(rsrc, NULL);
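
In the completion handler (lines 331-405) the RQ path returns or re-posts the receive buffer, while the SQ path hands the buffer to the transmit-complete callback and frees a send WQE slot. A sketch of just the SQ branch, lifted into a hypothetical helper for clarity; this helper does not exist in the driver, and the surrounding poll loop and CQE parsing are omitted:

/* Illustrative helper showing what lines 400-405 do on a send completion. */
static void puda_handle_sq_cmpl(struct irdma_puda_rsrc *rsrc,
				struct irdma_puda_buf *buf)
{
	unsigned long flags;

	/* hand the finished buffer back to its owner (ILQ or IEQ) */
	rsrc->xmit_complete(rsrc->vsi, buf);

	/* a send WQE slot is free again */
	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
	rsrc->tx_wqe_avail_cnt++;
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);

	/* if sends were queued while the SQ was full, push the next one */
	if (!list_empty(&rsrc->txpend))
		irdma_puda_send_buf(rsrc, NULL);
}
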
498 * @rsrc: resource to use for buffer
501 void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
508 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
512 if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
513 list_add_tail(&buf->list, &rsrc->txpend);
514 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
515 rsrc->stats_sent_pkt_q++;
516 if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
517 ibdev_dbg(to_ibdev(rsrc->dev),
521 rsrc->tx_wqe_avail_cnt--;
526 buf = irdma_puda_get_listbuf(&rsrc->txpend);
537 if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
545 dma_sync_single_for_cpu(rsrc->dev->hw->device, buf->mem.pa,
547 ret = irdma_puda_send(&rsrc->qp, &info);
549 rsrc->tx_wqe_avail_cnt++;
550 rsrc->stats_sent_pkt_q++;
551 list_add(&buf->list, &rsrc->txpend);
552 if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
553 ibdev_dbg(to_ibdev(rsrc->dev),
556 rsrc->stats_pkt_sent++;
559 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
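
Lines 501-559 either queue the buffer on txpend (no SQ slot free, or older sends still pending) or consume a slot and post it, re-queuing it on failure. A sketch of that flow; the send-info setup, the GEN_2 branch at line 537, the DMA direction, and the mem.size field are assumptions:

void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
			 struct irdma_puda_buf *buf)
{
	struct irdma_puda_send_info info = {};
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rsrc->bufpool_lock, flags);

	/* no SQ space, or older sends are still pending: queue and return */
	if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
		list_add_tail(&buf->list, &rsrc->txpend);
		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
		rsrc->stats_sent_pkt_q++;
		return;
	}

	rsrc->tx_wqe_avail_cnt--;
	/* called with buf == NULL from the completion path: send the oldest
	 * pending buffer instead */
	if (!buf)
		buf = irdma_puda_get_listbuf(&rsrc->txpend);

	/* ... fill info from buf, plus GEN_2 handling (omitted) ... */

	dma_sync_single_for_cpu(rsrc->dev->hw->device, buf->mem.pa,
				buf->mem.size, DMA_BIDIRECTIONAL);
	ret = irdma_puda_send(&rsrc->qp, &info);
	if (ret) {
		/* post failed: return the WQE slot and re-queue the buffer */
		rsrc->tx_wqe_avail_cnt++;
		rsrc->stats_sent_pkt_q++;
		list_add(&buf->list, &rsrc->txpend);
	} else {
		rsrc->stats_pkt_sent++;
	}
	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
}
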
564 * @rsrc: qp's resource
566 static void irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
568 struct irdma_sc_qp *qp = &rsrc->qp;
577 FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size));
582 FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) |
583 FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id));
585 FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx));
588 FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid));
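
Lines 577-588 pack per-resource parameters (buffer size as the send MSS, the shared TX/RX CQ number, optional stats instance) into 64-bit QP-context words with FIELD_PREP(). A small, self-contained illustration of that pattern; the EXAMPLE_* masks below are made up and are not the IRDMAQPC_* definitions:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative masks only; the real IRDMAQPC_* masks live in the driver. */
#define EXAMPLE_TXCQNUM	GENMASK_ULL(18, 0)
#define EXAMPLE_RXCQNUM	GENMASK_ULL(40, 22)

static u64 example_pack_cq_ids(u32 tx_cq, u32 rx_cq)
{
	/* FIELD_PREP() shifts each value into its mask's bit position */
	return FIELD_PREP(EXAMPLE_TXCQNUM, tx_cq) |
	       FIELD_PREP(EXAMPLE_RXCQNUM, rx_cq);
}
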
642 * @rsrc: resource to use for buffer
644 static int irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
646 struct irdma_sc_qp *qp = &rsrc->qp;
652 sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;
653 rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;
654 rsrc->qpmem.size = ALIGN((sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) + IRDMA_QP_CTX_SIZE),
656 rsrc->qpmem.va = dma_alloc_coherent(rsrc->dev->hw->device,
657 rsrc->qpmem.size, &rsrc->qpmem.pa,
659 if (!rsrc->qpmem.va)
662 mem = &rsrc->qpmem;
663 memset(mem->va, 0, rsrc->qpmem.size);
664 qp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
665 qp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
666 qp->pd = &rsrc->sc_pd;
668 qp->dev = rsrc->dev;
669 qp->qp_uk.back_qp = rsrc;
672 qp->vsi = rsrc->vsi;
674 ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
675 ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
681 ukqp->qp_id = rsrc->qp_id;
682 ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
683 ukqp->rq_wrid_array = rsrc->rq_wrid_array;
684 ukqp->sq_size = rsrc->sq_size;
685 ukqp->rq_size = rsrc->rq_size;
692 ret = rsrc->dev->ws_add(qp->vsi, qp->user_pri);
694 dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
695 rsrc->qpmem.va, rsrc->qpmem.pa);
696 rsrc->qpmem.va = NULL;
701 irdma_puda_qp_setctx(rsrc);
703 if (rsrc->dev->ceq_valid)
704 ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);
706 ret = irdma_puda_qp_wqe(rsrc->dev, qp);
709 rsrc->dev->ws_remove(qp->vsi, qp->user_pri);
710 dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
711 rsrc->qpmem.va, rsrc->qpmem.pa);
712 rsrc->qpmem.va = NULL;
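
Lines 652-675 size one coherent DMA allocation to hold the SQ ring, the RQ ring, the shadow area, and the hardware QP context, then carve the user-kernel queue pointers out of it. A sketch of that layout as a hypothetical helper; the ALIGN() granularity is cut off in the listing, so IRDMA_HW_PAGE_SIZE is an assumption, and error unwinding is trimmed:

/* Illustrative helper (not in the driver) showing the qpmem layout. */
static int puda_qp_alloc_rings(struct irdma_puda_rsrc *rsrc,
			       struct irdma_qp_uk *ukqp)
{
	u32 sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;
	u32 rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;

	rsrc->qpmem.size = ALIGN(sq_size + rq_size +
				 (IRDMA_SHADOW_AREA_SIZE << 3) +
				 IRDMA_QP_CTX_SIZE, IRDMA_HW_PAGE_SIZE);
	rsrc->qpmem.va = dma_alloc_coherent(rsrc->dev->hw->device,
					    rsrc->qpmem.size, &rsrc->qpmem.pa,
					    GFP_KERNEL);
	if (!rsrc->qpmem.va)
		return -ENOMEM;

	memset(rsrc->qpmem.va, 0, rsrc->qpmem.size);
	ukqp->sq_base = rsrc->qpmem.va;				/* SQ ring first */
	ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];		/* RQ ring follows */
	ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;	/* then shadow area */
	/* the HW QP context occupies the remainder of the allocation */
	return 0;
}
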
773 * @rsrc: resource for which cq to create
775 static int irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
777 struct irdma_sc_dev *dev = rsrc->dev;
778 struct irdma_sc_cq *cq = &rsrc->cq;
785 cq->vsi = rsrc->vsi;
786 cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));
787 rsrc->cqmem.size = ALIGN(cqsize + sizeof(struct irdma_cq_shadow_area),
789 rsrc->cqmem.va = dma_alloc_coherent(dev->hw->device, rsrc->cqmem.size,
790 &rsrc->cqmem.pa, GFP_KERNEL);
791 if (!rsrc->cqmem.va)
794 mem = &rsrc->cqmem;
796 info.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ?
798 info.shadow_read_threshold = rsrc->cq_size >> 2;
803 init_info->cq_size = rsrc->cq_size;
804 init_info->cq_id = rsrc->cq_id;
807 info.vsi = rsrc->vsi;
813 if (rsrc->dev->ceq_valid)
819 dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
820 rsrc->cqmem.va, rsrc->cqmem.pa);
821 rsrc->cqmem.va = NULL;
829 * @rsrc: resource for which qp to free
831 static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
835 struct irdma_sc_dev *dev = rsrc->dev;
837 if (rsrc->dev->ceq_valid) {
838 irdma_cqp_qp_destroy_cmd(dev, &rsrc->qp);
839 rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
843 ret = irdma_sc_qp_destroy(&rsrc->qp, 0, false, true, true);
856 rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
861 * @rsrc: resource for which cq to free
863 static void irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)
867 struct irdma_sc_dev *dev = rsrc->dev;
869 if (rsrc->dev->ceq_valid) {
870 irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);
874 ret = irdma_sc_cq_destroy(&rsrc->cq, 0, true);
896 struct irdma_puda_rsrc *rsrc;
905 rsrc = vsi->ilq;
909 irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
912 rsrc = vsi->ieq;
916 irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
924 switch (rsrc->cmpl) {
926 irdma_free_hash_desc(rsrc->hash_desc);
929 irdma_qp_rem_qos(&rsrc->qp);
932 irdma_puda_free_qp(rsrc);
934 dma_free_coherent(dev->hw->device, rsrc->qpmem.size,
935 rsrc->qpmem.va, rsrc->qpmem.pa);
936 rsrc->qpmem.va = NULL;
940 irdma_puda_free_cq(rsrc);
942 dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
943 rsrc->cqmem.va, rsrc->cqmem.pa);
944 rsrc->cqmem.va = NULL;
947 ibdev_dbg(to_ibdev(rsrc->dev), "PUDA: error no resources\n");
951 buf = rsrc->alloclist;
956 rsrc->alloc_buf_count--;
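
Lines 924-947 tear resources down in reverse order of creation by switching on rsrc->cmpl, which the create path advances as each stage completes (lines 1075, 1084, 1100). A sketch of that switch as a hypothetical helper; the case/fallthrough placement is inferred from the matching lines:

/* Illustrative helper showing the staged teardown implied by the listing. */
static void puda_dele_rsrc_teardown(struct irdma_sc_dev *dev,
				    struct irdma_puda_rsrc *rsrc)
{
	switch (rsrc->cmpl) {
	case PUDA_HASH_CRC_COMPLETE:
		irdma_free_hash_desc(rsrc->hash_desc);
		fallthrough;
	case PUDA_QP_CREATED:
		irdma_qp_rem_qos(&rsrc->qp);
		irdma_puda_free_qp(rsrc);
		dma_free_coherent(dev->hw->device, rsrc->qpmem.size,
				  rsrc->qpmem.va, rsrc->qpmem.pa);
		rsrc->qpmem.va = NULL;
		fallthrough;
	case PUDA_CQ_CREATED:
		irdma_puda_free_cq(rsrc);
		dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
				  rsrc->cqmem.va, rsrc->cqmem.pa);
		rsrc->cqmem.va = NULL;
		break;
	default:
		ibdev_dbg(to_ibdev(rsrc->dev), "PUDA: error no resources\n");
		break;
	}
}
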
964 * @rsrc: resource for buffer allocation
967 static int irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
974 buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
976 rsrc->stats_buf_alloc_fail++;
979 irdma_puda_ret_bufpool(rsrc, buf);
980 rsrc->alloc_buf_count++;
981 if (!rsrc->alloclist) {
982 rsrc->alloclist = buf;
984 nextbuf = rsrc->alloclist;
985 rsrc->alloclist = buf;
990 rsrc->avail_buf_count = rsrc->alloc_buf_count;
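
Lines 967-990 allocate count buffers, return each one to the buffer pool, and also thread every allocation onto rsrc->alloclist so the whole set can be freed at teardown (lines 951-956). A sketch of the loop; the next link field and the -ENOMEM return are assumptions:

static int irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
{
	u32 i;
	struct irdma_puda_buf *buf;
	struct irdma_puda_buf *nextbuf;

	for (i = 0; i < count; i++) {
		buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
		if (!buf) {
			rsrc->stats_buf_alloc_fail++;
			return -ENOMEM;		/* assumed error code */
		}
		irdma_puda_ret_bufpool(rsrc, buf);	/* make it available */
		rsrc->alloc_buf_count++;

		/* prepend to alloclist so every allocation can be freed later */
		if (!rsrc->alloclist) {
			rsrc->alloclist = buf;
		} else {
			nextbuf = rsrc->alloclist;
			rsrc->alloclist = buf;
			buf->next = nextbuf;	/* `next` link is assumed */
		}
	}

	rsrc->avail_buf_count = rsrc->alloc_buf_count;
	return 0;
}
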
1005 struct irdma_puda_rsrc *rsrc;
1029 rsrc = vmem->va;
1030 spin_lock_init(&rsrc->bufpool_lock);
1035 rsrc->receive = info->receive;
1036 rsrc->xmit_complete = info->xmit_complete;
1041 rsrc->receive = irdma_ieq_receive;
1042 rsrc->xmit_complete = irdma_ieq_tx_compl;
1048 rsrc->type = info->type;
1049 rsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)
1051 rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
1053 INIT_LIST_HEAD(&rsrc->bufpool);
1054 INIT_LIST_HEAD(&rsrc->txpend);
1056 rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
1057 irdma_sc_pd_init(dev, &rsrc->sc_pd, info->pd_id, info->abi_ver);
1058 rsrc->qp_id = info->qp_id;
1059 rsrc->cq_id = info->cq_id;
1060 rsrc->sq_size = info->sq_size;
1061 rsrc->rq_size = info->rq_size;
1062 rsrc->cq_size = info->rq_size + info->sq_size;
1064 if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
1065 rsrc->cq_size += info->rq_size;
1067 rsrc->buf_size = info->buf_size;
1068 rsrc->dev = dev;
1069 rsrc->vsi = vsi;
1070 rsrc->stats_idx = info->stats_idx;
1071 rsrc->stats_idx_valid = info->stats_idx_valid;
1073 ret = irdma_puda_cq_create(rsrc);
1075 rsrc->cmpl = PUDA_CQ_CREATED;
1076 ret = irdma_puda_qp_create(rsrc);
1081 rsrc->type, ret);
1084 rsrc->cmpl = PUDA_QP_CREATED;
1086 ret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
1092 rsrc->rxq_invalid_cnt = info->rq_size;
1093 ret = irdma_puda_replenish_rq(rsrc, true);
1098 if (!irdma_init_hash_desc(&rsrc->hash_desc)) {
1099 rsrc->check_crc = true;
1100 rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
1105 irdma_sc_ccq_arm(&rsrc->cq);
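
The fragment starting at line 1005 shows the resource create path: initialize the PD, create the CQ and QP, allocate tx_buf_cnt + rq_size buffers, pre-post the entire RQ, optionally set up a CRC hash descriptor for the IEQ, and arm the CQ. A hypothetical caller-side sketch of the info fields this path consumes; the enclosing function is not named in the listing (in the driver it is irdma_puda_create_rsrc()), and the sizes and callbacks below are placeholders:

/* vsi is the owning struct irdma_sc_vsi; values are placeholders only. */
struct irdma_puda_rsrc_info info = {
	.type          = IRDMA_PUDA_RSRC_TYPE_ILQ,
	.sq_size       = 1024,			/* placeholder queue sizes */
	.rq_size       = 1024,
	.buf_size      = 1024,
	.tx_buf_cnt    = 1024,
	.receive       = my_ilq_receive,	/* placeholder callbacks; for the
						 * IEQ type the fragment installs
						 * irdma_ieq_receive/_tx_compl */
	.xmit_complete = my_ilq_tx_done,
	/* qp_id, cq_id, pd_id, abi_ver, stats_idx and stats_idx_valid are
	 * also consumed by the fragment above */
};

ret = irdma_puda_create_rsrc(vsi, &info);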