Lines Matching refs:lnk

47 static int smc_ib_modify_qp_init(struct smc_link *lnk)
54 qp_attr.port_num = lnk->ibport;
57 return ib_modify_qp(lnk->roce_qp, &qp_attr,
62 static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
72 qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
74 rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
75 if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway)
77 rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, hop_lim, 0);
78 rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
79 if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway)
80 memcpy(&qp_attr.ah_attr.roce.dmac, lnk->lgr->nexthop_mac,
81 sizeof(lnk->lgr->nexthop_mac));
83 memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
84 sizeof(lnk->peer_mac));
85 qp_attr.dest_qp_num = lnk->peer_qpn;
86 qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
92 return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
95 int smc_ib_modify_qp_rts(struct smc_link *lnk)
104 qp_attr.sq_psn = lnk->psn_initial; /* starting send packet seq # */
108 return ib_modify_qp(lnk->roce_qp, &qp_attr,
114 int smc_ib_modify_qp_error(struct smc_link *lnk)
120 return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
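
The four helpers above drive the standard RC queue-pair state machine with ib_modify_qp(). Below is a minimal sketch of those transitions, assuming a plain RoCE setup: the example_qp_to_* names, the access flags, the MTU and the timer/retry constants are illustrative placeholders, not the values smc_ib.c actually uses.

        #include <linux/etherdevice.h>
        #include <rdma/ib_verbs.h>

        /* RESET -> INIT: bind the QP to a port and grant access rights. */
        static int example_qp_to_init(struct ib_qp *qp, u8 port)
        {
                struct ib_qp_attr qp_attr = {
                        .qp_state        = IB_QPS_INIT,
                        .pkey_index      = 0,
                        .port_num        = port,
                        .qp_access_flags = IB_ACCESS_LOCAL_WRITE |
                                           IB_ACCESS_REMOTE_WRITE,
                };

                return ib_modify_qp(qp, &qp_attr,
                                    IB_QP_STATE | IB_QP_PKEY_INDEX |
                                    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
        }

        /* INIT -> RTR: program the path to the peer (GID and MAC via the
         * address handle), the peer QP number and the first expected
         * receive PSN.
         */
        static int example_qp_to_rtr(struct ib_qp *qp, u8 port, u8 sgid_index,
                                     const u8 *peer_gid, const u8 *peer_mac,
                                     u32 peer_qpn, u32 peer_psn)
        {
                struct ib_qp_attr qp_attr = {
                        .qp_state           = IB_QPS_RTR,
                        .path_mtu           = IB_MTU_1024,  /* placeholder */
                        .dest_qp_num        = peer_qpn,
                        .rq_psn             = peer_psn,
                        .max_dest_rd_atomic = 1,
                        .min_rnr_timer      = 5,
                };

                qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
                rdma_ah_set_port_num(&qp_attr.ah_attr, port);
                /* hop limit 1; the SMC-Rv2 gateway case above raises it */
                rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, sgid_index, 1, 0);
                rdma_ah_set_dgid_raw(&qp_attr.ah_attr, peer_gid);
                /* peer MAC, or the nexthop MAC when routed via a gateway */
                ether_addr_copy(qp_attr.ah_attr.roce.dmac, peer_mac);

                return ib_modify_qp(qp, &qp_attr,
                                    IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
                                    IB_QP_DEST_QPN | IB_QP_RQ_PSN |
                                    IB_QP_MAX_DEST_RD_ATOMIC |
                                    IB_QP_MIN_RNR_TIMER);
        }

        /* RTR -> RTS: set the initial send PSN and retry behaviour.
         * Moving a QP to IB_QPS_ERR (the error helper above) only needs
         * IB_QP_STATE in the mask.
         */
        static int example_qp_to_rts(struct ib_qp *qp, u32 sq_psn)
        {
                struct ib_qp_attr qp_attr = {
                        .qp_state      = IB_QPS_RTS,
                        .timeout       = 15,
                        .retry_cnt     = 7,
                        .rnr_retry     = 7,
                        .sq_psn        = sq_psn,
                        .max_rd_atomic = 1,
                };

                return ib_modify_qp(qp, &qp_attr,
                                    IB_QP_STATE | IB_QP_TIMEOUT |
                                    IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
                                    IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC);
        }
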
123 int smc_ib_ready_link(struct smc_link *lnk)
125 struct smc_link_group *lgr = smc_get_lgr(lnk);
128 rc = smc_ib_modify_qp_init(lnk);
132 rc = smc_ib_modify_qp_rtr(lnk);
135 smc_wr_remember_qp_attr(lnk);
136 rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
140 rc = smc_wr_rx_post_init(lnk);
143 smc_wr_remember_qp_attr(lnk);
146 rc = smc_ib_modify_qp_rts(lnk);
149 smc_wr_remember_qp_attr(lnk);
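
smc_ib_ready_link() strings those transitions together in a fixed order: INIT, RTR, re-arm the receive CQ, pre-post receive work requests so the peer's first sends cannot be dropped, and finally RTS (in the kernel that last step is gated on the link group's role, which this lnk-filtered listing does not show). A sketch of that ordering, reusing the hypothetical example_qp_to_* helpers above; rx_wrs and the PSN parameters are illustrative.

        static int example_ready_link(struct ib_qp *qp, struct ib_cq *recv_cq,
                                      struct ib_recv_wr *rx_wrs,
                                      u8 port, u8 sgid_index,
                                      const u8 *peer_gid, const u8 *peer_mac,
                                      u32 peer_qpn, u32 peer_psn, u32 local_psn)
        {
                const struct ib_recv_wr *bad_wr;
                int rc;

                rc = example_qp_to_init(qp, port);
                if (rc)
                        return rc;
                rc = example_qp_to_rtr(qp, port, sgid_index, peer_gid, peer_mac,
                                       peer_qpn, peer_psn);
                if (rc)
                        return rc;
                /* arm the receive CQ before completions can arrive */
                rc = ib_req_notify_cq(recv_cq, IB_CQ_SOLICITED_MASK);
                if (rc)
                        return rc;
                /* pre-post receive buffers before the peer may transmit */
                rc = ib_post_recv(qp, rx_wrs, &bad_wr);
                if (rc)
                        return rc;
                return example_qp_to_rts(qp, local_psn);
        }
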
341 if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
342 lgr->lnk[i].smcibdev != smcibdev)
344 if (!smc_ib_check_link_gid(lgr->lnk[i].gid,
446 void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
448 if (lnk->roce_pd)
449 ib_dealloc_pd(lnk->roce_pd);
450 lnk->roce_pd = NULL;
453 int smc_ib_create_protection_domain(struct smc_link *lnk)
457 lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
458 rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
459 if (IS_ERR(lnk->roce_pd))
460 lnk->roce_pd = NULL;
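
The protection-domain helpers above follow the usual ERR_PTR convention of ib_alloc_pd(). A minimal sketch of that pattern, with example_ctx as an illustrative stand-in for struct smc_link and its roce_pd/roce_qp fields:

        #include <linux/err.h>
        #include <rdma/ib_verbs.h>

        struct example_ctx {            /* illustrative stand-in for smc_link */
                struct ib_device *ibdev;
                struct ib_pd *roce_pd;
                struct ib_qp *roce_qp;
        };

        static int example_create_pd(struct example_ctx *ctx)
        {
                int rc;

                ctx->roce_pd = ib_alloc_pd(ctx->ibdev, 0);
                rc = PTR_ERR_OR_ZERO(ctx->roce_pd);
                if (IS_ERR(ctx->roce_pd))
                        ctx->roce_pd = NULL;    /* never cache an ERR_PTR */
                return rc;
        }

        static void example_destroy_pd(struct example_ctx *ctx)
        {
                if (ctx->roce_pd)
                        ib_dealloc_pd(ctx->roce_pd);
                ctx->roce_pd = NULL;
        }
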
476 if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
477 lgr->lnk[i].smcibdev != smcibdev)
631 struct smc_link *lnk = (struct smc_link *)priv;
632 struct smc_ib_device *smcibdev = lnk->smcibdev;
650 void smc_ib_destroy_queue_pair(struct smc_link *lnk)
652 if (lnk->roce_qp)
653 ib_destroy_qp(lnk->roce_qp);
654 lnk->roce_qp = NULL;
658 int smc_ib_create_queue_pair(struct smc_link *lnk)
660 int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1;
663 .qp_context = lnk,
664 .send_cq = lnk->smcibdev->roce_cq_send,
665 .recv_cq = lnk->smcibdev->roce_cq_recv,
682 lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
683 rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
684 if (IS_ERR(lnk->roce_qp))
685 lnk->roce_qp = NULL;
687 smc_wr_remember_qp_attr(lnk);
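
smc_ib_create_queue_pair() allocates a reliable-connected QP on that protection domain, wiring in the send/recv CQs and passing the link as qp_context so the async event handler can cast it back from priv (the cast visible above). A sketch reusing the hypothetical example_ctx from the protection-domain snippet; the work-request and SGE capacities are placeholders, with two receive SGEs mirroring the SMC_V2 branch in the listing.

        static int example_create_qp(struct example_ctx *ctx,
                                     struct ib_cq *send_cq, struct ib_cq *recv_cq,
                                     void (*event_handler)(struct ib_event *, void *))
        {
                struct ib_qp_init_attr qp_attr = {
                        .event_handler = event_handler,
                        .qp_context = ctx,  /* handed back as priv on async events */
                        .send_cq = send_cq,
                        .recv_cq = recv_cq,
                        .srq = NULL,
                        .cap = {
                                .max_send_wr  = 16,     /* placeholder sizes */
                                .max_recv_wr  = 16,
                                .max_send_sge = 1,
                                .max_recv_sge = 2,      /* 2 SGEs per buf for SMC_V2 */
                        },
                        .sq_sig_type = IB_SIGNAL_REQ_WR,
                        .qp_type = IB_QPT_RC,
                };
                int rc;

                ctx->roce_qp = ib_create_qp(ctx->roce_pd, &qp_attr);
                rc = PTR_ERR_OR_ZERO(ctx->roce_qp);
                if (IS_ERR(ctx->roce_qp))
                        ctx->roce_qp = NULL;
                return rc;
        }

        static void example_destroy_qp(struct example_ctx *ctx)
        {
                if (ctx->roce_qp)
                        ib_destroy_qp(ctx->roce_qp);
                ctx->roce_qp = NULL;
        }
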
734 bool smc_ib_is_sg_need_sync(struct smc_link *lnk,
742 for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
743 buf_slot->sgt[lnk->link_idx].nents, i) {
746 if (dma_need_sync(lnk->smcibdev->ibdev->dma_device,
758 void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
765 if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
769 for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
770 buf_slot->sgt[lnk->link_idx].nents, i) {
773 ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
781 void smc_ib_sync_sg_for_device(struct smc_link *lnk,
788 if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
792 for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
793 buf_slot->sgt[lnk->link_idx].nents, i) {
796 ib_dma_sync_single_for_device(lnk->smcibdev->ibdev,
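
The sync helpers above avoid needless cache maintenance: at map time each SG entry is tested with dma_need_sync(), a per-link bit in is_dma_need_sync records whether the buffer needs syncing at all, and only then are the per-entry ib_dma_sync_single_for_{cpu,device}() calls issued around RDMA transfers. A condensed sketch of that walk; the function names, the sg_table parameter and the DMA direction are illustrative. The device-direction variant is symmetric, using ib_dma_sync_single_for_device().

        #include <linux/dma-mapping.h>
        #include <linux/scatterlist.h>
        #include <rdma/ib_verbs.h>

        static bool example_sg_need_sync(struct ib_device *ibdev,
                                         struct sg_table *sgt)
        {
                struct scatterlist *sg;
                unsigned int i;

                for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                        if (!sg_dma_len(sg))
                                break;
                        if (dma_need_sync(ibdev->dma_device, sg_dma_address(sg)))
                                return true;    /* one entry is enough */
                }
                return false;
        }

        static void example_sync_for_cpu(struct ib_device *ibdev,
                                         struct sg_table *sgt)
        {
                struct scatterlist *sg;
                unsigned int i;

                for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                        if (!sg_dma_len(sg))
                                break;
                        ib_dma_sync_single_for_cpu(ibdev, sg_dma_address(sg),
                                                   sg_dma_len(sg),
                                                   DMA_FROM_DEVICE);
                }
        }
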
804 int smc_ib_buf_map_sg(struct smc_link *lnk,
810 mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
811 buf_slot->sgt[lnk->link_idx].sgl,
812 buf_slot->sgt[lnk->link_idx].orig_nents,
820 void smc_ib_buf_unmap_sg(struct smc_link *lnk,
824 if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
827 ib_dma_unmap_sg(lnk->smcibdev->ibdev,
828 buf_slot->sgt[lnk->link_idx].sgl,
829 buf_slot->sgt[lnk->link_idx].orig_nents,
831 buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
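
Finally, the map/unmap pair: ib_dma_map_sg() is handed orig_nents and may coalesce entries (a return of 0 means failure), unmapping always passes orig_nents again, and zeroing the first dma_address afterwards is the sentinel that the guard in smc_ib_buf_unmap_sg() checks so a table is never unmapped twice. A short sketch under the same illustrative assumptions as the previous snippet:

        #include <linux/errno.h>

        static int example_map_sg(struct ib_device *ibdev, struct sg_table *sgt,
                                  enum dma_data_direction dir)
        {
                int mapped_nents;

                mapped_nents = ib_dma_map_sg(ibdev, sgt->sgl, sgt->orig_nents, dir);
                return mapped_nents ? mapped_nents : -ENOMEM;   /* 0 means failure */
        }

        static void example_unmap_sg(struct ib_device *ibdev, struct sg_table *sgt,
                                     enum dma_data_direction dir)
        {
                if (!sgt->sgl->dma_address)
                        return;   /* sentinel: never mapped or already unmapped */

                ib_dma_unmap_sg(ibdev, sgt->sgl, sgt->orig_nents, dir);
                sgt->sgl->dma_address = 0;
        }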