Lines matching refs: lnk (cross-reference listing; judging by the function names, the excerpts are from the Linux kernel's SMC-R InfiniBand helpers in net/smc/smc_ib.c, and the leading number on each line is its line number in that file)
43 static int smc_ib_modify_qp_init(struct smc_link *lnk)
50 qp_attr.port_num = lnk->ibport;
53 return ib_modify_qp(lnk->roce_qp, &qp_attr,
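Lines 43-53 are the RESET->INIT transition. A minimal sketch of how they plausibly fit together, assuming the conventional access flags and attribute mask for an SMC-R link (neither appears in the listing, because those lines do not mention lnk):

static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	/* assumption: the link needs local and remote write access */
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				  IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}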
58 static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
67 qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
69 rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
70 rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0);
71 rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
72 memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
73 sizeof(lnk->peer_mac));
74 qp_attr.dest_qp_num = lnk->peer_qpn;
75 qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
81 return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
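Lines 58-81 are the INIT->RTR transition: the address handle is filled with the local port and sgid_index plus the peer's raw GID and MAC (RoCE carries the L2 address explicitly, hence the dmac copy), the path MTU is clamped to the smaller of the two sides, and the receive queue is seeded with the peer's QP number and starting PSN. A sketch under the assumption that the attribute mask and the max_dest_rd_atomic/min_rnr_timer values follow the usual RC-QP pattern:

static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	/* GRH: no dgid pointer here (set raw below), flow label 0,
	 * local sgid_index, hop limit 1, traffic class 0
	 */
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1;	/* assumed: 1 incoming RDMA read */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER; /* assumed constant */
	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}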
84 int smc_ib_modify_qp_rts(struct smc_link *lnk)
93 qp_attr.sq_psn = lnk->psn_initial; /* starting send packet seq # */
97 return ib_modify_qp(lnk->roce_qp, &qp_attr,
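Lines 84-97 are the RTR->RTS transition, which arms the send side: ack timeout, retry counters, and the initial send PSN. A sketch; the SMC_QP_* constants and the read-atomic setting are assumptions, since only the lnk-referencing lines appear in the listing:

int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* assumed: local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* assumed: transport retries */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* assumed: RNR retries */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;		/* assumed: 1 outstanding read */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}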
103 int smc_ib_modify_qp_error(struct smc_link *lnk)
109 return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
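Lines 103-109 move the QP to the error state, which makes the device flush all outstanding work requests with flush-error completions; only IB_QP_STATE needs to be set. The full helper is essentially what the listing already shows:

int smc_ib_modify_qp_error(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}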
112 int smc_ib_ready_link(struct smc_link *lnk)
114 struct smc_link_group *lgr = smc_get_lgr(lnk);
117 rc = smc_ib_modify_qp_init(lnk);
121 rc = smc_ib_modify_qp_rtr(lnk);
124 smc_wr_remember_qp_attr(lnk);
125 rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
129 rc = smc_wr_rx_post_init(lnk);
132 smc_wr_remember_qp_attr(lnk);
135 rc = smc_ib_modify_qp_rts(lnk);
138 smc_wr_remember_qp_attr(lnk);
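Lines 112-138 show smc_ib_ready_link() driving the whole sequence: INIT, RTR, re-arm the receive CQ, post the initial receive buffers, then continue to RTS, apparently only on one side, since the RTS call at line 135 sits behind a condition that does not reference lnk. A sketch assuming the branch tests the link group's role, which would match SMC-R's handshake where the client defers RTS until the CLC confirm arrives:

int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	int rc;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;
	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	/* arm the recv CQ before the first receive buffers are posted */
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	if (lgr->role == SMC_SERV) {	/* assumed condition */
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}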
311 void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
313 if (lnk->roce_pd)
314 ib_dealloc_pd(lnk->roce_pd);
315 lnk->roce_pd = NULL;
318 int smc_ib_create_protection_domain(struct smc_link *lnk)
322 lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
323 rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
324 if (IS_ERR(lnk->roce_pd))
325 lnk->roce_pd = NULL;
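Lines 311-325 are the protection-domain pair. Creation uses the PTR_ERR_OR_ZERO() idiom: extract the return code from a possible ERR_PTR first, then overwrite the field with NULL so no poisoned pointer survives. A sketch with the (assumed) trailing return:

int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;	/* never keep an ERR_PTR around */
	return rc;
}

Because smc_ib_dealloc_protection_domain() checks lnk->roce_pd before calling ib_dealloc_pd() and then NULLs it, teardown stays safe even when allocation failed or it runs twice.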
331 struct smc_link *lnk = (struct smc_link *)priv;
332 struct smc_ib_device *smcibdev = lnk->smcibdev;
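Lines 331-332 sit inside what looks like the QP async event handler (the void *priv context registered as .qp_context at QP creation, see line 362). A speculative sketch of such a handler; the function name, the handled event types, and the port_event_mask/port_event_work members are all assumptions here:

static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	struct smc_link *lnk = (struct smc_link *)priv;
	struct smc_ib_device *smcibdev = lnk->smcibdev;
	u8 port_idx;

	switch (ibevent->event) {
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_ACCESS_ERR:
		/* assumed: fatal QP events are funneled into a per-port
		 * event worker on the owning smc_ib_device
		 */
		port_idx = lnk->ibport - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}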
350 void smc_ib_destroy_queue_pair(struct smc_link *lnk)
352 if (lnk->roce_qp)
353 ib_destroy_qp(lnk->roce_qp);
354 lnk->roce_qp = NULL;
358 int smc_ib_create_queue_pair(struct smc_link *lnk)
362 .qp_context = lnk,
363 .send_cq = lnk->smcibdev->roce_cq_send,
364 .recv_cq = lnk->smcibdev->roce_cq_recv,
380 lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
381 rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
382 if (IS_ERR(lnk->roce_qp))
383 lnk->roce_qp = NULL;
385 smc_wr_remember_qp_attr(lnk);
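Lines 350-385 cover QP teardown and creation. ib_create_qp() takes the link's PD and an ib_qp_init_attr that wires the link in as context and points at the device-wide send/receive CQs; on success the resulting attributes are cached via smc_wr_remember_qp_attr(). A sketch; the event handler name, the queue capacities, and the signaling type are assumptions:

int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler, /* assumed name */
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
			.max_send_wr = SMC_WR_BUF_CNT * 3, /* assumed sizing */
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR, /* assumed: signal per WR */
		.qp_type = IB_QPT_RC,	/* SMC-R uses reliable connected QPs */
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}

smc_ib_destroy_queue_pair() mirrors the PD teardown: guard on lnk->roce_qp, call ib_destroy_qp(), then NULL the pointer.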
432 void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
440 for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
441 buf_slot->sgt[lnk->link_idx].nents, i) {
444 ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
452 void smc_ib_sync_sg_for_device(struct smc_link *lnk,
460 for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
461 buf_slot->sgt[lnk->link_idx].nents, i) {
464 ib_dma_sync_single_for_device(lnk->smcibdev->ibdev,
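Lines 432-464 are the two DMA sync helpers; each link has its own mapping of the buffer, hence the sgt[lnk->link_idx] indexing. Both walk the mapped scatterlist entries and issue the matching ib_dma_sync_single_* call per entry. A sketch of the CPU-side variant (the device-side one is identical except for ib_dma_sync_single_for_device()); the early break on an unmapped entry is assumed:

void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;	/* assumed: stop at first unmapped entry */
		ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}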
472 int smc_ib_buf_map_sg(struct smc_link *lnk,
478 mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
479 buf_slot->sgt[lnk->link_idx].sgl,
480 buf_slot->sgt[lnk->link_idx].orig_nents,
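Lines 472-480 map the buffer slot for DMA. ib_dma_map_sg() returns the number of entries actually mapped, or 0 on failure, so the wrapper only needs to translate 0 into an error code. A sketch; the -ENOMEM translation is an assumption:

int smc_ib_buf_map_sg(struct smc_link *lnk,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
				     buf_slot->sgt[lnk->link_idx].sgl,
				     buf_slot->sgt[lnk->link_idx].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;	/* assumed error code */
	return mapped_nents;
}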
488 void smc_ib_buf_unmap_sg(struct smc_link *lnk,
492 if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
495 ib_dma_unmap_sg(lnk->smcibdev->ibdev,
496 buf_slot->sgt[lnk->link_idx].sgl,
497 buf_slot->sgt[lnk->link_idx].orig_nents,
499 buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
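Lines 488-499 are the unmap side, essentially complete in the listing. The first entry's dma_address doubles as an is-mapped flag: return early when it is already 0, unmap over orig_nents otherwise, and clear the flag afterwards so a repeated unmap is a no-op:

void smc_ib_buf_unmap_sg(struct smc_link *lnk,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
		return;	/* already unmapped */

	ib_dma_unmap_sg(lnk->smcibdev->ibdev,
			buf_slot->sgt[lnk->link_idx].sgl,
			buf_slot->sgt[lnk->link_idx].orig_nents,
			data_direction);
	buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
}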