Lines matching defs:gsi (definition and use sites of the gsi state in the mlx5 driver's GSI/QP1 queue-pair implementation)
46 /* Call with gsi->lock locked */
49 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
54 for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;
56 wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
65 gsi->outstanding_ci = index;
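
The loop at lines 54-65 is the consumer side of the outstanding-WR ring: outstanding_ci chases outstanding_pi, and each free-running counter is mapped to a slot with index % gsi->cap.max_send_wr. Below is a minimal standalone C model of that ring; the names (CAP, ring, drain) are illustrative, not the driver's, and the break on a still-pending slot mirrors behavior the driver has on lines the search did not match.

/*
 * Standalone model of the outstanding-WR ring drained at lines 54-65:
 * free-running 32-bit producer/consumer counters, slot = counter % CAP.
 * Illustrative names, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>

#define CAP 4                           /* stands in for gsi->cap.max_send_wr */

struct slot { int completed; uint64_t wr_id; };

static struct slot ring[CAP];
static uint32_t ci, pi;                 /* like outstanding_ci / outstanding_pi */

static void drain(void)
{
        uint32_t index;

        /* mirrors: for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; ...) */
        for (index = ci; index != pi; index++) {
                struct slot *s = &ring[index % CAP];

                if (!s->completed)
                        break;          /* stop at the first still-pending WR */
                printf("completed wr_id=%llu\n", (unsigned long long)s->wr_id);
                s->completed = 0;
        }
        ci = index;                     /* like: gsi->outstanding_ci = index */
}

int main(void)
{
        for (pi = 0; pi < 3; pi++) {    /* producer claims three slots */
                ring[pi % CAP].wr_id = pi;
                ring[pi % CAP].completed = 1;
        }
        drain();                        /* prints wr_id 0..2; ci catches up to pi */
        return 0;
}

Keeping the counters free-running rather than wrapping them at the capacity makes the empty test a plain ci == pi comparison and lets pi - ci give the occupancy directly.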
70 struct mlx5_ib_gsi_qp *gsi = cq->cq_context;
73 struct mlx5_ib_qp *mqp = container_of(gsi, struct mlx5_ib_qp, gsi);
77 spin_lock_irqsave(&gsi->lock, flags);
85 spin_unlock_irqrestore(&gsi->lock, flags);
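
Line 73 recovers the queue pair that embeds the GSI state: the CQ carries gsi as its context (see line 133 below), and container_of() walks back from the embedded member to the enclosing mlx5_ib_qp. A standalone sketch of that pattern, with illustrative struct names:

/*
 * Standalone demo of the container_of() pattern used at line 73: given a
 * pointer to an embedded member, recover the enclosing struct.  The struct
 * names are illustrative, not the driver's.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct gsi_state { int port_num; };

struct qp {
        int qp_num;
        struct gsi_state gsi;           /* embedded, like mlx5_ib_qp's gsi */
};

int main(void)
{
        struct qp mqp = { .qp_num = 1, .gsi = { .port_num = 2 } };
        struct gsi_state *gsi = &mqp.gsi;   /* e.g. stashed as cq->cq_context */

        /* same shape as: container_of(gsi, struct mlx5_ib_qp, gsi) */
        struct qp *owner = container_of(gsi, struct qp, gsi);

        printf("qp_num=%d port=%d\n", owner->qp_num, owner->gsi.port_num);
        return 0;
}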
92 struct mlx5_ib_gsi_qp *gsi;
106 gsi = &mqp->gsi;
107 gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);
108 if (!gsi->tx_qps)
111 gsi->outstanding_wrs =
112 kcalloc(attr->cap.max_send_wr, sizeof(*gsi->outstanding_wrs),
114 if (!gsi->outstanding_wrs) {
121 if (dev->devr.ports[port_num - 1].gsi) {
127 gsi->num_qps = num_qps;
128 spin_lock_init(&gsi->lock);
130 gsi->cap = attr->cap;
131 gsi->port_num = port_num;
133 gsi->cq = ib_alloc_cq(pd->device, gsi, attr->cap.max_send_wr, 0,
135 if (IS_ERR(gsi->cq)) {
137 PTR_ERR(gsi->cq));
138 ret = PTR_ERR(gsi->cq);
143 hw_init_attr.send_cq = gsi->cq;
150 gsi->rx_qp = mlx5_ib_create_qp(pd, &hw_init_attr, NULL);
151 if (IS_ERR(gsi->rx_qp)) {
153 PTR_ERR(gsi->rx_qp));
154 ret = PTR_ERR(gsi->rx_qp);
157 gsi->rx_qp->device = pd->device;
158 gsi->rx_qp->pd = pd;
159 gsi->rx_qp->real_qp = gsi->rx_qp;
161 gsi->rx_qp->qp_type = hw_init_attr.qp_type;
162 gsi->rx_qp->send_cq = hw_init_attr.send_cq;
163 gsi->rx_qp->recv_cq = hw_init_attr.recv_cq;
164 gsi->rx_qp->event_handler = hw_init_attr.event_handler;
165 spin_lock_init(&gsi->rx_qp->mr_lock);
166 INIT_LIST_HEAD(&gsi->rx_qp->rdma_mrs);
167 INIT_LIST_HEAD(&gsi->rx_qp->sig_mrs);
169 dev->devr.ports[attr->port_num - 1].gsi = gsi;
176 ib_free_cq(gsi->cq);
179 kfree(gsi->outstanding_wrs);
181 kfree(gsi->tx_qps);
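
Lines 92-181 acquire resources in a fixed order (the tx_qps array, the outstanding_wrs array, the send CQ, then the hardware QP), and the error paths at 176-181 release them in reverse. A standalone sketch of that goto-unwind idiom; fail_at is an illustrative knob for simulating each allocation failing:

/*
 * Standalone sketch of the goto-based unwind visible at lines 176-181:
 * acquire in order, release in reverse on failure.  Illustrative names.
 */
#include <stdio.h>
#include <stdlib.h>

static int create_all(int fail_at)
{
        void *tx_qps, *wrs, *cq;

        tx_qps = fail_at == 1 ? NULL : calloc(8, sizeof(void *));
        if (!tx_qps)
                return -1;              /* nothing acquired yet */

        wrs = fail_at == 2 ? NULL : calloc(16, sizeof(void *));
        if (!wrs)
                goto err_free_tx;

        cq = fail_at == 3 ? NULL : malloc(1);   /* stands in for ib_alloc_cq() */
        if (!cq)
                goto err_free_wrs;

        puts("all resources created");
        free(cq);                       /* success-path teardown, for the demo */
        free(wrs);
        free(tx_qps);
        return 0;

err_free_wrs:
        free(wrs);
err_free_tx:
        free(tx_qps);
        return -1;
}

int main(void)
{
        int i;

        for (i = 0; i <= 3; i++)
                printf("fail_at=%d -> %d\n", i, create_all(i));
        return 0;
}

Each label frees exactly what was acquired before the failing step, so every failure path is leak-free; the driver's error labels follow the same discipline.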
188 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
189 const int port_num = gsi->port_num;
194 ret = mlx5_ib_destroy_qp(gsi->rx_qp, NULL);
201 dev->devr.ports[port_num - 1].gsi = NULL;
203 gsi->rx_qp = NULL;
205 for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {
206 if (!gsi->tx_qps[qp_index])
208 WARN_ON_ONCE(ib_destroy_qp(gsi->tx_qps[qp_index]));
209 gsi->tx_qps[qp_index] = NULL;
212 ib_free_cq(gsi->cq);
214 kfree(gsi->outstanding_wrs);
215 kfree(gsi->tx_qps);
221 static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
223 struct ib_pd *pd = gsi->rx_qp->pd;
225 .event_handler = gsi->rx_qp->event_handler,
226 .qp_context = gsi->rx_qp->qp_context,
227 .send_cq = gsi->cq,
228 .recv_cq = gsi->rx_qp->recv_cq,
230 .max_send_wr = gsi->cap.max_send_wr,
231 .max_send_sge = gsi->cap.max_send_sge,
232 .max_inline_data = gsi->cap.max_inline_data,
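
Lines 223-232 show create_gsi_ud_qp() cloning most of its ib_qp_init_attr from the QP1's own attributes, with the send CQ swapped for gsi->cq so TX completions land on the GSI completion handler. A hedged completion of that initializer follows; the qp_type and sq_sig_type fields are assumptions about the lines the search did not match, not the driver's verbatim code:

/*
 * Hedged completion of the initializer at lines 223-232.  Only the fields
 * copied from rx_qp/gsi appear in the matched lines; qp_type and sq_sig_type
 * below are assumptions about the unmatched remainder.
 */
static struct ib_qp *create_gsi_ud_qp_sketch(struct mlx5_ib_gsi_qp *gsi)
{
        struct ib_pd *pd = gsi->rx_qp->pd;
        struct ib_qp_init_attr init_attr = {
                .event_handler = gsi->rx_qp->event_handler,
                .qp_context = gsi->rx_qp->qp_context,
                .send_cq = gsi->cq,             /* TX completions go to the GSI CQ */
                .recv_cq = gsi->rx_qp->recv_cq,
                .cap = {
                        .max_send_wr = gsi->cap.max_send_wr,
                        .max_send_sge = gsi->cap.max_send_sge,
                        .max_inline_data = gsi->cap.max_inline_data,
                },
                .qp_type = IB_QPT_UD,           /* assumption: plain UD QP */
                .sq_sig_type = IB_SIGNAL_ALL_WR, /* assumption */
        };

        return ib_create_qp(pd, &init_attr);
}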
241 static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
253 attr.port_num = gsi->port_num;
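
modify_to_rts() (lines 241-253) walks a freshly created UD QP through the standard verbs state ladder; only attr.port_num = gsi->port_num and the pkey_index parameter are visible in the matches. The sketch below fills in the canonical INIT -> RTR -> RTS sequence for a QP1-style UD QP using stock ib_modify_qp() calls, so treat everything except port_num and pkey_index as reconstruction:

/*
 * Sketch of the UD-QP state ladder behind modify_to_rts() (lines 241-253).
 * Reconstructed from the standard verbs sequence; only port_num and
 * pkey_index are confirmed by the matched lines.
 */
static int modify_to_rts_sketch(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
                                u16 pkey_index)
{
        struct ib_qp_attr attr = {};
        int ret;

        attr.qp_state = IB_QPS_INIT;
        attr.pkey_index = pkey_index;
        attr.qkey = IB_QP1_QKEY;                /* GSI traffic uses the QP1 Q_Key */
        attr.port_num = gsi->port_num;          /* visible at line 253 */
        ret = ib_modify_qp(qp, &attr,
                           IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY |
                           IB_QP_PORT);
        if (ret)
                return ret;

        attr.qp_state = IB_QPS_RTR;             /* UD QPs need no path info here */
        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
        if (ret)
                return ret;

        attr.qp_state = IB_QPS_RTS;
        attr.sq_psn = 0;
        return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
}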
281 static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
283 struct ib_device *device = gsi->rx_qp->device;
295 ret = ib_query_pkey(device, gsi->port_num, pkey_index, &pkey);
298 gsi->port_num, qp_index);
304 gsi->port_num, qp_index);
308 spin_lock_irqsave(&gsi->lock, flags);
309 qp = gsi->tx_qps[qp_index];
310 spin_unlock_irqrestore(&gsi->lock, flags);
313 gsi->port_num, qp_index);
317 qp = create_gsi_ud_qp(gsi);
327 ret = modify_to_rts(gsi, qp, pkey_index);
331 spin_lock_irqsave(&gsi->lock, flags);
332 WARN_ON_ONCE(gsi->tx_qps[qp_index]);
333 gsi->tx_qps[qp_index] = qp;
334 spin_unlock_irqrestore(&gsi->lock, flags);
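
setup_qp() (lines 281-334) reads tx_qps[qp_index] under gsi->lock, drops the lock for the slow QP creation and state transitions, then retakes it to install the result; the WARN_ON_ONCE at line 332 catches a racing installer. A standalone pthread model of that peek/create/install pattern (compile with cc -pthread; names are illustrative):

/*
 * Standalone model of the lock pattern at lines 308-334: peek under the
 * lock, create outside it, re-lock to install.  Illustrative names.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *tx_qps[2];

static void setup_slot(int qp_index)
{
        void *qp, *existing;

        pthread_mutex_lock(&lock);
        existing = tx_qps[qp_index];    /* like reading gsi->tx_qps[qp_index] */
        pthread_mutex_unlock(&lock);
        if (existing)
                return;                 /* already set up */

        qp = malloc(1);                 /* slow path: create + modify to RTS */

        pthread_mutex_lock(&lock);
        assert(!tx_qps[qp_index]);      /* like WARN_ON_ONCE(gsi->tx_qps[...]) */
        tx_qps[qp_index] = qp;
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        setup_slot(0);
        setup_slot(0);                  /* second call sees the slot filled */
        printf("slot0=%p slot1=%p\n", tx_qps[0], tx_qps[1]);
        free(tx_qps[0]);
        return 0;
}

Dropping the lock across the slow path keeps QP creation out of the critical section; the re-check on installation is what makes that safe.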
342 static void setup_qps(struct mlx5_ib_gsi_qp *gsi)
344 struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
348 for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
349 setup_qp(gsi, qp_index);
358 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
363 ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask);
369 if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS)
370 setup_qps(gsi);
379 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
382 ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr);
383 qp_init_attr->cap = gsi->cap;
387 /* Call with gsi->lock locked */
391 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
392 struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
395 if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) {
400 gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %
401 gsi->cap.max_send_wr];
402 gsi->outstanding_pi++;
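
Line 395 tests ring fullness with outstanding_pi == outstanding_ci + cap.max_send_wr, and lines 400-402 claim the slot at pi % max_send_wr before bumping pi. Because the counters are unsigned and free-running, the full and empty predicates stay correct across 32-bit wraparound, which this standalone check demonstrates (CAP stands in for max_send_wr):

/*
 * Standalone check of the free-running-counter predicates at line 395:
 * full when pi == ci + CAP, empty when pi == ci, valid across wraparound
 * because unsigned arithmetic is modular.
 */
#include <stdint.h>
#include <stdio.h>

#define CAP 8u                          /* stands in for gsi->cap.max_send_wr */

int main(void)
{
        uint32_t ci = UINT32_MAX - 3;   /* consumer about to wrap */
        uint32_t pi = ci;
        unsigned int i;

        for (i = 0; i < CAP; i++) {
                if (pi == ci + CAP)     /* the line-395 full test */
                        break;
                pi++;                   /* reserve a slot, like outstanding_pi++ */
        }
        printf("reserved %u of %u slots\n", pi - ci, CAP);
        printf("full now? %s\n", pi == ci + CAP ? "yes" : "no");
        return 0;
}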
419 /* Call with gsi->lock locked */
439 /* Call with gsi->lock locked */
440 static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
442 struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
446 if (!gsi->num_qps)
447 return gsi->rx_qp;
452 if (qp_index >= gsi->num_qps)
455 return gsi->tx_qps[qp_index];
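
get_tx_qp() (lines 440-455) routes a send to one per-PKey UD QP: with no TX QPs it falls back to the QP1 itself, and an out-of-range index fails the post. The driver derives qp_index from the work request (not visible in the matched lines); in the standalone model below it is simply a parameter:

/*
 * Standalone model of the TX-QP selection at lines 440-455.
 * Strings stand in for QP pointers; names are illustrative.
 */
#include <stdio.h>

#define NUM_QPS 2

static const char *rx_qp = "rx_qp";
static const char *tx_qps[NUM_QPS] = { "tx_qp[0]", "tx_qp[1]" };

static const char *get_tx_qp_model(unsigned int num_qps, unsigned int qp_index)
{
        if (!num_qps)
                return rx_qp;           /* no per-PKey QPs: use the QP1 itself */
        if (qp_index >= num_qps)
                return NULL;            /* invalid index: caller fails the post */
        return tx_qps[qp_index];
}

int main(void)
{
        const char *qp;

        printf("%s\n", get_tx_qp_model(0, 0));          /* rx_qp */
        printf("%s\n", get_tx_qp_model(NUM_QPS, 1));    /* tx_qp[1] */
        qp = get_tx_qp_model(NUM_QPS, 5);
        printf("%s\n", qp ? qp : "(none)");             /* out of range */
        return 0;
}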
462 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
472 spin_lock_irqsave(&gsi->lock, flags);
473 tx_qp = get_tx_qp(gsi, &cur_wr);
478 spin_unlock_irqrestore(&gsi->lock, flags);
489 gsi->outstanding_pi--;
492 spin_unlock_irqrestore(&gsi->lock, flags);
498 spin_unlock_irqrestore(&gsi->lock, flags);
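
mlx5_ib_gsi_post_send() (lines 462-498) claims an outstanding slot while holding gsi->lock and, if the underlying post fails, rolls the claim back with outstanding_pi-- at line 489. The lock makes the reserve/undo pair atomic with respect to the completion path; the unlocked standalone model below shows just the rollback arithmetic:

/*
 * Standalone model of the reserve-then-roll-back pattern at lines 472-492:
 * bump the producer to claim a slot, undo the bump if the post fails.
 * Illustrative names; the driver does this under gsi->lock.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pi;                     /* like gsi->outstanding_pi */

static int post_one(int should_fail)
{
        uint32_t slot = pi++;           /* reserve, like mlx5_ib_add_outstanding_wr() */

        if (should_fail) {
                pi--;                   /* roll back, like line 489 */
                return -1;
        }
        printf("posted into slot %u\n", slot);
        return 0;
}

int main(void)
{
        post_one(0);                    /* slot 0 */
        post_one(1);                    /* fails; reservation undone */
        post_one(0);                    /* reuses slot 1 */
        return 0;
}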
507 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
509 return ib_post_recv(gsi->rx_qp, wr, bad_wr);
512 void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi)
514 if (!gsi)
517 setup_qps(gsi);