// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

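/* Configure RX steering and RSS on a vPort: build a variable-length
 * MANA_CONFIG_VPORT_RX request (header followed by a full-size
 * indirection table), fill it in from the verbs parameters, and send it
 * to the hardware with mana_gd_send_request().
 */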
static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
				      struct net_device *ndev,
				      mana_handle_t default_rxobj,
				      mana_handle_t ind_table[],
				      u32 log_ind_tbl_size, u32 rx_hash_key_len,
				      u8 *rx_hash_key)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	struct mana_cfg_rx_steer_req_v2 *req;
	struct mana_cfg_rx_steer_resp resp = {};
	mana_handle_t *req_indir_tab;
	struct gdma_context *gc;
	struct gdma_dev *mdev;
	u32 req_buf_size;
	int i, err;

	mdev = dev->gdma_dev;
	gc = mdev->gdma_context;

	req_buf_size =
		sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->hdr.req.msg_version = GDMA_MESSAGE_V2;

	req->vport = mpc->port_handle;
	req->rx_enable = 1;
	req->update_default_rxobj = 1;
	req->default_rxobj = default_rxobj;
	req->hdr.dev_id = mdev->dev_id;

	/* Enable RSS if the indirection table has more than one entry */
	if (log_ind_tbl_size)
		req->rss_enable = true;

	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
	req->indir_tab_offset = sizeof(*req);
	req->update_indir_tab = true;
	req->cqe_coalescing_enable = 1;

	req_indir_tab = (mana_handle_t *)(req + 1);
	/* The indirection table passed to the hardware must always have
	 * MANA_INDIRECT_TABLE_SIZE entries; if the verbs ind_table is
	 * shorter, replicate its entries round-robin to fill the table.
	 */
	ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
		req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
		ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
			  req_indir_tab[i]);
	}

	req->update_hashkey = true;
	if (rx_hash_key_len)
		memcpy(req->hashkey, rx_hash_key, rx_hash_key_len);
	else
		netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);

	ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
		  req->vport, default_rxobj);

	err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
		    mpc->port_handle, log_ind_tbl_size);

out:
	kfree(req);
	return err;
}

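/* Create an RSS "QP". No send queue is created here: each receive WQ in
 * the indirection table is turned into a hardware WQ/CQ object pair, and
 * the resulting handles are programmed into the vPort's RX steering
 * table via mana_ib_cfg_vport_steering().
 */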
static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(pd->device, struct mana_ib_dev, ib_dev);
	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
	struct mana_ib_create_qp_rss_resp resp = {};
	struct mana_ib_create_qp_rss ucmd = {};
	struct gdma_dev *gd = mdev->gdma_dev;
	mana_handle_t *mana_ind_table;
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct mana_ib_cq *cq;
	struct mana_ib_wq *wq;
	unsigned int ind_tbl_size;
	struct ib_cq *ibcq;
	struct ib_wq *ibwq;
	int i = 0;
	u32 port;
	int ret;

	mc = gd->driver_data;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy from udata for create rss-qp, err %d\n",
			  ret);
		return ret;
	}

	if (attr->cap.max_recv_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_wr %d exceeding limit\n",
			  attr->cap.max_recv_wr);
		return -EINVAL;
	}

	if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_sge %d exceeding limit\n",
			  attr->cap.max_recv_sge);
		return -EINVAL;
	}

	ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
	if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Indirect table size %d exceeding limit\n",
			  ind_tbl_size);
		return -EINVAL;
	}

	if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
		ibdev_dbg(&mdev->ib_dev,
			  "RX Hash function is not supported, %d\n",
			  ucmd.rx_hash_function);
		return -EINVAL;
	}

	/* IB ports are 1-based; MANA ports are 0-based */
	port = ucmd.port;
	if (port < 1 || port > mc->num_ports) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	ndev = mc->ports[port - 1];
	mpc = netdev_priv(ndev);

	ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
		  ucmd.rx_hash_function, port);

	mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
				 GFP_KERNEL);
	if (!mana_ind_table) {
		ret = -ENOMEM;
		goto fail;
	}

	qp->port = port;

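	/* For each receive WQ in the indirection table, create a hardware
	 * WQ/CQ object pair on the port and report the queue IDs back to
	 * userspace through the response.
	 */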
	for (i = 0; i < ind_tbl_size; i++) {
		struct mana_obj_spec wq_spec = {};
		struct mana_obj_spec cq_spec = {};

		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);

		ibcq = ibwq->cq;
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		wq_spec.gdma_region = wq->gdma_region;
		wq_spec.queue_size = wq->wq_buf_size;

		cq_spec.gdma_region = cq->gdma_region;
		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
		cq_spec.modr_ctx_id = 0;
		cq_spec.attached_eq = GDMA_CQ_NO_EQ;

		ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
					 &wq_spec, &cq_spec, &wq->rx_object);
		if (ret)
			goto fail;

		/* The GDMA regions are now owned by the WQ object */
		wq->gdma_region = GDMA_INVALID_DMA_REGION;
		cq->gdma_region = GDMA_INVALID_DMA_REGION;

		wq->id = wq_spec.queue_index;
		cq->id = cq_spec.queue_index;

		ibdev_dbg(&mdev->ib_dev,
			  "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
			  ret, wq->rx_object, wq->id, cq->id);

		resp.entries[i].cqid = cq->id;
		resp.entries[i].wqid = wq->id;

		mana_ind_table[i] = wq->rx_object;
	}
	resp.num_entries = i;

	ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
					 mana_ind_table,
					 ind_tbl->log_ind_tbl_size,
					 ucmd.rx_hash_key_len,
					 ucmd.rx_hash_key);
	if (ret)
		goto fail;

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy to udata create rss-qp, %d\n",
			  ret);
		goto fail;
	}

	kfree(mana_ind_table);

	return 0;

fail:
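	/* Destroy the hardware WQ objects created so far; i indexes the
	 * first entry that was not successfully created.
	 */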
	while (i-- > 0) {
		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	kfree(mana_ind_table);

	return ret;
}

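/* Create a raw-packet QP: pin the userspace send-queue buffer, map it as
 * a GDMA DMA region, and create a hardware SQ/CQ object pair on the same
 * port handle used by the Ethernet driver.
 */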
static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_cq *send_cq =
		container_of(attr->send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_ucontext *mana_ucontext =
		rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
					  ibucontext);
	struct mana_ib_create_qp_resp resp = {};
	struct gdma_dev *gd = mdev->gdma_dev;
	struct mana_ib_create_qp ucmd = {};
	struct mana_obj_spec wq_spec = {};
	struct mana_obj_spec cq_spec = {};
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct ib_umem *umem;
	int err;
	u32 port;

	mc = gd->driver_data;

	if (!mana_ucontext || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy from udata create qp-raw, %d\n", err);
		return err;
	}

	/* IB ports are 1-based; MANA Ethernet ports are 0-based */
	port = ucmd.port;
	if (port < 1 || port > mc->num_ports)
		return -EINVAL;

	if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_wr %d exceeding limit\n",
			  attr->cap.max_send_wr);
		return -EINVAL;
	}

	if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_sge %d exceeding limit\n",
			  attr->cap.max_send_sge);
		return -EINVAL;
	}

	ndev = mc->ports[port - 1];
	mpc = netdev_priv(ndev);
	ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);

	err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext->doorbell);
	if (err)
		return -ENODEV;

	qp->port = port;

	ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
		  ucmd.sq_buf_addr, ucmd.port);

	umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to get umem for create qp-raw, err %d\n",
			  err);
		goto err_free_vport;
	}
	qp->sq_umem = umem;

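	/* Register the pinned buffer with the hardware as a GDMA DMA
	 * region; it will back the send work queue created below.
	 */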
	err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
					   &qp->sq_gdma_region);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create dma region for create qp-raw, %d\n",
			  err);
		goto err_release_umem;
	}

	ibdev_dbg(&mdev->ib_dev,
		  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
		  err, qp->sq_gdma_region);

	/* Create a WQ on the same port handle used by the Ethernet driver */
	wq_spec.gdma_region = qp->sq_gdma_region;
	wq_spec.queue_size = ucmd.sq_buf_size;

	cq_spec.gdma_region = send_cq->gdma_region;
	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
	cq_spec.modr_ctx_id = 0;
	cq_spec.attached_eq = GDMA_CQ_NO_EQ;

	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
				 &cq_spec, &qp->tx_object);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create wq for create raw-qp, err %d\n",
			  err);
		goto err_destroy_dma_region;
	}

	/* The GDMA regions are now owned by the WQ object */
	qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
	send_cq->gdma_region = GDMA_INVALID_DMA_REGION;

	qp->sq_id = wq_spec.queue_index;
	send_cq->id = cq_spec.queue_index;

	ibdev_dbg(&mdev->ib_dev,
		  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
		  qp->tx_object, qp->sq_id, send_cq->id);

	resp.sqid = qp->sq_id;
	resp.cqid = send_cq->id;
	resp.tx_vp_offset = pd->tx_vp_offset;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy udata for create qp-raw, %d\n",
			  err);
		goto err_destroy_wq_obj;
	}

	return 0;

err_destroy_wq_obj:
	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

err_destroy_dma_region:
	mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);

err_release_umem:
	ib_umem_release(umem);

err_free_vport:
	mana_ib_uncfg_vport(mdev, pd, port - 1);

	return err;
}

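/* Verbs entry point for QP creation. Only IB_QPT_RAW_PACKET is
 * supported, either as an RSS QP (when a RWQ indirection table is given)
 * or as a regular raw-packet QP.
 */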
int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		      struct ib_udata *udata)
{
	switch (attr->qp_type) {
	case IB_QPT_RAW_PACKET:
		/* When rwq_ind_tbl is used, it's for creating WQs for RSS */
		if (attr->rwq_ind_tbl)
			return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,
						     udata);

		return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
	default:
		/* Only IB_QPT_RAW_PACKET QPs are supported */
		ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
			  attr->qp_type);
	}

	return -EINVAL;
}

int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	/* modify_qp is not supported by this version of the driver */
	return -EOPNOTSUPP;
}

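/* Tear down an RSS QP by destroying the hardware WQ object behind each
 * receive WQ in the indirection table.
 */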
static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
				  struct ib_rwq_ind_table *ind_tbl,
				  struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct gdma_dev *gd = mdev->gdma_dev;
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct mana_ib_wq *wq;
	struct ib_wq *ibwq;
	int i;

	mc = gd->driver_data;
	ndev = mc->ports[qp->port - 1];
	mpc = netdev_priv(ndev);

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
			  wq->rx_object);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	return 0;
}

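/* Tear down a raw-packet QP: destroy the hardware SQ object, release the
 * DMA region and pinned umem if present, and unconfigure the vPort.
 */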
static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct gdma_dev *gd = mdev->gdma_dev;
	struct ib_pd *ibpd = qp->ibqp.pd;
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct mana_ib_pd *pd;

	mc = gd->driver_data;
	ndev = mc->ports[qp->port - 1];
	mpc = netdev_priv(ndev);
	pd = container_of(ibpd, struct mana_ib_pd, ibpd);

	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

	if (qp->sq_umem) {
		mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
		ib_umem_release(qp->sq_umem);
	}

	mana_ib_uncfg_vport(mdev, pd, qp->port - 1);

	return 0;
}

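/* Verbs entry point for QP destruction; dispatches on the QP type
 * created by mana_ib_create_qp().
 */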
int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);

	switch (ibqp->qp_type) {
	case IB_QPT_RAW_PACKET:
		if (ibqp->rwq_ind_tbl)
			return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl,
						      udata);

		return mana_ib_destroy_qp_raw(qp, udata);

	default:
		ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
			  ibqp->qp_type);
	}

	return -ENOENT;
}