// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <net/mana/gdma.h>
#include <net/mana/hw_channel.h>

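/* Reserve a free message index for an in-flight HWC request. The semaphore
 * is initialized to the number of inflight-message slots, so once down()
 * returns, find_first_zero_bit() should always find a free bit here.
 */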
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;
	u32 index;

	down(&hwc->sema);

	spin_lock_irqsave(&r->lock, flags);

	index = find_first_zero_bit(hwc->inflight_msg_res.map,
				    hwc->inflight_msg_res.size);

	bitmap_set(hwc->inflight_msg_res.map, index, 1);

	spin_unlock_irqrestore(&r->lock, flags);

	*msg_id = index;

	return 0;
}

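/* Release a message index back to the inflight-message bitmap and wake up
 * one waiter, if any, blocked in mana_hwc_get_msg_index().
 */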
static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	up(&hwc->sema);
}

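/* Sanity-check a response: it must be at least a full gdma_resp_hdr and
 * must fit in the caller's output buffer.
 */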
static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
				    const struct gdma_resp_hdr *resp_msg,
				    u32 resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return -EPROTO;

	if (resp_len > caller_ctx->output_buflen)
		return -EPROTO;

	return 0;
}

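/* Look up the caller context by the message ID echoed back by the hardware,
 * record the status code, copy the response into the caller's output buffer,
 * and wake up the caller waiting in mana_hwc_send_request().
 */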
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
				 const struct gdma_resp_hdr *resp_msg)
{
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
		      hwc->inflight_msg_res.map)) {
		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
			resp_msg->response.hwc_msg_id);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;

	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;
	complete(&ctx->comp_event);
}

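/* Post (or repost) one receive buffer on the HWC RQ so the hardware has
 * somewhere to deliver the next response message.
 */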
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
				struct hwc_work_request *req)
{
	struct device *dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
	return err;
}

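/* EQ callback used during channel bring-up: the SoC reports the IDs of the
 * queues it set up, the negotiated limits, and finally INIT_DONE, which
 * completes hwc_init_eqe_comp. It also handles later SOC_RECONFIG_DATA
 * events, such as a new HWC timeout value.
 */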
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
					struct gdma_event *event)
{
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	u32 type, val;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;

		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (u16)val;
			break;

		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;

		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;

		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;

		case HWC_INIT_DATA_PF_DEST_RQ_ID:
			hwc->pf_dest_vrq_id = val;
			break;

		case HWC_INIT_DATA_PF_DEST_CQ_ID:
			hwc->pf_dest_vrcq_id = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_DATA_CFG_HWC_TIMEOUT:
			hwc->hwc_timeout = val;
			break;

		default:
			dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
			break;
		}

		break;

	default:
		dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
		/* Ignore unknown events, which should never happen. */
		break;
	}
}

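/* CQ callback for the HWC RQ: walk back from the completed WQE to the work
 * request that owns the receive buffer, dispatch the response to the waiting
 * caller, then repost the buffer to the hardware.
 */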
static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	u64 rq_base_addr;
	u64 rx_req_idx;
	u8 *wqe;

	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
		return;

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Select the RX work request for virtual address and for reposting. */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
			resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);

	/* Do not use 'resp' beyond this point, because the buffer is posted
	 * back to the HW in mana_hwc_post_rx_wqe() below.
	 */
	resp = NULL;

	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}

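/* CQ callback for the HWC SQ: send buffer slots are recycled only when the
 * caller releases its message ID, so nothing to do here beyond sanity checks.
 */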
static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
}

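/* Thin wrappers around mana_gd_create_hwc_queue() for the three queue types
 * the HWC needs: SQ/RQ, CQ, and EQ.
 */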
static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
				   enum gdma_queue_type type, u64 queue_size,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return -EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_cq_callback *cb,
				   struct gdma_queue *parent_eq,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_eq_callback *cb,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

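/* Completion handler shared by the HWC SQ and RQ: drain up to queue_depth
 * completions, fan each one out to the TX or RX handler, then re-arm the CQ.
 */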
static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
						completions[i].wq_num,
						&comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
						completions[i].wq_num,
						&comp_data);
	}

	mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	kfree(hwc_cq->comp_buf);

	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

	kfree(hwc_cq);
}

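/* Create the HWC completion path: an EQ, a CQ attached to it, and a scratch
 * buffer sized for q_depth completions. Queue sizes are rounded up to a
 * power of two, with a floor of one page.
 */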
static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
			      gdma_eq_callback *callback, void *ctx,
			      hwc_rx_event_handler_t *rx_ev_hdlr,
			      void *rx_ev_ctx,
			      hwc_tx_event_handler_t *tx_ev_hdlr,
			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	u32 eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
	if (!hwc_cq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
				      eq, &cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
	if (!comp_buf) {
		err = -ENOMEM;
		goto out;
	}

	hwc_cq->hwc = hwc;
	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}

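/* Allocate one contiguous DMA region and carve it into q_depth fixed-size
 * message buffers, one per work request.
 */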
static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
				  u32 max_msg_size,
				  struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	void *virt_addr;
	u32 buf_size;
	u8 *base_pa;
	int err;
	u16 i;

	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	dma_buf->num_reqs = q_depth;

	buf_size = PAGE_ALIGN(q_depth * max_msg_size);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (u8 *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];

		hwc_wr->buf_va = virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	kfree(dma_buf);
	return err;
}

static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
				     struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;

	mana_gd_free_memory(&dma_buf->mem_info);

	kfree(dma_buf);
}

static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
				struct hwc_wq *hwc_wq)
{
	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
				      hwc_wq->gdma_wq);

	kfree(hwc_wq);
}

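/* Create an HWC work queue (SQ or RQ) plus the DMA message buffers its
 * work requests point at.
 */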
static int mana_hwc_create_wq(struct hw_channel_context *hwc,
			      enum gdma_queue_type q_type, u16 q_depth,
			      u32 max_msg_size, struct hwc_cq *hwc_cq,
			      struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	u32 queue_size;
	int err;

	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
	if (!hwc_wq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
				     &hwc_wq->msg_buf);
	if (err)
		goto out;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	if (err)
		mana_hwc_destroy_wq(hwc, hwc_wq);
	return err;
}

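/* Build the inline TX OOB and the single SGE for a request message and post
 * it on the HWC SQ. In the PF case, the caller passes the pf_dest_vrq_id /
 * pf_dest_vrcq_id values learned at init time as the destination queues.
 */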
static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
				struct hwc_work_request *req,
				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
				bool dest_pf)
{
	struct device *dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
			req->msg_size, req->buf_len);
		return -EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
	return err;
}

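/* Size the semaphore and the bitmap that together bound the number of
 * in-flight HWC messages to num_msg.
 */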
static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
				      u16 num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg);

	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
	if (err)
		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
	return err;
}

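/* Prime the RQ with all receive buffers, allocate the per-message caller
 * contexts, and verify that the EQ delivers interrupts before the channel
 * is put to use.
 */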
static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
				 u32 max_req_msg_size, u32 max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

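/* Hand the queue addresses to the SoC over the shared-memory channel and
 * wait for the INIT_DONE EQE; the negotiated queue depth and message size
 * limits are reported back through mana_hwc_init_event_handler().
 */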
static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
				      u32 *max_req_msg_size,
				      u32 *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
				 eq->mem_info.dma_handle,
				 cq->mem_info.dma_handle,
				 rq->mem_info.dma_handle,
				 sq->mem_info.dma_handle,
				 eq->eq.msix_index);
	if (err)
		return err;

	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
		return -ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	/* Both were set in mana_hwc_init_event_handler(). */
	if (WARN_ON(cq->id >= gc->max_num_cqs))
		return -EPROTO;

	gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(struct gdma_queue *));
	if (!gc->cq_table)
		return -ENOMEM;

	gc->cq_table[cq->id] = cq;

	return 0;
}

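/* Create the CQ (shared by the SQ and RQ, hence 2 * q_depth) and the two
 * work queues. This only builds the data structures; the device is not
 * touched until mana_hwc_establish_channel().
 */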
static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
				u32 max_req_msg_size, u32 max_resp_msg_size)
{
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
	 * queue depth and RQ queue depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
				 mana_hwc_init_event_handler, hwc,
				 mana_hwc_rx_event_handler, hwc,
				 mana_hwc_tx_event_handler, hwc, &hwc->cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
				 hwc->cq, &hwc->rxq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
				 hwc->cq, &hwc->txq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	/* mana_hwc_create_channel() will do the cleanup. */
	return err;
}

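/* Bring up the HW channel: build the queues, establish the channel with the
 * SoC, and run a quick self-test. On any failure, the partially constructed
 * channel is torn down.
 */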
int mana_hwc_create_channel(struct gdma_context *gc)
{
	u32 max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	u16 q_depth_max;
	int err;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return -ENOMEM;

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;
	hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	/* mana_hwc_init_queues() only creates the required data structures,
	 * and doesn't touch the HWC device.
	 */
	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				   HW_CHANNEL_MAX_REQUEST_SIZE,
				   HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
					 &max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				    max_req_msg_size, max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
		goto out;
	}

	return 0;
out:
	mana_hwc_destroy_channel(gc);
	return err;
}

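/* Tear down the HW channel in the reverse order of creation. Safe to call
 * on a partially constructed channel.
 */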
void mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	if (!hwc)
		return;

	/* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
	 * non-zero, the HWC worked and we should tear down the HWC here.
	 */
	if (gc->max_num_cqs > 0) {
		mana_smc_teardown_hwc(&gc->shm_channel, false);
		gc->max_num_cqs = 0;
	}

	kfree(hwc->caller_ctx);
	hwc->caller_ctx = NULL;

	if (hwc->txq)
		mana_hwc_destroy_wq(hwc, hwc->txq);

	if (hwc->rxq)
		mana_hwc_destroy_wq(hwc, hwc->rxq);

	if (hwc->cq)
		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

	mana_gd_free_res_map(&hwc->inflight_msg_res);

	hwc->num_inflight_msg = 0;

	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
	hwc->gdma_dev->pdid = INVALID_PDID;

	hwc->hwc_timeout = 0;

	kfree(hwc);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;

	vfree(gc->cq_table);
	gc->cq_table = NULL;
}

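/* Send a request message over the HWC and synchronously wait for the
 * response, which is copied into 'resp' by mana_hwc_handle_resp(). May
 * block both on a free message slot and on the response itself.
 */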
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	u32 dest_vrcq = 0;
	u32 dest_vrq = 0;
	u16 msg_id;
	int err;

	mana_hwc_get_msg_index(hwc, &msg_id);

	tx_wr = &txq->msg_buf->reqs[msg_id];

	if (req_len > tx_wr->buf_len) {
		dev_err(hwc->dev, "HWC: req msg size: %u > %u\n", req_len,
			tx_wr->buf_len);
		err = -EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
	if (req)
		memcpy(req_msg, req, req_len);

	req_msg->req.hwc_msg_id = msg_id;

	tx_wr->msg_size = req_len;

	if (gc->is_pf) {
		dest_vrq = hwc->pf_dest_vrq_id;
		dest_vrcq = hwc->pf_dest_vrcq_id;
	}

	err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
	if (err) {
		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	/* hwc_timeout is in milliseconds; msecs_to_jiffies() already converts
	 * to jiffies, so it must not be multiplied by HZ again.
	 */
	if (!wait_for_completion_timeout(&ctx->comp_event,
					 msecs_to_jiffies(hwc->hwc_timeout))) {
		dev_err(hwc->dev, "HWC: Request timed out!\n");
		err = -ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
			ctx->status_code);
		err = -EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);
	return err;
}