/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

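/* The SQ and RQ flush lists live on the send and receive CQs, so both
 * CQ flush locks must be held while a QP is added to or removed from
 * them.  Locks are always taken in scq-then-rcq order; when one CQ
 * serves both directions only the scq lock is really taken, and the
 * __acquire()/__release() stubs keep sparse's lock balance happy.
 */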
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s: Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

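/* Note on NQE validity: NQE_CMP_VALID() compares an entry's toggle-valid
 * bit against the current pass over the ring (derived from raw_cons and
 * max_elements), so stale entries left over from the previous wrap are
 * rejected without the ring memory ever being zeroed.
 */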
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		raw_cons++;
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait until all NQEs generated for this CQ have been seen, cleaning
 * out any NQEs still associated with the CQ on each retry.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

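/* NQ bottom half: drain up to nq->budget notification entries, fan CQ
 * and SRQ events out to the registered handlers, and re-arm the NQ
 * doorbell only if at least one entry was consumed.
 */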
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	int num_srqne_processed = 0;
	int num_cqne_processed = 0;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (!nq->cqn_handler(nq, cq))
				num_cqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (!nq->srqn_handler(nq, srq, nqsrqe->event))
				num_srqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	}
	spin_unlock_bh(&hwq->lock);
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	tasklet_disable(&nq->nq_tasklet);
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	if (kill)
		tasklet_kill(&nq->nq_tasklet);

	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;
	nq->requested = false;
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

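/* need_init distinguishes first-time setup from a restart: the tasklet
 * is created with tasklet_setup() only once, and merely re-enabled when
 * the IRQ is being brought back up after bnxt_qplib_nq_stop_irq().
 */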
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;
	int rc = 0;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "NQ BAR region %d resource start is 0!\n",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "NQ BAR region %d mapping failed\n",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;
fail:
	return rc;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Use a workqueue to schedule CQ notifiers for the post-send case */
	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed\n");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_srq req;
	struct creq_destroy_srq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
					  (struct creq_base *)&resp, NULL, 0);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp;
	struct cmdq_create_srq req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}

	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);
exit:
	return rc;
}

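/* The SRQ fill level is the ring occupancy, (prod - cons) modulo the
 * ring size; the two-armed expression below avoids a negative value on
 * wrap-around.  The doorbell is armed right away only while the level
 * is above the threshold; otherwise arming is deferred to the next
 * bnxt_qplib_post_srq_recv().
 */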
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 sw_prod, sw_cons, count = 0;

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);

	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_srq req;
	struct creq_query_srq_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_srq_resp_sb *sb;
	u16 cmd_flags = 0;
	int rc = 0;

	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);

	/* Configure the request */
	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf->sb;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	srq->threshold = le16_to_cpu(sb->srq_limit);
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 sw_prod, sw_cons, count = 0;
	int i, rc = 0, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		rc = -EINVAL;
		spin_unlock(&srq_hwq->lock);
		goto done;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	srq_hwq->prod++;

	spin_lock(&srq_hwq->lock);
	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	/* The lock is taken only to get a consistent read of
	 * srq_hwq->cons; srq_hwq->prod was already advanced above.
	 */
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}
done:
	return rc;
}

/* QP */

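/* Build the software WQE tracking array as a circular list: each entry
 * points at the next via next_idx.  swq_start is the next index to hand
 * out to a new WQE, while swq_last trails it as entries are consumed
 * (by the CQ poll path, as this driver uses them).
 */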
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int rc = 0;
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq) {
		rc = -ENOMEM;
		goto out;
	}

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;
out:
	return rc;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct creq_create_qp1_resp resp;
	struct cmdq_create_qp1 req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}

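/* The PSN search entries live in the aux area that was laid out after
 * the SQ slots by bnxt_qplib_alloc_init_hwq().  The first entry is not
 * necessarily page aligned, so record how many entry-sized slots of
 * padding precede it (pad_pgofft) along with the aux page table
 * (pad_pg) and the entry stride, for bnxt_qplib_pull_psn_buff() to
 * index into later.
 */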
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct creq_create_qp_resp resp;
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	struct cmdq_create_qp req;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR: default the path_mtu to 2048 if the
		 * caller did not request one
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_modify_qp req;
	struct creq_modify_qp_resp resp;
	u16 cmd_flags = 0, pkey;
	u32 temp32[4];
	u32 bmask;
	int rc;

	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
					 qp->pkey_index, &pkey))
			req.pkey = cpu_to_le16(pkey);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, ETH_ALEN);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu = qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_qp req;
	struct creq_query_qp_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_qp_resp_sb *sb;
	u16 cmd_flags = 0;
	u32 temp32[4];
	int i, rc = 0;

	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
				  true : false;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, ETH_ALEN);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, ETH_ALEN);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}

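/* Walk every element of the CQ ring and zero out the qp_handle of any
 * valid CQE that belongs to the departing QP, so a later poll cannot
 * hand completions to a QP that no longer exists.
 */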
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_qp req;
	struct creq_destroy_qp_resp resp;
	u16 cmd_flags = 0;
	u32 tbl_indx;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);

	req.qp_cid = cpu_to_le32(qp->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);

}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

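/* Write the PSN search entry for a send WQE: the starting/next PSN plus
 * the opcode.  Gen P5 chips use the extended format, which additionally
 * records the WQE's starting slot index.
 */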
static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

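/* Copy inline payload directly into the WQE: the scatter list data is
 * packed into successive 16-byte SQ slots (struct sq_sge sized chunks),
 * pulling a fresh slot whenever the current one fills.  Returns the
 * total byte count, or -ENOMEM if it exceeds the QP's max_inline_data.
 */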
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
					(sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

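/* WQE sizing: one slot is 16 bytes (sizeof(struct sq_sge)).  A WQE is
 * the base header plus one slot per SGE, or the aligned inline length.
 * In static WQE mode every WQE occupies a fixed 8 slots (128 bytes)
 * regardless of its actual size.
 */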
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* sq_send_hdr is a misnomer here; the RQ WQE header is the same size. */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

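/* Locate this WQE's PSN search entry inside the padded aux pages set up
 * by bnxt_qplib_init_psn_ptr(): the WQE index (slot_idx / max_slot),
 * offset by pad_pgofft, is split into a page number and an in-page
 * index at pad_stride granularity.
 */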
static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}

int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, qdf = 0;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;
	u32 wqe_idx;
	u32 slots;
	u16 idx;

	hwq = &sq->hwq;
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) is in the 0x%x state\n",
			qp->id, qp->state);
1732		rc = -EINVAL;
1733		goto done;
1734	}
1735
1736	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1737	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1738		dev_err(&hwq->pdev->dev,
1739			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1740			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1741		rc = -ENOMEM;
1742		goto done;
1743	}
1744
1745	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1746	bnxt_qplib_pull_psn_buff(sq, swq);
1747
1748	idx = 0;
1749	swq->slot_idx = hwq->prod;
1750	swq->slots = slots;
1751	swq->wr_id = wqe->wr_id;
1752	swq->type = wqe->type;
1753	swq->flags = wqe->flags;
1754	swq->start_psn = sq->psn & BTH_PSN_MASK;
1755	if (qp->sig_type)
1756		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1757
1758	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1759		sch_handler = true;
1760		dev_dbg(&hwq->pdev->dev,
1761			"%s Error QP. Scheduling for poll_cq\n", __func__);
1762		goto queue_err;
1763	}
1764
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;
	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
			/* Assemble info for Raw Ethertype QPs */

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
					SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		ext_sqe->va = cpu_to_le64(wqe->frmr.va);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
		struct sq_bind_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		ext_sqe->va = cpu_to_le64(wqe->bind.va);
		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	swq->next_psn = sq->psn & BTH_PSN_MASK;
	bnxt_qplib_fill_psn_search(qp, wqe, swq);
queue_err:
	bnxt_qplib_swq_mod_start(sq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
	qp->wqe_cnt++;
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}

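/*
 * Ring the RQ producer doorbell so the HW sees all receive WQEs posted
 * since the last doorbell.  Callers typically post a batch of WQEs with
 * bnxt_qplib_post_recv() and then ring the doorbell once.
 */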
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}

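/*
 * Post a single receive WQE to the RQ.  If the QP is in the error state
 * the WQE is only book-kept in the software queue and a work item is
 * scheduled so that poll_cq can flush it back with a FLUSHED_ERR
 * completion.  The caller is expected to ring the RQ doorbell via
 * bnxt_qplib_post_recv_db() after posting.
 */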
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe_hdr *base_hdr;
	struct rq_ext_hdr *ext_hdr;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, idx;
	u32 wqe_idx;
	int rc = 0;

	hwq = &rq->hwq;
	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) is in the 0x%x state\n",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
	swq->wr_id = wqe->wr_id;
	swq->slots = rq->dbinfo.max_slot;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	idx = 0;
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
	if (!wqe->num_sge) {
		struct sq_sge *sge;

		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
		sge->size = 0;
		wqe_sz++;
	}
	base_hdr->wqe_type = wqe->type;
	base_hdr->flags = wqe->flags;
	base_hdr->wqe_size = wqe_sz;
	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
	bnxt_qplib_swq_mod_start(rq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}

/* CQ */
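/*
 * Allocate the CQE ring in host memory and issue CREATE_CQ to the
 * firmware.  On success the CQ id, doorbell info and flush lists are
 * initialized and the CQ is armed for completion notifications
 * (ARMENA).  On firmware failure the HW queue is freed again.
 */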
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_cq_resp resp;
	struct bnxt_qplib_pbl *pbl;
	struct cmdq_create_cq req;
	u16 cmd_flags = 0;
	u32 pg_sz_lvl;
	int rc;

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		return -EINVAL;
	}

	hwq_attr.res = res;
	hwq_attr.depth = cq->max_wqe;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	hwq_attr.sginfo = &cq->sg_info;
	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
	if (rc)
		return rc;

	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);

	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);
	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	cq->dbinfo.hwq = &cq->hwq;
	cq->dbinfo.xid = cq->id;
	cq->dbinfo.db = cq->dpi->dbr;
	cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;

	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);

	return 0;

fail:
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return rc;
}

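/*
 * Issue DESTROY_CQ to the firmware and wait until all outstanding CNQ
 * events for this CQ have been processed before freeing the CQE ring,
 * so that no NQ handler can touch freed memory.
 */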
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_cq req;
	struct creq_destroy_cq_resp resp;
	u16 total_cnq_events;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);

	req.cq_cid = cpu_to_le32(cq->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
	__wait_for_all_nqes(cq, total_cnq_events);
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return 0;
}

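/*
 * Generate FLUSHED_ERR completions for every outstanding SQE between
 * swq_start and swq_last, skipping internal fence WQEs.  Stops early
 * and returns -EAGAIN when the caller's CQE budget runs out.
 */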
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	start = sq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = sq->swq_last;
		if (start == last)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[last].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[last].type;
		cqe++;
		(*budget)--;
skip_compl:
		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
		sq->swq_last = sq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && sq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

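/*
 * Generate FLUSHED_ERR completions for every outstanding RQE.  The
 * fabricated CQE opcode depends on the QP type so that the consumer
 * sees the same opcode a real receive completion would have carried.
 */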
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int opcode = 0;
	int rc = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	start = rq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = rq->swq_last;
		if (last == start)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[last].wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
		rq->swq_last = rq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!*budget && rq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}

/* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive).
 *       CQEs are tracked from sw_cq_cons to max_element but are valid
 *       only if VALID = 1.
 */
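/*
 * Workaround for HW issue 9060: a fence WQE may complete before the
 * "phantom" CQE the HW emits for it.  When the WQE's psn_search entry
 * is still marked (top bit of flags_next_psn), unmark it, note that a
 * phantom CQE is expected, re-arm the CQ and back off with -EAGAIN.
 * While sq->condition is set, peek ahead in the CQ for a valid REQ CQE
 * whose consumer index points back at the fence WRID; only then resume
 * normal (single-step) completion processing.
 */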
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Normal mode */
	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
				     & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_raw_cq_cons = cq->hwq.cons;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
					  cq->hwq.max_elements)) {
				/*
				 * The valid test of the entry must be done
				 * first before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				    CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 *  Unbreak only if the phantom
						 *  comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			peek_sw_cq_cons++;
			peek_raw_cq_cons++;
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}

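/*
 * Process a REQ (send-side) CQE.  Because the HW aggregates successful
 * completions, a single CQE may cover several SWQEs; walk the software
 * queue from the current consumer up to the index reported by the CQE
 * and fabricate one completion per signaled SWQE.  An error status
 * moves the QP to the error state and queues it for flushing.
 */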
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	u32 cqe_sq_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* We need to walk the sq's swq and fabricate CQEs for all
	 * previously signaled SWQEs (the HW aggregates CQEs) from the
	 * current sq cons up to cqe_sq_cons.
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status.  For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
				      cqe_sq_cons)) {
				*lib_qp = qp;
				goto out;
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}

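/*
 * Return a consumed SRQ entry to the free list by linking it in as the
 * new tail and bumping the consumer count under the SRQ lock.
 */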
static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	srq->hwq.cons++; /* Support for SRQE counter */
	spin_unlock(&srq->hwq.lock);
}

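/*
 * Process an RC responder CQE.  The wr_id index selects either an SRQ
 * entry (released back to the SRQ) or the RQ SWQE at the current
 * consumer, which must match the index reported by the HW.  A bad
 * status moves the QP to the error state and queues it for flushing.
 */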
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}

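/*
 * Process a UD responder CQE.  Same flow as the RC variant, but this
 * also recovers the source QP number, source MAC and CFA metadata that
 * the HW reports for UD traffic.
 */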
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/* FIXME: Endianness fix needed for smac */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
				  ((le32_to_cpu(
				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}
done:
	return rc;
}

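/*
 * Return true when the CQE at the current consumer index has not been
 * marked valid by the HW, i.e. there is nothing left to poll.
 */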
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	u32 sw_cons, raw_cons;
	bool rc = true;

	raw_cons = cq->hwq.cons;
	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
	return rc;
}

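/*
 * Process a raw-Ethernet/QP1 responder CQE.  In addition to the usual
 * SRQ/RQ bookkeeping this copies out the raweth flags and metadata the
 * consumer needs to interpret the received frame.
 */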
static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, ETH_ALEN);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}

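/*
 * Process a terminal CQE: the QP is moved to the error state.  Any
 * successful completions the HW aggregated before the terminal event
 * are completed with status OK up to the reported SQ consumer index
 * (0xFFFF means no index was reported), after which the QP is queued
 * for flushing so the remaining SQ and RQ entries complete with
 * FLUSHED_ERR.
 */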
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp)
		return -EINVAL;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		rc = -EINVAL;
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod regardless of what
	 * rq->cons the terminal CQE indicates
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}

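/*
 * Process a cut-off CQE, which signals that a CQ resize has completed:
 * clear the resize-in-progress flag and wake up the waiter.
 */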
static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}

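/*
 * Drain the flush lists of this CQ: fabricate FLUSHED_ERR completions
 * for every QP queued on the SQ and RQ flush lists, up to num_cqes
 * entries.  Returns the number of CQEs actually filled in.
 */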
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	int budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}

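/*
 * Poll up to num_cqes completions from the CQ into the caller-supplied
 * cqe array.  Dispatches on the CQE type, stops early when a CQE is
 * not yet valid or the budget is exhausted, and rings the CQ doorbell
 * if the consumer index moved.  Returns the number of CQEs harvested.
 *
 * A typical caller loop (simplified sketch; the ib_verbs glue in the
 * driver does more bookkeeping around this):
 *
 *	struct bnxt_qplib_qp *lib_qp = NULL;
 *	int n = bnxt_qplib_poll_cq(cq, cqes, budget, &lib_qp);
 *	// lib_qp is set when WA 9060 requires the SQ to be re-polled
 */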
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	u32 sw_cons, raw_cons;
	int budget, rc = 0;

	raw_cons = cq->hwq.cons;
	budget = num_cqes;

	while (budget) {
		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		/* From the device's respective CQE format to qplib_wc */
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       sw_cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%x\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			dev_err(&cq->hwq.pdev->dev,
				"process_cqe error rc = 0x%x\n", rc);
		}
		raw_cons++;
	}
	if (cq->hwq.cons != raw_cons) {
		cq->hwq.cons = raw_cons;
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
	}
exit:
	return num_cqes - budget;
}

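/*
 * Arm the CQ with the requested doorbell type and record the armed
 * state so the NQ handler knows to invoke the completion handler.
 */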
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Using cq->arm_state variable to track whether to issue cq handler */
	atomic_set(&cq->arm_state, 1);
}

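/*
 * Wait for any scheduled CQ notification work for this QP's send and
 * receive CQs to finish, so that no handler runs after the QP is torn
 * down.
 */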
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}
