1/*******************************************************************************
2*
3* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
4*
5* This software is available to you under a choice of one of two
6* licenses.  You may choose to be licensed under the terms of the GNU
7* General Public License (GPL) Version 2, available from the file
8* COPYING in the main directory of this source tree, or the
9* OpenFabrics.org BSD license below:
10*
11*   Redistribution and use in source and binary forms, with or
12*   without modification, are permitted provided that the following
13*   conditions are met:
14*
15*    - Redistributions of source code must retain the above
16*	copyright notice, this list of conditions and the following
17*	disclaimer.
18*
19*    - Redistributions in binary form must reproduce the above
20*	copyright notice, this list of conditions and the following
21*	disclaimer in the documentation and/or other materials
22*	provided with the distribution.
23*
24* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31* SOFTWARE.
32*
33*******************************************************************************/
34
35#include "i40iw_osdep.h"
36#include "i40iw_register.h"
37#include "i40iw_status.h"
38#include "i40iw_hmc.h"
39
40#include "i40iw_d.h"
41#include "i40iw_type.h"
42#include "i40iw_p.h"
43#include "i40iw_vf.h"
44#include "i40iw_virtchnl.h"
45
46/**
47 * i40iw_insert_wqe_hdr - write wqe header
48 * @wqe: cqp wqe for header
49 * @header: header for the cqp wqe
50 */
51void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
52{
53	wmb();            /* make sure WQE is populated before polarity is set */
54	set_64bit_val(wqe, 24, header);
55}
56
57void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev)
58{
59	if (cqp_timeout->compl_cqp_cmds != dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]) {
60		cqp_timeout->compl_cqp_cmds = dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS];
61		cqp_timeout->count = 0;
62	} else {
63		if (dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] != cqp_timeout->compl_cqp_cmds)
64			cqp_timeout->count++;
65	}
66}
67
68/**
69 * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
70 * @cqp: struct for cqp hw
71 * @val: cqp tail register value
72 * @tail:wqtail register value
73 * @error: cqp processing err
74 */
75static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
76					  u32 *val,
77					  u32 *tail,
78					  u32 *error)
79{
80	if (cqp->dev->is_pf) {
81		*val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
82		*tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
83		*error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
84	} else {
85		*val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
86		*tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
87		*error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
88	}
89}
90
91/**
92 * i40iw_cqp_poll_registers - poll cqp registers
93 * @cqp: struct for cqp hw
94 * @tail:wqtail register value
95 * @count: how many times to try for completion
96 */
97static enum i40iw_status_code i40iw_cqp_poll_registers(
98						struct i40iw_sc_cqp *cqp,
99						u32 tail,
100						u32 count)
101{
102	u32 i = 0;
103	u32 newtail, error, val;
104
105	while (i < count) {
106		i++;
107		i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
108		if (error) {
109			error = (cqp->dev->is_pf) ?
110				 i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
111				 i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
112			return I40IW_ERR_CQP_COMPL_ERROR;
113		}
114		if (newtail != tail) {
115			/* SUCCESS */
116			I40IW_RING_MOVE_TAIL(cqp->sq_ring);
117			cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
118			return 0;
119		}
120		udelay(I40IW_SLEEP_COUNT);
121	}
122	return I40IW_ERR_TIMEOUT;
123}
124
125/**
126 * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
127 * @buf: ptr to fpm commit buffer
128 * @info: ptr to i40iw_hmc_obj_info struct
129 * @sd: number of SDs for HMC objects
130 *
131 * parses fpm commit info and copy base value
132 * of hmc objects in hmc_info
133 */
134static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
135				u64 *buf,
136				struct i40iw_hmc_obj_info *info,
137				u32 *sd)
138{
139	u64 temp;
140	u64 size;
141	u64 base = 0;
142	u32 i, j;
143	u32 k = 0;
144
145	/* copy base values in obj_info */
146	for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
147		if ((i == I40IW_HMC_IW_SRQ) ||
148			(i == I40IW_HMC_IW_FSIMC) ||
149			(i == I40IW_HMC_IW_FSIAV)) {
150			info[i].base = 0;
151			info[i].cnt = 0;
152			continue;
153		}
154		get_64bit_val(buf, j, &temp);
155		info[i].base = RS_64_1(temp, 32) * 512;
156		if (info[i].base > base) {
157			base = info[i].base;
158			k = i;
159		}
160		if (i == I40IW_HMC_IW_APBVT_ENTRY) {
161			info[i].cnt = 1;
162			continue;
163		}
164		if (i == I40IW_HMC_IW_QP)
165			info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
166		else if (i == I40IW_HMC_IW_CQ)
167			info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
168		else
169			info[i].cnt = (u32)(temp);
170	}
171	size = info[k].cnt * info[k].size + info[k].base;
172	if (size & 0x1FFFFF)
173		*sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
174	else
175		*sd = (u32)(size >> 21);
176
177	return 0;
178}
179
180/**
181 * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
182 * @buf: ptr to fpm query buffer
183 * @buf_idx: index into buf
184 * @info: ptr to i40iw_hmc_obj_info struct
185 * @rsrc_idx: resource index into info
186 *
187 * Decode a 64 bit value from fpm query buffer into max count and size
188 */
189static u64 i40iw_sc_decode_fpm_query(u64 *buf,
190					    u32 buf_idx,
191					    struct i40iw_hmc_obj_info *obj_info,
192					    u32 rsrc_idx)
193{
194	u64 temp;
195	u32 size;
196
197	get_64bit_val(buf, buf_idx, &temp);
198	obj_info[rsrc_idx].max_cnt = (u32)temp;
199	size = (u32)RS_64_1(temp, 32);
200	obj_info[rsrc_idx].size = LS_64_1(1, size);
201
202	return temp;
203}
204
205/**
206 * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
207 * @buf: ptr to fpm query buffer
208 * @info: ptr to i40iw_hmc_obj_info struct
209 * @hmc_fpm_misc: ptr to fpm data
210 *
211 * parses fpm query buffer and copy max_cnt and
212 * size value of hmc objects in hmc_info
213 */
214static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
215				u64 *buf,
216				struct i40iw_hmc_info *hmc_info,
217				struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
218{
219	struct i40iw_hmc_obj_info *obj_info;
220	u64 temp;
221	u32 size;
222	u16 max_pe_sds;
223
224	obj_info = hmc_info->hmc_obj;
225
226	get_64bit_val(buf, 0, &temp);
227	hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
228	max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);
229
230	/* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
231	if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
232		max_pe_sds--;
233	hmc_fpm_misc->max_sds = max_pe_sds;
234	hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
235
236	get_64bit_val(buf, 8, &temp);
237	obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
238	size = (u32)RS_64_1(temp, 32);
239	obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);
240
241	get_64bit_val(buf, 16, &temp);
242	obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
243	size = (u32)RS_64_1(temp, 32);
244	obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);
245
246	i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
247	i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);
248
249	obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
250	obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
251
252	i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
253	i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);
254
255	get_64bit_val(buf, 64, &temp);
256	obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
257	obj_info[I40IW_HMC_IW_XFFL].size = 4;
258	hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
259	if (!hmc_fpm_misc->xf_block_size)
260		return I40IW_ERR_INVALID_SIZE;
261
262	i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);
263
264	get_64bit_val(buf, 80, &temp);
265	obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
266	obj_info[I40IW_HMC_IW_Q1FL].size = 4;
267	hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
268	if (!hmc_fpm_misc->q1_block_size)
269		return I40IW_ERR_INVALID_SIZE;
270
271	i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);
272
273	get_64bit_val(buf, 112, &temp);
274	obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
275	obj_info[I40IW_HMC_IW_PBLE].size = 8;
276
277	get_64bit_val(buf, 120, &temp);
278	hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
279	hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
280	hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
281
282	return 0;
283}
284
285/**
286 * i40iw_fill_qos_list - Change all unknown qs handles to available ones
287 * @qs_list: list of qs_handles to be fixed with valid qs_handles
288 */
289static void i40iw_fill_qos_list(u16 *qs_list)
290{
291	u16 qshandle = qs_list[0];
292	int i;
293
294	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
295		if (qs_list[i] == QS_HANDLE_UNKNOWN)
296			qs_list[i] = qshandle;
297		else
298			qshandle = qs_list[i];
299	}
300}
301
302/**
303 * i40iw_qp_from_entry - Given entry, get to the qp structure
304 * @entry: Points to list of qp structure
305 */
306static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)
307{
308	if (!entry)
309		return NULL;
310
311	return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));
312}
313
314/**
315 * i40iw_get_qp - get the next qp from the list given current qp
316 * @head: Listhead of qp's
317 * @qp: current qp
318 */
319static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)
320{
321	struct list_head *entry = NULL;
322	struct list_head *lastentry;
323
324	if (list_empty(head))
325		return NULL;
326
327	if (!qp) {
328		entry = head->next;
329	} else {
330		lastentry = &qp->list;
331		entry = (lastentry != head) ? lastentry->next : NULL;
332	}
333
334	return i40iw_qp_from_entry(entry);
335}
336
337/**
338 * i40iw_change_l2params - given the new l2 parameters, change all qp
339 * @vsi: pointer to the vsi structure
340 * @l2params: New paramaters from l2
341 */
342void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params)
343{
344	struct i40iw_sc_dev *dev = vsi->dev;
345	struct i40iw_sc_qp *qp = NULL;
346	bool qs_handle_change = false;
347	unsigned long flags;
348	u16 qs_handle;
349	int i;
350
351	if (vsi->mtu != l2params->mtu) {
352		vsi->mtu = l2params->mtu;
353		i40iw_reinitialize_ieq(dev);
354	}
355
356	i40iw_fill_qos_list(l2params->qs_handle_list);
357	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
358		qs_handle = l2params->qs_handle_list[i];
359		if (vsi->qos[i].qs_handle != qs_handle)
360			qs_handle_change = true;
361		spin_lock_irqsave(&vsi->qos[i].lock, flags);
362		qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
363		while (qp) {
364			if (qs_handle_change) {
365				qp->qs_handle = qs_handle;
366				/* issue cqp suspend command */
367				i40iw_qp_suspend_resume(dev, qp, true);
368			}
369			qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
370		}
371		spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
372		vsi->qos[i].qs_handle = qs_handle;
373	}
374}
375
376/**
377 * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
378 * @qp: qp to be removed from qos
379 */
380void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
381{
382	struct i40iw_sc_vsi *vsi = qp->vsi;
383	unsigned long flags;
384
385	if (!qp->on_qoslist)
386		return;
387	spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
388	list_del(&qp->list);
389	spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
390}
391
392/**
393 * i40iw_qp_add_qos - called during setctx fot qp to be added to qos
394 * @qp: qp to be added to qos
395 */
396void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
397{
398	struct i40iw_sc_vsi *vsi = qp->vsi;
399	unsigned long flags;
400
401	if (qp->on_qoslist)
402		return;
403	spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
404	qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
405	list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
406	qp->on_qoslist = true;
407	spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
408}
409
410/**
411 * i40iw_sc_pd_init - initialize sc pd struct
412 * @dev: sc device struct
413 * @pd: sc pd ptr
414 * @pd_id: pd_id for allocated pd
415 * @abi_ver: ABI version from user context, -1 if not valid
416 */
417static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
418			     struct i40iw_sc_pd *pd,
419			     u16 pd_id,
420			     int abi_ver)
421{
422	pd->size = sizeof(*pd);
423	pd->pd_id = pd_id;
424	pd->abi_ver = abi_ver;
425	pd->dev = dev;
426}
427
428/**
429 * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
430 * @wqsize: size of the wq (sq, rq, srq) to encoded_size
431 * @cqpsq: encoded size for sq for cqp as its encoded size is 1+ other wq's
432 */
433u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
434{
435	u8 encoded_size = 0;
436
437	/* cqp sq's hw coded value starts from 1 for size of 4
438	 * while it starts from 0 for qp' wq's.
439	 */
440	if (cqpsq)
441		encoded_size = 1;
442	wqsize >>= 2;
443	while (wqsize >>= 1)
444		encoded_size++;
445	return encoded_size;
446}
447
448/**
449 * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
450 * @cqp: IWARP control queue pair pointer
451 * @info: IWARP control queue pair init info pointer
452 *
453 * Initializes the object and context buffers for a control Queue Pair.
454 */
455static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
456						struct i40iw_cqp_init_info *info)
457{
458	u8 hw_sq_size;
459
460	if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
461	    (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
462	    ((info->sq_size & (info->sq_size - 1))))
463		return I40IW_ERR_INVALID_SIZE;
464
465	hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
466	cqp->size = sizeof(*cqp);
467	cqp->sq_size = info->sq_size;
468	cqp->hw_sq_size = hw_sq_size;
469	cqp->sq_base = info->sq;
470	cqp->host_ctx = info->host_ctx;
471	cqp->sq_pa = info->sq_pa;
472	cqp->host_ctx_pa = info->host_ctx_pa;
473	cqp->dev = info->dev;
474	cqp->struct_ver = info->struct_ver;
475	cqp->scratch_array = info->scratch_array;
476	cqp->polarity = 0;
477	cqp->en_datacenter_tcp = info->en_datacenter_tcp;
478	cqp->enabled_vf_count = info->enabled_vf_count;
479	cqp->hmc_profile = info->hmc_profile;
480	info->dev->cqp = cqp;
481
482	I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
483	cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
484	cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
485	INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);               /* for the cqp commands backlog. */
486
487	i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPTAIL, 0);
488	i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, 0);
489
490	i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
491		    "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
492		    __func__, cqp->sq_size, cqp->hw_sq_size,
493		    cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
494	return 0;
495}
496
497/**
498 * i40iw_sc_cqp_create - create cqp during bringup
499 * @cqp: struct for cqp hw
500 * @maj_err: If error, major err number
501 * @min_err: If error, minor err number
502 */
503static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
504						  u16 *maj_err,
505						  u16 *min_err)
506{
507	u64 temp;
508	u32 cnt = 0, p1, p2, val = 0, err_code;
509	enum i40iw_status_code ret_code;
510
511	*maj_err = 0;
512	*min_err = 0;
513
514	ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
515					  &cqp->sdbuf,
516					  I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
517					  I40IW_SD_BUF_ALIGNMENT);
518
519	if (ret_code)
520		goto exit;
521
522	temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
523	       LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);
524
525	set_64bit_val(cqp->host_ctx, 0, temp);
526	set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
527	temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
528	       LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
529	set_64bit_val(cqp->host_ctx, 16, temp);
530	set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
531	set_64bit_val(cqp->host_ctx, 32, 0);
532	set_64bit_val(cqp->host_ctx, 40, 0);
533	set_64bit_val(cqp->host_ctx, 48, 0);
534	set_64bit_val(cqp->host_ctx, 56, 0);
535
536	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
537			cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);
538
539	p1 = RS_32_1(cqp->host_ctx_pa, 32);
540	p2 = (u32)cqp->host_ctx_pa;
541
542	if (cqp->dev->is_pf) {
543		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
544		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
545	} else {
546		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
547		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
548	}
549	do {
550		if (cnt++ > I40IW_DONE_COUNT) {
551			i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
552			ret_code = I40IW_ERR_TIMEOUT;
553			/*
554			 * read PFPE_CQPERRORCODES register to get the minor
555			 * and major error code
556			 */
557			if (cqp->dev->is_pf)
558				err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
559			else
560				err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
561			*min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
562			*maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
563			goto exit;
564		}
565		udelay(I40IW_SLEEP_COUNT);
566		if (cqp->dev->is_pf)
567			val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
568		else
569			val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
570	} while (!val);
571
572exit:
573	if (!ret_code)
574		cqp->process_cqp_sds = i40iw_update_sds_noccq;
575	return ret_code;
576}
577
578/**
579 * i40iw_sc_cqp_post_sq - post of cqp's sq
580 * @cqp: struct for cqp hw
581 */
582void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
583{
584	if (cqp->dev->is_pf)
585		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
586	else
587		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
588
589	i40iw_debug(cqp->dev,
590		    I40IW_DEBUG_WQE,
591		    "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
592		    __func__,
593		    cqp->sq_ring.head,
594		    cqp->sq_ring.tail,
595		    cqp->sq_ring.size);
596}
597
598/**
599 * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
600 * @cqp: pointer to CQP structure
601 * @scratch: private data for CQP WQE
602 * @wqe_idx: WQE index for next WQE on CQP SQ
603 */
604static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
605					       u64 scratch, u32 *wqe_idx)
606{
607	u64 *wqe = NULL;
608	enum i40iw_status_code ret_code;
609
610	if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
611		i40iw_debug(cqp->dev,
612			    I40IW_DEBUG_WQE,
613			    "%s: ring is full head %x tail %x size %x\n",
614			    __func__,
615			    cqp->sq_ring.head,
616			    cqp->sq_ring.tail,
617			    cqp->sq_ring.size);
618		return NULL;
619	}
620	I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
621	cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
622	if (ret_code)
623		return NULL;
624	if (!*wqe_idx)
625		cqp->polarity = !cqp->polarity;
626
627	wqe = cqp->sq_base[*wqe_idx].elem;
628	cqp->scratch_array[*wqe_idx] = scratch;
629	I40IW_CQP_INIT_WQE(wqe);
630
631	return wqe;
632}
633
634/**
635 * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
636 * @cqp: struct for cqp hw
637 * @scratch: private data for CQP WQE
638 */
639u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
640{
641	u32 wqe_idx;
642
643	return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
644}
645
646/**
647 * i40iw_sc_cqp_destroy - destroy cqp during close
648 * @cqp: struct for cqp hw
649 */
650static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
651{
652	u32 cnt = 0, val = 1;
653	enum i40iw_status_code ret_code = 0;
654	u32 cqpstat_addr;
655
656	if (cqp->dev->is_pf) {
657		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
658		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
659		cqpstat_addr = I40E_PFPE_CCQPSTATUS;
660	} else {
661		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
662		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
663		cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
664	}
665	do {
666		if (cnt++ > I40IW_DONE_COUNT) {
667			ret_code = I40IW_ERR_TIMEOUT;
668			break;
669		}
670		udelay(I40IW_SLEEP_COUNT);
671		val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
672	} while (val);
673
674	i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
675	return ret_code;
676}
677
678/**
679 * i40iw_sc_ccq_arm - enable intr for control cq
680 * @ccq: ccq sc struct
681 */
682static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
683{
684	u64 temp_val;
685	u16 sw_cq_sel;
686	u8 arm_next_se;
687	u8 arm_seq_num;
688
689	/* write to cq doorbell shadow area */
690	/* arm next se should always be zero */
691	get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
692
693	sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
694	arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
695
696	arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
697	arm_seq_num++;
698
699	temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
700		   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
701		   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
702		   LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);
703
704	set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
705
706	wmb();       /* make sure shadow area is updated before arming */
707
708	if (ccq->dev->is_pf)
709		i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
710	else
711		i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
712}
713
714/**
715 * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
716 * @ccq: ccq sc struct
717 * @info: completion q entry to return
718 */
719static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
720					struct i40iw_sc_cq *ccq,
721					struct i40iw_ccq_cqe_info *info)
722{
723	u64 qp_ctx, temp, temp1;
724	u64 *cqe;
725	struct i40iw_sc_cqp *cqp;
726	u32 wqe_idx;
727	u8 polarity;
728	enum i40iw_status_code ret_code = 0;
729
730	if (ccq->cq_uk.avoid_mem_cflct)
731		cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
732	else
733		cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);
734
735	get_64bit_val(cqe, 24, &temp);
736	polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
737	if (polarity != ccq->cq_uk.polarity)
738		return I40IW_ERR_QUEUE_EMPTY;
739
740	get_64bit_val(cqe, 8, &qp_ctx);
741	cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
742	info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
743	info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
744	if (info->error) {
745		info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
746		info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
747	}
748	wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
749	info->scratch = cqp->scratch_array[wqe_idx];
750
751	get_64bit_val(cqe, 16, &temp1);
752	info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
753	get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
754	info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
755	info->cqp = cqp;
756
757	/*  move the head for cq */
758	I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
759	if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
760		ccq->cq_uk.polarity ^= 1;
761
762	/* update cq tail in cq shadow memory also */
763	I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
764	set_64bit_val(ccq->cq_uk.shadow_area,
765		      0,
766		      I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
767	wmb(); /* write shadow area before tail */
768	I40IW_RING_MOVE_TAIL(cqp->sq_ring);
769	ccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
770
771	return ret_code;
772}
773
774/**
775 * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
776 * @cqp: struct for cqp hw
777 * @op_code: cqp opcode for completion
778 * @info: completion q entry to return
779 */
780static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
781					struct i40iw_sc_cqp *cqp,
782					u8 op_code,
783					struct i40iw_ccq_cqe_info *compl_info)
784{
785	struct i40iw_ccq_cqe_info info;
786	struct i40iw_sc_cq *ccq;
787	enum i40iw_status_code ret_code = 0;
788	u32 cnt = 0;
789
790	memset(&info, 0, sizeof(info));
791	ccq = cqp->dev->ccq;
792	while (1) {
793		if (cnt++ > I40IW_DONE_COUNT)
794			return I40IW_ERR_TIMEOUT;
795
796		if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
797			udelay(I40IW_SLEEP_COUNT);
798			continue;
799		}
800
801		if (info.error) {
802			ret_code = I40IW_ERR_CQP_COMPL_ERROR;
803			break;
804		}
805		/* check if opcode is cq create */
806		if (op_code != info.op_code) {
807			i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
808				    "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
809				    __func__, op_code, info.op_code);
810		}
811		/* success, exit out of the loop */
812		if (op_code == info.op_code)
813			break;
814	}
815
816	if (compl_info)
817		memcpy(compl_info, &info, sizeof(*compl_info));
818
819	return ret_code;
820}
821
822/**
823 * i40iw_sc_manage_push_page - Handle push page
824 * @cqp: struct for cqp hw
825 * @info: push page info
826 * @scratch: u64 saved to be used during cqp completion
827 * @post_sq: flag for cqp db to ring
828 */
829static enum i40iw_status_code i40iw_sc_manage_push_page(
830				struct i40iw_sc_cqp *cqp,
831				struct i40iw_cqp_manage_push_page_info *info,
832				u64 scratch,
833				bool post_sq)
834{
835	u64 *wqe;
836	u64 header;
837
838	if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
839		return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;
840
841	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
842	if (!wqe)
843		return I40IW_ERR_RING_FULL;
844
845	set_64bit_val(wqe, 16, info->qs_handle);
846
847	header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
848		 LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
849		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
850		 LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);
851
852	i40iw_insert_wqe_hdr(wqe, header);
853
854	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
855			wqe, I40IW_CQP_WQE_SIZE * 8);
856
857	if (post_sq)
858		i40iw_sc_cqp_post_sq(cqp);
859	return 0;
860}
861
862/**
863 * i40iw_sc_manage_hmc_pm_func_table - manage of function table
864 * @cqp: struct for cqp hw
865 * @scratch: u64 saved to be used during cqp completion
866 * @vf_index: vf index for cqp
867 * @free_pm_fcn: function number
868 * @post_sq: flag for cqp db to ring
869 */
870static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
871				struct i40iw_sc_cqp *cqp,
872				u64 scratch,
873				u8 vf_index,
874				bool free_pm_fcn,
875				bool post_sq)
876{
877	u64 *wqe;
878	u64 header;
879
880	if (vf_index >= I40IW_MAX_VF_PER_PF)
881		return I40IW_ERR_INVALID_VF_ID;
882	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
883	if (!wqe)
884		return I40IW_ERR_RING_FULL;
885
886	header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
887		 LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
888		 LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
889		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
890
891	i40iw_insert_wqe_hdr(wqe, header);
892	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
893			wqe, I40IW_CQP_WQE_SIZE * 8);
894	if (post_sq)
895		i40iw_sc_cqp_post_sq(cqp);
896	return 0;
897}
898
899/**
900 * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
901 * @cqp: struct for cqp hw
902 * @scratch: u64 saved to be used during cqp completion
903 * @hmc_profile_type: type of profile to set
904 * @vf_num: vf number for profile
905 * @post_sq: flag for cqp db to ring
906 * @poll_registers: flag to poll register for cqp completion
907 */
908static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
909				struct i40iw_sc_cqp *cqp,
910				u64 scratch,
911				u8 hmc_profile_type,
912				u8 vf_num, bool post_sq,
913				bool poll_registers)
914{
915	u64 *wqe;
916	u64 header;
917	u32 val, tail, error;
918	enum i40iw_status_code ret_code = 0;
919
920	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
921	if (!wqe)
922		return I40IW_ERR_RING_FULL;
923
924	set_64bit_val(wqe, 16,
925		      (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
926				LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));
927
928	header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
929		       LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
930
931	i40iw_insert_wqe_hdr(wqe, header);
932
933	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
934			wqe, I40IW_CQP_WQE_SIZE * 8);
935
936	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
937	if (error)
938		return I40IW_ERR_CQP_COMPL_ERROR;
939
940	if (post_sq) {
941		i40iw_sc_cqp_post_sq(cqp);
942		if (poll_registers)
943			ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
944		else
945			ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
946								 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
947								 NULL);
948	}
949
950	return ret_code;
951}
952
953/**
954 * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
955 * @cqp: struct for cqp hw
956 */
957static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
958{
959	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
960}
961
962/**
963 * i40iw_sc_commit_fpm_values_done - wait for cqp eqe completion for fpm commit
964 * @cqp: struct for cqp hw
965 */
966static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
967{
968	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
969}
970
971/**
972 * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
973 * @cqp: struct for cqp hw
974 * @scratch: u64 saved to be used during cqp completion
975 * @hmc_fn_id: hmc function id
976 * @commit_fpm_mem; Memory for fpm values
977 * @post_sq: flag for cqp db to ring
978 * @wait_type: poll ccq or cqp registers for cqp completion
979 */
980static enum i40iw_status_code i40iw_sc_commit_fpm_values(
981					struct i40iw_sc_cqp *cqp,
982					u64 scratch,
983					u8 hmc_fn_id,
984					struct i40iw_dma_mem *commit_fpm_mem,
985					bool post_sq,
986					u8 wait_type)
987{
988	u64 *wqe;
989	u64 header;
990	u32 tail, val, error;
991	enum i40iw_status_code ret_code = 0;
992
993	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
994	if (!wqe)
995		return I40IW_ERR_RING_FULL;
996
997	set_64bit_val(wqe, 16, hmc_fn_id);
998	set_64bit_val(wqe, 32, commit_fpm_mem->pa);
999
1000	header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
1001		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1002
1003	i40iw_insert_wqe_hdr(wqe, header);
1004
1005	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
1006			wqe, I40IW_CQP_WQE_SIZE * 8);
1007
1008	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
1009	if (error)
1010		return I40IW_ERR_CQP_COMPL_ERROR;
1011
1012	if (post_sq) {
1013		i40iw_sc_cqp_post_sq(cqp);
1014
1015		if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
1016			ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
1017		else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
1018			ret_code = i40iw_sc_commit_fpm_values_done(cqp);
1019	}
1020
1021	return ret_code;
1022}
1023
1024/**
1025 * i40iw_sc_query_rdma_features_done - poll cqp for query features done
1026 * @cqp: struct for cqp hw
1027 */
1028static enum i40iw_status_code
1029i40iw_sc_query_rdma_features_done(struct i40iw_sc_cqp *cqp)
1030{
1031	return i40iw_sc_poll_for_cqp_op_done(
1032		cqp, I40IW_CQP_OP_QUERY_RDMA_FEATURES, NULL);
1033}
1034
1035/**
1036 * i40iw_sc_query_rdma_features - query rdma features
1037 * @cqp: struct for cqp hw
1038 * @feat_mem: holds PA for HW to use
1039 * @scratch: u64 saved to be used during cqp completion
1040 */
1041static enum i40iw_status_code
1042i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp,
1043			     struct i40iw_dma_mem *feat_mem, u64 scratch)
1044{
1045	u64 *wqe;
1046	u64 header;
1047
1048	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1049	if (!wqe)
1050		return I40IW_ERR_RING_FULL;
1051
1052	set_64bit_val(wqe, 32, feat_mem->pa);
1053
1054	header = LS_64(I40IW_CQP_OP_QUERY_RDMA_FEATURES, I40IW_CQPSQ_OPCODE) |
1055		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) | feat_mem->size;
1056
1057	i40iw_insert_wqe_hdr(wqe, header);
1058
1059	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY RDMA FEATURES WQE",
1060			wqe, I40IW_CQP_WQE_SIZE * 8);
1061
1062	i40iw_sc_cqp_post_sq(cqp);
1063
1064	return 0;
1065}
1066
1067/**
1068 * i40iw_get_rdma_features - get RDMA features
1069 * @dev - sc device struct
1070 */
1071enum i40iw_status_code i40iw_get_rdma_features(struct i40iw_sc_dev *dev)
1072{
1073	enum i40iw_status_code ret_code;
1074	struct i40iw_dma_mem feat_buf;
1075	u64 temp;
1076	u16 byte_idx, feat_type, feat_cnt;
1077
1078	ret_code = i40iw_allocate_dma_mem(dev->hw, &feat_buf,
1079					  I40IW_FEATURE_BUF_SIZE,
1080					  I40IW_FEATURE_BUF_ALIGNMENT);
1081
1082	if (ret_code)
1083		return I40IW_ERR_NO_MEMORY;
1084
1085	ret_code = i40iw_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
1086	if (!ret_code)
1087		ret_code = i40iw_sc_query_rdma_features_done(dev->cqp);
1088
1089	if (ret_code)
1090		goto exit;
1091
1092	get_64bit_val(feat_buf.va, 0, &temp);
1093	feat_cnt = RS_64(temp, I40IW_FEATURE_CNT);
1094	if (feat_cnt < I40IW_MAX_FEATURES) {
1095		ret_code = I40IW_ERR_INVALID_FEAT_CNT;
1096		goto exit;
1097	} else if (feat_cnt > I40IW_MAX_FEATURES) {
1098		i40iw_debug(dev, I40IW_DEBUG_CQP,
1099			    "features buf size insufficient\n");
1100	}
1101
1102	for (byte_idx = 0, feat_type = 0; feat_type < I40IW_MAX_FEATURES;
1103	     feat_type++, byte_idx += 8) {
1104		get_64bit_val((u64 *)feat_buf.va, byte_idx, &temp);
1105		dev->feature_info[feat_type] = RS_64(temp, I40IW_FEATURE_INFO);
1106	}
1107exit:
1108	i40iw_free_dma_mem(dev->hw, &feat_buf);
1109
1110	return ret_code;
1111}
1112
1113/**
1114 * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
1115 * @cqp: struct for cqp hw
1116 */
1117static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
1118{
1119	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
1120}
1121
1122/**
1123 * i40iw_sc_query_fpm_values - cqp wqe query fpm values
1124 * @cqp: struct for cqp hw
1125 * @scratch: u64 saved to be used during cqp completion
1126 * @hmc_fn_id: hmc function id
1127 * @query_fpm_mem: memory for return fpm values
1128 * @post_sq: flag for cqp db to ring
1129 * @wait_type: poll ccq or cqp registers for cqp completion
1130 */
1131static enum i40iw_status_code i40iw_sc_query_fpm_values(
1132					struct i40iw_sc_cqp *cqp,
1133					u64 scratch,
1134					u8 hmc_fn_id,
1135					struct i40iw_dma_mem *query_fpm_mem,
1136					bool post_sq,
1137					u8 wait_type)
1138{
1139	u64 *wqe;
1140	u64 header;
1141	u32 tail, val, error;
1142	enum i40iw_status_code ret_code = 0;
1143
1144	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1145	if (!wqe)
1146		return I40IW_ERR_RING_FULL;
1147
1148	set_64bit_val(wqe, 16, hmc_fn_id);
1149	set_64bit_val(wqe, 32, query_fpm_mem->pa);
1150
1151	header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
1152		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1153
1154	i40iw_insert_wqe_hdr(wqe, header);
1155
1156	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
1157			wqe, I40IW_CQP_WQE_SIZE * 8);
1158
1159	/* read the tail from CQP_TAIL register */
1160	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
1161
1162	if (error)
1163		return I40IW_ERR_CQP_COMPL_ERROR;
1164
1165	if (post_sq) {
1166		i40iw_sc_cqp_post_sq(cqp);
1167		if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
1168			ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
1169		else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
1170			ret_code = i40iw_sc_query_fpm_values_done(cqp);
1171	}
1172
1173	return ret_code;
1174}
1175
1176/**
1177 * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
1178 * @cqp: struct for cqp hw
1179 * @info: arp entry information
1180 * @scratch: u64 saved to be used during cqp completion
1181 * @post_sq: flag for cqp db to ring
1182 */
1183static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
1184				struct i40iw_sc_cqp *cqp,
1185				struct i40iw_add_arp_cache_entry_info *info,
1186				u64 scratch,
1187				bool post_sq)
1188{
1189	u64 *wqe;
1190	u64 temp, header;
1191
1192	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1193	if (!wqe)
1194		return I40IW_ERR_RING_FULL;
1195	set_64bit_val(wqe, 8, info->reach_max);
1196
1197	temp = info->mac_addr[5] |
1198	       LS_64_1(info->mac_addr[4], 8) |
1199	       LS_64_1(info->mac_addr[3], 16) |
1200	       LS_64_1(info->mac_addr[2], 24) |
1201	       LS_64_1(info->mac_addr[1], 32) |
1202	       LS_64_1(info->mac_addr[0], 40);
1203
1204	set_64bit_val(wqe, 16, temp);
1205
1206	header = info->arp_index |
1207		 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
1208		 LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
1209		 LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
1210		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1211
1212	i40iw_insert_wqe_hdr(wqe, header);
1213
1214	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
1215			wqe, I40IW_CQP_WQE_SIZE * 8);
1216
1217	if (post_sq)
1218		i40iw_sc_cqp_post_sq(cqp);
1219	return 0;
1220}
1221
1222/**
1223 * i40iw_sc_del_arp_cache_entry - dele arp cache entry
1224 * @cqp: struct for cqp hw
1225 * @scratch: u64 saved to be used during cqp completion
1226 * @arp_index: arp index to delete arp entry
1227 * @post_sq: flag for cqp db to ring
1228 */
1229static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
1230					struct i40iw_sc_cqp *cqp,
1231					u64 scratch,
1232					u16 arp_index,
1233					bool post_sq)
1234{
1235	u64 *wqe;
1236	u64 header;
1237
1238	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1239	if (!wqe)
1240		return I40IW_ERR_RING_FULL;
1241
1242	header = arp_index |
1243		 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
1244		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1245	i40iw_insert_wqe_hdr(wqe, header);
1246
1247	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
1248			wqe, I40IW_CQP_WQE_SIZE * 8);
1249
1250	if (post_sq)
1251		i40iw_sc_cqp_post_sq(cqp);
1252	return 0;
1253}
1254
1255/**
1256 * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index
1257 * @cqp: struct for cqp hw
1258 * @scratch: u64 saved to be used during cqp completion
1259 * @arp_index: arp index to delete arp entry
1260 * @post_sq: flag for cqp db to ring
1261 */
1262static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
1263				struct i40iw_sc_cqp *cqp,
1264				u64 scratch,
1265				u16 arp_index,
1266				bool post_sq)
1267{
1268	u64 *wqe;
1269	u64 header;
1270
1271	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1272	if (!wqe)
1273		return I40IW_ERR_RING_FULL;
1274
1275	header = arp_index |
1276		 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
1277		 LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
1278		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1279
1280	i40iw_insert_wqe_hdr(wqe, header);
1281
1282	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
1283			wqe, I40IW_CQP_WQE_SIZE * 8);
1284
1285	if (post_sq)
1286		i40iw_sc_cqp_post_sq(cqp);
1287	return 0;
1288}
1289
1290/**
1291 * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
1292 * @cqp: struct for cqp hw
1293 * @info: info for apbvt entry to add or delete
1294 * @scratch: u64 saved to be used during cqp completion
1295 * @post_sq: flag for cqp db to ring
1296 */
1297static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
1298				struct i40iw_sc_cqp *cqp,
1299				struct i40iw_apbvt_info *info,
1300				u64 scratch,
1301				bool post_sq)
1302{
1303	u64 *wqe;
1304	u64 header;
1305
1306	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1307	if (!wqe)
1308		return I40IW_ERR_RING_FULL;
1309
1310	set_64bit_val(wqe, 16, info->port);
1311
1312	header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
1313		 LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
1314		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1315
1316	i40iw_insert_wqe_hdr(wqe, header);
1317
1318	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
1319			wqe, I40IW_CQP_WQE_SIZE * 8);
1320
1321	if (post_sq)
1322		i40iw_sc_cqp_post_sq(cqp);
1323	return 0;
1324}
1325
1326/**
1327 * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
1328 * @cqp: struct for cqp hw
1329 * @info: info for quad hash to manage
1330 * @scratch: u64 saved to be used during cqp completion
1331 * @post_sq: flag for cqp db to ring
1332 *
1333 * This is called before connection establishment is started. For passive connections, when
1334 * listener is created, it will call with entry type of  I40IW_QHASH_TYPE_TCP_SYN with local
1335 * ip address and tcp port. When SYN is received (passive connections) or
1336 * sent (active connections), this routine is called with entry type of
1337 * I40IW_QHASH_TYPE_TCP_ESTABLISHED and quad is passed in info.
1338 *
1339 * When iwarp connection is done and its state moves to RTS, the quad hash entry in
1340 * the hardware will point to iwarp's qp number and requires no calls from the driver.
1341 */
1342static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
1343					struct i40iw_sc_cqp *cqp,
1344					struct i40iw_qhash_table_info *info,
1345					u64 scratch,
1346					bool post_sq)
1347{
1348	u64 *wqe;
1349	u64 qw1 = 0;
1350	u64 qw2 = 0;
1351	u64 temp;
1352	struct i40iw_sc_vsi *vsi = info->vsi;
1353
1354	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1355	if (!wqe)
1356		return I40IW_ERR_RING_FULL;
1357
1358	temp = info->mac_addr[5] |
1359		LS_64_1(info->mac_addr[4], 8) |
1360		LS_64_1(info->mac_addr[3], 16) |
1361		LS_64_1(info->mac_addr[2], 24) |
1362		LS_64_1(info->mac_addr[1], 32) |
1363		LS_64_1(info->mac_addr[0], 40);
1364
1365	set_64bit_val(wqe, 0, temp);
1366
1367	qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
1368	      LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
1369	if (info->ipv4_valid) {
1370		set_64bit_val(wqe,
1371			      48,
1372			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
1373	} else {
1374		set_64bit_val(wqe,
1375			      56,
1376			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
1377			      LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
1378
1379		set_64bit_val(wqe,
1380			      48,
1381			      LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
1382			      LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
1383	}
1384	qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
1385	if (info->vlan_valid)
1386		qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
1387	set_64bit_val(wqe, 16, qw2);
1388	if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
1389		qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
1390		if (!info->ipv4_valid) {
1391			set_64bit_val(wqe,
1392				      40,
1393				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
1394				      LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
1395			set_64bit_val(wqe,
1396				      32,
1397				      LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
1398				      LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
1399		} else {
1400			set_64bit_val(wqe,
1401				      32,
1402				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
1403		}
1404	}
1405
1406	set_64bit_val(wqe, 8, qw1);
1407	temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
1408	       LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
1409	       LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
1410	       LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
1411	       LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
1412	       LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);
1413
1414	i40iw_insert_wqe_hdr(wqe, temp);
1415
1416	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
1417			wqe, I40IW_CQP_WQE_SIZE * 8);
1418
1419	if (post_sq)
1420		i40iw_sc_cqp_post_sq(cqp);
1421	return 0;
1422}
1423
1424/**
1425 * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
1426 * @cqp: struct for cqp hw
1427 * @scratch: u64 saved to be used during cqp completion
1428 * @post_sq: flag for cqp db to ring
1429 */
1430static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
1431					struct i40iw_sc_cqp *cqp,
1432					u64 scratch,
1433					bool post_sq)
1434{
1435	u64 *wqe;
1436	u64 header;
1437
1438	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1439	if (!wqe)
1440		return I40IW_ERR_RING_FULL;
1441	header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
1442		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1443
1444	i40iw_insert_wqe_hdr(wqe, header);
1445	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
1446			wqe, I40IW_CQP_WQE_SIZE * 8);
1447	if (post_sq)
1448		i40iw_sc_cqp_post_sq(cqp);
1449	return 0;
1450}
1451
1452/**
1453 * i40iw_sc_add_local_mac_ipaddr_entry - add mac enry
1454 * @cqp: struct for cqp hw
1455 * @info:mac addr info
1456 * @scratch: u64 saved to be used during cqp completion
1457 * @post_sq: flag for cqp db to ring
1458 */
1459static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
1460				struct i40iw_sc_cqp *cqp,
1461				struct i40iw_local_mac_ipaddr_entry_info *info,
1462				u64 scratch,
1463				bool post_sq)
1464{
1465	u64 *wqe;
1466	u64 temp, header;
1467
1468	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1469	if (!wqe)
1470		return I40IW_ERR_RING_FULL;
1471	temp = info->mac_addr[5] |
1472		LS_64_1(info->mac_addr[4], 8) |
1473		LS_64_1(info->mac_addr[3], 16) |
1474		LS_64_1(info->mac_addr[2], 24) |
1475		LS_64_1(info->mac_addr[1], 32) |
1476		LS_64_1(info->mac_addr[0], 40);
1477
1478	set_64bit_val(wqe, 32, temp);
1479
1480	header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
1481		 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
1482		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1483
1484	i40iw_insert_wqe_hdr(wqe, header);
1485
1486	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
1487			wqe, I40IW_CQP_WQE_SIZE * 8);
1488
1489	if (post_sq)
1490		i40iw_sc_cqp_post_sq(cqp);
1491	return 0;
1492}
1493
1494/**
1495 * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to dele local mac
1496 * @cqp: struct for cqp hw
1497 * @scratch: u64 saved to be used during cqp completion
1498 * @entry_idx: index of mac entry
1499 * @ ignore_ref_count: to force mac adde delete
1500 * @post_sq: flag for cqp db to ring
1501 */
1502static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
1503				struct i40iw_sc_cqp *cqp,
1504				u64 scratch,
1505				u8 entry_idx,
1506				u8 ignore_ref_count,
1507				bool post_sq)
1508{
1509	u64 *wqe;
1510	u64 header;
1511
1512	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1513	if (!wqe)
1514		return I40IW_ERR_RING_FULL;
1515	header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
1516		 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
1517		 LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
1518		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
1519		 LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);
1520
1521	i40iw_insert_wqe_hdr(wqe, header);
1522
1523	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
1524			wqe, I40IW_CQP_WQE_SIZE * 8);
1525
1526	if (post_sq)
1527		i40iw_sc_cqp_post_sq(cqp);
1528	return 0;
1529}
1530
1531/**
1532 * i40iw_sc_cqp_nop - send a nop wqe
1533 * @cqp: struct for cqp hw
1534 * @scratch: u64 saved to be used during cqp completion
1535 * @post_sq: flag for cqp db to ring
1536 */
1537static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
1538					       u64 scratch,
1539					       bool post_sq)
1540{
1541	u64 *wqe;
1542	u64 header;
1543
1544	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1545	if (!wqe)
1546		return I40IW_ERR_RING_FULL;
1547	header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
1548		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1549	i40iw_insert_wqe_hdr(wqe, header);
1550	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
1551			wqe, I40IW_CQP_WQE_SIZE * 8);
1552
1553	if (post_sq)
1554		i40iw_sc_cqp_post_sq(cqp);
1555	return 0;
1556}
1557
1558/**
1559 * i40iw_sc_ceq_init - initialize ceq
1560 * @ceq: ceq sc structure
1561 * @info: ceq initialization info
1562 */
1563static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
1564						struct i40iw_ceq_init_info *info)
1565{
1566	u32 pble_obj_cnt;
1567
1568	if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
1569	    (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
1570		return I40IW_ERR_INVALID_SIZE;
1571
1572	if (info->ceq_id >= I40IW_MAX_CEQID)
1573		return I40IW_ERR_INVALID_CEQ_ID;
1574
1575	pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1576
1577	if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1578		return I40IW_ERR_INVALID_PBLE_INDEX;
1579
1580	ceq->size = sizeof(*ceq);
1581	ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
1582	ceq->ceq_id = info->ceq_id;
1583	ceq->dev = info->dev;
1584	ceq->elem_cnt = info->elem_cnt;
1585	ceq->ceq_elem_pa = info->ceqe_pa;
1586	ceq->virtual_map = info->virtual_map;
1587
1588	ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
1589	ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
1590	ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
1591
1592	ceq->tph_en = info->tph_en;
1593	ceq->tph_val = info->tph_val;
1594	ceq->polarity = 1;
1595	I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
1596	ceq->dev->ceq[info->ceq_id] = ceq;
1597
1598	return 0;
1599}
1600
1601/**
1602 * i40iw_sc_ceq_create - create ceq wqe
1603 * @ceq: ceq sc structure
1604 * @scratch: u64 saved to be used during cqp completion
1605 * @post_sq: flag for cqp db to ring
1606 */
1607static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
1608						  u64 scratch,
1609						  bool post_sq)
1610{
1611	struct i40iw_sc_cqp *cqp;
1612	u64 *wqe;
1613	u64 header;
1614
1615	cqp = ceq->dev->cqp;
1616	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1617	if (!wqe)
1618		return I40IW_ERR_RING_FULL;
1619	set_64bit_val(wqe, 16, ceq->elem_cnt);
1620	set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
1621	set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
1622	set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));
1623
1624	header = ceq->ceq_id |
1625		 LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
1626		 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
1627		 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
1628		 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
1629		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1630
1631	i40iw_insert_wqe_hdr(wqe, header);
1632
1633	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
1634			wqe, I40IW_CQP_WQE_SIZE * 8);
1635
1636	if (post_sq)
1637		i40iw_sc_cqp_post_sq(cqp);
1638	return 0;
1639}
1640
1641/**
1642 * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
1643 * @ceq: ceq sc structure
1644 */
1645static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
1646{
1647	struct i40iw_sc_cqp *cqp;
1648
1649	cqp = ceq->dev->cqp;
1650	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
1651}
1652
1653/**
1654 * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
1655 * @ceq: ceq sc structure
1656 */
1657static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
1658{
1659	struct i40iw_sc_cqp *cqp;
1660
1661	cqp = ceq->dev->cqp;
1662	cqp->process_cqp_sds = i40iw_update_sds_noccq;
1663	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
1664}
1665
1666/**
1667 * i40iw_sc_cceq_create - create cceq
1668 * @ceq: ceq sc structure
1669 * @scratch: u64 saved to be used during cqp completion
1670 */
1671static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
1672{
1673	enum i40iw_status_code ret_code;
1674
1675	ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
1676	if (!ret_code)
1677		ret_code = i40iw_sc_cceq_create_done(ceq);
1678	return ret_code;
1679}
1680
1681/**
1682 * i40iw_sc_ceq_destroy - destroy ceq
1683 * @ceq: ceq sc structure
1684 * @scratch: u64 saved to be used during cqp completion
1685 * @post_sq: flag for cqp db to ring
1686 */
1687static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
1688						   u64 scratch,
1689						   bool post_sq)
1690{
1691	struct i40iw_sc_cqp *cqp;
1692	u64 *wqe;
1693	u64 header;
1694
1695	cqp = ceq->dev->cqp;
1696	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1697	if (!wqe)
1698		return I40IW_ERR_RING_FULL;
1699	set_64bit_val(wqe, 16, ceq->elem_cnt);
1700	set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
1701	header = ceq->ceq_id |
1702		 LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
1703		 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
1704		 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
1705		 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
1706		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1707	i40iw_insert_wqe_hdr(wqe, header);
1708	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
1709			wqe, I40IW_CQP_WQE_SIZE * 8);
1710
1711	if (post_sq)
1712		i40iw_sc_cqp_post_sq(cqp);
1713	return 0;
1714}
1715
1716/**
1717 * i40iw_sc_process_ceq - process ceq
1718 * @dev: sc device struct
1719 * @ceq: ceq sc structure
1720 */
1721static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
1722{
1723	u64 temp;
1724	u64 *ceqe;
1725	struct i40iw_sc_cq *cq = NULL;
1726	u8 polarity;
1727
1728	ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
1729	get_64bit_val(ceqe, 0, &temp);
1730	polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
1731	if (polarity != ceq->polarity)
1732		return cq;
1733
1734	cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);
1735
1736	I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
1737	if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
1738		ceq->polarity ^= 1;
1739
1740	if (dev->is_pf)
1741		i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
1742	else
1743		i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);
1744
1745	return cq;
1746}
1747
1748/**
1749 * i40iw_sc_aeq_init - initialize aeq
1750 * @aeq: aeq structure ptr
1751 * @info: aeq initialization info
1752 */
1753static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
1754						struct i40iw_aeq_init_info *info)
1755{
1756	u32 pble_obj_cnt;
1757
1758	if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
1759	    (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
1760		return I40IW_ERR_INVALID_SIZE;
1761	pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1762
1763	if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1764		return I40IW_ERR_INVALID_PBLE_INDEX;
1765
1766	aeq->size = sizeof(*aeq);
1767	aeq->polarity = 1;
1768	aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
1769	aeq->dev = info->dev;
1770	aeq->elem_cnt = info->elem_cnt;
1771
1772	aeq->aeq_elem_pa = info->aeq_elem_pa;
1773	I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
1774	info->dev->aeq = aeq;
1775
1776	aeq->virtual_map = info->virtual_map;
1777	aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
1778	aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
1779	aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
1780	info->dev->aeq = aeq;
1781	return 0;
1782}
1783
1784/**
1785 * i40iw_sc_aeq_create - create aeq
1786 * @aeq: aeq structure ptr
1787 * @scratch: u64 saved to be used during cqp completion
1788 * @post_sq: flag for cqp db to ring
1789 */
1790static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
1791						  u64 scratch,
1792						  bool post_sq)
1793{
1794	u64 *wqe;
1795	struct i40iw_sc_cqp *cqp;
1796	u64 header;
1797
1798	cqp = aeq->dev->cqp;
1799	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1800	if (!wqe)
1801		return I40IW_ERR_RING_FULL;
1802	set_64bit_val(wqe, 16, aeq->elem_cnt);
1803	set_64bit_val(wqe, 32,
1804		      (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
1805	set_64bit_val(wqe, 48,
1806		      (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
1807
1808	header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
1809		 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
1810		 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
1811		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1812
1813	i40iw_insert_wqe_hdr(wqe, header);
1814	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
1815			wqe, I40IW_CQP_WQE_SIZE * 8);
1816	if (post_sq)
1817		i40iw_sc_cqp_post_sq(cqp);
1818	return 0;
1819}
1820
1821/**
1822 * i40iw_sc_aeq_destroy - destroy aeq during close
1823 * @aeq: aeq structure ptr
1824 * @scratch: u64 saved to be used during cqp completion
1825 * @post_sq: flag for cqp db to ring
1826 */
1827static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
1828						   u64 scratch,
1829						   bool post_sq)
1830{
1831	u64 *wqe;
1832	struct i40iw_sc_cqp *cqp;
1833	u64 header;
1834
1835	cqp = aeq->dev->cqp;
1836	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1837	if (!wqe)
1838		return I40IW_ERR_RING_FULL;
1839	set_64bit_val(wqe, 16, aeq->elem_cnt);
1840	set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
1841	header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
1842		 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
1843		 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
1844		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1845	i40iw_insert_wqe_hdr(wqe, header);
1846
1847	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
1848			wqe, I40IW_CQP_WQE_SIZE * 8);
1849	if (post_sq)
1850		i40iw_sc_cqp_post_sq(cqp);
1851	return 0;
1852}
1853
1854/**
1855 * i40iw_sc_get_next_aeqe - get next aeq entry
1856 * @aeq: aeq structure ptr
1857 * @info: aeqe info to be returned
1858 */
1859static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
1860						     struct i40iw_aeqe_info *info)
1861{
1862	u64 temp, compl_ctx;
1863	u64 *aeqe;
1864	u16 wqe_idx;
1865	u8 ae_src;
1866	u8 polarity;
1867
1868	aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
1869	get_64bit_val(aeqe, 0, &compl_ctx);
1870	get_64bit_val(aeqe, 8, &temp);
1871	polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);
1872
1873	if (aeq->polarity != polarity)
1874		return I40IW_ERR_QUEUE_EMPTY;
1875
1876	i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);
1877
1878	ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
1879	wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
1880	info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
1881	info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
1882	info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
1883	info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
1884	info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
1885	info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
1886
1887	switch (info->ae_id) {
1888	case I40IW_AE_PRIV_OPERATION_DENIED:
1889	case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
1890	case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
1891	case I40IW_AE_BAD_CLOSE:
1892	case I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE:
1893	case I40IW_AE_RDMA_READ_WHILE_ORD_ZERO:
1894	case I40IW_AE_STAG_ZERO_INVALID:
1895	case I40IW_AE_IB_RREQ_AND_Q1_FULL:
1896	case I40IW_AE_WQE_UNEXPECTED_OPCODE:
1897	case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
1898	case I40IW_AE_DDP_UBE_INVALID_MO:
1899	case I40IW_AE_DDP_UBE_INVALID_QN:
1900	case I40IW_AE_DDP_NO_L_BIT:
1901	case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
1902	case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
1903	case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
1904	case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
1905	case I40IW_AE_INVALID_ARP_ENTRY:
1906	case I40IW_AE_INVALID_TCP_OPTION_RCVD:
1907	case I40IW_AE_STALE_ARP_ENTRY:
1908	case I40IW_AE_LLP_CLOSE_COMPLETE:
1909	case I40IW_AE_LLP_CONNECTION_RESET:
1910	case I40IW_AE_LLP_FIN_RECEIVED:
1911	case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
1912	case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
1913	case I40IW_AE_LLP_SYN_RECEIVED:
1914	case I40IW_AE_LLP_TERMINATE_RECEIVED:
1915	case I40IW_AE_LLP_TOO_MANY_RETRIES:
1916	case I40IW_AE_LLP_DOUBT_REACHABILITY:
1917	case I40IW_AE_RESET_SENT:
1918	case I40IW_AE_TERMINATE_SENT:
1919	case I40IW_AE_RESET_NOT_SENT:
1920	case I40IW_AE_LCE_QP_CATASTROPHIC:
1921	case I40IW_AE_QP_SUSPEND_COMPLETE:
1922		info->qp = true;
1923		info->compl_ctx = compl_ctx;
1924		ae_src = I40IW_AE_SOURCE_RSVD;
1925		break;
1926	case I40IW_AE_LCE_CQ_CATASTROPHIC:
1927		info->cq = true;
1928		info->compl_ctx = LS_64_1(compl_ctx, 1);
1929		ae_src = I40IW_AE_SOURCE_RSVD;
1930		break;
1931	}
1932
1933	switch (ae_src) {
1934	case I40IW_AE_SOURCE_RQ:
1935	case I40IW_AE_SOURCE_RQ_0011:
1936		info->qp = true;
1937		info->wqe_idx = wqe_idx;
1938		info->compl_ctx = compl_ctx;
1939		break;
1940	case I40IW_AE_SOURCE_CQ:
1941	case I40IW_AE_SOURCE_CQ_0110:
1942	case I40IW_AE_SOURCE_CQ_1010:
1943	case I40IW_AE_SOURCE_CQ_1110:
1944		info->cq = true;
1945		info->compl_ctx = LS_64_1(compl_ctx, 1);
1946		break;
1947	case I40IW_AE_SOURCE_SQ:
1948	case I40IW_AE_SOURCE_SQ_0111:
1949		info->qp = true;
1950		info->sq = true;
1951		info->wqe_idx = wqe_idx;
1952		info->compl_ctx = compl_ctx;
1953		break;
1954	case I40IW_AE_SOURCE_IN_RR_WR:
1955	case I40IW_AE_SOURCE_IN_RR_WR_1011:
1956		info->qp = true;
1957		info->compl_ctx = compl_ctx;
1958		info->in_rdrsp_wr = true;
1959		break;
1960	case I40IW_AE_SOURCE_OUT_RR:
1961	case I40IW_AE_SOURCE_OUT_RR_1111:
1962		info->qp = true;
1963		info->compl_ctx = compl_ctx;
1964		info->out_rdrsp = true;
1965		break;
1966	case I40IW_AE_SOURCE_RSVD:
1967	default:
1968		break;
1969	}
1970	I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
1971	if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
1972		aeq->polarity ^= 1;
1973	return 0;
1974}
1975
1976/**
1977 * i40iw_sc_repost_aeq_entries - repost completed aeq entries
1978 * @dev: sc device struct
1979 * @count: allocate count
1980 */
1981static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
1982							  u32 count)
1983{
1984
1985	if (dev->is_pf)
1986		i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
1987	else
1988		i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);
1989
1990	return 0;
1991}
1992
1993/**
1994 * i40iw_sc_aeq_create_done - create aeq
1995 * @aeq: aeq structure ptr
1996 */
1997static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
1998{
1999	struct i40iw_sc_cqp *cqp;
2000
2001	cqp = aeq->dev->cqp;
2002	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
2003}
2004
2005/**
2006 * i40iw_sc_aeq_destroy_done - destroy of aeq during close
2007 * @aeq: aeq structure ptr
2008 */
2009static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
2010{
2011	struct i40iw_sc_cqp *cqp;
2012
2013	cqp = aeq->dev->cqp;
2014	return  i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
2015}
2016
2017/**
2018 * i40iw_sc_ccq_init - initialize control cq
2019 * @cq: sc's cq ctruct
2020 * @info: info for control cq initialization
2021 */
2022static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
2023						struct i40iw_ccq_init_info *info)
2024{
2025	u32 pble_obj_cnt;
2026
2027	if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
2028		return I40IW_ERR_INVALID_SIZE;
2029
2030	if (info->ceq_id > I40IW_MAX_CEQID)
2031		return I40IW_ERR_INVALID_CEQ_ID;
2032
2033	pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2034
2035	if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
2036		return I40IW_ERR_INVALID_PBLE_INDEX;
2037
2038	cq->cq_pa = info->cq_pa;
2039	cq->cq_uk.cq_base = info->cq_base;
2040	cq->shadow_area_pa = info->shadow_area_pa;
2041	cq->cq_uk.shadow_area = info->shadow_area;
2042	cq->shadow_read_threshold = info->shadow_read_threshold;
2043	cq->dev = info->dev;
2044	cq->ceq_id = info->ceq_id;
2045	cq->cq_uk.cq_size = info->num_elem;
2046	cq->cq_type = I40IW_CQ_TYPE_CQP;
2047	cq->ceqe_mask = info->ceqe_mask;
2048	I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
2049
2050	cq->cq_uk.cq_id = 0;    /* control cq is id 0 always */
2051	cq->ceq_id_valid = info->ceq_id_valid;
2052	cq->tph_en = info->tph_en;
2053	cq->tph_val = info->tph_val;
2054	cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
2055
2056	cq->pbl_list = info->pbl_list;
2057	cq->virtual_map = info->virtual_map;
2058	cq->pbl_chunk_size = info->pbl_chunk_size;
2059	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2060	cq->cq_uk.polarity = true;
2061
2062	/* following are only for iw cqs so initialize them to zero */
2063	cq->cq_uk.cqe_alloc_reg = NULL;
2064	info->dev->ccq = cq;
2065	return 0;
2066}
2067
2068/**
2069 * i40iw_sc_ccq_create_done - poll cqp for ccq create
2070 * @ccq: ccq sc struct
2071 */
2072static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
2073{
2074	struct i40iw_sc_cqp *cqp;
2075
2076	cqp = ccq->dev->cqp;
2077	return	i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
2078}
2079
2080/**
2081 * i40iw_sc_ccq_create - create control cq
2082 * @ccq: ccq sc struct
2083 * @scratch: u64 saved to be used during cqp completion
2084 * @check_overflow: overlow flag for ccq
2085 * @post_sq: flag for cqp db to ring
2086 */
2087static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
2088						  u64 scratch,
2089						  bool check_overflow,
2090						  bool post_sq)
2091{
2092	u64 *wqe;
2093	struct i40iw_sc_cqp *cqp;
2094	u64 header;
2095	enum i40iw_status_code ret_code;
2096
2097	cqp = ccq->dev->cqp;
2098	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2099	if (!wqe)
2100		return I40IW_ERR_RING_FULL;
2101	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
2102	set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
2103	set_64bit_val(wqe, 16,
2104		      LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
2105	set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
2106	set_64bit_val(wqe, 40, ccq->shadow_area_pa);
2107	set_64bit_val(wqe, 48,
2108		      (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
2109	set_64bit_val(wqe, 56,
2110		      LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));
2111
2112	header = ccq->cq_uk.cq_id |
2113		 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
2114		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
2115		 LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
2116		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
2117		 LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
2118		 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
2119		 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
2120		 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
2121		 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
2122		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2123
2124	i40iw_insert_wqe_hdr(wqe, header);
2125
2126	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
2127			wqe, I40IW_CQP_WQE_SIZE * 8);
2128
2129	if (post_sq) {
2130		i40iw_sc_cqp_post_sq(cqp);
2131		ret_code = i40iw_sc_ccq_create_done(ccq);
2132		if (ret_code)
2133			return ret_code;
2134	}
2135	cqp->process_cqp_sds = i40iw_cqp_sds_cmd;
2136
2137	return 0;
2138}
2139
2140/**
2141 * i40iw_sc_ccq_destroy - destroy ccq during close
2142 * @ccq: ccq sc struct
2143 * @scratch: u64 saved to be used during cqp completion
2144 * @post_sq: flag for cqp db to ring
2145 */
2146static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
2147						   u64 scratch,
2148						   bool post_sq)
2149{
2150	struct i40iw_sc_cqp *cqp;
2151	u64 *wqe;
2152	u64 header;
2153	enum i40iw_status_code ret_code = 0;
2154	u32 tail, val, error;
2155
2156	cqp = ccq->dev->cqp;
2157	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2158	if (!wqe)
2159		return I40IW_ERR_RING_FULL;
2160	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
2161	set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
2162	set_64bit_val(wqe, 40, ccq->shadow_area_pa);
2163
2164	header = ccq->cq_uk.cq_id |
2165		 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
2166		 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
2167		 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
2168		 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
2169		 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
2170		 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
2171		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2172
2173	i40iw_insert_wqe_hdr(wqe, header);
2174
2175	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
2176			wqe, I40IW_CQP_WQE_SIZE * 8);
2177
2178	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
2179	if (error)
2180		return I40IW_ERR_CQP_COMPL_ERROR;
2181
2182	if (post_sq) {
2183		i40iw_sc_cqp_post_sq(cqp);
2184		ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
2185	}
2186
2187	cqp->process_cqp_sds = i40iw_update_sds_noccq;
2188
2189	return ret_code;
2190}
2191
2192/**
2193 * i40iw_sc_cq_init - initialize completion q
2194 * @cq: cq struct
2195 * @info: cq initialization info
2196 */
2197static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
2198					       struct i40iw_cq_init_info *info)
2199{
2200	u32 __iomem *cqe_alloc_reg = NULL;
2201	enum i40iw_status_code ret_code;
2202	u32 pble_obj_cnt;
2203	u32 arm_offset;
2204
2205	pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2206
2207	if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
2208		return I40IW_ERR_INVALID_PBLE_INDEX;
2209
2210	cq->cq_pa = info->cq_base_pa;
2211	cq->dev = info->dev;
2212	cq->ceq_id = info->ceq_id;
2213	arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
2214	if (i40iw_get_hw_addr(cq->dev))
2215		cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
2216					      arm_offset);
2217	info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
2218	ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
2219	if (ret_code)
2220		return ret_code;
2221	cq->virtual_map = info->virtual_map;
2222	cq->pbl_chunk_size = info->pbl_chunk_size;
2223	cq->ceqe_mask = info->ceqe_mask;
2224	cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;
2225
2226	cq->shadow_area_pa = info->shadow_area_pa;
2227	cq->shadow_read_threshold = info->shadow_read_threshold;
2228
2229	cq->ceq_id_valid = info->ceq_id_valid;
2230	cq->tph_en = info->tph_en;
2231	cq->tph_val = info->tph_val;
2232
2233	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2234
2235	return 0;
2236}
2237
2238/**
2239 * i40iw_sc_cq_create - create completion q
2240 * @cq: cq struct
2241 * @scratch: u64 saved to be used during cqp completion
2242 * @check_overflow: flag for overflow check
2243 * @post_sq: flag for cqp db to ring
2244 */
2245static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
2246						 u64 scratch,
2247						 bool check_overflow,
2248						 bool post_sq)
2249{
2250	u64 *wqe;
2251	struct i40iw_sc_cqp *cqp;
2252	u64 header;
2253
2254	if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
2255		return I40IW_ERR_INVALID_CQ_ID;
2256
2257	if (cq->ceq_id > I40IW_MAX_CEQID)
2258		return I40IW_ERR_INVALID_CEQ_ID;
2259
2260	cqp = cq->dev->cqp;
2261	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2262	if (!wqe)
2263		return I40IW_ERR_RING_FULL;
2264
2265	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
2266	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
2267	set_64bit_val(wqe,
2268		      16,
2269		      LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
2270
2271	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
2272
2273	set_64bit_val(wqe, 40, cq->shadow_area_pa);
2274	set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2275	set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
2276
2277	header = cq->cq_uk.cq_id |
2278		 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
2279		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
2280		 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
2281		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
2282		 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
2283		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
2284		 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
2285		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
2286		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
2287		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2288
2289	i40iw_insert_wqe_hdr(wqe, header);
2290
2291	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
2292			wqe, I40IW_CQP_WQE_SIZE * 8);
2293
2294	if (post_sq)
2295		i40iw_sc_cqp_post_sq(cqp);
2296	return 0;
2297}
2298
2299/**
2300 * i40iw_sc_cq_destroy - destroy completion q
2301 * @cq: cq struct
2302 * @scratch: u64 saved to be used during cqp completion
2303 * @post_sq: flag for cqp db to ring
2304 */
2305static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
2306						  u64 scratch,
2307						  bool post_sq)
2308{
2309	struct i40iw_sc_cqp *cqp;
2310	u64 *wqe;
2311	u64 header;
2312
2313	cqp = cq->dev->cqp;
2314	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2315	if (!wqe)
2316		return I40IW_ERR_RING_FULL;
2317	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
2318	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
2319	set_64bit_val(wqe, 40, cq->shadow_area_pa);
2320	set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2321
2322	header = cq->cq_uk.cq_id |
2323		 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
2324		 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
2325		 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
2326		 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
2327		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
2328		 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
2329		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
2330		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
2331		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2332
2333	i40iw_insert_wqe_hdr(wqe, header);
2334
2335	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
2336			wqe, I40IW_CQP_WQE_SIZE * 8);
2337
2338	if (post_sq)
2339		i40iw_sc_cqp_post_sq(cqp);
2340	return 0;
2341}
2342
2343/**
2344 * i40iw_sc_cq_modify - modify a Completion Queue
2345 * @cq: cq struct
2346 * @info: modification info struct
2347 * @scratch:
2348 * @post_sq: flag to post to sq
2349 */
2350static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
2351						 struct i40iw_modify_cq_info *info,
2352						 u64 scratch,
2353						 bool post_sq)
2354{
2355	struct i40iw_sc_cqp *cqp;
2356	u64 *wqe;
2357	u64 header;
2358	u32 cq_size, ceq_id, first_pm_pbl_idx;
2359	u8 pbl_chunk_size;
2360	bool virtual_map, ceq_id_valid, check_overflow;
2361	u32 pble_obj_cnt;
2362
2363	if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
2364		return I40IW_ERR_INVALID_CEQ_ID;
2365
2366	pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2367
2368	if (info->cq_resize && info->virtual_map &&
2369	    (info->first_pm_pbl_idx >= pble_obj_cnt))
2370		return I40IW_ERR_INVALID_PBLE_INDEX;
2371
2372	cqp = cq->dev->cqp;
2373	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2374	if (!wqe)
2375		return I40IW_ERR_RING_FULL;
2376
2377	cq->pbl_list = info->pbl_list;
2378	cq->cq_pa = info->cq_pa;
2379	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2380
2381	cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
2382	if (info->ceq_change) {
2383		ceq_id_valid = true;
2384		ceq_id = info->ceq_id;
2385	} else {
2386		ceq_id_valid = cq->ceq_id_valid;
2387		ceq_id = ceq_id_valid ? cq->ceq_id : 0;
2388	}
2389	virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
2390	first_pm_pbl_idx = (info->cq_resize ?
2391			    (info->virtual_map ? info->first_pm_pbl_idx : 0) :
2392			    (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2393	pbl_chunk_size = (info->cq_resize ?
2394			  (info->virtual_map ? info->pbl_chunk_size : 0) :
2395			  (cq->virtual_map ? cq->pbl_chunk_size : 0));
2396	check_overflow = info->check_overflow_change ? info->check_overflow :
2397			 cq->check_overflow;
2398	cq->cq_uk.cq_size = cq_size;
2399	cq->ceq_id_valid = ceq_id_valid;
2400	cq->ceq_id = ceq_id;
2401	cq->virtual_map = virtual_map;
2402	cq->first_pm_pbl_idx = first_pm_pbl_idx;
2403	cq->pbl_chunk_size = pbl_chunk_size;
2404	cq->check_overflow = check_overflow;
2405
2406	set_64bit_val(wqe, 0, cq_size);
2407	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
2408	set_64bit_val(wqe, 16,
2409		      LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
2410	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
2411	set_64bit_val(wqe, 40, cq->shadow_area_pa);
2412	set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
2413	set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
2414
2415	header = cq->cq_uk.cq_id |
2416		 LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
2417		 LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
2418		 LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
2419		 LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
2420		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
2421		 LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
2422		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
2423		 LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
2424		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
2425		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
2426		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2427
2428	i40iw_insert_wqe_hdr(wqe, header);
2429
2430	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
2431			wqe, I40IW_CQP_WQE_SIZE * 8);
2432
2433	if (post_sq)
2434		i40iw_sc_cqp_post_sq(cqp);
2435	return 0;
2436}
2437
2438/**
2439 * i40iw_sc_qp_init - initialize qp
2440 * @qp: sc qp
2441 * @info: initialization qp info
2442 */
2443static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
2444					       struct i40iw_qp_init_info *info)
2445{
2446	u32 __iomem *wqe_alloc_reg = NULL;
2447	enum i40iw_status_code ret_code;
2448	u32 pble_obj_cnt;
2449	u8 wqe_size;
2450	u32 offset;
2451
2452	qp->dev = info->pd->dev;
2453	qp->vsi = info->vsi;
2454	qp->sq_pa = info->sq_pa;
2455	qp->rq_pa = info->rq_pa;
2456	qp->hw_host_ctx_pa = info->host_ctx_pa;
2457	qp->q2_pa = info->q2_pa;
2458	qp->shadow_area_pa = info->shadow_area_pa;
2459
2460	qp->q2_buf = info->q2;
2461	qp->pd = info->pd;
2462	qp->hw_host_ctx = info->host_ctx;
2463	offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
2464	if (i40iw_get_hw_addr(qp->pd->dev))
2465		wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
2466					      offset);
2467
2468	info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
2469	info->qp_uk_init_info.abi_ver = qp->pd->abi_ver;
2470	ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
2471	if (ret_code)
2472		return ret_code;
2473	qp->virtual_map = info->virtual_map;
2474
2475	pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2476
2477	if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
2478	    (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
2479		return I40IW_ERR_INVALID_PBLE_INDEX;
2480
2481	qp->llp_stream_handle = (void *)(-1);
2482	qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;
2483
2484	qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
2485						    false);
2486	i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
2487		    __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
2488
2489	switch (qp->pd->abi_ver) {
2490	case 4:
2491		ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
2492						       &wqe_size);
2493		if (ret_code)
2494			return ret_code;
2495		break;
2496	case 5: /* fallthrough until next ABI version */
2497	default:
2498		if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
2499			return I40IW_ERR_INVALID_FRAG_COUNT;
2500		wqe_size = I40IW_MAX_WQE_SIZE_RQ;
2501		break;
2502	}
2503	qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
2504				(wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
2505	i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
2506		    "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
2507		    __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
2508	qp->sq_tph_val = info->sq_tph_val;
2509	qp->rq_tph_val = info->rq_tph_val;
2510	qp->sq_tph_en = info->sq_tph_en;
2511	qp->rq_tph_en = info->rq_tph_en;
2512	qp->rcv_tph_en = info->rcv_tph_en;
2513	qp->xmit_tph_en = info->xmit_tph_en;
2514	qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
2515
2516	return 0;
2517}
2518
2519/**
2520 * i40iw_sc_qp_create - create qp
2521 * @qp: sc qp
2522 * @info: qp create info
2523 * @scratch: u64 saved to be used during cqp completion
2524 * @post_sq: flag for cqp db to ring
2525 */
2526static enum i40iw_status_code i40iw_sc_qp_create(
2527				struct i40iw_sc_qp *qp,
2528				struct i40iw_create_qp_info *info,
2529				u64 scratch,
2530				bool post_sq)
2531{
2532	struct i40iw_sc_cqp *cqp;
2533	u64 *wqe;
2534	u64 header;
2535
2536	if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
2537	    (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
2538		return I40IW_ERR_INVALID_QP_ID;
2539
2540	cqp = qp->pd->dev->cqp;
2541	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2542	if (!wqe)
2543		return I40IW_ERR_RING_FULL;
2544
2545	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2546
2547	set_64bit_val(wqe, 40, qp->shadow_area_pa);
2548
2549	header = qp->qp_uk.qp_id |
2550		 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
2551		 LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
2552		 LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
2553		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2554		 LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
2555		 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
2556		 LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
2557		 LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
2558		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2559
2560	i40iw_insert_wqe_hdr(wqe, header);
2561	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
2562			wqe, I40IW_CQP_WQE_SIZE * 8);
2563
2564	if (post_sq)
2565		i40iw_sc_cqp_post_sq(cqp);
2566	return 0;
2567}
2568
2569/**
2570 * i40iw_sc_qp_modify - modify qp cqp wqe
2571 * @qp: sc qp
2572 * @info: modify qp info
2573 * @scratch: u64 saved to be used during cqp completion
2574 * @post_sq: flag for cqp db to ring
2575 */
2576static enum i40iw_status_code i40iw_sc_qp_modify(
2577				struct i40iw_sc_qp *qp,
2578				struct i40iw_modify_qp_info *info,
2579				u64 scratch,
2580				bool post_sq)
2581{
2582	u64 *wqe;
2583	struct i40iw_sc_cqp *cqp;
2584	u64 header;
2585	u8 term_actions = 0;
2586	u8 term_len = 0;
2587
2588	cqp = qp->pd->dev->cqp;
2589	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2590	if (!wqe)
2591		return I40IW_ERR_RING_FULL;
2592	if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
2593		if (info->dont_send_fin)
2594			term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
2595		if (info->dont_send_term)
2596			term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
2597		if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
2598		    (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
2599			term_len = info->termlen;
2600	}
2601
2602	set_64bit_val(wqe,
2603		      8,
2604		      LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
2605
2606	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2607	set_64bit_val(wqe, 40, qp->shadow_area_pa);
2608
2609	header = qp->qp_uk.qp_id |
2610		 LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
2611		 LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
2612		 LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
2613		 LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
2614		 LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
2615		 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
2616		 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
2617		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2618		 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
2619		 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
2620		 LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
2621		 LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
2622		 LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
2623		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2624
2625	i40iw_insert_wqe_hdr(wqe, header);
2626
2627	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
2628			wqe, I40IW_CQP_WQE_SIZE * 8);
2629
2630	if (post_sq)
2631		i40iw_sc_cqp_post_sq(cqp);
2632	return 0;
2633}
2634
2635/**
2636 * i40iw_sc_qp_destroy - cqp destroy qp
2637 * @qp: sc qp
2638 * @scratch: u64 saved to be used during cqp completion
2639 * @remove_hash_idx: flag if to remove hash idx
2640 * @ignore_mw_bnd: memory window bind flag
2641 * @post_sq: flag for cqp db to ring
2642 */
2643static enum i40iw_status_code i40iw_sc_qp_destroy(
2644					struct i40iw_sc_qp *qp,
2645					u64 scratch,
2646					bool remove_hash_idx,
2647					bool ignore_mw_bnd,
2648					bool post_sq)
2649{
2650	u64 *wqe;
2651	struct i40iw_sc_cqp *cqp;
2652	u64 header;
2653
2654	i40iw_qp_rem_qos(qp);
2655	cqp = qp->pd->dev->cqp;
2656	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2657	if (!wqe)
2658		return I40IW_ERR_RING_FULL;
2659	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2660	set_64bit_val(wqe, 40, qp->shadow_area_pa);
2661
2662	header = qp->qp_uk.qp_id |
2663		 LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
2664		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2665		 LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
2666		 LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
2667		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2668
2669	i40iw_insert_wqe_hdr(wqe, header);
2670	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
2671			wqe, I40IW_CQP_WQE_SIZE * 8);
2672
2673	if (post_sq)
2674		i40iw_sc_cqp_post_sq(cqp);
2675	return 0;
2676}
2677
2678/**
2679 * i40iw_sc_qp_flush_wqes - flush qp's wqe
2680 * @qp: sc qp
2681 * @info: dlush information
2682 * @scratch: u64 saved to be used during cqp completion
2683 * @post_sq: flag for cqp db to ring
2684 */
2685static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
2686				struct i40iw_sc_qp *qp,
2687				struct i40iw_qp_flush_info *info,
2688				u64 scratch,
2689				bool post_sq)
2690{
2691	u64 temp = 0;
2692	u64 *wqe;
2693	struct i40iw_sc_cqp *cqp;
2694	u64 header;
2695	bool flush_sq = false, flush_rq = false;
2696
2697	if (info->rq && !qp->flush_rq)
2698		flush_rq = true;
2699
2700	if (info->sq && !qp->flush_sq)
2701		flush_sq = true;
2702
2703	qp->flush_sq |= flush_sq;
2704	qp->flush_rq |= flush_rq;
2705	if (!flush_sq && !flush_rq)
2706		return 0;
2707
2708	cqp = qp->pd->dev->cqp;
2709	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2710	if (!wqe)
2711		return I40IW_ERR_RING_FULL;
2712	if (info->userflushcode) {
2713		if (flush_rq) {
2714			temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
2715				LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
2716		}
2717		if (flush_sq) {
2718			temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
2719				LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
2720		}
2721	}
2722	set_64bit_val(wqe, 16, temp);
2723
2724	temp = (info->generate_ae) ?
2725		info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;
2726
2727	set_64bit_val(wqe, 8, temp);
2728
2729	header = qp->qp_uk.qp_id |
2730		 LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
2731		 LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
2732		 LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
2733		 LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
2734		 LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
2735		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2736
2737	i40iw_insert_wqe_hdr(wqe, header);
2738
2739	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
2740			wqe, I40IW_CQP_WQE_SIZE * 8);
2741
2742	if (post_sq)
2743		i40iw_sc_cqp_post_sq(cqp);
2744	return 0;
2745}
2746
2747/**
2748 * i40iw_sc_gen_ae - generate AE, currently uses flush WQE CQP OP
2749 * @qp: sc qp
2750 * @info: gen ae information
2751 * @scratch: u64 saved to be used during cqp completion
2752 * @post_sq: flag for cqp db to ring
2753 */
2754static enum i40iw_status_code i40iw_sc_gen_ae(
2755				struct i40iw_sc_qp *qp,
2756				struct i40iw_gen_ae_info *info,
2757				u64 scratch,
2758				bool post_sq)
2759{
2760	u64 temp;
2761	u64 *wqe;
2762	struct i40iw_sc_cqp *cqp;
2763	u64 header;
2764
2765	cqp = qp->pd->dev->cqp;
2766	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2767	if (!wqe)
2768		return I40IW_ERR_RING_FULL;
2769
2770	temp = info->ae_code |
2771	       LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE);
2772
2773	set_64bit_val(wqe, 8, temp);
2774
2775	header = qp->qp_uk.qp_id |
2776		 LS_64(I40IW_CQP_OP_GEN_AE, I40IW_CQPSQ_OPCODE) |
2777		 LS_64(1, I40IW_CQPSQ_FWQE_GENERATE_AE) |
2778		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2779
2780	i40iw_insert_wqe_hdr(wqe, header);
2781
2782	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "GEN_AE WQE",
2783			wqe, I40IW_CQP_WQE_SIZE * 8);
2784
2785	if (post_sq)
2786		i40iw_sc_cqp_post_sq(cqp);
2787	return 0;
2788}
2789
2790/**
2791 * i40iw_sc_qp_upload_context - upload qp's context
2792 * @dev: sc device struct
2793 * @info: upload context info ptr for return
2794 * @scratch: u64 saved to be used during cqp completion
2795 * @post_sq: flag for cqp db to ring
2796 */
2797static enum i40iw_status_code i40iw_sc_qp_upload_context(
2798					struct i40iw_sc_dev *dev,
2799					struct i40iw_upload_context_info *info,
2800					u64 scratch,
2801					bool post_sq)
2802{
2803	u64 *wqe;
2804	struct i40iw_sc_cqp *cqp;
2805	u64 header;
2806
2807	cqp = dev->cqp;
2808	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2809	if (!wqe)
2810		return I40IW_ERR_RING_FULL;
2811	set_64bit_val(wqe, 16, info->buf_pa);
2812
2813	header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
2814		 LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
2815		 LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
2816		 LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
2817		 LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
2818		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2819
2820	i40iw_insert_wqe_hdr(wqe, header);
2821
2822	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
2823			wqe, I40IW_CQP_WQE_SIZE * 8);
2824
2825	if (post_sq)
2826		i40iw_sc_cqp_post_sq(cqp);
2827	return 0;
2828}
2829
2830/**
2831 * i40iw_sc_qp_setctx - set qp's context
2832 * @qp: sc qp
2833 * @qp_ctx: context ptr
2834 * @info: ctx info
2835 */
2836static enum i40iw_status_code i40iw_sc_qp_setctx(
2837				struct i40iw_sc_qp *qp,
2838				u64 *qp_ctx,
2839				struct i40iw_qp_host_ctx_info *info)
2840{
2841	struct i40iwarp_offload_info *iw;
2842	struct i40iw_tcp_offload_info *tcp;
2843	struct i40iw_sc_vsi *vsi;
2844	struct i40iw_sc_dev *dev;
2845	u64 qw0, qw3, qw7 = 0;
2846
2847	iw = info->iwarp_info;
2848	tcp = info->tcp_info;
2849	vsi = qp->vsi;
2850	dev = qp->dev;
2851	if (info->add_to_qoslist) {
2852		qp->user_pri = info->user_pri;
2853		i40iw_qp_add_qos(qp);
2854		i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
2855			    __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
2856	}
2857	qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
2858	      LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
2859	      LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
2860	      LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
2861	      LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
2862	      LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
2863	      LS_64(info->push_idx, I40IWQPC_PPIDX) |
2864	      LS_64(info->push_mode_en, I40IWQPC_PMENA);
2865
2866	set_64bit_val(qp_ctx, 8, qp->sq_pa);
2867	set_64bit_val(qp_ctx, 16, qp->rq_pa);
2868
2869	qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
2870	      LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
2871	      LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);
2872
2873	set_64bit_val(qp_ctx,
2874		      128,
2875		      LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));
2876
2877	set_64bit_val(qp_ctx,
2878		      136,
2879		      LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
2880		      LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));
2881
2882	set_64bit_val(qp_ctx,
2883		      168,
2884		      LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
2885	set_64bit_val(qp_ctx,
2886		      176,
2887		      LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
2888		      LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
2889		      LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
2890		      LS_64(vsi->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
2891
2892	if (info->iwarp_info_valid) {
2893		qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
2894		       LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
2895
2896		qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
2897		set_64bit_val(qp_ctx,
2898			      144,
2899			      LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) |
2900			      LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX));
2901		set_64bit_val(qp_ctx,
2902			      152,
2903			      LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
2904
2905		set_64bit_val(qp_ctx,
2906			      160,
2907			      LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
2908			      LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
2909			      LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
2910			      LS_64(iw->rd_enable, I40IWQPC_RDOK) |
2911			      LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
2912			      LS_64(iw->bind_en, I40IWQPC_BINDEN) |
2913			      LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
2914			      LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
2915			      LS_64((((vsi->stats_fcn_id_alloc) &&
2916				      (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 1 : 0),
2917				    I40IWQPC_USESTATSINSTANCE) |
2918			      LS_64(1, I40IWQPC_IWARPMODE) |
2919			      LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
2920			      LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
2921			      LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
2922			      LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
2923			      LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
2924	}
2925	if (info->tcp_info_valid) {
2926		qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
2927		       LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
2928		       LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
2929		       LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
2930		       LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
2931		       LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
2932		       LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);
2933
2934		qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
2935		       LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
2936		       LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
2937		       LS_64(tcp->tos, I40IWQPC_TOS) |
2938		       LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
2939		       LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);
2940
2941		qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
2942		set_64bit_val(qp_ctx,
2943			      32,
2944			      LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
2945			      LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));
2946
2947		set_64bit_val(qp_ctx,
2948			      40,
2949			      LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
2950			      LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));
2951
2952		set_64bit_val(qp_ctx,
2953			      48,
2954			      LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
2955				LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
2956				LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));
2957
2958		qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
2959		       LS_64(tcp->wscale, I40IWQPC_WSCALE) |
2960		       LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
2961		       LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
2962		       LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
2963		       LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
2964		       LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);
2965
2966		set_64bit_val(qp_ctx,
2967			      72,
2968			      LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
2969			      LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
2970		set_64bit_val(qp_ctx,
2971			      80,
2972			      LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
2973			      LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));
2974
2975		set_64bit_val(qp_ctx,
2976			      88,
2977			      LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
2978			      LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
2979		set_64bit_val(qp_ctx,
2980			      96,
2981			      LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
2982			      LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
2983		set_64bit_val(qp_ctx,
2984			      104,
2985			      LS_64(tcp->srtt, I40IWQPC_SRTT) |
2986			      LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
2987		set_64bit_val(qp_ctx,
2988			      112,
2989			      LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
2990			      LS_64(tcp->cwnd, I40IWQPC_CWND));
2991		set_64bit_val(qp_ctx,
2992			      120,
2993			      LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
2994			      LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
2995		set_64bit_val(qp_ctx,
2996			      128,
2997			      LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
2998			      LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
2999		set_64bit_val(qp_ctx,
3000			      184,
3001			      LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
3002			      LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
3003		set_64bit_val(qp_ctx,
3004			      192,
3005			      LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
3006			      LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
3007	}
3008
3009	set_64bit_val(qp_ctx, 0, qw0);
3010	set_64bit_val(qp_ctx, 24, qw3);
3011	set_64bit_val(qp_ctx, 56, qw7);
3012
3013	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST)CTX WQE",
3014			qp_ctx, I40IW_QP_CTX_SIZE);
3015	return 0;
3016}
3017
3018/**
3019 * i40iw_sc_alloc_stag - mr stag alloc
3020 * @dev: sc device struct
3021 * @info: stag info
3022 * @scratch: u64 saved to be used during cqp completion
3023 * @post_sq: flag for cqp db to ring
3024 */
3025static enum i40iw_status_code i40iw_sc_alloc_stag(
3026				struct i40iw_sc_dev *dev,
3027				struct i40iw_allocate_stag_info *info,
3028				u64 scratch,
3029				bool post_sq)
3030{
3031	u64 *wqe;
3032	struct i40iw_sc_cqp *cqp;
3033	u64 header;
3034	enum i40iw_page_size page_size;
3035
3036	if (!info->total_len && !info->all_memory)
3037		return -EINVAL;
3038
3039	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
3040	cqp = dev->cqp;
3041	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3042	if (!wqe)
3043		return I40IW_ERR_RING_FULL;
3044	set_64bit_val(wqe,
3045		      8,
3046		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
3047		      LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
3048	set_64bit_val(wqe,
3049		      16,
3050		      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
3051	set_64bit_val(wqe,
3052		      40,
3053		      LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));
3054
3055	header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
3056		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
3057		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
3058		 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
3059		 LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
3060		 LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
3061		 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
3062		 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
3063		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3064
3065	i40iw_insert_wqe_hdr(wqe, header);
3066
3067	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
3068			wqe, I40IW_CQP_WQE_SIZE * 8);
3069
3070	if (post_sq)
3071		i40iw_sc_cqp_post_sq(cqp);
3072	return 0;
3073}
3074
3075/**
3076 * i40iw_sc_mr_reg_non_shared - non-shared mr registration
3077 * @dev: sc device struct
3078 * @info: mr info
3079 * @scratch: u64 saved to be used during cqp completion
3080 * @post_sq: flag for cqp db to ring
3081 */
3082static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
3083				struct i40iw_sc_dev *dev,
3084				struct i40iw_reg_ns_stag_info *info,
3085				u64 scratch,
3086				bool post_sq)
3087{
3088	u64 *wqe;
3089	u64 temp;
3090	struct i40iw_sc_cqp *cqp;
3091	u64 header;
3092	u32 pble_obj_cnt;
3093	bool remote_access;
3094	u8 addr_type;
3095	enum i40iw_page_size page_size;
3096
3097	if (!info->total_len && !info->all_memory)
3098		return -EINVAL;
3099
3100	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
3101	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
3102				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
3103		remote_access = true;
3104	else
3105		remote_access = false;
3106
3107	pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
3108
3109	if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
3110		return I40IW_ERR_INVALID_PBLE_INDEX;
3111
3112	cqp = dev->cqp;
3113	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3114	if (!wqe)
3115		return I40IW_ERR_RING_FULL;
3116
3117	temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
3118	set_64bit_val(wqe, 0, temp);
3119
3120	set_64bit_val(wqe,
3121		      8,
3122		      LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
3123		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
3124
3125	set_64bit_val(wqe,
3126		      16,
3127		      LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
3128		      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
3129	if (!info->chunk_size) {
3130		set_64bit_val(wqe, 32, info->reg_addr_pa);
3131		set_64bit_val(wqe, 48, 0);
3132	} else {
3133		set_64bit_val(wqe, 32, 0);
3134		set_64bit_val(wqe, 48, info->first_pm_pbl_index);
3135	}
3136	set_64bit_val(wqe, 40, info->hmc_fcn_index);
3137	set_64bit_val(wqe, 56, 0);
3138
3139	addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
3140	header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
3141		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
3142		 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
3143		 LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
3144		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
3145		 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
3146		 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
3147		 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
3148		 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
3149		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3150
3151	i40iw_insert_wqe_hdr(wqe, header);
3152
3153	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
3154			wqe, I40IW_CQP_WQE_SIZE * 8);
3155
3156	if (post_sq)
3157		i40iw_sc_cqp_post_sq(cqp);
3158	return 0;
3159}
3160
3161/**
3162 * i40iw_sc_mr_reg_shared - registered shared memory region
3163 * @dev: sc device struct
3164 * @info: info for shared memory registeration
3165 * @scratch: u64 saved to be used during cqp completion
3166 * @post_sq: flag for cqp db to ring
3167 */
3168static enum i40iw_status_code i40iw_sc_mr_reg_shared(
3169					struct i40iw_sc_dev *dev,
3170					struct i40iw_register_shared_stag *info,
3171					u64 scratch,
3172					bool post_sq)
3173{
3174	u64 *wqe;
3175	struct i40iw_sc_cqp *cqp;
3176	u64 temp, va64, fbo, header;
3177	u32 va32;
3178	bool remote_access;
3179	u8 addr_type;
3180
3181	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
3182				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
3183		remote_access = true;
3184	else
3185		remote_access = false;
3186	cqp = dev->cqp;
3187	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3188	if (!wqe)
3189		return I40IW_ERR_RING_FULL;
3190	va64 = (uintptr_t)(info->va);
3191	va32 = (u32)(va64 & 0x00000000FFFFFFFF);
3192	fbo = (u64)(va32 & (4096 - 1));
3193
3194	set_64bit_val(wqe,
3195		      0,
3196		      (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));
3197
3198	set_64bit_val(wqe,
3199		      8,
3200		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
3201	temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
3202	       LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
3203	       LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
3204	set_64bit_val(wqe, 16, temp);
3205
3206	addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
3207	header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
3208		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
3209		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
3210		 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
3211		 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
3212		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3213
3214	i40iw_insert_wqe_hdr(wqe, header);
3215
3216	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
3217			wqe, I40IW_CQP_WQE_SIZE * 8);
3218
3219	if (post_sq)
3220		i40iw_sc_cqp_post_sq(cqp);
3221	return 0;
3222}
3223
3224/**
3225 * i40iw_sc_dealloc_stag - deallocate stag
3226 * @dev: sc device struct
3227 * @info: dealloc stag info
3228 * @scratch: u64 saved to be used during cqp completion
3229 * @post_sq: flag for cqp db to ring
3230 */
3231static enum i40iw_status_code i40iw_sc_dealloc_stag(
3232					struct i40iw_sc_dev *dev,
3233					struct i40iw_dealloc_stag_info *info,
3234					u64 scratch,
3235					bool post_sq)
3236{
3237	u64 header;
3238	u64 *wqe;
3239	struct i40iw_sc_cqp *cqp;
3240
3241	cqp = dev->cqp;
3242	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3243	if (!wqe)
3244		return I40IW_ERR_RING_FULL;
3245	set_64bit_val(wqe,
3246		      8,
3247		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
3248	set_64bit_val(wqe,
3249		      16,
3250		      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
3251
3252	header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
3253		 LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
3254		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3255
3256	i40iw_insert_wqe_hdr(wqe, header);
3257
3258	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
3259			wqe, I40IW_CQP_WQE_SIZE * 8);
3260
3261	if (post_sq)
3262		i40iw_sc_cqp_post_sq(cqp);
3263	return 0;
3264}
3265
3266/**
3267 * i40iw_sc_query_stag - query hardware for stag
3268 * @dev: sc device struct
3269 * @scratch: u64 saved to be used during cqp completion
3270 * @stag_index: stag index for query
3271 * @post_sq: flag for cqp db to ring
3272 */
3273static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
3274						  u64 scratch,
3275						  u32 stag_index,
3276						  bool post_sq)
3277{
3278	u64 header;
3279	u64 *wqe;
3280	struct i40iw_sc_cqp *cqp;
3281
3282	cqp = dev->cqp;
3283	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3284	if (!wqe)
3285		return I40IW_ERR_RING_FULL;
3286	set_64bit_val(wqe,
3287		      16,
3288		      LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));
3289
3290	header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
3291		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3292
3293	i40iw_insert_wqe_hdr(wqe, header);
3294
3295	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
3296			wqe, I40IW_CQP_WQE_SIZE * 8);
3297
3298	if (post_sq)
3299		i40iw_sc_cqp_post_sq(cqp);
3300	return 0;
3301}
3302
3303/**
3304 * i40iw_sc_mw_alloc - mw allocate
3305 * @dev: sc device struct
3306 * @scratch: u64 saved to be used during cqp completion
3307 * @mw_stag_index:stag index
3308 * @pd_id: pd is for this mw
3309 * @post_sq: flag for cqp db to ring
3310 */
3311static enum i40iw_status_code i40iw_sc_mw_alloc(
3312					struct i40iw_sc_dev *dev,
3313					u64 scratch,
3314					u32 mw_stag_index,
3315					u16 pd_id,
3316					bool post_sq)
3317{
3318	u64 header;
3319	struct i40iw_sc_cqp *cqp;
3320	u64 *wqe;
3321
3322	cqp = dev->cqp;
3323	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3324	if (!wqe)
3325		return I40IW_ERR_RING_FULL;
3326	set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
3327	set_64bit_val(wqe,
3328		      16,
3329		      LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));
3330
3331	header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
3332		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3333
3334	i40iw_insert_wqe_hdr(wqe, header);
3335
3336	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
3337			wqe, I40IW_CQP_WQE_SIZE * 8);
3338
3339	if (post_sq)
3340		i40iw_sc_cqp_post_sq(cqp);
3341	return 0;
3342}
3343
3344/**
3345 * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
3346 * @qp: sc qp struct
3347 * @info: fast mr info
3348 * @post_sq: flag for cqp db to ring
3349 */
3350enum i40iw_status_code i40iw_sc_mr_fast_register(
3351				struct i40iw_sc_qp *qp,
3352				struct i40iw_fast_reg_stag_info *info,
3353				bool post_sq)
3354{
3355	u64 temp, header;
3356	u64 *wqe;
3357	u32 wqe_idx;
3358	enum i40iw_page_size page_size;
3359
3360	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
3361	wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
3362					 0, info->wr_id);
3363	if (!wqe)
3364		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
3365
3366	i40iw_debug(qp->dev, I40IW_DEBUG_MR, "%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
3367		    __func__, info->wr_id, wqe_idx,
3368		    &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
3369	temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
3370	set_64bit_val(wqe, 0, temp);
3371
3372	temp = RS_64(info->first_pm_pbl_index >> 16, I40IWQPSQ_FIRSTPMPBLIDXHI);
3373	set_64bit_val(wqe,
3374		      8,
3375		      LS_64(temp, I40IWQPSQ_FIRSTPMPBLIDXHI) |
3376		      LS_64(info->reg_addr_pa >> I40IWQPSQ_PBLADDR_SHIFT, I40IWQPSQ_PBLADDR));
3377
3378	set_64bit_val(wqe,
3379		      16,
3380		      info->total_len |
3381		      LS_64(info->first_pm_pbl_index, I40IWQPSQ_FIRSTPMPBLIDXLO));
3382
3383	header = LS_64(info->stag_key, I40IWQPSQ_STAGKEY) |
3384		 LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
3385		 LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
3386		 LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
3387		 LS_64(page_size, I40IWQPSQ_HPAGESIZE) |
3388		 LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
3389		 LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
3390		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
3391		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
3392		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
3393		 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3394
3395	i40iw_insert_wqe_hdr(wqe, header);
3396
3397	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "FAST_REG WQE",
3398			wqe, I40IW_QP_WQE_MIN_SIZE);
3399
3400	if (post_sq)
3401		i40iw_qp_post_wr(&qp->qp_uk);
3402	return 0;
3403}
3404
3405/**
3406 * i40iw_sc_send_lsmm - send last streaming mode message
3407 * @qp: sc qp struct
3408 * @lsmm_buf: buffer with lsmm message
3409 * @size: size of lsmm buffer
3410 * @stag: stag of lsmm buffer
3411 */
3412static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
3413			       void *lsmm_buf,
3414			       u32 size,
3415			       i40iw_stag stag)
3416{
3417	u64 *wqe;
3418	u64 header;
3419	struct i40iw_qp_uk *qp_uk;
3420
3421	qp_uk = &qp->qp_uk;
3422	wqe = qp_uk->sq_base->elem;
3423
3424	set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3425
3426	set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));
3427
3428	set_64bit_val(wqe, 16, 0);
3429
3430	header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3431		 LS_64(1, I40IWQPSQ_STREAMMODE) |
3432		 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3433		 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3434
3435	i40iw_insert_wqe_hdr(wqe, header);
3436
3437	i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
3438			wqe, I40IW_QP_WQE_MIN_SIZE);
3439}
3440
3441/**
3442 * i40iw_sc_send_lsmm_nostag - for privilege qp
3443 * @qp: sc qp struct
3444 * @lsmm_buf: buffer with lsmm message
3445 * @size: size of lsmm buffer
3446 */
3447static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
3448				      void *lsmm_buf,
3449				      u32 size)
3450{
3451	u64 *wqe;
3452	u64 header;
3453	struct i40iw_qp_uk *qp_uk;
3454
3455	qp_uk = &qp->qp_uk;
3456	wqe = qp_uk->sq_base->elem;
3457
3458	set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3459
3460	set_64bit_val(wqe, 8, size);
3461
3462	set_64bit_val(wqe, 16, 0);
3463
3464	header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3465		 LS_64(1, I40IWQPSQ_STREAMMODE) |
3466		 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3467		 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3468
3469	i40iw_insert_wqe_hdr(wqe, header);
3470
3471	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
3472			wqe, I40IW_QP_WQE_MIN_SIZE);
3473}
3474
3475/**
3476 * i40iw_sc_send_rtt - send last read0 or write0
3477 * @qp: sc qp struct
3478 * @read: Do read0 or write0
3479 */
3480static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
3481{
3482	u64 *wqe;
3483	u64 header;
3484	struct i40iw_qp_uk *qp_uk;
3485
3486	qp_uk = &qp->qp_uk;
3487	wqe = qp_uk->sq_base->elem;
3488
3489	set_64bit_val(wqe, 0, 0);
3490	set_64bit_val(wqe, 8, 0);
3491	set_64bit_val(wqe, 16, 0);
3492	if (read) {
3493		header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
3494			 LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
3495			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3496		set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
3497	} else {
3498		header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
3499			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3500	}
3501
3502	i40iw_insert_wqe_hdr(wqe, header);
3503
3504	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
3505			wqe, I40IW_QP_WQE_MIN_SIZE);
3506}
3507
3508/**
3509 * i40iw_sc_post_wqe0 - send wqe with opcode
3510 * @qp: sc qp struct
3511 * @opcode: opcode to use for wqe0
3512 */
3513static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
3514{
3515	u64 *wqe;
3516	u64 header;
3517	struct i40iw_qp_uk *qp_uk;
3518
3519	qp_uk = &qp->qp_uk;
3520	wqe = qp_uk->sq_base->elem;
3521
3522	if (!wqe)
3523		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
3524	switch (opcode) {
3525	case I40IWQP_OP_NOP:
3526		set_64bit_val(wqe, 0, 0);
3527		set_64bit_val(wqe, 8, 0);
3528		set_64bit_val(wqe, 16, 0);
3529		header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
3530			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3531
3532		i40iw_insert_wqe_hdr(wqe, header);
3533		break;
3534	case I40IWQP_OP_RDMA_SEND:
3535		set_64bit_val(wqe, 0, 0);
3536		set_64bit_val(wqe, 8, 0);
3537		set_64bit_val(wqe, 16, 0);
3538		header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3539			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
3540			 LS_64(1, I40IWQPSQ_STREAMMODE) |
3541			 LS_64(1, I40IWQPSQ_WAITFORRCVPDU);
3542
3543		i40iw_insert_wqe_hdr(wqe, header);
3544		break;
3545	default:
3546		i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
3547			    __func__);
3548		break;
3549	}
3550	return 0;
3551}
3552
3553/**
3554 * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
3555 * @dev : ptr to i40iw_dev struct
3556 * @hmc_fn_id: hmc function id
3557 */
3558enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
3559{
3560	struct i40iw_hmc_info *hmc_info;
3561	struct i40iw_dma_mem query_fpm_mem;
3562	struct i40iw_virt_mem virt_mem;
3563	struct i40iw_vfdev *vf_dev = NULL;
3564	u32 mem_size;
3565	enum i40iw_status_code ret_code = 0;
3566	bool poll_registers = true;
3567	u16 iw_vf_idx;
3568	u8 wait_type;
3569
3570	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
3571	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
3572		return I40IW_ERR_INVALID_HMCFN_ID;
3573
3574	i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
3575		    dev->hmc_fn_id);
3576	if (hmc_fn_id == dev->hmc_fn_id) {
3577		hmc_info = dev->hmc_info;
3578		query_fpm_mem.pa = dev->fpm_query_buf_pa;
3579		query_fpm_mem.va = dev->fpm_query_buf;
3580	} else {
3581		vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
3582		if (!vf_dev)
3583			return I40IW_ERR_INVALID_VF_ID;
3584
3585		hmc_info = &vf_dev->hmc_info;
3586		iw_vf_idx = vf_dev->iw_vf_idx;
3587		i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
3588			    hmc_info, hmc_info->hmc_obj);
3589		if (!vf_dev->fpm_query_buf) {
3590			if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
3591				ret_code = i40iw_alloc_query_fpm_buf(dev,
3592								     &dev->vf_fpm_query_buf[iw_vf_idx]);
3593				if (ret_code)
3594					return ret_code;
3595			}
3596			vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
3597			vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
3598		}
3599		query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
3600		query_fpm_mem.va = vf_dev->fpm_query_buf;
3601		/**
3602		 * It is HARDWARE specific:
3603		 * this call is done by PF for VF and
3604		 * i40iw_sc_query_fpm_values needs ccq poll
3605		 * because PF ccq is already created.
3606		 */
3607		poll_registers = false;
3608	}
3609
3610	hmc_info->hmc_fn_id = hmc_fn_id;
3611
3612	if (hmc_fn_id != dev->hmc_fn_id) {
3613		ret_code =
3614			i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
3615	} else {
3616		wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
3617			    (u8)I40IW_CQP_WAIT_POLL_CQ;
3618
3619		ret_code = i40iw_sc_query_fpm_values(
3620					dev->cqp,
3621					0,
3622					hmc_info->hmc_fn_id,
3623					&query_fpm_mem,
3624					true,
3625					wait_type);
3626	}
3627	if (ret_code)
3628		return ret_code;
3629
3630	/* parse the fpm_query_buf and fill hmc obj info */
3631	ret_code =
3632		i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
3633					     hmc_info,
3634					     &dev->hmc_fpm_misc);
3635	if (ret_code)
3636		return ret_code;
3637	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
3638			query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);
3639
3640	if (hmc_fn_id != dev->hmc_fn_id) {
3641		i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
3642
3643		/* parse the fpm_commit_buf and fill hmc obj info */
3644		i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt);
3645		mem_size = sizeof(struct i40iw_hmc_sd_entry) *
3646			   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
3647		ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
3648		if (ret_code)
3649			return ret_code;
3650		hmc_info->sd_table.sd_entry = virt_mem.va;
3651	}
3652
3653	return ret_code;
3654}
3655
3656/**
3657 * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
3658 * populates fpm base address in hmc_info
3659 * @dev : ptr to i40iw_dev struct
3660 * @hmc_fn_id: hmc function id
3661 */
3662static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
3663							u8 hmc_fn_id)
3664{
3665	struct i40iw_hmc_info *hmc_info;
3666	struct i40iw_hmc_obj_info *obj_info;
3667	u64 *buf;
3668	struct i40iw_dma_mem commit_fpm_mem;
3669	u32 i, j;
3670	enum i40iw_status_code ret_code = 0;
3671	bool poll_registers = true;
3672	u8 wait_type;
3673
3674	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
3675	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
3676		return I40IW_ERR_INVALID_HMCFN_ID;
3677
3678	if (hmc_fn_id == dev->hmc_fn_id) {
3679		hmc_info = dev->hmc_info;
3680	} else {
3681		hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
3682		poll_registers = false;
3683	}
3684	if (!hmc_info)
3685		return I40IW_ERR_BAD_PTR;
3686
3687	obj_info = hmc_info->hmc_obj;
3688	buf = dev->fpm_commit_buf;
3689
3690	/* copy cnt values in commit buf */
3691	for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
3692	     i++, j += 8)
3693		set_64bit_val(buf, j, (u64)obj_info[i].cnt);
3694
3695	set_64bit_val(buf, 40, 0);   /* APBVT rsvd */
3696
3697	commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
3698	commit_fpm_mem.va = dev->fpm_commit_buf;
3699	wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
3700			(u8)I40IW_CQP_WAIT_POLL_CQ;
3701	ret_code = i40iw_sc_commit_fpm_values(
3702					dev->cqp,
3703					0,
3704					hmc_info->hmc_fn_id,
3705					&commit_fpm_mem,
3706					true,
3707					wait_type);
3708
3709	/* parse the fpm_commit_buf and fill hmc obj info */
3710	if (!ret_code)
3711		ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf,
3712							 hmc_info->hmc_obj,
3713							 &hmc_info->sd_table.sd_cnt);
3714
3715	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
3716			commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);
3717
3718	return ret_code;
3719}
3720
3721/**
3722 * cqp_sds_wqe_fill - fill cqp wqe doe sd
3723 * @cqp: struct for cqp hw
3724 * @info; sd info for wqe
3725 * @scratch: u64 saved to be used during cqp completion
3726 */
3727static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
3728					       struct i40iw_update_sds_info *info,
3729					       u64 scratch)
3730{
3731	u64 data;
3732	u64 header;
3733	u64 *wqe;
3734	int mem_entries, wqe_entries;
3735	struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
3736	u64 offset;
3737	u32 wqe_idx;
3738
3739	wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
3740	if (!wqe)
3741		return I40IW_ERR_RING_FULL;
3742
3743	I40IW_CQP_INIT_WQE(wqe);
3744	wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
3745	mem_entries = info->cnt - wqe_entries;
3746
3747	header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
3748		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
3749		 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
3750
3751	if (mem_entries) {
3752		offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
3753		memcpy((char *)sdbuf->va + offset, &info->entry[3],
3754		       mem_entries << 4);
3755		data = (u64)sdbuf->pa + offset;
3756	} else {
3757		data = 0;
3758	}
3759	data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);
3760
3761	set_64bit_val(wqe, 16, data);
3762
3763	switch (wqe_entries) {
3764	case 3:
3765		set_64bit_val(wqe, 48,
3766			      (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
3767					LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
3768
3769		set_64bit_val(wqe, 56, info->entry[2].data);
3770		fallthrough;
3771	case 2:
3772		set_64bit_val(wqe, 32,
3773			      (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
3774					LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
3775
3776		set_64bit_val(wqe, 40, info->entry[1].data);
3777		fallthrough;
3778	case 1:
3779		set_64bit_val(wqe, 0,
3780			      LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));
3781
3782		set_64bit_val(wqe, 8, info->entry[0].data);
3783		break;
3784	default:
3785		break;
3786	}
3787
3788	i40iw_insert_wqe_hdr(wqe, header);
3789
3790	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
3791			wqe, I40IW_CQP_WQE_SIZE * 8);
3792	return 0;
3793}
3794
3795/**
3796 * i40iw_update_pe_sds - cqp wqe for sd
3797 * @dev: ptr to i40iw_dev struct
3798 * @info: sd info for sd's
3799 * @scratch: u64 saved to be used during cqp completion
3800 */
3801static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
3802						  struct i40iw_update_sds_info *info,
3803						  u64 scratch)
3804{
3805	struct i40iw_sc_cqp *cqp = dev->cqp;
3806	enum i40iw_status_code ret_code;
3807
3808	ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
3809	if (!ret_code)
3810		i40iw_sc_cqp_post_sq(cqp);
3811
3812	return ret_code;
3813}
3814
3815/**
3816 * i40iw_update_sds_noccq - update sd before ccq created
3817 * @dev: sc device struct
3818 * @info: sd info for sd's
3819 */
3820enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
3821					      struct i40iw_update_sds_info *info)
3822{
3823	u32 error, val, tail;
3824	struct i40iw_sc_cqp *cqp = dev->cqp;
3825	enum i40iw_status_code ret_code;
3826
3827	ret_code = cqp_sds_wqe_fill(cqp, info, 0);
3828	if (ret_code)
3829		return ret_code;
3830	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3831	if (error)
3832		return I40IW_ERR_CQP_COMPL_ERROR;
3833
3834	i40iw_sc_cqp_post_sq(cqp);
3835	ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
3836
3837	return ret_code;
3838}
3839
3840/**
3841 * i40iw_sc_suspend_qp - suspend qp for param change
3842 * @cqp: struct for cqp hw
3843 * @qp: sc qp struct
3844 * @scratch: u64 saved to be used during cqp completion
3845 */
3846enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
3847					   struct i40iw_sc_qp *qp,
3848					   u64 scratch)
3849{
3850	u64 header;
3851	u64 *wqe;
3852
3853	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3854	if (!wqe)
3855		return I40IW_ERR_RING_FULL;
3856	header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
3857		 LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
3858		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3859
3860	i40iw_insert_wqe_hdr(wqe, header);
3861
3862	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
3863			wqe, I40IW_CQP_WQE_SIZE * 8);
3864
3865	i40iw_sc_cqp_post_sq(cqp);
3866	return 0;
3867}
3868
3869/**
3870 * i40iw_sc_resume_qp - resume qp after suspend
3871 * @cqp: struct for cqp hw
3872 * @qp: sc qp struct
3873 * @scratch: u64 saved to be used during cqp completion
3874 */
3875enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
3876					  struct i40iw_sc_qp *qp,
3877					  u64 scratch)
3878{
3879	u64 header;
3880	u64 *wqe;
3881
3882	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3883	if (!wqe)
3884		return I40IW_ERR_RING_FULL;
3885	set_64bit_val(wqe,
3886		      16,
3887			LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));
3888
3889	header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
3890		 LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
3891		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3892
3893	i40iw_insert_wqe_hdr(wqe, header);
3894
3895	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
3896			wqe, I40IW_CQP_WQE_SIZE * 8);
3897
3898	i40iw_sc_cqp_post_sq(cqp);
3899	return 0;
3900}
3901
3902/**
3903 * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
3904 * @cqp: struct for cqp hw
3905 * @scratch: u64 saved to be used during cqp completion
3906 * @hmc_fn_id: hmc function id
3907 * @post_sq: flag for cqp db to ring
3908 * @poll_registers: flag to poll register for cqp completion
3909 */
3910enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
3911					struct i40iw_sc_cqp *cqp,
3912					u64 scratch,
3913					u8 hmc_fn_id,
3914					bool post_sq,
3915					bool poll_registers)
3916{
3917	u64 header;
3918	u64 *wqe;
3919	u32 tail, val, error;
3920	enum i40iw_status_code ret_code = 0;
3921
3922	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3923	if (!wqe)
3924		return I40IW_ERR_RING_FULL;
3925	set_64bit_val(wqe,
3926		      16,
3927		      LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));
3928
3929	header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
3930		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3931
3932	i40iw_insert_wqe_hdr(wqe, header);
3933
3934	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
3935			wqe, I40IW_CQP_WQE_SIZE * 8);
3936	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3937	if (error) {
3938		ret_code = I40IW_ERR_CQP_COMPL_ERROR;
3939		return ret_code;
3940	}
3941	if (post_sq) {
3942		i40iw_sc_cqp_post_sq(cqp);
3943		if (poll_registers)
3944			/* check for cqp sq tail update */
3945			ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
3946		else
3947			ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
3948								 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
3949								 NULL);
3950	}
3951
3952	return ret_code;
3953}
3954
3955/**
3956 * i40iw_ring_full - check if cqp ring is full
3957 * @cqp: struct for cqp hw
3958 */
3959static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
3960{
3961	return I40IW_RING_FULL_ERR(cqp->sq_ring);
3962}
3963
3964/**
3965 * i40iw_est_sd - returns approximate number of SDs for HMC
3966 * @dev: sc device struct
3967 * @hmc_info: hmc structure, size and count for HMC objects
3968 */
3969static u64 i40iw_est_sd(struct i40iw_sc_dev *dev, struct i40iw_hmc_info *hmc_info)
3970{
3971	int i;
3972	u64 size = 0;
3973	u64 sd;
3974
3975	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_PBLE; i++)
3976		size += hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size;
3977
3978	if (dev->is_pf)
3979		size += hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3980
3981	if (size & 0x1FFFFF)
3982		sd = (size >> 21) + 1; /* add 1 for remainder */
3983	else
3984		sd = size >> 21;
3985
3986	if (!dev->is_pf) {
3987		/* 2MB alignment for VF PBLE HMC */
3988		size = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3989		if (size & 0x1FFFFF)
3990			sd += (size >> 21) + 1; /* add 1 for remainder */
3991		else
3992			sd += size >> 21;
3993	}
3994
3995	return sd;
3996}
3997
3998/**
3999 * i40iw_config_fpm_values - configure HMC objects
4000 * @dev: sc device struct
4001 * @qp_count: desired qp count
4002 */
4003enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
4004{
4005	struct i40iw_virt_mem virt_mem;
4006	u32 i, mem_size;
4007	u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
4008	u64 sd_needed;
4009	u32 loop_count = 0;
4010
4011	struct i40iw_hmc_info *hmc_info;
4012	struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
4013	enum i40iw_status_code ret_code = 0;
4014
4015	hmc_info = dev->hmc_info;
4016	hmc_fpm_misc = &dev->hmc_fpm_misc;
4017
4018	ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
4019	if (ret_code) {
4020		i40iw_debug(dev, I40IW_DEBUG_HMC,
4021			    "i40iw_sc_init_iw_hmc returned error_code = %d\n",
4022			    ret_code);
4023		return ret_code;
4024	}
4025
4026	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
4027		hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
4028	sd_needed = i40iw_est_sd(dev, hmc_info);
4029	i40iw_debug(dev, I40IW_DEBUG_HMC,
4030		    "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
4031		    __func__, sd_needed, hmc_info->first_sd_index);
4032	i40iw_debug(dev, I40IW_DEBUG_HMC,
4033		    "%s: sd count %d where max sd is %d\n",
4034		    __func__, hmc_info->sd_table.sd_cnt,
4035		    hmc_fpm_misc->max_sds);
4036
4037	qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
4038	qpwantedoriginal = qpwanted;
4039	mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
4040	pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;
4041
4042	i40iw_debug(dev, I40IW_DEBUG_HMC,
4043		    "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
4044		    qp_count, hmc_fpm_misc->max_sds,
4045		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
4046		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
4047		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
4048		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);
4049
4050	do {
4051		++loop_count;
4052		hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
4053		hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
4054			min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
4055		hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
4056		hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
4057					qpwanted * hmc_fpm_misc->ht_multiplier;
4058		hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
4059			hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
4060		hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
4061		hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
4062
4063		hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt =
4064			roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted);
4065		hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt =
4066			roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted);
4067		hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
4068			hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
4069		hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
4070			hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
4071		hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
4072			((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
4073		hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
4074		hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
4075		hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;
4076
4077		/* How much memory is needed for all the objects. */
4078		sd_needed = i40iw_est_sd(dev, hmc_info);
4079		if ((loop_count > 1000) ||
4080		    ((!(loop_count % 10)) &&
4081		    (qpwanted > qpwantedoriginal * 2 / 3))) {
4082			if (qpwanted > FPM_MULTIPLIER)
4083				qpwanted = roundup_pow_of_two(qpwanted -
4084							      FPM_MULTIPLIER);
4085			qpwanted >>= 1;
4086		}
4087		if (mrwanted > FPM_MULTIPLIER * 10)
4088			mrwanted -= FPM_MULTIPLIER * 10;
4089		if (pblewanted > FPM_MULTIPLIER * 1000)
4090			pblewanted -= FPM_MULTIPLIER * 1000;
4091	} while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
4092
4093	i40iw_debug(dev, I40IW_DEBUG_HMC,
4094		    "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
4095		    loop_count, sd_needed,
4096		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
4097		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
4098		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
4099		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);
4100
4101	ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
4102	if (ret_code) {
4103		i40iw_debug(dev, I40IW_DEBUG_HMC,
4104			    "configure_iw_fpm returned error_code[x%08X]\n",
4105			    i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
4106		return ret_code;
4107	}
4108
4109	mem_size = sizeof(struct i40iw_hmc_sd_entry) *
4110		   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
4111	ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
4112	if (ret_code) {
4113		i40iw_debug(dev, I40IW_DEBUG_HMC,
4114			    "%s: failed to allocate memory for sd_entry buffer\n",
4115			    __func__);
4116		return ret_code;
4117	}
4118	hmc_info->sd_table.sd_entry = virt_mem.va;
4119
4120	return ret_code;
4121}
4122
4123/**
4124 * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
4125 * @dev: rdma device
4126 * @pcmdinfo: cqp command info
4127 */
4128static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
4129						 struct cqp_commands_info *pcmdinfo)
4130{
4131	enum i40iw_status_code status;
4132	struct i40iw_dma_mem values_mem;
4133
4134	dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
4135	switch (pcmdinfo->cqp_cmd) {
4136	case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
4137		status = i40iw_sc_del_local_mac_ipaddr_entry(
4138				pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
4139				pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
4140				pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
4141				pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
4142				pcmdinfo->post_sq);
4143		break;
4144	case OP_CEQ_DESTROY:
4145		status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
4146					      pcmdinfo->in.u.ceq_destroy.scratch,
4147					      pcmdinfo->post_sq);
4148		break;
4149	case OP_AEQ_DESTROY:
4150		status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
4151					      pcmdinfo->in.u.aeq_destroy.scratch,
4152					      pcmdinfo->post_sq);
4153
4154		break;
4155	case OP_DELETE_ARP_CACHE_ENTRY:
4156		status = i40iw_sc_del_arp_cache_entry(
4157				pcmdinfo->in.u.del_arp_cache_entry.cqp,
4158				pcmdinfo->in.u.del_arp_cache_entry.scratch,
4159				pcmdinfo->in.u.del_arp_cache_entry.arp_index,
4160				pcmdinfo->post_sq);
4161		break;
4162	case OP_MANAGE_APBVT_ENTRY:
4163		status = i40iw_sc_manage_apbvt_entry(
4164				pcmdinfo->in.u.manage_apbvt_entry.cqp,
4165				&pcmdinfo->in.u.manage_apbvt_entry.info,
4166				pcmdinfo->in.u.manage_apbvt_entry.scratch,
4167				pcmdinfo->post_sq);
4168		break;
4169	case OP_CEQ_CREATE:
4170		status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
4171					     pcmdinfo->in.u.ceq_create.scratch,
4172					     pcmdinfo->post_sq);
4173		break;
4174	case OP_AEQ_CREATE:
4175		status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
4176					     pcmdinfo->in.u.aeq_create.scratch,
4177					     pcmdinfo->post_sq);
4178		break;
4179	case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
4180		status = i40iw_sc_alloc_local_mac_ipaddr_entry(
4181				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
4182				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
4183				pcmdinfo->post_sq);
4184		break;
4185	case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
4186		status = i40iw_sc_add_local_mac_ipaddr_entry(
4187				pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
4188				&pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
4189				pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
4190				pcmdinfo->post_sq);
4191		break;
4192	case OP_MANAGE_QHASH_TABLE_ENTRY:
4193		status = i40iw_sc_manage_qhash_table_entry(
4194				pcmdinfo->in.u.manage_qhash_table_entry.cqp,
4195				&pcmdinfo->in.u.manage_qhash_table_entry.info,
4196				pcmdinfo->in.u.manage_qhash_table_entry.scratch,
4197				pcmdinfo->post_sq);
4198
4199		break;
4200	case OP_QP_MODIFY:
4201		status = i40iw_sc_qp_modify(
4202				pcmdinfo->in.u.qp_modify.qp,
4203				&pcmdinfo->in.u.qp_modify.info,
4204				pcmdinfo->in.u.qp_modify.scratch,
4205				pcmdinfo->post_sq);
4206
4207		break;
4208	case OP_QP_UPLOAD_CONTEXT:
4209		status = i40iw_sc_qp_upload_context(
4210				pcmdinfo->in.u.qp_upload_context.dev,
4211				&pcmdinfo->in.u.qp_upload_context.info,
4212				pcmdinfo->in.u.qp_upload_context.scratch,
4213				pcmdinfo->post_sq);
4214
4215		break;
4216	case OP_CQ_CREATE:
4217		status = i40iw_sc_cq_create(
4218				pcmdinfo->in.u.cq_create.cq,
4219				pcmdinfo->in.u.cq_create.scratch,
4220				pcmdinfo->in.u.cq_create.check_overflow,
4221				pcmdinfo->post_sq);
4222		break;
4223	case OP_CQ_DESTROY:
4224		status = i40iw_sc_cq_destroy(
4225				pcmdinfo->in.u.cq_destroy.cq,
4226				pcmdinfo->in.u.cq_destroy.scratch,
4227				pcmdinfo->post_sq);
4228
4229		break;
4230	case OP_QP_CREATE:
4231		status = i40iw_sc_qp_create(
4232				pcmdinfo->in.u.qp_create.qp,
4233				&pcmdinfo->in.u.qp_create.info,
4234				pcmdinfo->in.u.qp_create.scratch,
4235				pcmdinfo->post_sq);
4236		break;
4237	case OP_QP_DESTROY:
4238		status = i40iw_sc_qp_destroy(
4239				pcmdinfo->in.u.qp_destroy.qp,
4240				pcmdinfo->in.u.qp_destroy.scratch,
4241				pcmdinfo->in.u.qp_destroy.remove_hash_idx,
4242				pcmdinfo->in.u.qp_destroy.
4243				ignore_mw_bnd,
4244				pcmdinfo->post_sq);
4245
4246		break;
4247	case OP_ALLOC_STAG:
4248		status = i40iw_sc_alloc_stag(
4249				pcmdinfo->in.u.alloc_stag.dev,
4250				&pcmdinfo->in.u.alloc_stag.info,
4251				pcmdinfo->in.u.alloc_stag.scratch,
4252				pcmdinfo->post_sq);
4253		break;
4254	case OP_MR_REG_NON_SHARED:
4255		status = i40iw_sc_mr_reg_non_shared(
4256				pcmdinfo->in.u.mr_reg_non_shared.dev,
4257				&pcmdinfo->in.u.mr_reg_non_shared.info,
4258				pcmdinfo->in.u.mr_reg_non_shared.scratch,
4259				pcmdinfo->post_sq);
4260
4261		break;
4262	case OP_DEALLOC_STAG:
4263		status = i40iw_sc_dealloc_stag(
4264				pcmdinfo->in.u.dealloc_stag.dev,
4265				&pcmdinfo->in.u.dealloc_stag.info,
4266				pcmdinfo->in.u.dealloc_stag.scratch,
4267				pcmdinfo->post_sq);
4268
4269		break;
4270	case OP_MW_ALLOC:
4271		status = i40iw_sc_mw_alloc(
4272				pcmdinfo->in.u.mw_alloc.dev,
4273				pcmdinfo->in.u.mw_alloc.scratch,
4274				pcmdinfo->in.u.mw_alloc.mw_stag_index,
4275				pcmdinfo->in.u.mw_alloc.pd_id,
4276				pcmdinfo->post_sq);
4277
4278		break;
4279	case OP_QP_FLUSH_WQES:
4280		status = i40iw_sc_qp_flush_wqes(
4281				pcmdinfo->in.u.qp_flush_wqes.qp,
4282				&pcmdinfo->in.u.qp_flush_wqes.info,
4283				pcmdinfo->in.u.qp_flush_wqes.
4284				scratch, pcmdinfo->post_sq);
4285		break;
4286	case OP_GEN_AE:
4287		status = i40iw_sc_gen_ae(
4288				pcmdinfo->in.u.gen_ae.qp,
4289				&pcmdinfo->in.u.gen_ae.info,
4290				pcmdinfo->in.u.gen_ae.scratch,
4291				pcmdinfo->post_sq);
4292		break;
4293	case OP_ADD_ARP_CACHE_ENTRY:
4294		status = i40iw_sc_add_arp_cache_entry(
4295				pcmdinfo->in.u.add_arp_cache_entry.cqp,
4296				&pcmdinfo->in.u.add_arp_cache_entry.info,
4297				pcmdinfo->in.u.add_arp_cache_entry.scratch,
4298				pcmdinfo->post_sq);
4299		break;
4300	case OP_MANAGE_PUSH_PAGE:
4301		status = i40iw_sc_manage_push_page(
4302				pcmdinfo->in.u.manage_push_page.cqp,
4303				&pcmdinfo->in.u.manage_push_page.info,
4304				pcmdinfo->in.u.manage_push_page.scratch,
4305				pcmdinfo->post_sq);
4306		break;
4307	case OP_UPDATE_PE_SDS:
4308		/* case I40IW_CQP_OP_UPDATE_PE_SDS */
4309		status = i40iw_update_pe_sds(
4310				pcmdinfo->in.u.update_pe_sds.dev,
4311				&pcmdinfo->in.u.update_pe_sds.info,
4312				pcmdinfo->in.u.update_pe_sds.
4313				scratch);
4314
4315		break;
4316	case OP_MANAGE_HMC_PM_FUNC_TABLE:
4317		status = i40iw_sc_manage_hmc_pm_func_table(
4318				pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
4319				pcmdinfo->in.u.manage_hmc_pm.scratch,
4320				(u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
4321				pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
4322				true);
4323		break;
4324	case OP_SUSPEND:
4325		status = i40iw_sc_suspend_qp(
4326				pcmdinfo->in.u.suspend_resume.cqp,
4327				pcmdinfo->in.u.suspend_resume.qp,
4328				pcmdinfo->in.u.suspend_resume.scratch);
4329		break;
4330	case OP_RESUME:
4331		status = i40iw_sc_resume_qp(
4332				pcmdinfo->in.u.suspend_resume.cqp,
4333				pcmdinfo->in.u.suspend_resume.qp,
4334				pcmdinfo->in.u.suspend_resume.scratch);
4335		break;
4336	case OP_MANAGE_VF_PBLE_BP:
4337		status = i40iw_manage_vf_pble_bp(
4338				pcmdinfo->in.u.manage_vf_pble_bp.cqp,
4339				&pcmdinfo->in.u.manage_vf_pble_bp.info,
4340				pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
4341		break;
4342	case OP_QUERY_FPM_VALUES:
4343		values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
4344		values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
4345		status = i40iw_sc_query_fpm_values(
4346				pcmdinfo->in.u.query_fpm_values.cqp,
4347				pcmdinfo->in.u.query_fpm_values.scratch,
4348				pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
4349				&values_mem, true, I40IW_CQP_WAIT_EVENT);
4350		break;
4351	case OP_COMMIT_FPM_VALUES:
4352		values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
4353		values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
4354		status = i40iw_sc_commit_fpm_values(
4355				pcmdinfo->in.u.commit_fpm_values.cqp,
4356				pcmdinfo->in.u.commit_fpm_values.scratch,
4357				pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
4358				&values_mem,
4359				true,
4360				I40IW_CQP_WAIT_EVENT);
4361		break;
4362	case OP_QUERY_RDMA_FEATURES:
4363		values_mem.pa = pcmdinfo->in.u.query_rdma_features.cap_pa;
4364		values_mem.va = pcmdinfo->in.u.query_rdma_features.cap_va;
4365		status = i40iw_sc_query_rdma_features(
4366			pcmdinfo->in.u.query_rdma_features.cqp, &values_mem,
4367			pcmdinfo->in.u.query_rdma_features.scratch);
4368		break;
4369	default:
4370		status = I40IW_NOT_SUPPORTED;
4371		break;
4372	}
4373
4374	return status;
4375}
4376
4377/**
4378 * i40iw_process_cqp_cmd - process all cqp commands
4379 * @dev: sc device struct
4380 * @pcmdinfo: cqp command info
4381 */
4382enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
4383					     struct cqp_commands_info *pcmdinfo)
4384{
4385	enum i40iw_status_code status = 0;
4386	unsigned long flags;
4387
4388	spin_lock_irqsave(&dev->cqp_lock, flags);
4389	if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
4390		status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
4391	else
4392		list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
4393	spin_unlock_irqrestore(&dev->cqp_lock, flags);
4394	return status;
4395}
4396
4397/**
4398 * i40iw_process_bh - called from tasklet for cqp list
4399 * @dev: sc device struct
4400 */
4401enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
4402{
4403	enum i40iw_status_code status = 0;
4404	struct cqp_commands_info *pcmdinfo;
4405	unsigned long flags;
4406
4407	spin_lock_irqsave(&dev->cqp_lock, flags);
4408	while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
4409		pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
4410
4411		status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
4412		if (status)
4413			break;
4414	}
4415	spin_unlock_irqrestore(&dev->cqp_lock, flags);
4416	return status;
4417}
4418
4419/**
4420 * i40iw_iwarp_opcode - determine if incoming is rdma layer
4421 * @info: aeq info for the packet
4422 * @pkt: packet for error
4423 */
4424static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
4425{
4426	__be16 *mpa;
4427	u32 opcode = 0xffffffff;
4428
4429	if (info->q2_data_written) {
4430		mpa = (__be16 *)pkt;
4431		opcode = ntohs(mpa[1]) & 0xf;
4432	}
4433	return opcode;
4434}
4435
4436/**
4437 * i40iw_locate_mpa - return pointer to mpa in the pkt
4438 * @pkt: packet with data
4439 */
4440static u8 *i40iw_locate_mpa(u8 *pkt)
4441{
4442	/* skip over ethernet header */
4443	pkt += I40IW_MAC_HLEN;
4444
4445	/* Skip over IP and TCP headers */
4446	pkt += 4 * (pkt[0] & 0x0f);
4447	pkt += 4 * ((pkt[12] >> 4) & 0x0f);
4448	return pkt;
4449}
4450
4451/**
4452 * i40iw_setup_termhdr - termhdr for terminate pkt
4453 * @qp: sc qp ptr for pkt
4454 * @hdr: term hdr
4455 * @opcode: flush opcode for termhdr
4456 * @layer_etype: error layer + error type
4457 * @err: error cod ein the header
4458 */
4459static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
4460				struct i40iw_terminate_hdr *hdr,
4461				enum i40iw_flush_opcode opcode,
4462				u8 layer_etype,
4463				u8 err)
4464{
4465	qp->flush_code = opcode;
4466	hdr->layer_etype = layer_etype;
4467	hdr->error_code = err;
4468}
4469
4470/**
4471 * i40iw_bld_terminate_hdr - build terminate message header
4472 * @qp: qp associated with received terminate AE
4473 * @info: the struct contiaing AE information
4474 */
4475static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
4476				   struct i40iw_aeqe_info *info)
4477{
4478	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
4479	u16 ddp_seg_len;
4480	int copy_len = 0;
4481	u8 is_tagged = 0;
4482	u32 opcode;
4483	struct i40iw_terminate_hdr *termhdr;
4484
4485	termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;
4486	memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
4487
4488	if (info->q2_data_written) {
4489		/* Use data from offending packet to fill in ddp & rdma hdrs */
4490		pkt = i40iw_locate_mpa(pkt);
4491		ddp_seg_len = ntohs(*(__be16 *)pkt);
4492		if (ddp_seg_len) {
4493			copy_len = 2;
4494			termhdr->hdrct = DDP_LEN_FLAG;
4495			if (pkt[2] & 0x80) {
4496				is_tagged = 1;
4497				if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
4498					copy_len += TERM_DDP_LEN_TAGGED;
4499					termhdr->hdrct |= DDP_HDR_FLAG;
4500				}
4501			} else {
4502				if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
4503					copy_len += TERM_DDP_LEN_UNTAGGED;
4504					termhdr->hdrct |= DDP_HDR_FLAG;
4505				}
4506
4507				if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
4508					if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
4509						copy_len += TERM_RDMA_LEN;
4510						termhdr->hdrct |= RDMA_HDR_FLAG;
4511					}
4512				}
4513			}
4514		}
4515	}
4516
4517	opcode = i40iw_iwarp_opcode(info, pkt);
4518
4519	switch (info->ae_id) {
4520	case I40IW_AE_AMP_UNALLOCATED_STAG:
4521		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4522		if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
4523			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
4524					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
4525		else
4526			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4527					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
4528		break;
4529	case I40IW_AE_AMP_BOUNDS_VIOLATION:
4530		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4531		if (info->q2_data_written)
4532			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
4533					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
4534		else
4535			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4536					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
4537		break;
4538	case I40IW_AE_AMP_BAD_PD:
4539		switch (opcode) {
4540		case I40IW_OP_TYPE_RDMA_WRITE:
4541			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
4542					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
4543			break;
4544		case I40IW_OP_TYPE_SEND_INV:
4545		case I40IW_OP_TYPE_SEND_SOL_INV:
4546			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4547					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
4548			break;
4549		default:
4550			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4551					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
4552		}
4553		break;
4554	case I40IW_AE_AMP_INVALID_STAG:
4555		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4556		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4557				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
4558		break;
4559	case I40IW_AE_AMP_BAD_QP:
4560		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
4561				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
4562		break;
4563	case I40IW_AE_AMP_BAD_STAG_KEY:
4564	case I40IW_AE_AMP_BAD_STAG_INDEX:
4565		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4566		switch (opcode) {
4567		case I40IW_OP_TYPE_SEND_INV:
4568		case I40IW_OP_TYPE_SEND_SOL_INV:
4569			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
4570					    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
4571			break;
4572		default:
4573			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4574					    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
4575		}
4576		break;
4577	case I40IW_AE_AMP_RIGHTS_VIOLATION:
4578	case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
4579	case I40IW_AE_PRIV_OPERATION_DENIED:
4580		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4581		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4582				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
4583		break;
4584	case I40IW_AE_AMP_TO_WRAP:
4585		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4586		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4587				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
4588		break;
4589	case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
4590		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4591				    (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
4592		break;
4593	case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
4594	case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
4595		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
4596				    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
4597		break;
4598	case I40IW_AE_LCE_QP_CATASTROPHIC:
4599	case I40IW_AE_DDP_NO_L_BIT:
4600		i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
4601				    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
4602		break;
4603	case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
4604		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4605				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
4606		break;
4607	case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
4608		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4609		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
4610				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
4611		break;
4612	case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
4613		if (is_tagged)
4614			i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4615					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
4616		else
4617			i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4618					    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
4619		break;
4620	case I40IW_AE_DDP_UBE_INVALID_MO:
4621		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4622				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
4623		break;
4624	case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
4625		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
4626				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
4627		break;
4628	case I40IW_AE_DDP_UBE_INVALID_QN:
4629		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4630				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
4631		break;
4632	case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
4633		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4634				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
4635		break;
4636	case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
4637		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
4638				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
4639		break;
4640	default:
4641		i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
4642				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
4643		break;
4644	}
4645
4646	if (copy_len)
4647		memcpy(termhdr + 1, pkt, copy_len);
4648
4649	return sizeof(struct i40iw_terminate_hdr) + copy_len;
4650}
4651
4652/**
4653 * i40iw_terminate_send_fin() - Send fin for terminate message
4654 * @qp: qp associated with received terminate AE
4655 */
4656void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
4657{
4658	/* Send the fin only */
4659	i40iw_term_modify_qp(qp,
4660			     I40IW_QP_STATE_TERMINATE,
4661			     I40IWQP_TERM_SEND_FIN_ONLY,
4662			     0);
4663}
4664
4665/**
4666 * i40iw_terminate_connection() - Bad AE and send terminate to remote QP
4667 * @qp: qp associated with received terminate AE
4668 * @info: the struct contiaing AE information
4669 */
4670void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
4671{
4672	u8 termlen = 0;
4673
4674	if (qp->term_flags & I40IW_TERM_SENT)
4675		return;         /* Sanity check */
4676
4677	/* Eventtype can change from bld_terminate_hdr */
4678	qp->eventtype = TERM_EVENT_QP_FATAL;
4679	termlen = i40iw_bld_terminate_hdr(qp, info);
4680	i40iw_terminate_start_timer(qp);
4681	qp->term_flags |= I40IW_TERM_SENT;
4682	i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
4683			     I40IWQP_TERM_SEND_TERM_ONLY, termlen);
4684}
4685
4686/**
4687 * i40iw_terminate_received - handle terminate received AE
4688 * @qp: qp associated with received terminate AE
4689 * @info: the struct contiaing AE information
4690 */
4691void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
4692{
4693	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
4694	__be32 *mpa;
4695	u8 ddp_ctl;
4696	u8 rdma_ctl;
4697	u16 aeq_id = 0;
4698	struct i40iw_terminate_hdr *termhdr;
4699
4700	mpa = (__be32 *)i40iw_locate_mpa(pkt);
4701	if (info->q2_data_written) {
4702		/* did not validate the frame - do it now */
4703		ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
4704		rdma_ctl = ntohl(mpa[0]) & 0xff;
4705		if ((ddp_ctl & 0xc0) != 0x40)
4706			aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
4707		else if ((ddp_ctl & 0x03) != 1)
4708			aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
4709		else if (ntohl(mpa[2]) != 2)
4710			aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
4711		else if (ntohl(mpa[3]) != 1)
4712			aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
4713		else if (ntohl(mpa[4]) != 0)
4714			aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
4715		else if ((rdma_ctl & 0xc0) != 0x40)
4716			aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
4717
4718		info->ae_id = aeq_id;
4719		if (info->ae_id) {
4720			/* Bad terminate recvd - send back a terminate */
4721			i40iw_terminate_connection(qp, info);
4722			return;
4723		}
4724	}
4725
4726	qp->term_flags |= I40IW_TERM_RCVD;
4727	qp->eventtype = TERM_EVENT_QP_FATAL;
4728	termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
4729	if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
4730	    termhdr->layer_etype == RDMAP_REMOTE_OP) {
4731		i40iw_terminate_done(qp, 0);
4732	} else {
4733		i40iw_terminate_start_timer(qp);
4734		i40iw_terminate_send_fin(qp);
4735	}
4736}
4737
4738/**
4739 * i40iw_sc_vsi_init - Initialize virtual device
4740 * @vsi: pointer to the vsi structure
4741 * @info: parameters to initialize vsi
4742 **/
4743void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)
4744{
4745	int i;
4746
4747	vsi->dev = info->dev;
4748	vsi->back_vsi = info->back_vsi;
4749	vsi->mtu = info->params->mtu;
4750	vsi->exception_lan_queue = info->exception_lan_queue;
4751	i40iw_fill_qos_list(info->params->qs_handle_list);
4752
4753	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
4754		vsi->qos[i].qs_handle = info->params->qs_handle_list[i];
4755		i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i,
4756			    vsi->qos[i].qs_handle);
4757		spin_lock_init(&vsi->qos[i].lock);
4758		INIT_LIST_HEAD(&vsi->qos[i].qplist);
4759	}
4760}
4761
4762/**
4763 * i40iw_hw_stats_init - Initiliaze HW stats table
4764 * @stats: pestat struct
4765 * @fcn_idx: PCI fn id
4766 * @is_pf: Is it a PF?
4767 *
4768 * Populate the HW stats table with register offset addr for each
4769 * stats. And start the perioidic stats timer.
4770 */
4771void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
4772{
4773	u32 stats_reg_offset;
4774	u32 stats_index;
4775	struct i40iw_dev_hw_stats_offsets *stats_table =
4776		&stats->hw_stats_offsets;
4777	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4778
4779	if (is_pf) {
4780		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
4781				I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
4782		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
4783				I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
4784		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
4785				I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
4786		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
4787				I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
4788		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
4789				I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
4790		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
4791				I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
4792		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
4793				I40E_GLPES_PFTCPRTXSEG(fcn_idx);
4794		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
4795				I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
4796		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
4797				I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
4798
4799		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
4800				I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
4801		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
4802				I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
4803		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
4804				I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
4805		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
4806				I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
4807		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
4808				I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
4809		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
4810				I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
4811		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
4812				I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
4813		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
4814				I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
4815		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
4816				I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
4817		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
4818				I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
4819		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
4820				I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
4821		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
4822				I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
4823		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
4824				I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
4825		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4826				I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
4827		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4828				I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
4829		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
4830				I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
4831		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
4832				I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
4833		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
4834				I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
4835		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
4836				I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
4837		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
4838				I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
4839		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
4840				I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
4841		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
4842				I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
4843		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
4844				I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
4845		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
4846				I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
4847		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
4848				I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
4849		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
4850				I40E_GLPES_PFRDMAVINVLO(fcn_idx);
4851	} else {
4852		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
4853				I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
4854		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
4855				I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
4856		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
4857				I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
4858		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
4859				I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
4860		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
4861				I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
4862		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
4863				I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
4864		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
4865				I40E_GLPES_VFTCPRTXSEG(fcn_idx);
4866		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
4867				I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
4868		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
4869				I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
4870
4871		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
4872				I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
4873		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
4874				I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
4875		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
4876				I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
4877		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
4878				I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
4879		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
4880				I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
4881		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
4882				I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
4883		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
4884				I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
4885		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
4886				I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
4887		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
4888				I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
4889		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
4890				I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
4891		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
4892				I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
4893		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
4894				I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
4895		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
4896				I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
4897		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4898				I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
4899		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4900				I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
4901		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
4902				I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
4903		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
4904				I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
4905		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
4906				I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
4907		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
4908				I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
4909		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
4910				I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
4911		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
4912				I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
4913		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
4914				I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
4915		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
4916				I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
4917		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
4918				I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
4919		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
4920				I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
4921		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
4922				I40E_GLPES_VFRDMAVINVLO(fcn_idx);
4923	}
4924
4925	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
4926	     stats_index++) {
4927		stats_reg_offset = stats_table->stats_offset_64[stats_index];
4928		last_rd_stats->stats_value_64[stats_index] =
4929			readq(stats->hw->hw_addr + stats_reg_offset);
4930	}
4931
4932	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
4933	     stats_index++) {
4934		stats_reg_offset = stats_table->stats_offset_32[stats_index];
4935		last_rd_stats->stats_value_32[stats_index] =
4936			i40iw_rd32(stats->hw, stats_reg_offset);
4937	}
4938}
4939
4940/**
4941 * i40iw_hw_stats_read_32 - Read 32-bit HW stats counters and accommodates for roll-overs.
4942 * @stat: pestat struct
4943 * @index: index in HW stats table which contains offset reg-addr
4944 * @value: hw stats value
4945 */
4946void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
4947			    enum i40iw_hw_stats_index_32b index,
4948			    u64 *value)
4949{
4950	struct i40iw_dev_hw_stats_offsets *stats_table =
4951		&stats->hw_stats_offsets;
4952	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4953	struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
4954	u64 new_stats_value = 0;
4955	u32 stats_reg_offset = stats_table->stats_offset_32[index];
4956
4957	new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);
4958	/*roll-over case */
4959	if (new_stats_value < last_rd_stats->stats_value_32[index])
4960		hw_stats->stats_value_32[index] += new_stats_value;
4961	else
4962		hw_stats->stats_value_32[index] +=
4963			new_stats_value - last_rd_stats->stats_value_32[index];
4964	last_rd_stats->stats_value_32[index] = new_stats_value;
4965	*value = hw_stats->stats_value_32[index];
4966}
4967
4968/**
4969 * i40iw_hw_stats_read_64 - Read HW stats counters (greater than 32-bit) and accommodates for roll-overs.
4970 * @stats: pestat struct
4971 * @index: index in HW stats table which contains offset reg-addr
4972 * @value: hw stats value
4973 */
4974void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
4975			    enum i40iw_hw_stats_index_64b index,
4976			    u64 *value)
4977{
4978	struct i40iw_dev_hw_stats_offsets *stats_table =
4979		&stats->hw_stats_offsets;
4980	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4981	struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
4982	u64 new_stats_value = 0;
4983	u32 stats_reg_offset = stats_table->stats_offset_64[index];
4984
4985	new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);
4986	/*roll-over case */
4987	if (new_stats_value < last_rd_stats->stats_value_64[index])
4988		hw_stats->stats_value_64[index] += new_stats_value;
4989	else
4990		hw_stats->stats_value_64[index] +=
4991			new_stats_value - last_rd_stats->stats_value_64[index];
4992	last_rd_stats->stats_value_64[index] = new_stats_value;
4993	*value = hw_stats->stats_value_64[index];
4994}
4995
4996/**
4997 * i40iw_hw_stats_read_all - read all HW stat counters
4998 * @stats: pestat struct
4999 * @stats_values: hw stats structure
5000 *
5001 * Read all the HW stat counters and populates hw_stats structure
5002 * of passed-in vsi's pestat as well as copy created in stat_values.
5003 */
5004void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,
5005			     struct i40iw_dev_hw_stats *stats_values)
5006{
5007	u32 stats_index;
5008	unsigned long flags;
5009
5010	spin_lock_irqsave(&stats->lock, flags);
5011
5012	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
5013	     stats_index++)
5014		i40iw_hw_stats_read_32(stats, stats_index,
5015				       &stats_values->stats_value_32[stats_index]);
5016	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
5017	     stats_index++)
5018		i40iw_hw_stats_read_64(stats, stats_index,
5019				       &stats_values->stats_value_64[stats_index]);
5020	spin_unlock_irqrestore(&stats->lock, flags);
5021}
5022
5023/**
5024 * i40iw_hw_stats_refresh_all - Update all HW stats structs
5025 * @stats: pestat struct
5026 *
5027 * Read all the HW stats counters to refresh values in hw_stats structure
5028 * of passed-in dev's pestat
5029 */
5030void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)
5031{
5032	u64 stats_value;
5033	u32 stats_index;
5034	unsigned long flags;
5035
5036	spin_lock_irqsave(&stats->lock, flags);
5037
5038	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
5039	     stats_index++)
5040		i40iw_hw_stats_read_32(stats, stats_index, &stats_value);
5041	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
5042	     stats_index++)
5043		i40iw_hw_stats_read_64(stats, stats_index, &stats_value);
5044	spin_unlock_irqrestore(&stats->lock, flags);
5045}
5046
5047/**
5048 * i40iw_get_fcn_id - Return the function id
5049 * @dev: pointer to the device
5050 */
5051static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)
5052{
5053	u8 fcn_id = I40IW_INVALID_FCN_ID;
5054	u8 i;
5055
5056	for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++)
5057		if (!dev->fcn_id_array[i]) {
5058			fcn_id = i;
5059			dev->fcn_id_array[i] = true;
5060			break;
5061		}
5062	return fcn_id;
5063}
5064
5065/**
5066 * i40iw_vsi_stats_init - Initialize the vsi statistics
5067 * @vsi: pointer to the vsi structure
5068 * @info: The info structure used for initialization
5069 */
5070enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)
5071{
5072	u8 fcn_id = info->fcn_id;
5073
5074	if (info->alloc_fcn_id)
5075		fcn_id = i40iw_get_fcn_id(vsi->dev);
5076
5077	if (fcn_id == I40IW_INVALID_FCN_ID)
5078		return I40IW_ERR_NOT_READY;
5079
5080	vsi->pestat = info->pestat;
5081	vsi->pestat->hw = vsi->dev->hw;
5082	vsi->pestat->vsi = vsi;
5083
5084	if (info->stats_initialize) {
5085		i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
5086		spin_lock_init(&vsi->pestat->lock);
5087		i40iw_hw_stats_start_timer(vsi);
5088	}
5089	vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
5090	vsi->fcn_id = fcn_id;
5091	return I40IW_SUCCESS;
5092}
5093
5094/**
5095 * i40iw_vsi_stats_free - Free the vsi stats
5096 * @vsi: pointer to the vsi structure
5097 */
5098void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
5099{
5100	u8 fcn_id = vsi->fcn_id;
5101
5102	if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
5103		vsi->dev->fcn_id_array[fcn_id] = false;
5104	i40iw_hw_stats_stop_timer(vsi);
5105}
5106
5107static struct i40iw_cqp_ops iw_cqp_ops = {
5108	.cqp_init = i40iw_sc_cqp_init,
5109	.cqp_create = i40iw_sc_cqp_create,
5110	.cqp_post_sq = i40iw_sc_cqp_post_sq,
5111	.cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe,
5112	.cqp_destroy = i40iw_sc_cqp_destroy,
5113	.poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done
5114};
5115
5116static struct i40iw_ccq_ops iw_ccq_ops = {
5117	.ccq_init = i40iw_sc_ccq_init,
5118	.ccq_create = i40iw_sc_ccq_create,
5119	.ccq_destroy = i40iw_sc_ccq_destroy,
5120	.ccq_create_done = i40iw_sc_ccq_create_done,
5121	.ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info,
5122	.ccq_arm = i40iw_sc_ccq_arm
5123};
5124
5125static struct i40iw_ceq_ops iw_ceq_ops = {
5126	.ceq_init = i40iw_sc_ceq_init,
5127	.ceq_create = i40iw_sc_ceq_create,
5128	.cceq_create_done = i40iw_sc_cceq_create_done,
5129	.cceq_destroy_done = i40iw_sc_cceq_destroy_done,
5130	.cceq_create = i40iw_sc_cceq_create,
5131	.ceq_destroy = i40iw_sc_ceq_destroy,
5132	.process_ceq = i40iw_sc_process_ceq
5133};
5134
5135static struct i40iw_aeq_ops iw_aeq_ops = {
5136	.aeq_init = i40iw_sc_aeq_init,
5137	.aeq_create = i40iw_sc_aeq_create,
5138	.aeq_destroy = i40iw_sc_aeq_destroy,
5139	.get_next_aeqe = i40iw_sc_get_next_aeqe,
5140	.repost_aeq_entries = i40iw_sc_repost_aeq_entries,
5141	.aeq_create_done = i40iw_sc_aeq_create_done,
5142	.aeq_destroy_done = i40iw_sc_aeq_destroy_done
5143};
5144
5145/* iwarp pd ops */
5146static struct i40iw_pd_ops iw_pd_ops = {
5147	.pd_init = i40iw_sc_pd_init,
5148};
5149
5150static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
5151	.qp_init = i40iw_sc_qp_init,
5152	.qp_create = i40iw_sc_qp_create,
5153	.qp_modify = i40iw_sc_qp_modify,
5154	.qp_destroy = i40iw_sc_qp_destroy,
5155	.qp_flush_wqes = i40iw_sc_qp_flush_wqes,
5156	.qp_upload_context = i40iw_sc_qp_upload_context,
5157	.qp_setctx = i40iw_sc_qp_setctx,
5158	.qp_send_lsmm = i40iw_sc_send_lsmm,
5159	.qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag,
5160	.qp_send_rtt = i40iw_sc_send_rtt,
5161	.qp_post_wqe0 = i40iw_sc_post_wqe0,
5162	.iw_mr_fast_register = i40iw_sc_mr_fast_register
5163};
5164
5165static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
5166	.cq_init = i40iw_sc_cq_init,
5167	.cq_create = i40iw_sc_cq_create,
5168	.cq_destroy = i40iw_sc_cq_destroy,
5169	.cq_modify = i40iw_sc_cq_modify,
5170};
5171
5172static struct i40iw_mr_ops iw_mr_ops = {
5173	.alloc_stag = i40iw_sc_alloc_stag,
5174	.mr_reg_non_shared = i40iw_sc_mr_reg_non_shared,
5175	.mr_reg_shared = i40iw_sc_mr_reg_shared,
5176	.dealloc_stag = i40iw_sc_dealloc_stag,
5177	.query_stag = i40iw_sc_query_stag,
5178	.mw_alloc = i40iw_sc_mw_alloc
5179};
5180
5181static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
5182	.manage_push_page = i40iw_sc_manage_push_page,
5183	.manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table,
5184	.set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile,
5185	.commit_fpm_values = i40iw_sc_commit_fpm_values,
5186	.query_fpm_values = i40iw_sc_query_fpm_values,
5187	.static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated,
5188	.add_arp_cache_entry = i40iw_sc_add_arp_cache_entry,
5189	.del_arp_cache_entry = i40iw_sc_del_arp_cache_entry,
5190	.query_arp_cache_entry = i40iw_sc_query_arp_cache_entry,
5191	.manage_apbvt_entry = i40iw_sc_manage_apbvt_entry,
5192	.manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry,
5193	.alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry,
5194	.add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry,
5195	.del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry,
5196	.cqp_nop = i40iw_sc_cqp_nop,
5197	.commit_fpm_values_done = i40iw_sc_commit_fpm_values_done,
5198	.query_fpm_values_done = i40iw_sc_query_fpm_values_done,
5199	.manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done,
5200	.update_suspend_qp = i40iw_sc_suspend_qp,
5201	.update_resume_qp = i40iw_sc_resume_qp
5202};
5203
5204static struct i40iw_hmc_ops iw_hmc_ops = {
5205	.init_iw_hmc = i40iw_sc_init_iw_hmc,
5206	.parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf,
5207	.configure_iw_fpm = i40iw_sc_configure_iw_fpm,
5208	.parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf,
5209	.create_hmc_object = i40iw_sc_create_hmc_obj,
5210	.del_hmc_object = i40iw_sc_del_hmc_obj
5211};
5212
5213/**
5214 * i40iw_device_init - Initialize IWARP device
5215 * @dev: IWARP device pointer
5216 * @info: IWARP init info
5217 */
5218enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
5219					 struct i40iw_device_init_info *info)
5220{
5221	u32 val;
5222	u32 vchnl_ver = 0;
5223	u16 hmc_fcn = 0;
5224	enum i40iw_status_code ret_code = 0;
5225	u8 db_size;
5226
5227	spin_lock_init(&dev->cqp_lock);
5228
5229	i40iw_device_init_uk(&dev->dev_uk);
5230
5231	dev->debug_mask = info->debug_mask;
5232
5233	dev->hmc_fn_id = info->hmc_fn_id;
5234	dev->is_pf = info->is_pf;
5235
5236	dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
5237	dev->fpm_query_buf = info->fpm_query_buf;
5238
5239	dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
5240	dev->fpm_commit_buf = info->fpm_commit_buf;
5241
5242	dev->hw = info->hw;
5243	dev->hw->hw_addr = info->bar0;
5244
5245	if (dev->is_pf) {
5246		val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
5247		dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
5248
5249		val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
5250		db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
5251		if ((db_size != I40IW_PE_DB_SIZE_4M) &&
5252		    (db_size != I40IW_PE_DB_SIZE_8M)) {
5253			i40iw_debug(dev, I40IW_DEBUG_DEV,
5254				    "%s: PE doorbell is not enabled in CSR val 0x%x\n",
5255				    __func__, val);
5256			ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
5257			return ret_code;
5258		}
5259		dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
5260		dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
5261	} else {
5262		dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
5263	}
5264
5265	dev->cqp_ops = &iw_cqp_ops;
5266	dev->ccq_ops = &iw_ccq_ops;
5267	dev->ceq_ops = &iw_ceq_ops;
5268	dev->aeq_ops = &iw_aeq_ops;
5269	dev->cqp_misc_ops = &iw_cqp_misc_ops;
5270	dev->iw_pd_ops = &iw_pd_ops;
5271	dev->iw_priv_qp_ops = &iw_priv_qp_ops;
5272	dev->iw_priv_cq_ops = &iw_priv_cq_ops;
5273	dev->mr_ops = &iw_mr_ops;
5274	dev->hmc_ops = &iw_hmc_ops;
5275	dev->vchnl_if.vchnl_send = info->vchnl_send;
5276	if (dev->vchnl_if.vchnl_send)
5277		dev->vchnl_up = true;
5278	else
5279		dev->vchnl_up = false;
5280	if (!dev->is_pf) {
5281		dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
5282		ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
5283		if (!ret_code) {
5284			i40iw_debug(dev, I40IW_DEBUG_DEV,
5285				    "%s: Get Channel version rc = 0x%0x, version is %u\n",
5286				__func__, ret_code, vchnl_ver);
5287			ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
5288			if (!ret_code) {
5289				i40iw_debug(dev, I40IW_DEBUG_DEV,
5290					    "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
5291					    __func__, ret_code, hmc_fcn);
5292				dev->hmc_fn_id = (u8)hmc_fcn;
5293			}
5294		}
5295	}
5296	dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;
5297
5298	return ret_code;
5299}
5300