1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19 
20 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
21 			    int type, int chan_id);
22 
23 enum mc_tbl_sz {
24 	MC_TBL_SZ_256,
25 	MC_TBL_SZ_512,
26 	MC_TBL_SZ_1K,
27 	MC_TBL_SZ_2K,
28 	MC_TBL_SZ_4K,
29 	MC_TBL_SZ_8K,
30 	MC_TBL_SZ_16K,
31 	MC_TBL_SZ_32K,
32 	MC_TBL_SZ_64K,
33 };
34 
35 enum mc_buf_cnt {
36 	MC_BUF_CNT_8,
37 	MC_BUF_CNT_16,
38 	MC_BUF_CNT_32,
39 	MC_BUF_CNT_64,
40 	MC_BUF_CNT_128,
41 	MC_BUF_CNT_256,
42 	MC_BUF_CNT_512,
43 	MC_BUF_CNT_1024,
44 	MC_BUF_CNT_2048,
45 };
46 
47 enum nix_makr_fmt_indexes {
48 	NIX_MARK_CFG_IP_DSCP_RED,
49 	NIX_MARK_CFG_IP_DSCP_YELLOW,
50 	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
51 	NIX_MARK_CFG_IP_ECN_RED,
52 	NIX_MARK_CFG_IP_ECN_YELLOW,
53 	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
54 	NIX_MARK_CFG_VLAN_DEI_RED,
55 	NIX_MARK_CFG_VLAN_DEI_YELLOW,
56 	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
57 	NIX_MARK_CFG_MAX,
58 };
59 
/* For now considering MC resources needed for broadcast
 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
 */
63 #define MC_TBL_SIZE	MC_TBL_SZ_512
64 #define MC_BUF_CNT	MC_BUF_CNT_128
65 
66 struct mce {
67 	struct hlist_node	node;
68 	u16			pcifunc;
69 };
70 
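/* Check whether a NIX LF is attached to this PF_FUNC and the
 * corresponding NIX block address is valid.
 */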
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
72 {
73 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
74 	int blkaddr;
75 
76 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
77 	if (!pfvf->nixlf || blkaddr < 0)
78 		return false;
79 	return true;
80 }
81 
int rvu_get_nixlf_count(struct rvu *rvu)
83 {
84 	struct rvu_block *block;
85 	int blkaddr;
86 
87 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
88 	if (blkaddr < 0)
89 		return 0;
90 	block = &rvu->hw->block[blkaddr];
91 	return block->lf.max;
92 }
93 
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
95 {
96 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
97 	struct rvu_hwinfo *hw = rvu->hw;
98 	int blkaddr;
99 
100 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
101 	if (!pfvf->nixlf || blkaddr < 0)
102 		return NIX_AF_ERR_AF_LF_INVALID;
103 
104 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
105 	if (*nixlf < 0)
106 		return NIX_AF_ERR_AF_LF_INVALID;
107 
108 	if (nix_blkaddr)
109 		*nix_blkaddr = blkaddr;
110 
111 	return 0;
112 }
113 
static void nix_mce_list_init(struct nix_mce_list *list, int max)
115 {
116 	INIT_HLIST_HEAD(&list->head);
117 	list->count = 0;
118 	list->max = max;
119 }
120 
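/* Reserve 'count' contiguous entries from the multicast/mirror (MCE) list
 * by advancing the next-free index; returns the index of the first
 * reserved entry (0 if the mcast context is not present).
 */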
static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
122 {
123 	int idx;
124 
125 	if (!mcast)
126 		return 0;
127 
128 	idx = mcast->next_free_mce;
129 	mcast->next_free_mce += count;
130 	return idx;
131 }
132 
static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
134 {
135 	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
136 		return hw->nix0;
137 
138 	return NULL;
139 }
140 
static void nix_rx_sync(struct rvu *rvu, int blkaddr)
142 {
143 	int err;
144 
	/* Sync all in-flight RX packets to LLC/DRAM */
146 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
147 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
148 	if (err)
149 		dev_err(rvu->dev, "NIX RX software sync failed\n");
150 }
151 
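/* Check whether transmit scheduler queue 'schq' at level 'lvl' belongs to
 * this PF_FUNC. Levels at or above the aggregation level are shared by a
 * PF and its VFs, so ownership is checked at PF granularity there.
 */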
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
154 {
155 	struct rvu_hwinfo *hw = rvu->hw;
156 	struct nix_txsch *txsch;
157 	struct nix_hw *nix_hw;
158 	u16 map_func;
159 
160 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
161 	if (!nix_hw)
162 		return false;
163 
164 	txsch = &nix_hw->txsch[lvl];
165 	/* Check out of bounds */
166 	if (schq >= txsch->schq.max)
167 		return false;
168 
169 	mutex_lock(&rvu->rsrc_lock);
170 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
171 	mutex_unlock(&rvu->rsrc_lock);
172 
	/* TLs aggregating traffic are shared across PF and VFs */
174 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
175 		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
176 			return false;
177 		else
178 			return true;
179 	}
180 
181 	if (map_func != pcifunc)
182 		return false;
183 
184 	return true;
185 }
186 
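/* Set up RX/TX channels and default NPC MCAM entries (unicast, broadcast
 * and, for LBK, promiscuous) for a newly attached NIX LF, and add the
 * PF_FUNC to the broadcast packet replication list.
 */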
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
188 {
189 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
190 	int pkind, pf, vf, lbkid;
191 	u8 cgx_id, lmac_id;
192 	int err;
193 
194 	pf = rvu_get_pf(pcifunc);
195 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
196 		return 0;
197 
198 	switch (type) {
199 	case NIX_INTF_TYPE_CGX:
200 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
201 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
202 
203 		pkind = rvu_npc_get_pkind(rvu, pf);
204 		if (pkind < 0) {
205 			dev_err(rvu->dev,
206 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
207 			return -EINVAL;
208 		}
209 		pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
210 		pfvf->tx_chan_base = pfvf->rx_chan_base;
211 		pfvf->rx_chan_cnt = 1;
212 		pfvf->tx_chan_cnt = 1;
213 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
214 		rvu_npc_set_pkind(rvu, pkind, pfvf);
215 
216 		/* By default we enable pause frames */
217 		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
218 			cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
219 					       lmac_id, true, true);
220 		break;
221 	case NIX_INTF_TYPE_LBK:
222 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
223 
		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternately to LBK interfaces. NIX0 should
		 * send packets on LBK link 1 channels and NIX1 should send
		 * on LBK link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
230 		lbkid = 0;
231 		if (rvu->hw->lbk_links > 1)
232 			lbkid = vf & 0x1 ? 0 : 1;
233 
		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore, if an odd number of AF VFs is
		 * enabled, the last VF is left without a pair.
		 */
238 		pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf);
239 		pfvf->tx_chan_base = vf & 0x1 ?
240 					NIX_CHAN_LBK_CHX(lbkid, vf - 1) :
241 					NIX_CHAN_LBK_CHX(lbkid, vf + 1);
242 		pfvf->rx_chan_cnt = 1;
243 		pfvf->tx_chan_cnt = 1;
244 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
245 					      pfvf->rx_chan_base, false);
246 		break;
247 	}
248 
	/* Add a UCAST forwarding rule in MCAM with this NIXLF-attached
	 * RVU PF/VF's MAC address.
	 */
252 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
253 				    pfvf->rx_chan_base, pfvf->mac_addr);
254 
255 	/* Add this PF_FUNC to bcast pkt replication list */
256 	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
257 	if (err) {
258 		dev_err(rvu->dev,
259 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
260 			pcifunc);
261 		return err;
262 	}
263 
264 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
265 					  nixlf, pfvf->rx_chan_base);
266 	pfvf->maxlen = NIC_HW_MIN_FRS;
267 	pfvf->minlen = NIC_HW_MIN_FRS;
268 
269 	return 0;
270 }
271 
static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
273 {
274 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
275 	int err;
276 
277 	pfvf->maxlen = 0;
278 	pfvf->minlen = 0;
279 	pfvf->rxvlan = false;
280 
281 	/* Remove this PF_FUNC from bcast pkt replication list */
282 	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
283 	if (err) {
284 		dev_err(rvu->dev,
285 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
286 			pcifunc);
287 	}
288 
289 	/* Free and disable any MCAM entries used by this NIX LF */
290 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
291 }
292 
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
296 {
297 	u16 pcifunc = req->hdr.pcifunc;
298 	struct rvu_pfvf *pfvf;
299 	int blkaddr, pf, type;
300 	u16 chan_base, chan;
301 	u64 cfg;
302 
303 	pf = rvu_get_pf(pcifunc);
304 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
305 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
306 		return 0;
307 
308 	pfvf = rvu_get_pfvf(rvu, pcifunc);
309 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
310 
311 	chan_base = pfvf->rx_chan_base + req->chan_base;
312 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
313 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
314 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
315 			    cfg & ~BIT_ULL(16));
316 	}
317 	return 0;
318 }
319 
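/* Derive the backpressure ID (BPID) for a channel from the interface type
 * (CGX or LBK), the per-LMAC channel count read from NIX_AF_CONST and the
 * requested channel base/offset.
 */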
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
322 {
323 	int bpid, blkaddr, lmac_chan_cnt;
324 	struct rvu_hwinfo *hw = rvu->hw;
325 	u16 cgx_bpid_cnt, lbk_bpid_cnt;
326 	struct rvu_pfvf *pfvf;
327 	u8 cgx_id, lmac_id;
328 	u64 cfg;
329 
330 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
331 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
332 	lmac_chan_cnt = cfg & 0xFF;
333 
334 	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
335 	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
336 
337 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
338 
	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * LMAC channels and BPIDs are mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
349 	switch (type) {
350 	case NIX_INTF_TYPE_CGX:
351 		if ((req->chan_base + req->chan_cnt) > 15)
352 			return -EINVAL;
353 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
354 		/* Assign bpid based on cgx, lmac and chan id */
355 		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
356 			(lmac_id * lmac_chan_cnt) + req->chan_base;
357 
358 		if (req->bpid_per_chan)
359 			bpid += chan_id;
360 		if (bpid > cgx_bpid_cnt)
361 			return -EINVAL;
362 		break;
363 
364 	case NIX_INTF_TYPE_LBK:
365 		if ((req->chan_base + req->chan_cnt) > 63)
366 			return -EINVAL;
367 		bpid = cgx_bpid_cnt + req->chan_base;
368 		if (req->bpid_per_chan)
369 			bpid += chan_id;
370 		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
371 			return -EINVAL;
372 		break;
373 	default:
374 		return -EINVAL;
375 	}
376 	return bpid;
377 }
378 
int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
382 {
383 	int blkaddr, pf, type, chan_id = 0;
384 	u16 pcifunc = req->hdr.pcifunc;
385 	struct rvu_pfvf *pfvf;
386 	u16 chan_base, chan;
387 	s16 bpid, bpid_base;
388 	u64 cfg;
389 
390 	pf = rvu_get_pf(pcifunc);
391 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
392 
393 	/* Enable backpressure only for CGX mapped PFs and LBK interface */
394 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
395 		return 0;
396 
397 	pfvf = rvu_get_pfvf(rvu, pcifunc);
398 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
399 
400 	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
401 	chan_base = pfvf->rx_chan_base + req->chan_base;
402 	bpid = bpid_base;
403 
404 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
405 		if (bpid < 0) {
406 			dev_warn(rvu->dev, "Fail to enable backpressure\n");
407 			return -EINVAL;
408 		}
409 
410 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
411 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
412 			    cfg | (bpid & 0xFF) | BIT_ULL(16));
413 		chan_id++;
414 		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
415 	}
416 
417 	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel to the BPID assigned to it */
419 		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
420 					(bpid_base & 0x3FF);
421 		if (req->bpid_per_chan)
422 			bpid_base++;
423 	}
424 	rsp->chan_cnt = req->chan_cnt;
425 
426 	return 0;
427 }
428 
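/* Program the outer-L3 LSO fields for the given format index: the IP
 * length field and, for IPv4 only, the IP ID field.
 */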
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
431 {
432 	struct nix_lso_format field = {0};
433 
434 	/* IP's Length field */
435 	field.layer = NIX_TXLAYER_OL3;
	/* In IPv4 the length field is at byte offset 2, in IPv6 at offset 4 */
437 	field.offset = v4 ? 2 : 4;
438 	field.sizem1 = 1; /* i.e 2 bytes */
439 	field.alg = NIX_LSOALG_ADD_PAYLEN;
440 	rvu_write64(rvu, blkaddr,
441 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
442 		    *(u64 *)&field);
443 
444 	/* No ID field in IPv6 header */
445 	if (!v4)
446 		return;
447 
448 	/* IP's ID field */
449 	field.layer = NIX_TXLAYER_OL3;
450 	field.offset = 4;
451 	field.sizem1 = 1; /* i.e 2 bytes */
452 	field.alg = NIX_LSOALG_ADD_SEGNUM;
453 	rvu_write64(rvu, blkaddr,
454 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
455 		    *(u64 *)&field);
456 }
457 
static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
460 {
461 	struct nix_lso_format field = {0};
462 
463 	/* TCP's sequence number field */
464 	field.layer = NIX_TXLAYER_OL4;
465 	field.offset = 4;
466 	field.sizem1 = 3; /* i.e 4 bytes */
467 	field.alg = NIX_LSOALG_ADD_OFFSET;
468 	rvu_write64(rvu, blkaddr,
469 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
470 		    *(u64 *)&field);
471 
472 	/* TCP's flags field */
473 	field.layer = NIX_TXLAYER_OL4;
474 	field.offset = 12;
475 	field.sizem1 = 1; /* 2 bytes */
476 	field.alg = NIX_LSOALG_TCP_FLAGS;
477 	rvu_write64(rvu, blkaddr,
478 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
479 		    *(u64 *)&field);
480 }
481 
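/* Enable LSO in hardware and program the static TSOv4/TSOv6 LSO formats:
 * first/middle segments mask out TCP PSH/RST/FIN flags, and the per-format
 * fields fix up the IP length/ID and TCP sequence/flags fields.
 */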
static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
483 {
484 	u64 cfg, idx, fidx = 0;
485 
486 	/* Get max HW supported format indices */
487 	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
488 	nix_hw->lso.total = cfg;
489 
490 	/* Enable LSO */
491 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
492 	/* For TSO, set first and middle segment flags to
493 	 * mask out PSH, RST & FIN flags in TCP packet
494 	 */
495 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
496 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
497 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
498 
499 	/* Setup default static LSO formats
500 	 *
501 	 * Configure format fields for TCPv4 segmentation offload
502 	 */
503 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
504 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
505 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
506 
507 	/* Set rest of the fields to NOP */
508 	for (; fidx < 8; fidx++) {
509 		rvu_write64(rvu, blkaddr,
510 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
511 	}
512 	nix_hw->lso.in_use++;
513 
514 	/* Configure format fields for TCPv6 segmentation offload */
515 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
516 	fidx = 0;
517 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
518 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
519 
520 	/* Set rest of the fields to NOP */
521 	for (; fidx < 8; fidx++) {
522 		rvu_write64(rvu, blkaddr,
523 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
524 	}
525 	nix_hw->lso.in_use++;
526 }
527 
static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
529 {
530 	kfree(pfvf->rq_bmap);
531 	kfree(pfvf->sq_bmap);
532 	kfree(pfvf->cq_bmap);
533 	if (pfvf->rq_ctx)
534 		qmem_free(rvu->dev, pfvf->rq_ctx);
535 	if (pfvf->sq_ctx)
536 		qmem_free(rvu->dev, pfvf->sq_ctx);
537 	if (pfvf->cq_ctx)
538 		qmem_free(rvu->dev, pfvf->cq_ctx);
539 	if (pfvf->rss_ctx)
540 		qmem_free(rvu->dev, pfvf->rss_ctx);
541 	if (pfvf->nix_qints_ctx)
542 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
543 	if (pfvf->cq_ints_ctx)
544 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
545 
546 	pfvf->rq_bmap = NULL;
547 	pfvf->cq_bmap = NULL;
548 	pfvf->sq_bmap = NULL;
549 	pfvf->rq_ctx = NULL;
550 	pfvf->sq_ctx = NULL;
551 	pfvf->cq_ctx = NULL;
552 	pfvf->rss_ctx = NULL;
553 	pfvf->nix_qints_ctx = NULL;
554 	pfvf->cq_ints_ctx = NULL;
555 }
556 
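/* Allocate the RSS indirection table for a NIX LF and program its base
 * IOVA, total table size, caching/way-mask and per-group offsets/sizes.
 */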
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask)
561 {
562 	int err, grp, num_indices;
563 
564 	/* RSS is not requested for this NIXLF */
565 	if (!rss_sz)
566 		return 0;
567 	num_indices = rss_sz * rss_grps;
568 
569 	/* Alloc NIX RSS HW context memory and config the base */
570 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
571 	if (err)
572 		return err;
573 
574 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
575 		    (u64)pfvf->rss_ctx->iova);
576 
577 	/* Config full RSS table size, enable RSS and caching */
578 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
579 		    BIT_ULL(36) | BIT_ULL(4) |
580 		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
581 		    way_mask << 20);
582 	/* Config RSS group offset and sizes */
583 	for (grp = 0; grp < rss_grps; grp++)
584 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
585 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
586 	return 0;
587 }
588 
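/* Copy one instruction to the current AQ head, ring the doorbell and
 * busy-wait (up to ~1ms) for the result's completion code.
 */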
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
591 {
592 	struct admin_queue *aq = block->aq;
593 	struct nix_aq_res_s *result;
594 	int timeout = 1000;
595 	u64 reg, head;
596 
597 	result = (struct nix_aq_res_s *)aq->res->base;
598 
599 	/* Get current head pointer where to append this instruction */
600 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
601 	head = (reg >> 4) & AQ_PTR_MASK;
602 
603 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
604 	       (void *)inst, aq->inst->entry_sz);
605 	memset(result, 0, sizeof(*result));
606 	/* sync into memory */
607 	wmb();
608 
609 	/* Ring the doorbell and wait for result */
610 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
611 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
612 		cpu_relax();
613 		udelay(1);
614 		timeout--;
615 		if (!timeout)
616 			return -EBUSY;
617 	}
618 
619 	if (result->compcode != NIX_AQ_COMP_GOOD)
620 		/* TODO: Replace this with some error code */
621 		return -EBUSY;
622 
623 	return 0;
624 }
625 
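/* Validate and submit a single NIX AQ instruction on behalf of a PF/VF:
 * check queue index bounds and SMQ ownership, copy the context and mask
 * into the shared result area, update the RQ/SQ/CQ enable bitmaps and,
 * for READ ops, copy the context back into the mailbox response.
 */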
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
628 {
629 	struct rvu_hwinfo *hw = rvu->hw;
630 	u16 pcifunc = req->hdr.pcifunc;
631 	int nixlf, blkaddr, rc = 0;
632 	struct nix_aq_inst_s inst;
633 	struct rvu_block *block;
634 	struct admin_queue *aq;
635 	struct rvu_pfvf *pfvf;
636 	void *ctx, *mask;
637 	bool ena;
638 	u64 cfg;
639 
640 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
641 	if (blkaddr < 0)
642 		return NIX_AF_ERR_AF_LF_INVALID;
643 
644 	block = &hw->block[blkaddr];
645 	aq = block->aq;
646 	if (!aq) {
647 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
648 		return NIX_AF_ERR_AQ_ENQUEUE;
649 	}
650 
651 	pfvf = rvu_get_pfvf(rvu, pcifunc);
652 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
653 
654 	/* Skip NIXLF check for broadcast MCE entry init */
655 	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
656 		if (!pfvf->nixlf || nixlf < 0)
657 			return NIX_AF_ERR_AF_LF_INVALID;
658 	}
659 
660 	switch (req->ctype) {
661 	case NIX_AQ_CTYPE_RQ:
662 		/* Check if index exceeds max no of queues */
663 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
664 			rc = NIX_AF_ERR_AQ_ENQUEUE;
665 		break;
666 	case NIX_AQ_CTYPE_SQ:
667 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
668 			rc = NIX_AF_ERR_AQ_ENQUEUE;
669 		break;
670 	case NIX_AQ_CTYPE_CQ:
671 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
672 			rc = NIX_AF_ERR_AQ_ENQUEUE;
673 		break;
674 	case NIX_AQ_CTYPE_RSS:
675 		/* Check if RSS is enabled and qidx is within range */
676 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
677 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
678 		    (req->qidx >= (256UL << (cfg & 0xF))))
679 			rc = NIX_AF_ERR_AQ_ENQUEUE;
680 		break;
681 	case NIX_AQ_CTYPE_MCE:
682 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
683 		/* Check if index exceeds MCE list length */
684 		if (!hw->nix0->mcast.mce_ctx ||
685 		    (req->qidx >= (256UL << (cfg & 0xF))))
686 			rc = NIX_AF_ERR_AQ_ENQUEUE;
687 
688 		/* Adding multicast lists for requests from PF/VFs is not
689 		 * yet supported, so ignore this.
690 		 */
691 		if (rsp)
692 			rc = NIX_AF_ERR_AQ_ENQUEUE;
693 		break;
694 	default:
695 		rc = NIX_AF_ERR_AQ_ENQUEUE;
696 	}
697 
698 	if (rc)
699 		return rc;
700 
701 	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
702 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
703 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
704 	     (req->op == NIX_AQ_INSTOP_WRITE &&
705 	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
706 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
707 				     pcifunc, req->sq.smq))
708 			return NIX_AF_ERR_AQ_ENQUEUE;
709 	}
710 
711 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
712 	inst.lf = nixlf;
713 	inst.cindex = req->qidx;
714 	inst.ctype = req->ctype;
715 	inst.op = req->op;
716 	/* Currently we are not supporting enqueuing multiple instructions,
717 	 * so always choose first entry in result memory.
718 	 */
719 	inst.res_addr = (u64)aq->res->iova;
720 
721 	/* Hardware uses same aq->res->base for updating result of
722 	 * previous instruction hence wait here till it is done.
723 	 */
724 	spin_lock(&aq->lock);
725 
726 	/* Clean result + context memory */
727 	memset(aq->res->base, 0, aq->res->entry_sz);
728 	/* Context needs to be written at RES_ADDR + 128 */
729 	ctx = aq->res->base + 128;
730 	/* Mask needs to be written at RES_ADDR + 256 */
731 	mask = aq->res->base + 256;
732 
733 	switch (req->op) {
734 	case NIX_AQ_INSTOP_WRITE:
735 		if (req->ctype == NIX_AQ_CTYPE_RQ)
736 			memcpy(mask, &req->rq_mask,
737 			       sizeof(struct nix_rq_ctx_s));
738 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
739 			memcpy(mask, &req->sq_mask,
740 			       sizeof(struct nix_sq_ctx_s));
741 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
742 			memcpy(mask, &req->cq_mask,
743 			       sizeof(struct nix_cq_ctx_s));
744 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
745 			memcpy(mask, &req->rss_mask,
746 			       sizeof(struct nix_rsse_s));
747 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
748 			memcpy(mask, &req->mce_mask,
749 			       sizeof(struct nix_rx_mce_s));
750 		fallthrough;
751 	case NIX_AQ_INSTOP_INIT:
752 		if (req->ctype == NIX_AQ_CTYPE_RQ)
753 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
754 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
755 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
756 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
757 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
758 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
759 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
760 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
761 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
762 		break;
763 	case NIX_AQ_INSTOP_NOP:
764 	case NIX_AQ_INSTOP_READ:
765 	case NIX_AQ_INSTOP_LOCK:
766 	case NIX_AQ_INSTOP_UNLOCK:
767 		break;
768 	default:
769 		rc = NIX_AF_ERR_AQ_ENQUEUE;
770 		spin_unlock(&aq->lock);
771 		return rc;
772 	}
773 
774 	/* Submit the instruction to AQ */
775 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
776 	if (rc) {
777 		spin_unlock(&aq->lock);
778 		return rc;
779 	}
780 
781 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
782 	if (req->op == NIX_AQ_INSTOP_INIT) {
783 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
784 			__set_bit(req->qidx, pfvf->rq_bmap);
785 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
786 			__set_bit(req->qidx, pfvf->sq_bmap);
787 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
788 			__set_bit(req->qidx, pfvf->cq_bmap);
789 	}
790 
791 	if (req->op == NIX_AQ_INSTOP_WRITE) {
792 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
793 			ena = (req->rq.ena & req->rq_mask.ena) |
794 				(test_bit(req->qidx, pfvf->rq_bmap) &
795 				~req->rq_mask.ena);
796 			if (ena)
797 				__set_bit(req->qidx, pfvf->rq_bmap);
798 			else
799 				__clear_bit(req->qidx, pfvf->rq_bmap);
800 		}
801 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
803 				(test_bit(req->qidx, pfvf->sq_bmap) &
804 				~req->sq_mask.ena);
805 			if (ena)
806 				__set_bit(req->qidx, pfvf->sq_bmap);
807 			else
808 				__clear_bit(req->qidx, pfvf->sq_bmap);
809 		}
810 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
812 				(test_bit(req->qidx, pfvf->cq_bmap) &
813 				~req->cq_mask.ena);
814 			if (ena)
815 				__set_bit(req->qidx, pfvf->cq_bmap);
816 			else
817 				__clear_bit(req->qidx, pfvf->cq_bmap);
818 		}
819 	}
820 
821 	if (rsp) {
822 		/* Copy read context into mailbox */
823 		if (req->op == NIX_AQ_INSTOP_READ) {
824 			if (req->ctype == NIX_AQ_CTYPE_RQ)
825 				memcpy(&rsp->rq, ctx,
826 				       sizeof(struct nix_rq_ctx_s));
827 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
828 				memcpy(&rsp->sq, ctx,
829 				       sizeof(struct nix_sq_ctx_s));
830 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
831 				memcpy(&rsp->cq, ctx,
832 				       sizeof(struct nix_cq_ctx_s));
833 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
834 				memcpy(&rsp->rss, ctx,
835 				       sizeof(struct nix_rsse_s));
836 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
837 				memcpy(&rsp->mce, ctx,
838 				       sizeof(struct nix_rx_mce_s));
839 		}
840 	}
841 
842 	spin_unlock(&aq->lock);
843 	return 0;
844 }
845 
static const char *nix_get_ctx_name(int ctype)
847 {
848 	switch (ctype) {
849 	case NIX_AQ_CTYPE_CQ:
850 		return "CQ";
851 	case NIX_AQ_CTYPE_SQ:
852 		return "SQ";
853 	case NIX_AQ_CTYPE_RQ:
854 		return "RQ";
855 	case NIX_AQ_CTYPE_RSS:
856 		return "RSS";
857 	}
858 	return "";
859 }
860 
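/* Disable all enabled queue contexts of the requested type (RQ/SQ/CQ) for
 * an LF by issuing AQ WRITE instructions that clear the ENA bit for every
 * queue marked in the corresponding bitmap.
 */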
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
862 {
863 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
864 	struct nix_aq_enq_req aq_req;
865 	unsigned long *bmap;
866 	int qidx, q_cnt = 0;
867 	int err = 0, rc;
868 
869 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
870 		return NIX_AF_ERR_AQ_ENQUEUE;
871 
872 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
873 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
874 
875 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
876 		aq_req.cq.ena = 0;
877 		aq_req.cq_mask.ena = 1;
878 		aq_req.cq.bp_ena = 0;
879 		aq_req.cq_mask.bp_ena = 1;
880 		q_cnt = pfvf->cq_ctx->qsize;
881 		bmap = pfvf->cq_bmap;
882 	}
883 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
884 		aq_req.sq.ena = 0;
885 		aq_req.sq_mask.ena = 1;
886 		q_cnt = pfvf->sq_ctx->qsize;
887 		bmap = pfvf->sq_bmap;
888 	}
889 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
890 		aq_req.rq.ena = 0;
891 		aq_req.rq_mask.ena = 1;
892 		q_cnt = pfvf->rq_ctx->qsize;
893 		bmap = pfvf->rq_bmap;
894 	}
895 
896 	aq_req.ctype = req->ctype;
897 	aq_req.op = NIX_AQ_INSTOP_WRITE;
898 
899 	for (qidx = 0; qidx < q_cnt; qidx++) {
900 		if (!test_bit(qidx, bmap))
901 			continue;
902 		aq_req.qidx = qidx;
903 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
904 		if (rc) {
905 			err = rc;
906 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
907 				nix_get_ctx_name(req->ctype), qidx);
908 		}
909 	}
910 
911 	return err;
912 }
913 
914 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
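/* After a successful INIT, lock the new context into NDC so it is not
 * dynamically evicted. MCE and DYNO context types are skipped.
 */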
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
916 {
917 	struct nix_aq_enq_req lock_ctx_req;
918 	int err;
919 
920 	if (req->op != NIX_AQ_INSTOP_INIT)
921 		return 0;
922 
923 	if (req->ctype == NIX_AQ_CTYPE_MCE ||
924 	    req->ctype == NIX_AQ_CTYPE_DYNO)
925 		return 0;
926 
927 	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
928 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
929 	lock_ctx_req.ctype = req->ctype;
930 	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
931 	lock_ctx_req.qidx = req->qidx;
932 	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
933 	if (err)
934 		dev_err(rvu->dev,
935 			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
936 			req->hdr.pcifunc,
937 			nix_get_ctx_name(req->ctype), req->qidx);
938 	return err;
939 }
940 
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
944 {
945 	int err;
946 
947 	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
948 	if (!err)
949 		err = nix_lf_hwctx_lockdown(rvu, req);
950 	return err;
951 }
952 #else
953 
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
957 {
958 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
959 }
960 #endif
961 
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
965 {
966 	return nix_lf_hwctx_disable(rvu, req);
967 }
968 
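/* Allocate and initialize a NIX LF for the requesting PF/VF: reset the LF,
 * allocate RQ/SQ/CQ/RSS and interrupt context memory, program the per-LF
 * CSRs and set up the interface (CGX or LBK).
 *
 * Illustrative request from a PF/VF driver (a sketch only; exact values
 * depend on the consumer driver):
 *
 *	req->rq_cnt = 16;
 *	req->sq_cnt = 16;
 *	req->cq_cnt = 32;
 *	req->rss_sz = 256;	// power of two, <= MAX_RSS_INDIR_TBL_SIZE
 *	req->rss_grps = 1;
 *	req->npa_func = RVU_DEFAULT_PF_FUNC;	// use this LF's own PF_FUNC
 */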
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
972 {
973 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
974 	struct rvu_hwinfo *hw = rvu->hw;
975 	u16 pcifunc = req->hdr.pcifunc;
976 	struct rvu_block *block;
977 	struct rvu_pfvf *pfvf;
978 	u64 cfg, ctx_cfg;
979 	int blkaddr;
980 
981 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
982 		return NIX_AF_ERR_PARAM;
983 
984 	if (req->way_mask)
985 		req->way_mask &= 0xFFFF;
986 
987 	pfvf = rvu_get_pfvf(rvu, pcifunc);
988 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
989 	if (!pfvf->nixlf || blkaddr < 0)
990 		return NIX_AF_ERR_AF_LF_INVALID;
991 
992 	block = &hw->block[blkaddr];
993 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
994 	if (nixlf < 0)
995 		return NIX_AF_ERR_AF_LF_INVALID;
996 
997 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
998 	if (req->npa_func) {
999 		/* If default, use 'this' NIXLF's PFFUNC */
1000 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1001 			req->npa_func = pcifunc;
1002 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1003 			return NIX_AF_INVAL_NPA_PF_FUNC;
1004 	}
1005 
1006 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1007 	if (req->sso_func) {
1008 		/* If default, use 'this' NIXLF's PFFUNC */
1009 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1010 			req->sso_func = pcifunc;
1011 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1012 			return NIX_AF_INVAL_SSO_PF_FUNC;
1013 	}
1014 
1015 	/* If RSS is being enabled, check if requested config is valid.
1016 	 * RSS table size should be power of two, otherwise
1017 	 * RSS_GRP::OFFSET + adder might go beyond that group or
1018 	 * won't be able to use entire table.
1019 	 */
1020 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1021 			    !is_power_of_2(req->rss_sz)))
1022 		return NIX_AF_ERR_RSS_SIZE_INVALID;
1023 
1024 	if (req->rss_sz &&
1025 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1026 		return NIX_AF_ERR_RSS_GRPS_INVALID;
1027 
1028 	/* Reset this NIX LF */
1029 	err = rvu_lf_reset(rvu, block, nixlf);
1030 	if (err) {
1031 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1032 			block->addr - BLKADDR_NIX0, nixlf);
1033 		return NIX_AF_ERR_LF_RESET;
1034 	}
1035 
1036 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1037 
1038 	/* Alloc NIX RQ HW context memory and config the base */
1039 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1040 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1041 	if (err)
1042 		goto free_mem;
1043 
1044 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1045 	if (!pfvf->rq_bmap)
1046 		goto free_mem;
1047 
1048 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1049 		    (u64)pfvf->rq_ctx->iova);
1050 
1051 	/* Set caching and queue count in HW */
1052 	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1053 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1054 
1055 	/* Alloc NIX SQ HW context memory and config the base */
1056 	hwctx_size = 1UL << (ctx_cfg & 0xF);
1057 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1058 	if (err)
1059 		goto free_mem;
1060 
1061 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1062 	if (!pfvf->sq_bmap)
1063 		goto free_mem;
1064 
1065 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1066 		    (u64)pfvf->sq_ctx->iova);
1067 
1068 	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1069 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1070 
1071 	/* Alloc NIX CQ HW context memory and config the base */
1072 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1073 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1074 	if (err)
1075 		goto free_mem;
1076 
1077 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1078 	if (!pfvf->cq_bmap)
1079 		goto free_mem;
1080 
1081 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1082 		    (u64)pfvf->cq_ctx->iova);
1083 
1084 	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1085 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1086 
1087 	/* Initialize receive side scaling (RSS) */
1088 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1089 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1090 				 req->rss_grps, hwctx_size, req->way_mask);
1091 	if (err)
1092 		goto free_mem;
1093 
1094 	/* Alloc memory for CQINT's HW contexts */
1095 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1096 	qints = (cfg >> 24) & 0xFFF;
1097 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1098 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1099 	if (err)
1100 		goto free_mem;
1101 
1102 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1103 		    (u64)pfvf->cq_ints_ctx->iova);
1104 
1105 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1106 		    BIT_ULL(36) | req->way_mask << 20);
1107 
1108 	/* Alloc memory for QINT's HW contexts */
1109 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1110 	qints = (cfg >> 12) & 0xFFF;
1111 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1112 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1113 	if (err)
1114 		goto free_mem;
1115 
1116 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1117 		    (u64)pfvf->nix_qints_ctx->iova);
1118 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1119 		    BIT_ULL(36) | req->way_mask << 20);
1120 
1121 	/* Setup VLANX TPID's.
1122 	 * Use VLAN1 for 802.1Q
1123 	 * and VLAN0 for 802.1AD.
1124 	 */
1125 	cfg = (0x8100ULL << 16) | 0x88A8ULL;
1126 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1127 
1128 	/* Enable LMTST for this NIX LF */
1129 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1130 
1131 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1132 	if (req->npa_func)
1133 		cfg = req->npa_func;
1134 	if (req->sso_func)
1135 		cfg |= (u64)req->sso_func << 16;
1136 
1137 	cfg |= (u64)req->xqe_sz << 33;
1138 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1139 
1140 	/* Config Rx pkt length, csum checks and apad  enable / disable */
1141 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1142 
1143 	/* Configure pkind for TX parse config */
1144 	cfg = NPC_TX_DEF_PKIND;
1145 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1146 
1147 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1148 	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1149 	if (err)
1150 		goto free_mem;
1151 
1152 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
1153 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1154 
1155 	goto exit;
1156 
1157 free_mem:
1158 	nix_ctx_free(rvu, pfvf);
1159 	rc = -ENOMEM;
1160 
1161 exit:
1162 	/* Set macaddr of this PF/VF */
1163 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1164 
1165 	/* set SQB size info */
1166 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1167 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1168 	rsp->rx_chan_base = pfvf->rx_chan_base;
1169 	rsp->tx_chan_base = pfvf->tx_chan_base;
1170 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1171 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1172 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1173 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1174 	/* Get HW supported stat count */
1175 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1176 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1177 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1178 	/* Get count of CQ IRQs and error IRQs supported per LF */
1179 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1180 	rsp->qints = ((cfg >> 12) & 0xFFF);
1181 	rsp->cints = ((cfg >> 24) & 0xFFF);
1182 	return rc;
1183 }
1184 
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
1187 {
1188 	struct rvu_hwinfo *hw = rvu->hw;
1189 	u16 pcifunc = req->hdr.pcifunc;
1190 	struct rvu_block *block;
1191 	int blkaddr, nixlf, err;
1192 	struct rvu_pfvf *pfvf;
1193 
1194 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1195 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1196 	if (!pfvf->nixlf || blkaddr < 0)
1197 		return NIX_AF_ERR_AF_LF_INVALID;
1198 
1199 	block = &hw->block[blkaddr];
1200 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1201 	if (nixlf < 0)
1202 		return NIX_AF_ERR_AF_LF_INVALID;
1203 
1204 	nix_interface_deinit(rvu, pcifunc, nixlf);
1205 
1206 	/* Reset this NIX LF */
1207 	err = rvu_lf_reset(rvu, block, nixlf);
1208 	if (err) {
1209 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1210 			block->addr - BLKADDR_NIX0, nixlf);
1211 		return NIX_AF_ERR_LF_RESET;
1212 	}
1213 
1214 	nix_ctx_free(rvu, pfvf);
1215 
1216 	return 0;
1217 }
1218 
int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
1222 {
1223 	u16 pcifunc = req->hdr.pcifunc;
1224 	struct nix_hw *nix_hw;
1225 	struct rvu_pfvf *pfvf;
1226 	int blkaddr, rc;
1227 	u32 cfg;
1228 
1229 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1230 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1231 	if (!pfvf->nixlf || blkaddr < 0)
1232 		return NIX_AF_ERR_AF_LF_INVALID;
1233 
1234 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1235 	if (!nix_hw)
1236 		return -EINVAL;
1237 
1238 	cfg = (((u32)req->offset & 0x7) << 16) |
1239 	      (((u32)req->y_mask & 0xF) << 12) |
1240 	      (((u32)req->y_val & 0xF) << 8) |
1241 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1242 
1243 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1244 	if (rc < 0) {
1245 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1246 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1247 		return NIX_AF_ERR_MARK_CFG_FAIL;
1248 	}
1249 
1250 	rsp->mark_format_idx = rc;
1251 	return 0;
1252 }
1253 
1254 /* Disable shaping of pkts by a scheduler queue
1255  * at a given scheduler level.
1256  */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
1259 {
1260 	u64  cir_reg = 0, pir_reg = 0;
1261 	u64  cfg;
1262 
1263 	switch (lvl) {
1264 	case NIX_TXSCH_LVL_TL1:
1265 		cir_reg = NIX_AF_TL1X_CIR(schq);
1266 		pir_reg = 0; /* PIR not available at TL1 */
1267 		break;
1268 	case NIX_TXSCH_LVL_TL2:
1269 		cir_reg = NIX_AF_TL2X_CIR(schq);
1270 		pir_reg = NIX_AF_TL2X_PIR(schq);
1271 		break;
1272 	case NIX_TXSCH_LVL_TL3:
1273 		cir_reg = NIX_AF_TL3X_CIR(schq);
1274 		pir_reg = NIX_AF_TL3X_PIR(schq);
1275 		break;
1276 	case NIX_TXSCH_LVL_TL4:
1277 		cir_reg = NIX_AF_TL4X_CIR(schq);
1278 		pir_reg = NIX_AF_TL4X_PIR(schq);
1279 		break;
1280 	}
1281 
1282 	if (!cir_reg)
1283 		return;
1284 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1285 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1286 
1287 	if (!pir_reg)
1288 		return;
1289 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1290 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1291 }
1292 
static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
1295 {
1296 	struct rvu_hwinfo *hw = rvu->hw;
1297 	int link;
1298 
1299 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
1300 		return;
1301 
1302 	/* Reset TL4's SDP link config */
1303 	if (lvl == NIX_TXSCH_LVL_TL4)
1304 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1305 
1306 	if (lvl != NIX_TXSCH_LVL_TL2)
1307 		return;
1308 
1309 	/* Reset TL2's CGX or LBK link config */
1310 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1311 		rvu_write64(rvu, blkaddr,
1312 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1313 }
1314 
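/* Return the transmit link index for this PF_FUNC: CGX links come first,
 * followed by the LBK link(s) and finally the SDP link.
 */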
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1316 {
1317 	struct rvu_hwinfo *hw = rvu->hw;
1318 	int pf = rvu_get_pf(pcifunc);
1319 	u8 cgx_id = 0, lmac_id = 0;
1320 
1321 	if (is_afvf(pcifunc)) {/* LBK links */
1322 		return hw->cgx_links;
1323 	} else if (is_pf_cgxmapped(rvu, pf)) {
1324 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1325 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1326 	}
1327 
1328 	/* SDP link */
1329 	return hw->cgx_links + hw->lbk_links;
1330 }
1331 
static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
1334 {
1335 	struct rvu_hwinfo *hw = rvu->hw;
1336 	int pf = rvu_get_pf(pcifunc);
1337 
1338 	if (is_afvf(pcifunc)) { /* LBK links */
1339 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1340 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1341 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1342 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1343 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1344 	} else { /* SDP link */
1345 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1346 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1347 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1348 	}
1349 }
1350 
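/* Validate a scheduler queue allocation request at one level: a single
 * queue for aggregation levels, the fixed PF_FUNC-indexed queue when the
 * silicon uses fixed TXSCHQ mapping, otherwise enough free (and, if
 * requested, contiguous) queues in the level's bitmap.
 */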
static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
				      struct nix_hw *nix_hw,
				      struct nix_txsch_alloc_req *req)
1354 {
1355 	struct rvu_hwinfo *hw = rvu->hw;
1356 	int schq, req_schq, free_cnt;
1357 	struct nix_txsch *txsch;
1358 	int link, start, end;
1359 
1360 	txsch = &nix_hw->txsch[lvl];
1361 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
1362 
1363 	if (!req_schq)
1364 		return 0;
1365 
1366 	link = nix_get_tx_link(rvu, pcifunc);
1367 
1368 	/* For traffic aggregating scheduler level, one queue is enough */
1369 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1370 		if (req_schq != 1)
1371 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
1372 		return 0;
1373 	}
1374 
	/* Get free SCHQ count and check if request can be accommodated */
1376 	if (hw->cap.nix_fixed_txschq_mapping) {
1377 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1378 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1379 		if (end <= txsch->schq.max && schq < end &&
1380 		    !test_bit(schq, txsch->schq.bmap))
1381 			free_cnt = 1;
1382 		else
1383 			free_cnt = 0;
1384 	} else {
1385 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
1386 	}
1387 
1388 	if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
1389 	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
1390 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1391 
1392 	/* If contiguous queues are needed, check for availability */
1393 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1394 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1395 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1396 
1397 	return 0;
1398 }
1399 
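/* Allocate the requested scheduler queues at one level within the
 * [start, end) range: aggregation levels get a single queue mapped to the
 * transmit link, fixed-mapping silicons get the queue indexed by PF_FUNC,
 * otherwise queues are allocated from the level's bitmap.
 */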
static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
1403 {
1404 	struct rvu_hwinfo *hw = rvu->hw;
1405 	u16 pcifunc = rsp->hdr.pcifunc;
1406 	int idx, schq;
1407 
	/* For traffic aggregating levels, queue alloc is based
	 * on the transmit link to which the PF_FUNC is mapped.
	 */
1411 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1412 		/* A single TL queue is allocated */
1413 		if (rsp->schq_contig[lvl]) {
1414 			rsp->schq_contig[lvl] = 1;
1415 			rsp->schq_contig_list[lvl][0] = start;
1416 		}
1417 
		/* Both contig and non-contig requests don't make sense here */
1419 		if (rsp->schq_contig[lvl])
1420 			rsp->schq[lvl] = 0;
1421 
1422 		if (rsp->schq[lvl]) {
1423 			rsp->schq[lvl] = 1;
1424 			rsp->schq_list[lvl][0] = start;
1425 		}
1426 		return;
1427 	}
1428 
1429 	/* Adjust the queue request count if HW supports
1430 	 * only one queue per level configuration.
1431 	 */
1432 	if (hw->cap.nix_fixed_txschq_mapping) {
1433 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
1434 		schq = start + idx;
1435 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1436 			rsp->schq_contig[lvl] = 0;
1437 			rsp->schq[lvl] = 0;
1438 			return;
1439 		}
1440 
1441 		if (rsp->schq_contig[lvl]) {
1442 			rsp->schq_contig[lvl] = 1;
1443 			set_bit(schq, txsch->schq.bmap);
1444 			rsp->schq_contig_list[lvl][0] = schq;
1445 			rsp->schq[lvl] = 0;
1446 		} else if (rsp->schq[lvl]) {
1447 			rsp->schq[lvl] = 1;
1448 			set_bit(schq, txsch->schq.bmap);
1449 			rsp->schq_list[lvl][0] = schq;
1450 		}
1451 		return;
1452 	}
1453 
	/* Allocate the requested contiguous queue indices first */
1455 	if (rsp->schq_contig[lvl]) {
1456 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1457 						  txsch->schq.max, start,
1458 						  rsp->schq_contig[lvl], 0);
1459 		if (schq >= end)
1460 			rsp->schq_contig[lvl] = 0;
1461 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1462 			set_bit(schq, txsch->schq.bmap);
1463 			rsp->schq_contig_list[lvl][idx] = schq;
1464 			schq++;
1465 		}
1466 	}
1467 
1468 	/* Allocate non-contiguous queue indices */
1469 	if (rsp->schq[lvl]) {
1470 		idx = 0;
1471 		for (schq = start; schq < end; schq++) {
1472 			if (!test_bit(schq, txsch->schq.bmap)) {
1473 				set_bit(schq, txsch->schq.bmap);
1474 				rsp->schq_list[lvl][idx++] = schq;
1475 			}
1476 			if (idx == rsp->schq[lvl])
1477 				break;
1478 		}
1479 		/* Update how many were allocated */
1480 		rsp->schq[lvl] = idx;
1481 	}
1482 }
1483 
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
1487 {
1488 	struct rvu_hwinfo *hw = rvu->hw;
1489 	u16 pcifunc = req->hdr.pcifunc;
1490 	int link, blkaddr, rc = 0;
1491 	int lvl, idx, start, end;
1492 	struct nix_txsch *txsch;
1493 	struct rvu_pfvf *pfvf;
1494 	struct nix_hw *nix_hw;
1495 	u32 *pfvf_map;
1496 	u16 schq;
1497 
1498 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1499 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1500 	if (!pfvf->nixlf || blkaddr < 0)
1501 		return NIX_AF_ERR_AF_LF_INVALID;
1502 
1503 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1504 	if (!nix_hw)
1505 		return -EINVAL;
1506 
1507 	mutex_lock(&rvu->rsrc_lock);
1508 
	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
	 */
1512 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1513 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1514 		if (rc)
1515 			goto err;
1516 	}
1517 
1518 	/* Allocate requested Tx scheduler queues */
1519 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1520 		txsch = &nix_hw->txsch[lvl];
1521 		pfvf_map = txsch->pfvf_map;
1522 
1523 		if (!req->schq[lvl] && !req->schq_contig[lvl])
1524 			continue;
1525 
1526 		rsp->schq[lvl] = req->schq[lvl];
1527 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
1528 
1529 		link = nix_get_tx_link(rvu, pcifunc);
1530 
1531 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1532 			start = link;
1533 			end = link;
1534 		} else if (hw->cap.nix_fixed_txschq_mapping) {
1535 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1536 		} else {
1537 			start = 0;
1538 			end = txsch->schq.max;
1539 		}
1540 
1541 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1542 
1543 		/* Reset queue config */
1544 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1545 			schq = rsp->schq_contig_list[lvl][idx];
1546 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1547 			    NIX_TXSCHQ_CFG_DONE))
1548 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1549 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1550 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1551 		}
1552 
1553 		for (idx = 0; idx < req->schq[lvl]; idx++) {
1554 			schq = rsp->schq_list[lvl][idx];
1555 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1556 			    NIX_TXSCHQ_CFG_DONE))
1557 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1558 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1559 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1560 		}
1561 	}
1562 
1563 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1564 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1565 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1566 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1567 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1568 	goto exit;
1569 err:
1570 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1571 exit:
1572 	mutex_unlock(&rvu->rsrc_lock);
1573 	return rc;
1574 }
1575 
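/* Flush an SMQ: make sure CGX TX is enabled and link backpressure is
 * disabled so the flush can drain, set the flush and enqueue-xoff bits,
 * poll for completion and then restore the previous CGX/backpressure state.
 */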
static void nix_smq_flush(struct rvu *rvu, int blkaddr,
			  int smq, u16 pcifunc, int nixlf)
1578 {
1579 	int pf = rvu_get_pf(pcifunc);
1580 	u8 cgx_id = 0, lmac_id = 0;
1581 	int err, restore_tx_en = 0;
1582 	u64 cfg;
1583 
1584 	/* enable cgx tx if disabled */
1585 	if (is_pf_cgxmapped(rvu, pf)) {
1586 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1587 		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1588 						    lmac_id, true);
1589 	}
1590 
1591 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1592 	/* Do SMQ flush and set enqueue xoff */
1593 	cfg |= BIT_ULL(50) | BIT_ULL(49);
1594 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1595 
1596 	/* Disable backpressure from physical link,
1597 	 * otherwise SMQ flush may stall.
1598 	 */
1599 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
1600 
1601 	/* Wait for flush to complete */
1602 	err = rvu_poll_reg(rvu, blkaddr,
1603 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1604 	if (err)
1605 		dev_err(rvu->dev,
1606 			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1607 
1608 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
1609 	/* restore cgx tx state */
1610 	if (restore_tx_en)
1611 		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1612 }
1613 
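/* Free all transmit scheduler queues owned by this PF_FUNC: disable
 * TL2/TL4 link configs, flush the SMQs, return queues below the
 * aggregation level to the free pool and sync NDC-TX for the LF.
 */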
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1615 {
1616 	int blkaddr, nixlf, lvl, schq, err;
1617 	struct rvu_hwinfo *hw = rvu->hw;
1618 	struct nix_txsch *txsch;
1619 	struct nix_hw *nix_hw;
1620 
1621 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1622 	if (blkaddr < 0)
1623 		return NIX_AF_ERR_AF_LF_INVALID;
1624 
1625 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1626 	if (!nix_hw)
1627 		return -EINVAL;
1628 
1629 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1630 	if (nixlf < 0)
1631 		return NIX_AF_ERR_AF_LF_INVALID;
1632 
	/* Disable TL2/3 queue links before SMQ flush */
1634 	mutex_lock(&rvu->rsrc_lock);
1635 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1636 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1637 			continue;
1638 
1639 		txsch = &nix_hw->txsch[lvl];
1640 		for (schq = 0; schq < txsch->schq.max; schq++) {
1641 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1642 				continue;
1643 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1644 		}
1645 	}
1646 
1647 	/* Flush SMQs */
1648 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1649 	for (schq = 0; schq < txsch->schq.max; schq++) {
1650 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1651 			continue;
1652 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1653 	}
1654 
1655 	/* Now free scheduler queues to free pool */
1656 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		 /* TLs above the aggregation level are shared across a PF
		  * and its VFs, hence skip freeing them.
		  */
1660 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
1661 			continue;
1662 
1663 		txsch = &nix_hw->txsch[lvl];
1664 		for (schq = 0; schq < txsch->schq.max; schq++) {
1665 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1666 				continue;
1667 			rvu_free_rsrc(&txsch->schq, schq);
1668 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1669 		}
1670 	}
1671 	mutex_unlock(&rvu->rsrc_lock);
1672 
1673 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1674 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1675 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1676 	if (err)
1677 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1678 
1679 	return 0;
1680 }
1681 
static int nix_txschq_free_one(struct rvu *rvu,
			       struct nix_txsch_free_req *req)
1684 {
1685 	struct rvu_hwinfo *hw = rvu->hw;
1686 	u16 pcifunc = req->hdr.pcifunc;
1687 	int lvl, schq, nixlf, blkaddr;
1688 	struct nix_txsch *txsch;
1689 	struct nix_hw *nix_hw;
1690 	u32 *pfvf_map;
1691 
1692 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1693 	if (blkaddr < 0)
1694 		return NIX_AF_ERR_AF_LF_INVALID;
1695 
1696 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1697 	if (!nix_hw)
1698 		return -EINVAL;
1699 
1700 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1701 	if (nixlf < 0)
1702 		return NIX_AF_ERR_AF_LF_INVALID;
1703 
1704 	lvl = req->schq_lvl;
1705 	schq = req->schq;
1706 	txsch = &nix_hw->txsch[lvl];
1707 
1708 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1709 		return 0;
1710 
1711 	pfvf_map = txsch->pfvf_map;
1712 	mutex_lock(&rvu->rsrc_lock);
1713 
1714 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1715 		mutex_unlock(&rvu->rsrc_lock);
1716 		goto err;
1717 	}
1718 
1719 	/* Flush if it is an SMQ. The onus of disabling
1720 	 * TL2/3 queue links before the SMQ flush is on the user.
1721 	 */
1722 	if (lvl == NIX_TXSCH_LVL_SMQ)
1723 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1724 
1725 	/* Free the resource */
1726 	rvu_free_rsrc(&txsch->schq, schq);
1727 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1728 	mutex_unlock(&rvu->rsrc_lock);
1729 	return 0;
1730 err:
1731 	return NIX_AF_ERR_TLX_INVALID;
1732 }
1733 
1734 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1735 				    struct nix_txsch_free_req *req,
1736 				    struct msg_rsp *rsp)
1737 {
1738 	if (req->flags & TXSCHQ_FREE_ALL)
1739 		return nix_txschq_free(rvu, req->hdr.pcifunc);
1740 	else
1741 		return nix_txschq_free_one(rvu, req);
1742 }
1743 
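/* Validate a scheduler queue config register write: the register must be
 * valid for the given level, the schq must belong to this PF/VF and, for
 * *_PARENT registers, the parent queue at the next level up must also
 * belong to this PF/VF.
 */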
1744 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1745 				      int lvl, u64 reg, u64 regval)
1746 {
1747 	u64 regbase = reg & 0xFFFF;
1748 	u16 schq, parent;
1749 
1750 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1751 		return false;
1752 
1753 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1754 	/* Check if this schq belongs to this PF/VF or not */
1755 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1756 		return false;
1757 
1758 	parent = (regval >> 16) & 0x1FF;
1759 	/* Validate MDQ's TL4 parent */
1760 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
1761 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1762 		return false;
1763 
1764 	/* Validate TL4's TL3 parent */
1765 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
1766 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1767 		return false;
1768 
1769 	/* Validate TL3's TL2 parent */
1770 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
1771 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1772 		return false;
1773 
1774 	/* Validate TL2's TL1 parent */
1775 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
1776 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1777 		return false;
1778 
1779 	return true;
1780 }
1781 
1782 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1783 {
1784 	u64 regbase;
1785 
1786 	if (hw->cap.nix_shaping)
1787 		return true;
1788 
1789 	/* If shaping and coloring is not supported, then
1790 	 * *_CIR and *_PIR registers should not be configured.
1791 	 */
1792 	regbase = reg & 0xFFFF;
1793 
1794 	switch (lvl) {
1795 	case NIX_TXSCH_LVL_TL1:
1796 		if (regbase == NIX_AF_TL1X_CIR(0))
1797 			return false;
1798 		break;
1799 	case NIX_TXSCH_LVL_TL2:
1800 		if (regbase == NIX_AF_TL2X_CIR(0) ||
1801 		    regbase == NIX_AF_TL2X_PIR(0))
1802 			return false;
1803 		break;
1804 	case NIX_TXSCH_LVL_TL3:
1805 		if (regbase == NIX_AF_TL3X_CIR(0) ||
1806 		    regbase == NIX_AF_TL3X_PIR(0))
1807 			return false;
1808 		break;
1809 	case NIX_TXSCH_LVL_TL4:
1810 		if (regbase == NIX_AF_TL4X_CIR(0) ||
1811 		    regbase == NIX_AF_TL4X_PIR(0))
1812 			return false;
1813 		break;
1814 	}
1815 	return true;
1816 }
1817 
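/* Program default round-robin priority and quantum and clear the CIR
 * register on the TL1 queue mapped to this PF's Tx link; skipped if the
 * PF has already done the config itself.
 */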
1818 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1819 				u16 pcifunc, int blkaddr)
1820 {
1821 	u32 *pfvf_map;
1822 	int schq;
1823 
1824 	schq = nix_get_tx_link(rvu, pcifunc);
1825 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1826 	/* Skip if PF has already done the config */
1827 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1828 		return;
1829 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1830 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
1831 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1832 		    TXSCH_TL1_DFLT_RR_QTM);
1833 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1834 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1835 }
1836 
1837 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1838 				    struct nix_txschq_config *req,
1839 				    struct msg_rsp *rsp)
1840 {
1841 	struct rvu_hwinfo *hw = rvu->hw;
1842 	u16 pcifunc = req->hdr.pcifunc;
1843 	u64 reg, regval, schq_regbase;
1844 	struct nix_txsch *txsch;
1845 	struct nix_hw *nix_hw;
1846 	int blkaddr, idx, err;
1847 	int nixlf, schq;
1848 	u32 *pfvf_map;
1849 
1850 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1851 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
1852 		return NIX_AF_INVAL_TXSCHQ_CFG;
1853 
1854 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1855 	if (err)
1856 		return err;
1857 
1858 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1859 	if (!nix_hw)
1860 		return -EINVAL;
1861 
1862 	txsch = &nix_hw->txsch[req->lvl];
1863 	pfvf_map = txsch->pfvf_map;
1864 
1865 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
1866 	    pcifunc & RVU_PFVF_FUNC_MASK) {
1867 		mutex_lock(&rvu->rsrc_lock);
1868 		if (req->lvl == NIX_TXSCH_LVL_TL1)
1869 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
1870 		mutex_unlock(&rvu->rsrc_lock);
1871 		return 0;
1872 	}
1873 
1874 	for (idx = 0; idx < req->num_regs; idx++) {
1875 		reg = req->reg[idx];
1876 		regval = req->regval[idx];
1877 		schq_regbase = reg & 0xFFFF;
1878 
1879 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
1880 					       txsch->lvl, reg, regval))
1881 			return NIX_AF_INVAL_TXSCHQ_CFG;
1882 
1883 		/* Check if shaping and coloring is supported */
1884 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
1885 			continue;
1886 
1887 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1888 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1889 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1890 					   pcifunc, 0);
1891 			regval &= ~(0x7FULL << 24);
1892 			regval |= ((u64)nixlf << 24);
1893 		}
1894 
1895 		/* Clear 'BP_ENA' config, if it's not allowed */
1896 		if (!hw->cap.nix_tx_link_bp) {
1897 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
1898 			    (schq_regbase & 0xFF00) ==
1899 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
1900 				regval &= ~BIT_ULL(13);
1901 		}
1902 
1903 		/* Mark config as done for TL1 by PF */
1904 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
1905 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
1906 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1907 			mutex_lock(&rvu->rsrc_lock);
1908 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
1909 							NIX_TXSCHQ_CFG_DONE);
1910 			mutex_unlock(&rvu->rsrc_lock);
1911 		}
1912 
1913 		/* SMQ flush is special, hence split the register write:
1914 		 * trigger the flush first and write the rest of the bits later.
1915 		 */
1916 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1917 		    (regval & BIT_ULL(49))) {
1918 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1919 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1920 			regval &= ~BIT_ULL(49);
1921 		}
1922 		rvu_write64(rvu, blkaddr, reg, regval);
1923 	}
1924 
1925 	return 0;
1926 }
1927 
1928 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
1929 			   struct nix_vtag_config *req)
1930 {
1931 	u64 regval = req->vtag_size;
1932 
1933 	if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
1934 		return -EINVAL;
1935 
1936 	if (req->rx.capture_vtag)
1937 		regval |= BIT_ULL(5);
1938 	if (req->rx.strip_vtag)
1939 		regval |= BIT_ULL(4);
1940 
1941 	rvu_write64(rvu, blkaddr,
1942 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
1943 	return 0;
1944 }
1945 
1946 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
1947 				  struct nix_vtag_config *req,
1948 				  struct msg_rsp *rsp)
1949 {
1950 	u16 pcifunc = req->hdr.pcifunc;
1951 	int blkaddr, nixlf, err;
1952 
1953 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1954 	if (err)
1955 		return err;
1956 
1957 	if (req->cfg_type) {
1958 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
1959 		if (err)
1960 			return NIX_AF_ERR_PARAM;
1961 	} else {
1962 		/* TODO: handle tx vtag configuration */
1963 		return 0;
1964 	}
1965 
1966 	return 0;
1967 }
1968 
1969 static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1970 			 u16 pcifunc, int next, bool eol)
1971 {
1972 	struct nix_aq_enq_req aq_req;
1973 	int err;
1974 
1975 	aq_req.hdr.pcifunc = 0;
1976 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
1977 	aq_req.op = op;
1978 	aq_req.qidx = mce;
1979 
1980 	/* Forward bcast pkts to RQ0, RSS not needed */
1981 	aq_req.mce.op = 0;
1982 	aq_req.mce.index = 0;
1983 	aq_req.mce.eol = eol;
1984 	aq_req.mce.pf_func = pcifunc;
1985 	aq_req.mce.next = next;
1986 
1987 	/* All fields valid */
1988 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
1989 
1990 	err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1991 	if (err) {
1992 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1993 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1994 		return err;
1995 	}
1996 	return 0;
1997 }
1998 
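/* Add or delete a pcifunc in the software MCE list. On delete the
 * matching entry is unlinked and freed; on add a new entry is appended
 * at the tail.
 */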
1999 static int nix_update_mce_list(struct nix_mce_list *mce_list,
2000 			       u16 pcifunc, bool add)
2001 {
2002 	struct mce *mce, *tail = NULL;
2003 	bool delete = false;
2004 
2005 	/* Scan through the current list */
2006 	hlist_for_each_entry(mce, &mce_list->head, node) {
2007 		/* If already exists, then delete */
2008 		if (mce->pcifunc == pcifunc && !add) {
2009 			delete = true;
2010 			break;
2011 		}
2012 		tail = mce;
2013 	}
2014 
2015 	if (delete) {
2016 		hlist_del(&mce->node);
2017 		kfree(mce);
2018 		mce_list->count--;
2019 		return 0;
2020 	}
2021 
2022 	if (!add)
2023 		return 0;
2024 
2025 	/* Add a new one to the list, at the tail */
2026 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2027 	if (!mce)
2028 		return -ENOMEM;
2029 	mce->pcifunc = pcifunc;
2030 	if (!tail)
2031 		hlist_add_head(&mce->node, &mce_list->head);
2032 	else
2033 		hlist_add_behind(&mce->node, &tail->node);
2034 	mce_list->count++;
2035 	return 0;
2036 }
2037 
2038 int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
2039 {
2040 	int err = 0, idx, next_idx, last_idx;
2041 	struct nix_mce_list *mce_list;
2042 	struct nix_mcast *mcast;
2043 	struct nix_hw *nix_hw;
2044 	struct rvu_pfvf *pfvf;
2045 	struct mce *mce;
2046 	int blkaddr;
2047 
2048 	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
2049 	if (is_afvf(pcifunc))
2050 		return 0;
2051 
2052 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2053 	if (blkaddr < 0)
2054 		return 0;
2055 
2056 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2057 	if (!nix_hw)
2058 		return 0;
2059 
2060 	mcast = &nix_hw->mcast;
2061 
2062 	/* Get this PF/VF func's MCE index */
2063 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2064 	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2065 
2066 	mce_list = &pfvf->bcast_mce_list;
2067 	if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
2068 		dev_err(rvu->dev,
2069 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2070 			__func__, idx, mce_list->max,
2071 			pcifunc >> RVU_PFVF_PF_SHIFT);
2072 		return -EINVAL;
2073 	}
2074 
2075 	mutex_lock(&mcast->mce_lock);
2076 
2077 	err = nix_update_mce_list(mce_list, pcifunc, add);
2078 	if (err)
2079 		goto end;
2080 
2081 	/* Disable MCAM entry in NPC */
2082 	if (!mce_list->count) {
2083 		rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
2084 		goto end;
2085 	}
2086 
2087 	/* Dump the updated list to HW */
2088 	idx = pfvf->bcast_mce_idx;
2089 	last_idx = idx + mce_list->count - 1;
2090 	hlist_for_each_entry(mce, &mce_list->head, node) {
2091 		if (idx > last_idx)
2092 			break;
2093 
2094 		next_idx = idx + 1;
2095 		/* EOL should be set in last MCE */
2096 		err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
2097 				    mce->pcifunc, next_idx,
2098 				    next_idx > last_idx);
2099 		if (err)
2100 			goto end;
2101 		idx++;
2102 	}
2103 
2104 end:
2105 	mutex_unlock(&mcast->mce_lock);
2106 	return err;
2107 }
2108 
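/* Pre-allocate a broadcast MCE list for each CGX mapped PF (one entry
 * for the PF plus one per possible VF) and write dummy INIT entries to
 * HW so later updates can always use AQ WRITE ops.
 */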
2109 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2110 {
2111 	struct nix_mcast *mcast = &nix_hw->mcast;
2112 	int err, pf, numvfs, idx;
2113 	struct rvu_pfvf *pfvf;
2114 	u16 pcifunc;
2115 	u64 cfg;
2116 
2117 	/* Skip PF0 (i.e AF) */
2118 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2119 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2120 		/* If PF is not enabled, nothing to do */
2121 		if (!((cfg >> 20) & 0x01))
2122 			continue;
2123 		/* Get numVFs attached to this PF */
2124 		numvfs = (cfg >> 12) & 0xFF;
2125 
2126 		pfvf = &rvu->pf[pf];
2127 		/* Save the start MCE */
2128 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2129 
2130 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2131 
2132 		for (idx = 0; idx < (numvfs + 1); idx++) {
2133 			/* idx-0 is for PF, followed by VFs */
2134 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2135 			pcifunc |= idx;
2136 			/* Add dummy entries now, so that we don't have to check
2137 			 * whether AQ_OP should be INIT/WRITE later on.
2138 			 * Will be updated when a NIXLF is attached to or
2139 			 * detached from these PF/VFs.
2140 			 */
2141 			err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
2142 					    NIX_AQ_INSTOP_INIT,
2143 					    pcifunc, 0, true);
2144 			if (err)
2145 				return err;
2146 		}
2147 	}
2148 	return 0;
2149 }
2150 
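/* Allocate memory for multicast/mirror replication entries (MCE context)
 * and replication buffers, program their base addresses and sizes,
 * reserve a pkind for internal Rx replay and set up the per-PF broadcast
 * MCE tables.
 */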
2151 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2152 {
2153 	struct nix_mcast *mcast = &nix_hw->mcast;
2154 	struct rvu_hwinfo *hw = rvu->hw;
2155 	int err, size;
2156 
2157 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2158 	size = (1ULL << size);
2159 
2160 	/* Alloc memory for multicast/mirror replication entries */
2161 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2162 			 (256UL << MC_TBL_SIZE), size);
2163 	if (err)
2164 		return -ENOMEM;
2165 
2166 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2167 		    (u64)mcast->mce_ctx->iova);
2168 
2169 	/* Set max list length equal to max no. of VFs per PF + PF itself */
2170 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2171 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2172 
2173 	/* Alloc memory for multicast replication buffers */
2174 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2175 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2176 			 (8UL << MC_BUF_CNT), size);
2177 	if (err)
2178 		return -ENOMEM;
2179 
2180 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2181 		    (u64)mcast->mcast_buf->iova);
2182 
2183 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
2184 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2185 
2186 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2187 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
2188 		    BIT_ULL(20) | MC_BUF_CNT);
2189 
2190 	mutex_init(&mcast->mce_lock);
2191 
2192 	return nix_setup_bcast_tables(rvu, nix_hw);
2193 }
2194 
2195 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2196 {
2197 	struct nix_txsch *txsch;
2198 	int err, lvl, schq;
2199 	u64 cfg, reg;
2200 
2201 	/* Get scheduler queue count of each type and alloc
2202 	 * bitmap for each for alloc/free/attach operations.
2203 	 */
2204 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2205 		txsch = &nix_hw->txsch[lvl];
2206 		txsch->lvl = lvl;
2207 		switch (lvl) {
2208 		case NIX_TXSCH_LVL_SMQ:
2209 			reg = NIX_AF_MDQ_CONST;
2210 			break;
2211 		case NIX_TXSCH_LVL_TL4:
2212 			reg = NIX_AF_TL4_CONST;
2213 			break;
2214 		case NIX_TXSCH_LVL_TL3:
2215 			reg = NIX_AF_TL3_CONST;
2216 			break;
2217 		case NIX_TXSCH_LVL_TL2:
2218 			reg = NIX_AF_TL2_CONST;
2219 			break;
2220 		case NIX_TXSCH_LVL_TL1:
2221 			reg = NIX_AF_TL1_CONST;
2222 			break;
2223 		}
2224 		cfg = rvu_read64(rvu, blkaddr, reg);
2225 		txsch->schq.max = cfg & 0xFFFF;
2226 		err = rvu_alloc_bitmap(&txsch->schq);
2227 		if (err)
2228 			return err;
2229 
2230 		/* Allocate memory for scheduler queues to
2231 		 * PF/VF pcifunc mapping info.
2232 		 */
2233 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2234 					       sizeof(u32), GFP_KERNEL);
2235 		if (!txsch->pfvf_map)
2236 			return -ENOMEM;
2237 		for (schq = 0; schq < txsch->schq.max; schq++)
2238 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2239 	}
2240 	return 0;
2241 }
2242 
2243 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2244 				int blkaddr, u32 cfg)
2245 {
2246 	int fmt_idx;
2247 
2248 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2249 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2250 			return fmt_idx;
2251 	}
2252 	if (fmt_idx >= nix_hw->mark_format.total)
2253 		return -ERANGE;
2254 
2255 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2256 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
2257 	nix_hw->mark_format.in_use++;
2258 	return fmt_idx;
2259 }
2260 
2261 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2262 				    int blkaddr)
2263 {
2264 	u64 cfgs[] = {
2265 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
2266 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
2267 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
2268 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
2269 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
2270 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
2271 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
2272 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
2273 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2274 	};
2275 	int i, rc;
2276 	u64 total;
2277 
2278 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2279 	nix_hw->mark_format.total = (u8)total;
2280 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2281 					       GFP_KERNEL);
2282 	if (!nix_hw->mark_format.cfg)
2283 		return -ENOMEM;
2284 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2285 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2286 		if (rc < 0)
2287 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2288 				i, rc);
2289 	}
2290 
2291 	return 0;
2292 }
2293 
2294 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2295 				   struct msg_rsp *rsp)
2296 {
2297 	u16 pcifunc = req->hdr.pcifunc;
2298 	int i, nixlf, blkaddr, err;
2299 	u64 stats;
2300 
2301 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2302 	if (err)
2303 		return err;
2304 
2305 	/* Get stats count supported by HW */
2306 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2307 
2308 	/* Reset tx stats */
2309 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2310 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2311 
2312 	/* Reset rx stats */
2313 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2314 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2315 
2316 	return 0;
2317 }
2318 
2319 /* Returns the ALG index to be set into NPC_RX_ACTION */
2320 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2321 {
2322 	int i;
2323 
2324 	/* Scan over existing algo entries to find a match */
2325 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
2326 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2327 			return i;
2328 
2329 	return -ERANGE;
2330 }
2331 
2332 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2333 {
2334 	int idx, nr_field, key_off, field_marker, keyoff_marker;
2335 	int max_key_off, max_bit_pos, group_member;
2336 	struct nix_rx_flowkey_alg *field;
2337 	struct nix_rx_flowkey_alg tmp;
2338 	u32 key_type, valid_key;
2339 
2340 	if (!alg)
2341 		return -EINVAL;
2342 
2343 #define FIELDS_PER_ALG  5
2344 #define MAX_KEY_OFF	40
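	/* Each flow key algorithm is described by FIELDS_PER_ALG 64-bit
	 * field extractors and the extracted bytes must fit within a
	 * MAX_KEY_OFF byte hash key.
	 */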
2345 	/* Clear all fields */
2346 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2347 
2348 	/* Each of the 32 possible flow key algorithm definitions should
2349 	 * fall into the above incremental config (except ALG0). Otherwise a
2350 	 * single NPC MCAM entry is not sufficient for supporting RSS.
2351 	 *
2352 	 * If a different definition or combination needed then NPC MCAM
2353 	 * has to be programmed to filter such pkts and it's action should
2354 	 * point to this definition to calculate flowtag or hash.
2355 	 *
2356 	 * The `for loop` goes over _all_ protocol fields and the following
2357 	 * variables depict the state machine's forward progress logic.
2358 	 *
2359 	 * keyoff_marker - Enabled when hash byte length needs to be accounted
2360 	 * in field->key_offset update.
2361 	 * field_marker - Enabled when a new field needs to be selected.
2362 	 * group_member - Enabled when protocol is part of a group.
2363 	 */
2364 
2365 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
2366 	nr_field = 0; key_off = 0; field_marker = 1;
2367 	field = &tmp; max_bit_pos = fls(flow_cfg);
2368 	for (idx = 0;
2369 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2370 	     key_off < MAX_KEY_OFF; idx++) {
2371 		key_type = BIT(idx);
2372 		valid_key = flow_cfg & key_type;
2373 		/* Found a field marker, reset the field values */
2374 		if (field_marker)
2375 			memset(&tmp, 0, sizeof(tmp));
2376 
2377 		field_marker = true;
2378 		keyoff_marker = true;
2379 		switch (key_type) {
2380 		case NIX_FLOW_KEY_TYPE_PORT:
2381 			field->sel_chan = true;
2382 			/* This should be set to 1, when SEL_CHAN is set */
2383 			field->bytesm1 = 1;
2384 			break;
2385 		case NIX_FLOW_KEY_TYPE_IPV4:
2386 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2387 			field->lid = NPC_LID_LC;
2388 			field->ltype_match = NPC_LT_LC_IP;
2389 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2390 				field->lid = NPC_LID_LG;
2391 				field->ltype_match = NPC_LT_LG_TU_IP;
2392 			}
2393 			field->hdr_offset = 12; /* SIP offset */
2394 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2395 			field->ltype_mask = 0xF; /* Match only IPv4 */
2396 			keyoff_marker = false;
2397 			break;
2398 		case NIX_FLOW_KEY_TYPE_IPV6:
2399 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2400 			field->lid = NPC_LID_LC;
2401 			field->ltype_match = NPC_LT_LC_IP6;
2402 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2403 				field->lid = NPC_LID_LG;
2404 				field->ltype_match = NPC_LT_LG_TU_IP6;
2405 			}
2406 			field->hdr_offset = 8; /* SIP offset */
2407 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2408 			field->ltype_mask = 0xF; /* Match only IPv6 */
2409 			break;
2410 		case NIX_FLOW_KEY_TYPE_TCP:
2411 		case NIX_FLOW_KEY_TYPE_UDP:
2412 		case NIX_FLOW_KEY_TYPE_SCTP:
2413 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
2414 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
2415 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2416 			field->lid = NPC_LID_LD;
2417 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2418 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2419 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2420 				field->lid = NPC_LID_LH;
2421 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2422 
2423 			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
2424 			 * so no need to change the ltype_match, just change
2425 			 * the lid for inner protocols
2426 			 */
2427 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2428 				     (int)NPC_LT_LH_TU_TCP);
2429 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2430 				     (int)NPC_LT_LH_TU_UDP);
2431 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2432 				     (int)NPC_LT_LH_TU_SCTP);
2433 
2434 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2435 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2436 			    valid_key) {
2437 				field->ltype_match |= NPC_LT_LD_TCP;
2438 				group_member = true;
2439 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2440 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2441 				   valid_key) {
2442 				field->ltype_match |= NPC_LT_LD_UDP;
2443 				group_member = true;
2444 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2445 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2446 				   valid_key) {
2447 				field->ltype_match |= NPC_LT_LD_SCTP;
2448 				group_member = true;
2449 			}
2450 			field->ltype_mask = ~field->ltype_match;
2451 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2452 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2453 				/* Handle the case where any of the group item
2454 				 * is enabled in the group but not the final one
2455 				 */
2456 				if (group_member) {
2457 					valid_key = true;
2458 					group_member = false;
2459 				}
2460 			} else {
2461 				field_marker = false;
2462 				keyoff_marker = false;
2463 			}
2464 			break;
2465 		case NIX_FLOW_KEY_TYPE_NVGRE:
2466 			field->lid = NPC_LID_LD;
2467 			field->hdr_offset = 4; /* VSID offset */
2468 			field->bytesm1 = 2;
2469 			field->ltype_match = NPC_LT_LD_NVGRE;
2470 			field->ltype_mask = 0xF;
2471 			break;
2472 		case NIX_FLOW_KEY_TYPE_VXLAN:
2473 		case NIX_FLOW_KEY_TYPE_GENEVE:
2474 			field->lid = NPC_LID_LE;
2475 			field->bytesm1 = 2;
2476 			field->hdr_offset = 4;
2477 			field->ltype_mask = 0xF;
2478 			field_marker = false;
2479 			keyoff_marker = false;
2480 
2481 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2482 				field->ltype_match |= NPC_LT_LE_VXLAN;
2483 				group_member = true;
2484 			}
2485 
2486 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2487 				field->ltype_match |= NPC_LT_LE_GENEVE;
2488 				group_member = true;
2489 			}
2490 
2491 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2492 				if (group_member) {
2493 					field->ltype_mask = ~field->ltype_match;
2494 					field_marker = true;
2495 					keyoff_marker = true;
2496 					valid_key = true;
2497 					group_member = false;
2498 				}
2499 			}
2500 			break;
2501 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2502 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2503 			field->lid = NPC_LID_LA;
2504 			field->ltype_match = NPC_LT_LA_ETHER;
2505 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2506 				field->lid = NPC_LID_LF;
2507 				field->ltype_match = NPC_LT_LF_TU_ETHER;
2508 			}
2509 			field->hdr_offset = 0;
2510 			field->bytesm1 = 5; /* DMAC 6 Byte */
2511 			field->ltype_mask = 0xF;
2512 			break;
2513 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2514 			field->lid = NPC_LID_LC;
2515 			field->hdr_offset = 40; /* IPV6 hdr */
2516 			field->bytesm1 = 0; /* 1 Byte ext hdr */
2517 			field->ltype_match = NPC_LT_LC_IP6_EXT;
2518 			field->ltype_mask = 0xF;
2519 			break;
2520 		case NIX_FLOW_KEY_TYPE_GTPU:
2521 			field->lid = NPC_LID_LE;
2522 			field->hdr_offset = 4;
2523 			field->bytesm1 = 3; /* 4 bytes TID */
2524 			field->ltype_match = NPC_LT_LE_GTPU;
2525 			field->ltype_mask = 0xF;
2526 			break;
2527 		case NIX_FLOW_KEY_TYPE_VLAN:
2528 			field->lid = NPC_LID_LB;
2529 			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
2530 			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
2531 			field->ltype_match = NPC_LT_LB_CTAG;
2532 			field->ltype_mask = 0xF;
2533 			field->fn_mask = 1; /* Mask out the first nibble */
2534 			break;
2535 		}
2536 		field->ena = 1;
2537 
2538 		/* Found a valid flow key type */
2539 		if (valid_key) {
2540 			field->key_offset = key_off;
2541 			memcpy(&alg[nr_field], field, sizeof(*field));
2542 			max_key_off = max(max_key_off, field->bytesm1 + 1);
2543 
2544 			/* Found a field marker, get the next field */
2545 			if (field_marker)
2546 				nr_field++;
2547 		}
2548 
2549 		/* Found a keyoff marker, update the new key_off */
2550 		if (keyoff_marker) {
2551 			key_off += max_key_off;
2552 			max_key_off = 0;
2553 		}
2554 	}
2555 	/* Processed all the flow key types */
2556 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
2557 		return 0;
2558 	else
2559 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
2560 }
2561 
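/* Reserve the next free RSS flow key algorithm index, generate its field
 * extractors for the given flow_cfg, program them into the ALGX_FIELDX
 * registers and remember the flow_cfg for later lookups.
 */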
2562 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
2563 {
2564 	u64 field[FIELDS_PER_ALG];
2565 	struct nix_hw *hw;
2566 	int fid, rc;
2567 
2568 	hw = get_nix_hw(rvu->hw, blkaddr);
2569 	if (!hw)
2570 		return -EINVAL;
2571 
2572 	/* No room to add a new flow hash algorithm */
2573 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
2574 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
2575 
2576 	/* Generate algo fields for the given flow_cfg */
2577 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
2578 	if (rc)
2579 		return rc;
2580 
2581 	/* Update ALGX_FIELDX register with generated fields */
2582 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2583 		rvu_write64(rvu, blkaddr,
2584 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
2585 							   fid), field[fid]);
2586 
2587 	/* Store the flow_cfg for further lookups */
2588 	rc = hw->flowkey.in_use;
2589 	hw->flowkey.flowkey[rc] = flow_cfg;
2590 	hw->flowkey.in_use++;
2591 
2592 	return rc;
2593 }
2594 
2595 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
2596 					 struct nix_rss_flowkey_cfg *req,
2597 					 struct nix_rss_flowkey_cfg_rsp *rsp)
2598 {
2599 	u16 pcifunc = req->hdr.pcifunc;
2600 	int alg_idx, nixlf, blkaddr;
2601 	struct nix_hw *nix_hw;
2602 	int err;
2603 
2604 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2605 	if (err)
2606 		return err;
2607 
2608 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2609 	if (!nix_hw)
2610 		return -EINVAL;
2611 
2612 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
2613 	/* Failed to get an algo index from the existing list, reserve a new one */
2614 	if (alg_idx < 0) {
2615 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
2616 						  req->flowkey_cfg);
2617 		if (alg_idx < 0)
2618 			return alg_idx;
2619 	}
2620 	rsp->alg_idx = alg_idx;
2621 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
2622 				       alg_idx, req->mcam_index);
2623 	return 0;
2624 }
2625 
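/* Reserve a default set of RSS flow key algorithms at NIX AF init time:
 * IPv4/IPv6 2-tuple plus the TCP/UDP/SCTP 4-tuple combinations.
 */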
2626 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
2627 {
2628 	u32 flowkey_cfg, minkey_cfg;
2629 	int alg, fid, rc;
2630 
2631 	/* Disable all flow key algx fieldx */
2632 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
2633 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2634 			rvu_write64(rvu, blkaddr,
2635 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
2636 				    0);
2637 	}
2638 
2639 	/* IPv4/IPv6 SIP/DIPs */
2640 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
2641 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2642 	if (rc < 0)
2643 		return rc;
2644 
2645 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2646 	minkey_cfg = flowkey_cfg;
2647 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
2648 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2649 	if (rc < 0)
2650 		return rc;
2651 
2652 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2653 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
2654 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2655 	if (rc < 0)
2656 		return rc;
2657 
2658 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2659 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
2660 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2661 	if (rc < 0)
2662 		return rc;
2663 
2664 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
2665 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2666 			NIX_FLOW_KEY_TYPE_UDP;
2667 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2668 	if (rc < 0)
2669 		return rc;
2670 
2671 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2672 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2673 			NIX_FLOW_KEY_TYPE_SCTP;
2674 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2675 	if (rc < 0)
2676 		return rc;
2677 
2678 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2679 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
2680 			NIX_FLOW_KEY_TYPE_SCTP;
2681 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2682 	if (rc < 0)
2683 		return rc;
2684 
2685 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2686 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2687 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
2688 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2689 	if (rc < 0)
2690 		return rc;
2691 
2692 	return 0;
2693 }
2694 
2695 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
2696 				      struct nix_set_mac_addr *req,
2697 				      struct msg_rsp *rsp)
2698 {
2699 	u16 pcifunc = req->hdr.pcifunc;
2700 	int blkaddr, nixlf, err;
2701 	struct rvu_pfvf *pfvf;
2702 
2703 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2704 	if (err)
2705 		return err;
2706 
2707 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2708 
2709 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
2710 
2711 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
2712 				    pfvf->rx_chan_base, req->mac_addr);
2713 
2714 	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2715 
2716 	return 0;
2717 }
2718 
2719 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
2720 				      struct msg_req *req,
2721 				      struct nix_get_mac_addr_rsp *rsp)
2722 {
2723 	u16 pcifunc = req->hdr.pcifunc;
2724 	struct rvu_pfvf *pfvf;
2725 
2726 	if (!is_nixlf_attached(rvu, pcifunc))
2727 		return NIX_AF_ERR_AF_LF_INVALID;
2728 
2729 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2730 
2731 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
2732 
2733 	return 0;
2734 }
2735 
2736 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
2737 				     struct msg_rsp *rsp)
2738 {
2739 	bool allmulti = false, disable_promisc = false;
2740 	u16 pcifunc = req->hdr.pcifunc;
2741 	int blkaddr, nixlf, err;
2742 	struct rvu_pfvf *pfvf;
2743 
2744 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2745 	if (err)
2746 		return err;
2747 
2748 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2749 
2750 	if (req->mode & NIX_RX_MODE_PROMISC)
2751 		allmulti = false;
2752 	else if (req->mode & NIX_RX_MODE_ALLMULTI)
2753 		allmulti = true;
2754 	else
2755 		disable_promisc = true;
2756 
2757 	if (disable_promisc)
2758 		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
2759 	else
2760 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
2761 					      pfvf->rx_chan_base, allmulti);
2762 
2763 	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2764 
2765 	return 0;
2766 }
2767 
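/* A Rx link is shared by a PF and its VFs, so update the request with the
 * largest maxlen and smallest non-zero minlen found across the PF and all
 * of its VFs, which is what gets programmed on the link.
 */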
2768 static void nix_find_link_frs(struct rvu *rvu,
2769 			      struct nix_frs_cfg *req, u16 pcifunc)
2770 {
2771 	int pf = rvu_get_pf(pcifunc);
2772 	struct rvu_pfvf *pfvf;
2773 	int maxlen, minlen;
2774 	int numvfs, hwvf;
2775 	int vf;
2776 
2777 	/* Update with requester's min/max lengths */
2778 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2779 	pfvf->maxlen = req->maxlen;
2780 	if (req->update_minlen)
2781 		pfvf->minlen = req->minlen;
2782 
2783 	maxlen = req->maxlen;
2784 	minlen = req->update_minlen ? req->minlen : 0;
2785 
2786 	/* Get this PF's numVFs and starting hwvf */
2787 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
2788 
2789 	/* For each VF, compare requested max/minlen */
2790 	for (vf = 0; vf < numvfs; vf++) {
2791 		pfvf =  &rvu->hwvf[hwvf + vf];
2792 		if (pfvf->maxlen > maxlen)
2793 			maxlen = pfvf->maxlen;
2794 		if (req->update_minlen &&
2795 		    pfvf->minlen && pfvf->minlen < minlen)
2796 			minlen = pfvf->minlen;
2797 	}
2798 
2799 	/* Compare requested max/minlen with PF's max/minlen */
2800 	pfvf = &rvu->pf[pf];
2801 	if (pfvf->maxlen > maxlen)
2802 		maxlen = pfvf->maxlen;
2803 	if (req->update_minlen &&
2804 	    pfvf->minlen && pfvf->minlen < minlen)
2805 		minlen = pfvf->minlen;
2806 
2807 	/* Update the request with the max/min across the PF and its VFs */
2808 	req->maxlen = maxlen;
2809 	if (req->update_minlen)
2810 		req->minlen = minlen;
2811 }
2812 
2813 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
2814 				    struct msg_rsp *rsp)
2815 {
2816 	struct rvu_hwinfo *hw = rvu->hw;
2817 	u16 pcifunc = req->hdr.pcifunc;
2818 	int pf = rvu_get_pf(pcifunc);
2819 	int blkaddr, schq, link = -1;
2820 	struct nix_txsch *txsch;
2821 	u64 cfg, lmac_fifo_len;
2822 	struct nix_hw *nix_hw;
2823 	u8 cgx = 0, lmac = 0;
2824 
2825 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2826 	if (blkaddr < 0)
2827 		return NIX_AF_ERR_AF_LF_INVALID;
2828 
2829 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2830 	if (!nix_hw)
2831 		return -EINVAL;
2832 
2833 	if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
2834 		return NIX_AF_ERR_FRS_INVALID;
2835 
2836 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
2837 		return NIX_AF_ERR_FRS_INVALID;
2838 
2839 	/* Check if the requester wants to update SMQs */
2840 	if (!req->update_smq)
2841 		goto rx_frscfg;
2842 
2843 	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
2844 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2845 	mutex_lock(&rvu->rsrc_lock);
2846 	for (schq = 0; schq < txsch->schq.max; schq++) {
2847 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2848 			continue;
2849 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
2850 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
2851 		if (req->update_minlen)
2852 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
2853 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
2854 	}
2855 	mutex_unlock(&rvu->rsrc_lock);
2856 
2857 rx_frscfg:
2858 	/* Check if config is for SDP link */
2859 	if (req->sdp_link) {
2860 		if (!hw->sdp_links)
2861 			return NIX_AF_ERR_RX_LINK_INVALID;
2862 		link = hw->cgx_links + hw->lbk_links;
2863 		goto linkcfg;
2864 	}
2865 
2866 	/* Check if the request is from CGX mapped RVU PF */
2867 	if (is_pf_cgxmapped(rvu, pf)) {
2868 		/* Get CGX and LMAC to which this PF is mapped and find link */
2869 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
2870 		link = (cgx * hw->lmac_per_cgx) + lmac;
2871 	} else if (pf == 0) {
2872 		/* For VFs of PF0 ingress is LBK port, so config LBK link */
2873 		link = hw->cgx_links;
2874 	}
2875 
2876 	if (link < 0)
2877 		return NIX_AF_ERR_RX_LINK_INVALID;
2878 
2879 
2880 linkcfg:
2881 	nix_find_link_frs(rvu, req, pcifunc);
2882 
2883 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
2884 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
2885 	if (req->update_minlen)
2886 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
2887 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
2888 
2889 	if (req->sdp_link || pf == 0)
2890 		return 0;
2891 
2892 	/* Update transmit credits for CGX links */
2893 	lmac_fifo_len =
2894 		CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2895 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
2896 	cfg &= ~(0xFFFFFULL << 12);
2897 	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
2898 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
2899 	return 0;
2900 }
2901 
2902 int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
2903 				      struct msg_rsp *rsp)
2904 {
2905 	struct npc_mcam_alloc_entry_req alloc_req = { };
2906 	struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
2907 	struct npc_mcam_free_entry_req free_req = { };
2908 	u16 pcifunc = req->hdr.pcifunc;
2909 	int blkaddr, nixlf, err;
2910 	struct rvu_pfvf *pfvf;
2911 
2912 	/* LBK VFs do not have a separate MCAM UCAST entry, hence
2913 	 * skip allocating rxvlan for them.
2914 	 */
2915 	if (is_afvf(pcifunc))
2916 		return 0;
2917 
2918 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2919 	if (pfvf->rxvlan)
2920 		return 0;
2921 
2922 	/* alloc new mcam entry */
2923 	alloc_req.hdr.pcifunc = pcifunc;
2924 	alloc_req.count = 1;
2925 
2926 	err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
2927 						    &alloc_rsp);
2928 	if (err)
2929 		return err;
2930 
2931 	/* update entry to enable rxvlan offload */
2932 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2933 	if (blkaddr < 0) {
2934 		err = NIX_AF_ERR_AF_LF_INVALID;
2935 		goto free_entry;
2936 	}
2937 
2938 	nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
2939 	if (nixlf < 0) {
2940 		err = NIX_AF_ERR_AF_LF_INVALID;
2941 		goto free_entry;
2942 	}
2943 
2944 	pfvf->rxvlan_index = alloc_rsp.entry_list[0];
2945 	/* all it means is that rxvlan_index is valid */
2946 	pfvf->rxvlan = true;
2947 
2948 	err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2949 	if (err)
2950 		goto free_entry;
2951 
2952 	return 0;
2953 free_entry:
2954 	free_req.hdr.pcifunc = pcifunc;
2955 	free_req.entry = alloc_rsp.entry_list[0];
2956 	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
2957 	pfvf->rxvlan = false;
2958 	return err;
2959 }
2960 
2961 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
2962 				    struct msg_rsp *rsp)
2963 {
2964 	int nixlf, blkaddr, err;
2965 	u64 cfg;
2966 
2967 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
2968 	if (err)
2969 		return err;
2970 
2971 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
2972 	/* Set the interface configuration */
2973 	if (req->len_verify & BIT(0))
2974 		cfg |= BIT_ULL(41);
2975 	else
2976 		cfg &= ~BIT_ULL(41);
2977 
2978 	if (req->len_verify & BIT(1))
2979 		cfg |= BIT_ULL(40);
2980 	else
2981 		cfg &= ~BIT_ULL(40);
2982 
2983 	if (req->csum_verify & BIT(0))
2984 		cfg |= BIT_ULL(37);
2985 	else
2986 		cfg &= ~BIT_ULL(37);
2987 
2988 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
2989 
2990 	return 0;
2991 }
2992 
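/* One-time init of NIX Rx/Tx links: default min/max packet lengths on
 * the CGX/LBK/SDP Rx links and Tx credits derived from the per-LMAC FIFO
 * size (fixed credits for the LBK links).
 */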
2993 static void nix_link_config(struct rvu *rvu, int blkaddr)
2994 {
2995 	struct rvu_hwinfo *hw = rvu->hw;
2996 	int cgx, lmac_cnt, slink, link;
2997 	u64 tx_credits;
2998 
2999 	/* Set default min/max packet lengths allowed on NIX Rx links.
3000 	 *
3001 	 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
3002 	 * as undersized and report them to SW as error pkts, hence
3003 	 * minlen is set to 40 bytes.
3004 	 */
3005 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
3006 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3007 			    NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3008 	}
3009 
3010 	if (hw->sdp_links) {
3011 		link = hw->cgx_links + hw->lbk_links;
3012 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3013 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3014 	}
3015 
3016 	/* Set credits for Tx links assuming max packet length allowed.
3017 	 * This will be reconfigured based on MTU set for PF/VF.
3018 	 */
3019 	for (cgx = 0; cgx < hw->cgx; cgx++) {
3020 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3021 		tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
3022 		/* Enable credits and set credit pkt count to max allowed */
3023 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3024 		slink = cgx * hw->lmac_per_cgx;
3025 		for (link = slink; link < (slink + lmac_cnt); link++) {
3026 			rvu_write64(rvu, blkaddr,
3027 				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
3028 				    tx_credits);
3029 		}
3030 	}
3031 
3032 	/* Set Tx credits for LBK link */
3033 	slink = hw->cgx_links;
3034 	for (link = slink; link < (slink + hw->lbk_links); link++) {
3035 		tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
3036 		/* Enable credits and set credit pkt count to max allowed */
3037 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3038 		rvu_write64(rvu, blkaddr,
3039 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
3040 	}
3041 }
3042 
3043 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3044 {
3045 	int idx, err;
3046 	u64 status;
3047 
3048 	/* Start X2P bus calibration */
3049 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3050 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3051 	/* Wait for calibration to complete */
3052 	err = rvu_poll_reg(rvu, blkaddr,
3053 			   NIX_AF_STATUS, BIT_ULL(10), false);
3054 	if (err) {
3055 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3056 		return err;
3057 	}
3058 
3059 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3060 	/* Check if CGX devices are ready */
3061 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3062 		/* Skip when cgx port is not available */
3063 		if (!rvu_cgx_pdata(idx, rvu) ||
3064 		    (status & (BIT_ULL(16 + idx))))
3065 			continue;
3066 		dev_err(rvu->dev,
3067 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
3068 		err = -EBUSY;
3069 	}
3070 
3071 	/* Check if LBK is ready */
3072 	if (!(status & BIT_ULL(19))) {
3073 		dev_err(rvu->dev,
3074 			"LBK didn't respond to NIX X2P calibration\n");
3075 		err = -EBUSY;
3076 	}
3077 
3078 	/* Clear 'calibrate_x2p' bit */
3079 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3080 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3081 	if (err || (status & 0x3FFULL))
3082 		dev_err(rvu->dev,
3083 			"NIX X2P calibration failed, status 0x%llx\n", status);
3084 	if (err)
3085 		return err;
3086 	return 0;
3087 }
3088 
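/* Initialize the NIX admin queue: set AQ endianness and NDC caching
 * behaviour, allocate instruction/result queue memory and program the AQ
 * size and base address.
 */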
3089 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3090 {
3091 	u64 cfg;
3092 	int err;
3093 
3094 	/* Set admin queue endianness */
3095 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3096 #ifdef __BIG_ENDIAN
3097 	cfg |= BIT_ULL(8);
3098 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3099 #else
3100 	cfg &= ~BIT_ULL(8);
3101 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3102 #endif
3103 
3104 	/* Do not bypass NDC cache */
3105 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3106 	cfg &= ~0x3FFEULL;
3107 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3108 	/* Disable caching of SQB aka SQEs */
3109 	cfg |= 0x04ULL;
3110 #endif
3111 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3112 
3113 	/* Result structure can be followed by RQ/SQ/CQ context at
3114 	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
3115 	 * operation type. Alloc sufficient result memory for all operations.
3116 	 */
3117 	err = rvu_aq_alloc(rvu, &block->aq,
3118 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3119 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3120 	if (err)
3121 		return err;
3122 
3123 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3124 	rvu_write64(rvu, block->addr,
3125 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3126 	return 0;
3127 }
3128 
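/* NIX AF one-time initialization: apply silicon-specific errata
 * workarounds, calibrate the X2P bus, init the admin queue and set up Tx
 * schedulers, mark formats, multicast tables, LSO formats, protocol layer
 * definitions, RSS algorithms and link credits.
 */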
3129 int rvu_nix_init(struct rvu *rvu)
3130 {
3131 	const struct npc_lt_def_cfg *ltdefs;
3132 	struct rvu_hwinfo *hw = rvu->hw;
3133 	struct rvu_block *block;
3134 	int blkaddr, err;
3135 	u64 cfg;
3136 
3137 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
3138 	if (blkaddr < 0)
3139 		return 0;
3140 	block = &hw->block[blkaddr];
3141 
3142 	if (is_rvu_96xx_B0(rvu)) {
3143 		/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
3144 		 * internal state when conditional clocks are turned off.
3145 		 * Hence enable them.
3146 		 */
3147 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3148 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3149 
3150 		/* Set chan/link to backpressure TL3 instead of TL2 */
3151 		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3152 
3153 		/* Disable SQ manager's sticky mode operation (set TM6 = 0)
3154 		 * This sticky mode is known to cause SQ stalls when multiple
3155 		 * SQs are mapped to the same SMQ and transmit pkts at the same time.
3156 		 */
3157 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3158 		cfg &= ~BIT_ULL(15);
3159 		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3160 	}
3161 
3162 	ltdefs = rvu->kpu.lt_def;
3163 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
3164 	err = nix_calibrate_x2p(rvu, blkaddr);
3165 	if (err)
3166 		return err;
3167 
3168 	/* Set num of links of each type */
3169 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
3170 	hw->cgx = (cfg >> 12) & 0xF;
3171 	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
3172 	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
3173 	hw->lbk_links = (cfg >> 24) & 0xF;
3174 	hw->sdp_links = 1;
3175 
3176 	/* Initialize admin queue */
3177 	err = nix_aq_init(rvu, block);
3178 	if (err)
3179 		return err;
3180 
3181 	/* Restore CINT timer delay to HW reset values */
3182 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3183 
3184 	if (blkaddr == BLKADDR_NIX0) {
3185 		hw->nix0 = devm_kzalloc(rvu->dev,
3186 					sizeof(struct nix_hw), GFP_KERNEL);
3187 		if (!hw->nix0)
3188 			return -ENOMEM;
3189 
3190 		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
3191 		if (err)
3192 			return err;
3193 
3194 		err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
3195 		if (err)
3196 			return err;
3197 
3198 		err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
3199 		if (err)
3200 			return err;
3201 
3202 		/* Configure segmentation offload formats */
3203 		nix_setup_lso(rvu, hw->nix0, blkaddr);
3204 
3205 		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3206 		 * This helps HW protocol checker to identify headers
3207 		 * and validate length and checksums.
3208 		 */
3209 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3210 			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3211 			    ltdefs->rx_ol2.ltype_mask);
3212 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3213 			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3214 			    ltdefs->rx_oip4.ltype_mask);
3215 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3216 			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3217 			    ltdefs->rx_iip4.ltype_mask);
3218 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3219 			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3220 			    ltdefs->rx_oip6.ltype_mask);
3221 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3222 			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3223 			    ltdefs->rx_iip6.ltype_mask);
3224 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3225 			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3226 			    ltdefs->rx_otcp.ltype_mask);
3227 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3228 			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3229 			    ltdefs->rx_itcp.ltype_mask);
3230 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3231 			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3232 			    ltdefs->rx_oudp.ltype_mask);
3233 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3234 			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3235 			    ltdefs->rx_iudp.ltype_mask);
3236 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3237 			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3238 			    ltdefs->rx_osctp.ltype_mask);
3239 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3240 			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3241 			    ltdefs->rx_isctp.ltype_mask);
3242 
3243 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3244 		if (err)
3245 			return err;
3246 
3247 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3248 		nix_link_config(rvu, blkaddr);
3249 
3250 		/* Enable Channel backpressure */
3251 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3252 	}
3253 	return 0;
3254 }
3255 
3256 void rvu_nix_freemem(struct rvu *rvu)
3257 {
3258 	struct rvu_hwinfo *hw = rvu->hw;
3259 	struct rvu_block *block;
3260 	struct nix_txsch *txsch;
3261 	struct nix_mcast *mcast;
3262 	struct nix_hw *nix_hw;
3263 	int blkaddr, lvl;
3264 
3265 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
3266 	if (blkaddr < 0)
3267 		return;
3268 
3269 	block = &hw->block[blkaddr];
3270 	rvu_aq_free(rvu, block->aq);
3271 
3272 	if (blkaddr == BLKADDR_NIX0) {
3273 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
3274 		if (!nix_hw)
3275 			return;
3276 
3277 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3278 			txsch = &nix_hw->txsch[lvl];
3279 			kfree(txsch->schq.bmap);
3280 		}
3281 
3282 		mcast = &nix_hw->mcast;
3283 		qmem_free(rvu->dev, mcast->mce_ctx);
3284 		qmem_free(rvu->dev, mcast->mcast_buf);
3285 		mutex_destroy(&mcast->mce_lock);
3286 	}
3287 }
3288 
3289 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3290 				     struct msg_rsp *rsp)
3291 {
3292 	u16 pcifunc = req->hdr.pcifunc;
3293 	int nixlf, err;
3294 
3295 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3296 	if (err)
3297 		return err;
3298 
3299 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3300 
3301 	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3302 }
3303 
3304 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3305 				    struct msg_rsp *rsp)
3306 {
3307 	u16 pcifunc = req->hdr.pcifunc;
3308 	int nixlf, err;
3309 
3310 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3311 	if (err)
3312 		return err;
3313 
3314 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
3315 
3316 	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3317 }
3318 
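/* Teardown of a NIXLF: free its NPC MCAM entries and Tx scheduler queues,
 * sync Rx, stop CGX Rx/Tx and disable any SQ/RQ/CQ contexts still enabled
 * by this PF/VF before freeing the context memory.
 */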
3319 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3320 {
3321 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3322 	struct hwctx_disable_req ctx_req;
3323 	int err;
3324 
3325 	ctx_req.hdr.pcifunc = pcifunc;
3326 
3327 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3328 	nix_interface_deinit(rvu, pcifunc, nixlf);
3329 	nix_rx_sync(rvu, blkaddr);
3330 	nix_txschq_free(rvu, pcifunc);
3331 
3332 	rvu_cgx_start_stop_io(rvu, pcifunc, false);
3333 
3334 	if (pfvf->sq_ctx) {
3335 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3336 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3337 		if (err)
3338 			dev_err(rvu->dev, "SQ ctx disable failed\n");
3339 	}
3340 
3341 	if (pfvf->rq_ctx) {
3342 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3343 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3344 		if (err)
3345 			dev_err(rvu->dev, "RQ ctx disable failed\n");
3346 	}
3347 
3348 	if (pfvf->cq_ctx) {
3349 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3350 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3351 		if (err)
3352 			dev_err(rvu->dev, "CQ ctx disable failed\n");
3353 	}
3354 
3355 	nix_ctx_free(rvu, pfvf);
3356 }
3357 
3358 #define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)
3359 
3360 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3361 {
3362 	struct rvu_hwinfo *hw = rvu->hw;
3363 	struct rvu_block *block;
3364 	int blkaddr;
3365 	int nixlf;
3366 	u64 cfg;
3367 
3368 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3369 	if (blkaddr < 0)
3370 		return NIX_AF_ERR_AF_LF_INVALID;
3371 
3372 	block = &hw->block[blkaddr];
3373 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3374 	if (nixlf < 0)
3375 		return NIX_AF_ERR_AF_LF_INVALID;
3376 
3377 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3378 
3379 	if (enable)
3380 		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3381 	else
3382 		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3383 
3384 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
3385 
3386 	return 0;
3387 }
3388 
3389 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
3390 					  struct msg_rsp *rsp)
3391 {
3392 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
3393 }
3394 
3395 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
3396 					   struct msg_rsp *rsp)
3397 {
3398 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
3399 }
3400 
3401 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3402 					struct nix_lso_format_cfg *req,
3403 					struct nix_lso_format_cfg_rsp *rsp)
3404 {
3405 	u16 pcifunc = req->hdr.pcifunc;
3406 	struct nix_hw *nix_hw;
3407 	struct rvu_pfvf *pfvf;
3408 	int blkaddr, idx, f;
3409 	u64 reg;
3410 
3411 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3412 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3413 	if (!pfvf->nixlf || blkaddr < 0)
3414 		return NIX_AF_ERR_AF_LF_INVALID;
3415 
3416 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3417 	if (!nix_hw)
3418 		return -EINVAL;
3419 
3420 	/* Find existing matching LSO format, if any */
3421 	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3422 		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3423 			reg = rvu_read64(rvu, blkaddr,
3424 					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3425 			if (req->fields[f] != (reg & req->field_mask))
3426 				break;
3427 		}
3428 
3429 		if (f == NIX_LSO_FIELD_MAX)
3430 			break;
3431 	}
3432 
3433 	if (idx < nix_hw->lso.in_use) {
3434 		/* Match found */
3435 		rsp->lso_format_idx = idx;
3436 		return 0;
3437 	}
3438 
3439 	if (nix_hw->lso.in_use == nix_hw->lso.total)
3440 		return NIX_AF_ERR_LSO_CFG_FAIL;
3441 
3442 	rsp->lso_format_idx = nix_hw->lso.in_use++;
3443 
3444 	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
3445 		rvu_write64(rvu, blkaddr,
3446 			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
3447 			    req->fields[f]);
3448 
3449 	return 0;
3450 }
3451