1// SPDX-License-Identifier: GPL-2.0
2/* Marvell RVU Admin Function driver
3 *
4 * Copyright (C) 2018 Marvell.
5 *
6 */
7
8#include <linux/module.h>
9#include <linux/interrupt.h>
10#include <linux/delay.h>
11#include <linux/irq.h>
12#include <linux/pci.h>
13#include <linux/sysfs.h>
14
15#include "cgx.h"
16#include "rvu.h"
17#include "rvu_reg.h"
18#include "ptp.h"
19#include "mcs.h"
20
21#include "rvu_trace.h"
22#include "rvu_npc_hash.h"
23
24#define DRV_NAME	"rvu_af"
25#define DRV_STRING      "Marvell OcteonTX2 RVU Admin Function Driver"
26
27static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
28				struct rvu_block *block, int lf);
29static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
30				  struct rvu_block *block, int lf);
31static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
32
33static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
34			 int type, int num,
35			 void (mbox_handler)(struct work_struct *),
36			 void (mbox_up_handler)(struct work_struct *));
37enum {
38	TYPE_AFVF,
39	TYPE_AFPF,
40};
41
42/* Supported devices */
43static const struct pci_device_id rvu_id_table[] = {
44	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
45	{ 0, }  /* end of table */
46};
47
48MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
49MODULE_DESCRIPTION(DRV_STRING);
50MODULE_LICENSE("GPL v2");
51MODULE_DEVICE_TABLE(pci, rvu_id_table);
52
53static char *mkex_profile; /* MKEX profile name */
54module_param(mkex_profile, charp, 0000);
55MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
56
57static char *kpu_profile; /* KPU profile name */
58module_param(kpu_profile, charp, 0000);
59MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
60
61static void rvu_setup_hw_capabilities(struct rvu *rvu)
62{
63	struct rvu_hwinfo *hw = rvu->hw;
64
65	hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
66	hw->cap.nix_fixed_txschq_mapping = false;
67	hw->cap.nix_shaping = true;
68	hw->cap.nix_tx_link_bp = true;
69	hw->cap.nix_rx_multicast = true;
70	hw->cap.nix_shaper_toggle_wait = false;
71	hw->cap.npc_hash_extract = false;
72	hw->cap.npc_exact_match_enabled = false;
73	hw->rvu = rvu;
74
75	if (is_rvu_pre_96xx_C0(rvu)) {
76		hw->cap.nix_fixed_txschq_mapping = true;
77		hw->cap.nix_txsch_per_cgx_lmac = 4;
78		hw->cap.nix_txsch_per_lbk_lmac = 132;
79		hw->cap.nix_txsch_per_sdp_lmac = 76;
80		hw->cap.nix_shaping = false;
81		hw->cap.nix_tx_link_bp = false;
82		if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
83			hw->cap.nix_rx_multicast = false;
84	}
85	if (!is_rvu_pre_96xx_C0(rvu))
86		hw->cap.nix_shaper_toggle_wait = true;
87
88	if (!is_rvu_otx2(rvu))
89		hw->cap.per_pf_mbox_regs = true;
90
91	if (is_rvu_npc_hash_extract_en(rvu))
92		hw->cap.npc_hash_extract = true;
93}
94
/* Poll a RVU block's register 'offset' until the bits specified by 'mask'
 * read as zero or nonzero, as requested via 'zero'.
 */
98int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
99{
100	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
101	bool twice = false;
102	void __iomem *reg;
103	u64 reg_val;
104
105	reg = rvu->afreg_base + ((block << 28) | offset);
106again:
107	reg_val = readq(reg);
108	if (zero && !(reg_val & mask))
109		return 0;
110	if (!zero && (reg_val & mask))
111		return 0;
112	if (time_before(jiffies, timeout)) {
113		usleep_range(1, 5);
114		goto again;
115	}
	/* If the CPU was scheduled out before the 'time_before' check above
	 * and got scheduled back in only after jiffies crossed the timeout,
	 * check once more whether HW completed the operation in the
	 * meantime.
	 */
121	if (!twice) {
122		twice = true;
123		goto again;
124	}
125	return -EBUSY;
126}
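
/* Usage sketch (for illustration only): callers typically kick off an
 * operation in a block register and then poll a status/busy bit with
 * rvu_poll_reg(). For example, rvu_lf_reset() below triggers an LF reset
 * and waits for the busy bit to clear:
 *
 *	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
 *	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg,
 *			   BIT_ULL(12), true);
 */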
127
128int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
129{
130	int id;
131
132	if (!rsrc->bmap)
133		return -EINVAL;
134
135	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
136	if (id >= rsrc->max)
137		return -ENOSPC;
138
139	__set_bit(id, rsrc->bmap);
140
141	return id;
142}
143
144int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
145{
146	int start;
147
148	if (!rsrc->bmap)
149		return -EINVAL;
150
151	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
152	if (start >= rsrc->max)
153		return -ENOSPC;
154
155	bitmap_set(rsrc->bmap, start, nrsrc);
156	return start;
157}
158
159static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
160{
161	if (!rsrc->bmap)
162		return;
163	if (start >= rsrc->max)
164		return;
165
166	bitmap_clear(rsrc->bmap, start, nrsrc);
167}
168
169bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
170{
171	int start;
172
173	if (!rsrc->bmap)
174		return false;
175
176	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
177	if (start >= rsrc->max)
178		return false;
179
180	return true;
181}
182
183void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
184{
185	if (!rsrc->bmap)
186		return;
187
188	__clear_bit(id, rsrc->bmap);
189}
190
191int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
192{
193	int used;
194
195	if (!rsrc->bmap)
196		return 0;
197
198	used = bitmap_weight(rsrc->bmap, rsrc->max);
199	return (rsrc->max - used);
200}
201
202bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
203{
204	if (!rsrc->bmap)
205		return false;
206
207	return !test_bit(id, rsrc->bmap);
208}
209
210int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
211{
212	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
213			     sizeof(long), GFP_KERNEL);
214	if (!rsrc->bmap)
215		return -ENOMEM;
216	return 0;
217}
218
219void rvu_free_bitmap(struct rsrc_bmap *rsrc)
220{
221	kfree(rsrc->bmap);
222}
223
224/* Get block LF's HW index from a PF_FUNC's block slot number */
225int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
226{
227	u16 match = 0;
228	int lf;
229
230	mutex_lock(&rvu->rsrc_lock);
231	for (lf = 0; lf < block->lf.max; lf++) {
232		if (block->fn_map[lf] == pcifunc) {
233			if (slot == match) {
234				mutex_unlock(&rvu->rsrc_lock);
235				return lf;
236			}
237			match++;
238		}
239	}
240	mutex_unlock(&rvu->rsrc_lock);
241	return -ENODEV;
242}
243
/* Convert a BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 support
 * multiple blocks of the same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 *
 * If a pcifunc has LFs attached from multiple blocks of the same type,
 * return the blkaddr of the first block encountered.
 */
253int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
254{
255	int devnum, blkaddr = -ENODEV;
256	u64 cfg, reg;
257	bool is_pf;
258
259	switch (blktype) {
260	case BLKTYPE_NPC:
261		blkaddr = BLKADDR_NPC;
262		goto exit;
263	case BLKTYPE_NPA:
264		blkaddr = BLKADDR_NPA;
265		goto exit;
266	case BLKTYPE_NIX:
267		/* For now assume NIX0 */
268		if (!pcifunc) {
269			blkaddr = BLKADDR_NIX0;
270			goto exit;
271		}
272		break;
273	case BLKTYPE_SSO:
274		blkaddr = BLKADDR_SSO;
275		goto exit;
276	case BLKTYPE_SSOW:
277		blkaddr = BLKADDR_SSOW;
278		goto exit;
279	case BLKTYPE_TIM:
280		blkaddr = BLKADDR_TIM;
281		goto exit;
282	case BLKTYPE_CPT:
283		/* For now assume CPT0 */
284		if (!pcifunc) {
285			blkaddr = BLKADDR_CPT0;
286			goto exit;
287		}
288		break;
289	}
290
291	/* Check if this is a RVU PF or VF */
292	if (pcifunc & RVU_PFVF_FUNC_MASK) {
293		is_pf = false;
294		devnum = rvu_get_hwvf(rvu, pcifunc);
295	} else {
296		is_pf = true;
297		devnum = rvu_get_pf(pcifunc);
298	}
299
300	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
301	 * 'BLKADDR_NIX1'.
302	 */
303	if (blktype == BLKTYPE_NIX) {
304		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
305			RVU_PRIV_HWVFX_NIXX_CFG(0);
306		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
307		if (cfg) {
308			blkaddr = BLKADDR_NIX0;
309			goto exit;
310		}
311
312		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
313			RVU_PRIV_HWVFX_NIXX_CFG(1);
314		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
315		if (cfg)
316			blkaddr = BLKADDR_NIX1;
317	}
318
319	if (blktype == BLKTYPE_CPT) {
320		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
321			RVU_PRIV_HWVFX_CPTX_CFG(0);
322		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
323		if (cfg) {
324			blkaddr = BLKADDR_CPT0;
325			goto exit;
326		}
327
328		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
329			RVU_PRIV_HWVFX_CPTX_CFG(1);
330		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
331		if (cfg)
332			blkaddr = BLKADDR_CPT1;
333	}
334
335exit:
336	if (is_block_implemented(rvu->hw, blkaddr))
337		return blkaddr;
338	return -ENODEV;
339}
340
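/* Update both the software view of an LF attachment (block->fn_map and the
 * per-PF/VF LF counters) and the hardware per-PF/VF LF count register when
 * an LF is attached to ('attach' true) or detached from a PF/VF.
 */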
341static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
342				struct rvu_block *block, u16 pcifunc,
343				u16 lf, bool attach)
344{
345	int devnum, num_lfs = 0;
346	bool is_pf;
347	u64 reg;
348
349	if (lf >= block->lf.max) {
350		dev_err(&rvu->pdev->dev,
351			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
352			__func__, lf, block->name, block->lf.max);
353		return;
354	}
355
356	/* Check if this is for a RVU PF or VF */
357	if (pcifunc & RVU_PFVF_FUNC_MASK) {
358		is_pf = false;
359		devnum = rvu_get_hwvf(rvu, pcifunc);
360	} else {
361		is_pf = true;
362		devnum = rvu_get_pf(pcifunc);
363	}
364
365	block->fn_map[lf] = attach ? pcifunc : 0;
366
367	switch (block->addr) {
368	case BLKADDR_NPA:
369		pfvf->npalf = attach ? true : false;
370		num_lfs = pfvf->npalf;
371		break;
372	case BLKADDR_NIX0:
373	case BLKADDR_NIX1:
374		pfvf->nixlf = attach ? true : false;
375		num_lfs = pfvf->nixlf;
376		break;
377	case BLKADDR_SSO:
378		attach ? pfvf->sso++ : pfvf->sso--;
379		num_lfs = pfvf->sso;
380		break;
381	case BLKADDR_SSOW:
382		attach ? pfvf->ssow++ : pfvf->ssow--;
383		num_lfs = pfvf->ssow;
384		break;
385	case BLKADDR_TIM:
386		attach ? pfvf->timlfs++ : pfvf->timlfs--;
387		num_lfs = pfvf->timlfs;
388		break;
389	case BLKADDR_CPT0:
390		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
391		num_lfs = pfvf->cptlfs;
392		break;
393	case BLKADDR_CPT1:
394		attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
395		num_lfs = pfvf->cpt1_lfs;
396		break;
397	}
398
399	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
400	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
401}
402
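/* A 'pcifunc' identifies an RVU PF or VF: the PF number sits in the upper
 * bits (RVU_PFVF_PF_SHIFT/MASK) and the function number in the lower bits,
 * where function 0 is the PF itself and 'VF index + 1' denotes a VF.
 */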
403inline int rvu_get_pf(u16 pcifunc)
404{
405	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
406}
407
408void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
409{
410	u64 cfg;
411
412	/* Get numVFs attached to this PF and first HWVF */
413	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
414	if (numvfs)
415		*numvfs = (cfg >> 12) & 0xFF;
416	if (hwvf)
417		*hwvf = cfg & 0xFFF;
418}
419
420int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
421{
422	int pf, func;
423	u64 cfg;
424
425	pf = rvu_get_pf(pcifunc);
426	func = pcifunc & RVU_PFVF_FUNC_MASK;
427
428	/* Get first HWVF attached to this PF */
429	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
430
431	return ((cfg & 0xFFF) + func - 1);
432}
433
434struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
435{
436	/* Check if it is a PF or VF */
437	if (pcifunc & RVU_PFVF_FUNC_MASK)
438		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
439	else
440		return &rvu->pf[rvu_get_pf(pcifunc)];
441}
442
443static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
444{
445	int pf, vf, nvfs;
446	u64 cfg;
447
448	pf = rvu_get_pf(pcifunc);
449	if (pf >= rvu->hw->total_pfs)
450		return false;
451
452	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
453		return true;
454
455	/* Check if VF is within number of VFs attached to this PF */
456	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
457	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
458	nvfs = (cfg >> 12) & 0xFF;
459	if (vf >= nvfs)
460		return false;
461
462	return true;
463}
464
465bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
466{
467	struct rvu_block *block;
468
469	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
470		return false;
471
472	block = &hw->block[blkaddr];
473	return block->implemented;
474}
475
476static void rvu_check_block_implemented(struct rvu *rvu)
477{
478	struct rvu_hwinfo *hw = rvu->hw;
479	struct rvu_block *block;
480	int blkid;
481	u64 cfg;
482
483	/* For each block check if 'implemented' bit is set */
484	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
485		block = &hw->block[blkid];
486		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
487		if (cfg & BIT_ULL(11))
488			block->implemented = true;
489	}
490}
491
492static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
493{
494	rvu_write64(rvu, BLKADDR_RVUM,
495		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
496		    RVU_BLK_RVUM_REVID);
497}
498
499static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
500{
501	rvu_write64(rvu, BLKADDR_RVUM,
502		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
503}
504
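/* Reset a single LF within a block: write the LF number with bit 12 set
 * (the reset trigger/busy bit in the block's LF_RST register) into the
 * block's lfreset_reg and poll until hardware clears that bit.
 */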
505int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
506{
507	int err;
508
509	if (!block->implemented)
510		return 0;
511
512	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
513	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
514			   true);
515	return err;
516}
517
518static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
519{
520	struct rvu_block *block = &rvu->hw->block[blkaddr];
521	int err;
522
523	if (!block->implemented)
524		return;
525
526	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
527	err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
528	if (err) {
		dev_err(rvu->dev, "HW block:%d reset timed out, retrying\n", blkaddr);
530		while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
531			;
532	}
533}
534
535static void rvu_reset_all_blocks(struct rvu *rvu)
536{
537	/* Do a HW reset of all RVU blocks */
538	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
539	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
540	rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
541	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
542	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
543	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
544	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
545	rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
546	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
547	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
548	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
549	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
550	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
551}
552
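/* Scan a block's per-LF config registers to find LFs that low level
 * firmware has already provisioned to some PF/VF, and mirror that state
 * into the AF's bookkeeping: LF bitmap, fn_map and MSIX vector offsets.
 */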
553static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
554{
555	struct rvu_pfvf *pfvf;
556	u64 cfg;
557	int lf;
558
559	for (lf = 0; lf < block->lf.max; lf++) {
560		cfg = rvu_read64(rvu, block->addr,
561				 block->lfcfg_reg | (lf << block->lfshift));
562		if (!(cfg & BIT_ULL(63)))
563			continue;
564
565		/* Set this resource as being used */
566		__set_bit(lf, block->lf.bmap);
567
		/* Get to whom this LF is attached */
569		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
570		rvu_update_rsrc_map(rvu, pfvf, block,
571				    (cfg >> 8) & 0xFFFF, lf, true);
572
573		/* Set start MSIX vector for this LF within this PF/VF */
574		rvu_set_msix_offset(rvu, pfvf, block, lf);
575	}
576}
577
578static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
579{
580	int min_vecs;
581
582	if (!vf)
583		goto check_pf;
584
585	if (!nvecs) {
586		dev_warn(rvu->dev,
587			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
588			 pf, vf - 1, nvecs);
589	}
590	return;
591
592check_pf:
593	if (pf == 0)
594		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
595	else
596		min_vecs = RVU_PF_INT_VEC_CNT;
597
	if (nvecs >= min_vecs)
		return;
600	dev_warn(rvu->dev,
601		 "PF%d is configured with too few vectors, %d, min is %d\n",
602		 pf, nvecs, min_vecs);
603}
604
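/* For each enabled PF and its VFs, read the number of MSIX vectors
 * provisioned by firmware, allocate an MSIX bitmap and a vector to
 * RVU block LF map, and reserve the vectors used for PF/VF interrupts.
 * Finally, since HW interprets RVU_AF_MSIXTR_BASE as an IOVA, map the
 * MSIX table's physical address through the IOMMU and program the IOVA.
 */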
605static int rvu_setup_msix_resources(struct rvu *rvu)
606{
607	struct rvu_hwinfo *hw = rvu->hw;
608	int pf, vf, numvfs, hwvf, err;
609	int nvecs, offset, max_msix;
610	struct rvu_pfvf *pfvf;
611	u64 cfg, phy_addr;
612	dma_addr_t iova;
613
614	for (pf = 0; pf < hw->total_pfs; pf++) {
615		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
616		/* If PF is not enabled, nothing to do */
617		if (!((cfg >> 20) & 0x01))
618			continue;
619
620		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
621
622		pfvf = &rvu->pf[pf];
623		/* Get num of MSIX vectors attached to this PF */
624		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
625		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
626		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
627
628		/* Alloc msix bitmap for this PF */
629		err = rvu_alloc_bitmap(&pfvf->msix);
630		if (err)
631			return err;
632
633		/* Allocate memory for MSIX vector to RVU block LF mapping */
634		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
635						sizeof(u16), GFP_KERNEL);
636		if (!pfvf->msix_lfmap)
637			return -ENOMEM;
638
639		/* For PF0 (AF) firmware will set msix vector offsets for
640		 * AF, block AF and PF0_INT vectors, so jump to VFs.
641		 */
642		if (!pf)
643			goto setup_vfmsix;
644
645		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
646		 * These are allocated on driver init and never freed,
647		 * so no need to set 'msix_lfmap' for these.
648		 */
649		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
650		nvecs = (cfg >> 12) & 0xFF;
651		cfg &= ~0x7FFULL;
652		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
653		rvu_write64(rvu, BLKADDR_RVUM,
654			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
655setup_vfmsix:
656		/* Alloc msix bitmap for VFs */
657		for (vf = 0; vf < numvfs; vf++) {
658			pfvf =  &rvu->hwvf[hwvf + vf];
659			/* Get num of MSIX vectors attached to this VF */
660			cfg = rvu_read64(rvu, BLKADDR_RVUM,
661					 RVU_PRIV_PFX_MSIX_CFG(pf));
662			pfvf->msix.max = (cfg & 0xFFF) + 1;
663			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
664
665			/* Alloc msix bitmap for this VF */
666			err = rvu_alloc_bitmap(&pfvf->msix);
667			if (err)
668				return err;
669
670			pfvf->msix_lfmap =
671				devm_kcalloc(rvu->dev, pfvf->msix.max,
672					     sizeof(u16), GFP_KERNEL);
673			if (!pfvf->msix_lfmap)
674				return -ENOMEM;
675
676			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
677			 * These are allocated on driver init and never freed,
678			 * so no need to set 'msix_lfmap' for these.
679			 */
680			cfg = rvu_read64(rvu, BLKADDR_RVUM,
681					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
682			nvecs = (cfg >> 12) & 0xFF;
683			cfg &= ~0x7FFULL;
684			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
685			rvu_write64(rvu, BLKADDR_RVUM,
686				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
687				    cfg | offset);
688		}
689	}
690
691	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
692	 * create an IOMMU mapping for the physical address configured by
693	 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
694	 */
695	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
696	max_msix = cfg & 0xFFFFF;
697	if (rvu->fwdata && rvu->fwdata->msixtr_base)
698		phy_addr = rvu->fwdata->msixtr_base;
699	else
700		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
701
702	iova = dma_map_resource(rvu->dev, phy_addr,
703				max_msix * PCI_MSIX_ENTRY_SIZE,
704				DMA_BIDIRECTIONAL, 0);
705
706	if (dma_mapping_error(rvu->dev, iova))
707		return -ENOMEM;
708
709	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
710	rvu->msix_base_iova = iova;
711	rvu->msixtr_base_phy = phy_addr;
712
713	return 0;
714}
715
716static void rvu_reset_msix(struct rvu *rvu)
717{
718	/* Restore msixtr base register */
719	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
720		    rvu->msixtr_base_phy);
721}
722
723static void rvu_free_hw_resources(struct rvu *rvu)
724{
725	struct rvu_hwinfo *hw = rvu->hw;
726	struct rvu_block *block;
727	struct rvu_pfvf  *pfvf;
728	int id, max_msix;
729	u64 cfg;
730
731	rvu_npa_freemem(rvu);
732	rvu_npc_freemem(rvu);
733	rvu_nix_freemem(rvu);
734
735	/* Free block LF bitmaps */
736	for (id = 0; id < BLK_COUNT; id++) {
737		block = &hw->block[id];
738		kfree(block->lf.bmap);
739	}
740
741	/* Free MSIX bitmaps */
742	for (id = 0; id < hw->total_pfs; id++) {
743		pfvf = &rvu->pf[id];
744		kfree(pfvf->msix.bmap);
745	}
746
747	for (id = 0; id < hw->total_vfs; id++) {
748		pfvf = &rvu->hwvf[id];
749		kfree(pfvf->msix.bmap);
750	}
751
752	/* Unmap MSIX vector base IOVA mapping */
753	if (!rvu->msix_base_iova)
754		return;
755	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
756	max_msix = cfg & 0xFFFFF;
757	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
758			   max_msix * PCI_MSIX_ENTRY_SIZE,
759			   DMA_BIDIRECTIONAL, 0);
760
761	rvu_reset_msix(rvu);
762	mutex_destroy(&rvu->rsrc_lock);
763}
764
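/* Assign MAC addresses to CGX mapped PFs and to all VFs. Addresses
 * provided by firmware via 'fwdata' are preferred; when none are
 * available a random MAC address is generated instead.
 */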
765static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
766{
767	struct rvu_hwinfo *hw = rvu->hw;
768	int pf, vf, numvfs, hwvf;
769	struct rvu_pfvf *pfvf;
770	u64 *mac;
771
772	for (pf = 0; pf < hw->total_pfs; pf++) {
		/* For PF0 (AF), assign MAC addresses only to its VFs (LBK VFs) */
774		if (!pf)
775			goto lbkvf;
776
777		if (!is_pf_cgxmapped(rvu, pf))
778			continue;
779		/* Assign MAC address to PF */
780		pfvf = &rvu->pf[pf];
781		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
782			mac = &rvu->fwdata->pf_macs[pf];
783			if (*mac)
784				u64_to_ether_addr(*mac, pfvf->mac_addr);
785			else
786				eth_random_addr(pfvf->mac_addr);
787		} else {
788			eth_random_addr(pfvf->mac_addr);
789		}
790		ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
791
792lbkvf:
		/* Assign MAC addresses to VFs */
794		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
795		for (vf = 0; vf < numvfs; vf++, hwvf++) {
796			pfvf = &rvu->hwvf[hwvf];
797			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
798				mac = &rvu->fwdata->vf_macs[hwvf];
799				if (*mac)
800					u64_to_ether_addr(*mac, pfvf->mac_addr);
801				else
802					eth_random_addr(pfvf->mac_addr);
803			} else {
804				eth_random_addr(pfvf->mac_addr);
805			}
806			ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
807		}
808	}
809}
810
811static int rvu_fwdata_init(struct rvu *rvu)
812{
813	u64 fwdbase;
814	int err;
815
816	/* Get firmware data base address */
817	err = cgx_get_fwdata_base(&fwdbase);
818	if (err)
819		goto fail;
820	rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
821	if (!rvu->fwdata)
822		goto fail;
823	if (!is_rvu_fwdata_valid(rvu)) {
824		dev_err(rvu->dev,
			"Mismatch in 'fwdata' struct between kernel and firmware\n");
826		iounmap(rvu->fwdata);
827		rvu->fwdata = NULL;
828		return -EINVAL;
829	}
830	return 0;
831fail:
832	dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
833	return -EIO;
834}
835
836static void rvu_fwdata_exit(struct rvu *rvu)
837{
838	if (rvu->fwdata)
839		iounmap(rvu->fwdata);
840}
841
842static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
843{
844	struct rvu_hwinfo *hw = rvu->hw;
845	struct rvu_block *block;
846	int blkid;
847	u64 cfg;
848
849	/* Init NIX LF's bitmap */
850	block = &hw->block[blkaddr];
851	if (!block->implemented)
852		return 0;
853	blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
854	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
855	block->lf.max = cfg & 0xFFF;
856	block->addr = blkaddr;
857	block->type = BLKTYPE_NIX;
858	block->lfshift = 8;
859	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
860	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
861	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
862	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
863	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
864	block->lfreset_reg = NIX_AF_LF_RST;
865	block->rvu = rvu;
866	sprintf(block->name, "NIX%d", blkid);
867	rvu->nix_blkaddr[blkid] = blkaddr;
868	return rvu_alloc_bitmap(&block->lf);
869}
870
871static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
872{
873	struct rvu_hwinfo *hw = rvu->hw;
874	struct rvu_block *block;
875	int blkid;
876	u64 cfg;
877
878	/* Init CPT LF's bitmap */
879	block = &hw->block[blkaddr];
880	if (!block->implemented)
881		return 0;
882	blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
883	cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
884	block->lf.max = cfg & 0xFF;
885	block->addr = blkaddr;
886	block->type = BLKTYPE_CPT;
887	block->multislot = true;
888	block->lfshift = 3;
889	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
890	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
891	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
892	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
893	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
894	block->lfreset_reg = CPT_AF_LF_RST;
895	block->rvu = rvu;
896	sprintf(block->name, "CPT%d", blkid);
897	return rvu_alloc_bitmap(&block->lf);
898}
899
900static void rvu_get_lbk_bufsize(struct rvu *rvu)
901{
902	struct pci_dev *pdev = NULL;
903	void __iomem *base;
904	u64 lbk_const;
905
906	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
907			      PCI_DEVID_OCTEONTX2_LBK, pdev);
908	if (!pdev)
909		return;
910
911	base = pci_ioremap_bar(pdev, 0);
912	if (!base)
913		goto err_put;
914
915	lbk_const = readq(base + LBK_CONST);
916
917	/* cache fifo size */
918	rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);
919
920	iounmap(base);
921err_put:
922	pci_dev_put(pdev);
923}
924
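/* Discover HW resources (PF/VF counts, per-block LF counts), set up the
 * per-block bookkeeping and MSIX resources, scan for LFs already
 * provisioned by firmware and then initialize the individual blocks
 * (NPC, CGX, NPA, NIX, SDP, MCS, CPT) in dependency order.
 */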
925static int rvu_setup_hw_resources(struct rvu *rvu)
926{
927	struct rvu_hwinfo *hw = rvu->hw;
928	struct rvu_block *block;
929	int blkid, err;
930	u64 cfg;
931
932	/* Get HW supported max RVU PF & VF count */
933	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
934	hw->total_pfs = (cfg >> 32) & 0xFF;
935	hw->total_vfs = (cfg >> 20) & 0xFFF;
936	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
937
938	/* Init NPA LF's bitmap */
939	block = &hw->block[BLKADDR_NPA];
940	if (!block->implemented)
941		goto nix;
942	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
943	block->lf.max = (cfg >> 16) & 0xFFF;
944	block->addr = BLKADDR_NPA;
945	block->type = BLKTYPE_NPA;
946	block->lfshift = 8;
947	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
948	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
949	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
950	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
951	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
952	block->lfreset_reg = NPA_AF_LF_RST;
953	block->rvu = rvu;
954	sprintf(block->name, "NPA");
955	err = rvu_alloc_bitmap(&block->lf);
956	if (err) {
957		dev_err(rvu->dev,
958			"%s: Failed to allocate NPA LF bitmap\n", __func__);
959		return err;
960	}
961
962nix:
963	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
964	if (err) {
965		dev_err(rvu->dev,
966			"%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
967		return err;
968	}
969
970	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
971	if (err) {
972		dev_err(rvu->dev,
973			"%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
974		return err;
975	}
976
977	/* Init SSO group's bitmap */
978	block = &hw->block[BLKADDR_SSO];
979	if (!block->implemented)
980		goto ssow;
981	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
982	block->lf.max = cfg & 0xFFFF;
983	block->addr = BLKADDR_SSO;
984	block->type = BLKTYPE_SSO;
985	block->multislot = true;
986	block->lfshift = 3;
987	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
988	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
989	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
990	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
991	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
992	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
993	block->rvu = rvu;
994	sprintf(block->name, "SSO GROUP");
995	err = rvu_alloc_bitmap(&block->lf);
996	if (err) {
997		dev_err(rvu->dev,
998			"%s: Failed to allocate SSO LF bitmap\n", __func__);
999		return err;
1000	}
1001
1002ssow:
1003	/* Init SSO workslot's bitmap */
1004	block = &hw->block[BLKADDR_SSOW];
1005	if (!block->implemented)
1006		goto tim;
1007	block->lf.max = (cfg >> 56) & 0xFF;
1008	block->addr = BLKADDR_SSOW;
1009	block->type = BLKTYPE_SSOW;
1010	block->multislot = true;
1011	block->lfshift = 3;
1012	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
1013	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
1014	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
1015	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
1016	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
1017	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
1018	block->rvu = rvu;
1019	sprintf(block->name, "SSOWS");
1020	err = rvu_alloc_bitmap(&block->lf);
1021	if (err) {
1022		dev_err(rvu->dev,
1023			"%s: Failed to allocate SSOW LF bitmap\n", __func__);
1024		return err;
1025	}
1026
1027tim:
1028	/* Init TIM LF's bitmap */
1029	block = &hw->block[BLKADDR_TIM];
1030	if (!block->implemented)
1031		goto cpt;
1032	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
1033	block->lf.max = cfg & 0xFFFF;
1034	block->addr = BLKADDR_TIM;
1035	block->type = BLKTYPE_TIM;
1036	block->multislot = true;
1037	block->lfshift = 3;
1038	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
1039	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
1040	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
1041	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
1042	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
1043	block->lfreset_reg = TIM_AF_LF_RST;
1044	block->rvu = rvu;
1045	sprintf(block->name, "TIM");
1046	err = rvu_alloc_bitmap(&block->lf);
1047	if (err) {
1048		dev_err(rvu->dev,
1049			"%s: Failed to allocate TIM LF bitmap\n", __func__);
1050		return err;
1051	}
1052
1053cpt:
1054	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
1055	if (err) {
1056		dev_err(rvu->dev,
1057			"%s: Failed to allocate CPT0 LF bitmap\n", __func__);
1058		return err;
1059	}
1060	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
1061	if (err) {
1062		dev_err(rvu->dev,
1063			"%s: Failed to allocate CPT1 LF bitmap\n", __func__);
1064		return err;
1065	}
1066
1067	/* Allocate memory for PFVF data */
1068	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
1069			       sizeof(struct rvu_pfvf), GFP_KERNEL);
1070	if (!rvu->pf) {
1071		dev_err(rvu->dev,
1072			"%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
1073		return -ENOMEM;
1074	}
1075
1076	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
1077				 sizeof(struct rvu_pfvf), GFP_KERNEL);
1078	if (!rvu->hwvf) {
1079		dev_err(rvu->dev,
1080			"%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
1081		return -ENOMEM;
1082	}
1083
1084	mutex_init(&rvu->rsrc_lock);
1085
1086	rvu_fwdata_init(rvu);
1087
1088	err = rvu_setup_msix_resources(rvu);
1089	if (err) {
1090		dev_err(rvu->dev,
1091			"%s: Failed to setup MSIX resources\n", __func__);
1092		return err;
1093	}
1094
1095	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
1096		block = &hw->block[blkid];
1097		if (!block->lf.bmap)
1098			continue;
1099
1100		/* Allocate memory for block LF/slot to pcifunc mapping info */
1101		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
1102					     sizeof(u16), GFP_KERNEL);
1103		if (!block->fn_map) {
1104			err = -ENOMEM;
1105			goto msix_err;
1106		}
1107
1108		/* Scan all blocks to check if low level firmware has
1109		 * already provisioned any of the resources to a PF/VF.
1110		 */
1111		rvu_scan_block(rvu, block);
1112	}
1113
1114	err = rvu_set_channels_base(rvu);
1115	if (err)
1116		goto msix_err;
1117
1118	err = rvu_npc_init(rvu);
1119	if (err) {
1120		dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
1121		goto npc_err;
1122	}
1123
1124	err = rvu_cgx_init(rvu);
1125	if (err) {
1126		dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
1127		goto cgx_err;
1128	}
1129
1130	err = rvu_npc_exact_init(rvu);
1131	if (err) {
1132		dev_err(rvu->dev, "failed to initialize exact match table\n");
1133		return err;
1134	}
1135
1136	/* Assign MACs for CGX mapped functions */
1137	rvu_setup_pfvf_macaddress(rvu);
1138
1139	err = rvu_npa_init(rvu);
1140	if (err) {
1141		dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
1142		goto npa_err;
1143	}
1144
1145	rvu_get_lbk_bufsize(rvu);
1146
1147	err = rvu_nix_init(rvu);
1148	if (err) {
1149		dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
1150		goto nix_err;
1151	}
1152
1153	err = rvu_sdp_init(rvu);
1154	if (err) {
1155		dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
1156		goto nix_err;
1157	}
1158
1159	rvu_program_channels(rvu);
1160
1161	err = rvu_mcs_init(rvu);
1162	if (err) {
1163		dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
1164		goto nix_err;
1165	}
1166
1167	err = rvu_cpt_init(rvu);
1168	if (err) {
1169		dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
1170		goto mcs_err;
1171	}
1172
1173	return 0;
1174
1175mcs_err:
1176	rvu_mcs_exit(rvu);
1177nix_err:
1178	rvu_nix_freemem(rvu);
1179npa_err:
1180	rvu_npa_freemem(rvu);
1181cgx_err:
1182	rvu_cgx_exit(rvu);
1183npc_err:
1184	rvu_npc_freemem(rvu);
1185	rvu_fwdata_exit(rvu);
1186msix_err:
1187	rvu_reset_msix(rvu);
1188	return err;
1189}
1190
1191/* NPA and NIX admin queue APIs */
1192void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
1193{
1194	if (!aq)
1195		return;
1196
1197	qmem_free(rvu->dev, aq->inst);
1198	qmem_free(rvu->dev, aq->res);
1199	devm_kfree(rvu->dev, aq);
1200}
1201
1202int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
1203		 int qsize, int inst_size, int res_size)
1204{
1205	struct admin_queue *aq;
1206	int err;
1207
1208	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
1209	if (!*ad_queue)
1210		return -ENOMEM;
1211	aq = *ad_queue;
1212
	/* Alloc memory for instructions i.e. the AQ */
1214	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
1215	if (err) {
1216		devm_kfree(rvu->dev, aq);
1217		return err;
1218	}
1219
1220	/* Alloc memory for results */
1221	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
1222	if (err) {
1223		rvu_aq_free(rvu, aq);
1224		return err;
1225	}
1226
1227	spin_lock_init(&aq->lock);
1228	return 0;
1229}
1230
1231int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
1232			   struct ready_msg_rsp *rsp)
1233{
1234	if (rvu->fwdata) {
1235		rsp->rclk_freq = rvu->fwdata->rclk;
1236		rsp->sclk_freq = rvu->fwdata->sclk;
1237	}
1238	return 0;
1239}
1240
1241/* Get current count of a RVU block's LF/slots
1242 * provisioned to a given RVU func.
1243 */
1244u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
1245{
1246	switch (blkaddr) {
1247	case BLKADDR_NPA:
1248		return pfvf->npalf ? 1 : 0;
1249	case BLKADDR_NIX0:
1250	case BLKADDR_NIX1:
1251		return pfvf->nixlf ? 1 : 0;
1252	case BLKADDR_SSO:
1253		return pfvf->sso;
1254	case BLKADDR_SSOW:
1255		return pfvf->ssow;
1256	case BLKADDR_TIM:
1257		return pfvf->timlfs;
1258	case BLKADDR_CPT0:
1259		return pfvf->cptlfs;
1260	case BLKADDR_CPT1:
1261		return pfvf->cpt1_lfs;
1262	}
1263	return 0;
1264}
1265
1266/* Return true if LFs of block type are attached to pcifunc */
1267static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
1268{
1269	switch (blktype) {
1270	case BLKTYPE_NPA:
1271		return pfvf->npalf ? 1 : 0;
1272	case BLKTYPE_NIX:
1273		return pfvf->nixlf ? 1 : 0;
1274	case BLKTYPE_SSO:
1275		return !!pfvf->sso;
1276	case BLKTYPE_SSOW:
1277		return !!pfvf->ssow;
1278	case BLKTYPE_TIM:
1279		return !!pfvf->timlfs;
1280	case BLKTYPE_CPT:
1281		return pfvf->cptlfs || pfvf->cpt1_lfs;
1282	}
1283
1284	return false;
1285}
1286
1287bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
1288{
1289	struct rvu_pfvf *pfvf;
1290
1291	if (!is_pf_func_valid(rvu, pcifunc))
1292		return false;
1293
1294	pfvf = rvu_get_pfvf(rvu, pcifunc);
1295
1296	/* Check if this PFFUNC has a LF of type blktype attached */
1297	if (!is_blktype_attached(pfvf, blktype))
1298		return false;
1299
1300	return true;
1301}
1302
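/* Use the block's lookup register to translate a (pcifunc, slot) pair into
 * the LF's HW index. Returns -1 if the lookup reports no valid LF.
 */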
1303static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
1304			   int pcifunc, int slot)
1305{
1306	u64 val;
1307
1308	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
1309	rvu_write64(rvu, block->addr, block->lookup_reg, val);
1310	/* Wait for the lookup to finish */
1311	/* TODO: put some timeout here */
1312	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
1313		;
1314
1315	val = rvu_read64(rvu, block->addr, block->lookup_reg);
1316
1317	/* Check LF valid bit */
1318	if (!(val & (1ULL << 12)))
1319		return -1;
1320
1321	return (val & 0xFFF);
1322}
1323
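/* Map a pcifunc's 'global' slot number (i.e. the slot index across all
 * blocks of the given type that have LFs attached to this pcifunc) to the
 * block address owning that slot, and return the slot number within that
 * block via 'slot_in_block'.
 */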
1324int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
1325			      u16 global_slot, u16 *slot_in_block)
1326{
1327	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1328	int numlfs, total_lfs = 0, nr_blocks = 0;
1329	int i, num_blkaddr[BLK_COUNT] = { 0 };
1330	struct rvu_block *block;
1331	int blkaddr;
1332	u16 start_slot;
1333
1334	if (!is_blktype_attached(pfvf, blktype))
1335		return -ENODEV;
1336
1337	/* Get all the block addresses from which LFs are attached to
1338	 * the given pcifunc in num_blkaddr[].
1339	 */
1340	for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
1341		block = &rvu->hw->block[blkaddr];
1342		if (block->type != blktype)
1343			continue;
1344		if (!is_block_implemented(rvu->hw, blkaddr))
1345			continue;
1346
1347		numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
1348		if (numlfs) {
1349			total_lfs += numlfs;
1350			num_blkaddr[nr_blocks] = blkaddr;
1351			nr_blocks++;
1352		}
1353	}
1354
1355	if (global_slot >= total_lfs)
1356		return -ENODEV;
1357
1358	/* Based on the given global slot number retrieve the
1359	 * correct block address out of all attached block
1360	 * addresses and slot number in that block.
1361	 */
1362	total_lfs = 0;
1363	blkaddr = -ENODEV;
1364	for (i = 0; i < nr_blocks; i++) {
1365		numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
1366		total_lfs += numlfs;
1367		if (global_slot < total_lfs) {
1368			blkaddr = num_blkaddr[i];
1369			start_slot = total_lfs - numlfs;
1370			*slot_in_block = global_slot - start_slot;
1371			break;
1372		}
1373	}
1374
1375	return blkaddr;
1376}
1377
1378static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
1379{
1380	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1381	struct rvu_hwinfo *hw = rvu->hw;
1382	struct rvu_block *block;
1383	int slot, lf, num_lfs;
1384	int blkaddr;
1385
1386	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
1387	if (blkaddr < 0)
1388		return;
1389
1390	if (blktype == BLKTYPE_NIX)
1391		rvu_nix_reset_mac(pfvf, pcifunc);
1392
1393	block = &hw->block[blkaddr];
1394
1395	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1396	if (!num_lfs)
1397		return;
1398
1399	for (slot = 0; slot < num_lfs; slot++) {
1400		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
1401		if (lf < 0) /* This should never happen */
1402			continue;
1403
1404		/* Disable the LF */
1405		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
1406			    (lf << block->lfshift), 0x00ULL);
1407
1408		/* Update SW maintained mapping info as well */
1409		rvu_update_rsrc_map(rvu, pfvf, block,
1410				    pcifunc, lf, false);
1411
1412		/* Free the resource */
1413		rvu_free_rsrc(&block->lf, lf);
1414
1415		/* Clear MSIX vector offset for this LF */
1416		rvu_clear_msix_offset(rvu, pfvf, block, lf);
1417	}
1418}
1419
1420static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
1421			    u16 pcifunc)
1422{
1423	struct rvu_hwinfo *hw = rvu->hw;
1424	bool detach_all = true;
1425	struct rvu_block *block;
1426	int blkid;
1427
1428	mutex_lock(&rvu->rsrc_lock);
1429
1430	/* Check for partial resource detach */
1431	if (detach && detach->partial)
1432		detach_all = false;
1433
1434	/* Check for RVU block's LFs attached to this func,
1435	 * if so, detach them.
1436	 */
1437	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
1438		block = &hw->block[blkid];
1439		if (!block->lf.bmap)
1440			continue;
1441		if (!detach_all && detach) {
1442			if (blkid == BLKADDR_NPA && !detach->npalf)
1443				continue;
1444			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
1445				continue;
1446			else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
1447				continue;
1448			else if ((blkid == BLKADDR_SSO) && !detach->sso)
1449				continue;
1450			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
1451				continue;
1452			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
1453				continue;
1454			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
1455				continue;
1456			else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
1457				continue;
1458		}
1459		rvu_detach_block(rvu, pcifunc, block->type);
1460	}
1461
1462	mutex_unlock(&rvu->rsrc_lock);
1463	return 0;
1464}
1465
1466int rvu_mbox_handler_detach_resources(struct rvu *rvu,
1467				      struct rsrc_detach *detach,
1468				      struct msg_rsp *rsp)
1469{
1470	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
1471}
1472
1473int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
1474{
1475	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1476	int blkaddr = BLKADDR_NIX0, vf;
1477	struct rvu_pfvf *pf;
1478
1479	pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1480
1481	/* All CGX mapped PFs are set with assigned NIX block during init */
1482	if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
1483		blkaddr = pf->nix_blkaddr;
1484	} else if (is_afvf(pcifunc)) {
1485		vf = pcifunc - 1;
		/* Assign NIX based on VF number. All even numbered VFs get
		 * NIX0 and odd numbered ones get NIX1.
		 */
1489		blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
1490		/* NIX1 is not present on all silicons */
1491		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1492			blkaddr = BLKADDR_NIX0;
1493	}
1494
1495	/* if SDP1 then the blkaddr is NIX1 */
1496	if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
1497		blkaddr = BLKADDR_NIX1;
1498
1499	switch (blkaddr) {
1500	case BLKADDR_NIX1:
1501		pfvf->nix_blkaddr = BLKADDR_NIX1;
1502		pfvf->nix_rx_intf = NIX_INTFX_RX(1);
1503		pfvf->nix_tx_intf = NIX_INTFX_TX(1);
1504		break;
1505	case BLKADDR_NIX0:
1506	default:
1507		pfvf->nix_blkaddr = BLKADDR_NIX0;
1508		pfvf->nix_rx_intf = NIX_INTFX_RX(0);
1509		pfvf->nix_tx_intf = NIX_INTFX_TX(0);
1510		break;
1511	}
1512
1513	return pfvf->nix_blkaddr;
1514}
1515
1516static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
1517				  u16 pcifunc, struct rsrc_attach *attach)
1518{
1519	int blkaddr;
1520
1521	switch (blktype) {
1522	case BLKTYPE_NIX:
1523		blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
1524		break;
1525	case BLKTYPE_CPT:
1526		if (attach->hdr.ver < RVU_MULTI_BLK_VER)
1527			return rvu_get_blkaddr(rvu, blktype, 0);
1528		blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
1529			  BLKADDR_CPT0;
1530		if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
1531			return -ENODEV;
1532		break;
1533	default:
1534		return rvu_get_blkaddr(rvu, blktype, 0);
1535	}
1536
1537	if (is_block_implemented(rvu->hw, blkaddr))
1538		return blkaddr;
1539
1540	return -ENODEV;
1541}
1542
1543static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
1544			     int num_lfs, struct rsrc_attach *attach)
1545{
1546	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1547	struct rvu_hwinfo *hw = rvu->hw;
1548	struct rvu_block *block;
1549	int slot, lf;
1550	int blkaddr;
1551	u64 cfg;
1552
1553	if (!num_lfs)
1554		return;
1555
1556	blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
1557	if (blkaddr < 0)
1558		return;
1559
1560	block = &hw->block[blkaddr];
1561	if (!block->lf.bmap)
1562		return;
1563
1564	for (slot = 0; slot < num_lfs; slot++) {
1565		/* Allocate the resource */
1566		lf = rvu_alloc_rsrc(&block->lf);
1567		if (lf < 0)
1568			return;
1569
1570		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
1571		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
1572			    (lf << block->lfshift), cfg);
1573		rvu_update_rsrc_map(rvu, pfvf, block,
1574				    pcifunc, lf, true);
1575
1576		/* Set start MSIX vector for this LF within this PF/VF */
1577		rvu_set_msix_offset(rvu, pfvf, block, lf);
1578	}
1579}
1580
1581static int rvu_check_rsrc_availability(struct rvu *rvu,
1582				       struct rsrc_attach *req, u16 pcifunc)
1583{
1584	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1585	int free_lfs, mappedlfs, blkaddr;
1586	struct rvu_hwinfo *hw = rvu->hw;
1587	struct rvu_block *block;
1588
1589	/* Only one NPA LF can be attached */
1590	if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
1591		block = &hw->block[BLKADDR_NPA];
1592		free_lfs = rvu_rsrc_free_count(&block->lf);
1593		if (!free_lfs)
1594			goto fail;
1595	} else if (req->npalf) {
1596		dev_err(&rvu->pdev->dev,
1597			"Func 0x%x: Invalid req, already has NPA\n",
1598			 pcifunc);
1599		return -EINVAL;
1600	}
1601
1602	/* Only one NIX LF can be attached */
1603	if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
1604		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
1605						 pcifunc, req);
1606		if (blkaddr < 0)
1607			return blkaddr;
1608		block = &hw->block[blkaddr];
1609		free_lfs = rvu_rsrc_free_count(&block->lf);
1610		if (!free_lfs)
1611			goto fail;
1612	} else if (req->nixlf) {
1613		dev_err(&rvu->pdev->dev,
1614			"Func 0x%x: Invalid req, already has NIX\n",
1615			pcifunc);
1616		return -EINVAL;
1617	}
1618
1619	if (req->sso) {
1620		block = &hw->block[BLKADDR_SSO];
1621		/* Is request within limits ? */
1622		if (req->sso > block->lf.max) {
1623			dev_err(&rvu->pdev->dev,
1624				"Func 0x%x: Invalid SSO req, %d > max %d\n",
1625				 pcifunc, req->sso, block->lf.max);
1626			return -EINVAL;
1627		}
1628		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1629		free_lfs = rvu_rsrc_free_count(&block->lf);
1630		/* Check if additional resources are available */
1631		if (req->sso > mappedlfs &&
1632		    ((req->sso - mappedlfs) > free_lfs))
1633			goto fail;
1634	}
1635
1636	if (req->ssow) {
1637		block = &hw->block[BLKADDR_SSOW];
1638		if (req->ssow > block->lf.max) {
1639			dev_err(&rvu->pdev->dev,
1640				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
				 pcifunc, req->ssow, block->lf.max);
1642			return -EINVAL;
1643		}
1644		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1645		free_lfs = rvu_rsrc_free_count(&block->lf);
1646		if (req->ssow > mappedlfs &&
1647		    ((req->ssow - mappedlfs) > free_lfs))
1648			goto fail;
1649	}
1650
1651	if (req->timlfs) {
1652		block = &hw->block[BLKADDR_TIM];
1653		if (req->timlfs > block->lf.max) {
1654			dev_err(&rvu->pdev->dev,
1655				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
1656				 pcifunc, req->timlfs, block->lf.max);
1657			return -EINVAL;
1658		}
1659		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1660		free_lfs = rvu_rsrc_free_count(&block->lf);
1661		if (req->timlfs > mappedlfs &&
1662		    ((req->timlfs - mappedlfs) > free_lfs))
1663			goto fail;
1664	}
1665
1666	if (req->cptlfs) {
1667		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
1668						 pcifunc, req);
1669		if (blkaddr < 0)
1670			return blkaddr;
1671		block = &hw->block[blkaddr];
1672		if (req->cptlfs > block->lf.max) {
1673			dev_err(&rvu->pdev->dev,
1674				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
1675				 pcifunc, req->cptlfs, block->lf.max);
1676			return -EINVAL;
1677		}
1678		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1679		free_lfs = rvu_rsrc_free_count(&block->lf);
1680		if (req->cptlfs > mappedlfs &&
1681		    ((req->cptlfs - mappedlfs) > free_lfs))
1682			goto fail;
1683	}
1684
1685	return 0;
1686
1687fail:
1688	dev_info(rvu->dev, "Request for %s failed\n", block->name);
1689	return -ENOSPC;
1690}
1691
1692static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
1693				       struct rsrc_attach *attach)
1694{
1695	int blkaddr, num_lfs;
1696
1697	blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
1698					 attach->hdr.pcifunc, attach);
1699	if (blkaddr < 0)
1700		return false;
1701
1702	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
1703					blkaddr);
1704	/* Requester already has LFs from given block ? */
1705	return !!num_lfs;
1706}
1707
1708int rvu_mbox_handler_attach_resources(struct rvu *rvu,
1709				      struct rsrc_attach *attach,
1710				      struct msg_rsp *rsp)
1711{
1712	u16 pcifunc = attach->hdr.pcifunc;
1713	int err;
1714
1715	/* If first request, detach all existing attached resources */
1716	if (!attach->modify)
1717		rvu_detach_rsrcs(rvu, NULL, pcifunc);
1718
1719	mutex_lock(&rvu->rsrc_lock);
1720
1721	/* Check if the request can be accommodated */
1722	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
1723	if (err)
1724		goto exit;
1725
1726	/* Now attach the requested resources */
1727	if (attach->npalf)
1728		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
1729
1730	if (attach->nixlf)
1731		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
1732
1733	if (attach->sso) {
		/* An RVU func doesn't know which exact LF or slot is attached
		 * to it; it always sees them as slots 0, 1, 2 and so on. So
		 * for a 'modify' request, simply detach all existing attached
		 * LFs/slots and attach afresh.
		 */
1739		if (attach->modify)
1740			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
1741		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
1742				 attach->sso, attach);
1743	}
1744
1745	if (attach->ssow) {
1746		if (attach->modify)
1747			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
1748		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
1749				 attach->ssow, attach);
1750	}
1751
1752	if (attach->timlfs) {
1753		if (attach->modify)
1754			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
1755		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
1756				 attach->timlfs, attach);
1757	}
1758
1759	if (attach->cptlfs) {
1760		if (attach->modify &&
1761		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
1762			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
1763		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
1764				 attach->cptlfs, attach);
1765	}
1766
1767exit:
1768	mutex_unlock(&rvu->rsrc_lock);
1769	return err;
1770}
1771
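/* Reverse lookup in the PF/VF's MSIX vector map: find which vector was
 * assigned to the given block LF. msix_lfmap[] holds a MSIX_BLKLF()
 * encoding of (blkaddr, lf) per allocated vector, see rvu_set_msix_offset().
 */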
1772static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1773			       int blkaddr, int lf)
1774{
1775	u16 vec;
1776
1777	if (lf < 0)
1778		return MSIX_VECTOR_INVALID;
1779
1780	for (vec = 0; vec < pfvf->msix.max; vec++) {
1781		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
1782			return vec;
1783	}
1784	return MSIX_VECTOR_INVALID;
1785}
1786
1787static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1788				struct rvu_block *block, int lf)
1789{
1790	u16 nvecs, vec, offset;
1791	u64 cfg;
1792
1793	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1794			 (lf << block->lfshift));
1795	nvecs = (cfg >> 12) & 0xFF;
1796
1797	/* Check and alloc MSIX vectors, must be contiguous */
1798	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
1799		return;
1800
1801	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
1802
1803	/* Config MSIX offset in LF */
1804	rvu_write64(rvu, block->addr, block->msixcfg_reg |
1805		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
1806
1807	/* Update the bitmap as well */
1808	for (vec = 0; vec < nvecs; vec++)
1809		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
1810}
1811
1812static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1813				  struct rvu_block *block, int lf)
1814{
1815	u16 nvecs, vec, offset;
1816	u64 cfg;
1817
1818	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1819			 (lf << block->lfshift));
1820	nvecs = (cfg >> 12) & 0xFF;
1821
1822	/* Clear MSIX offset in LF */
1823	rvu_write64(rvu, block->addr, block->msixcfg_reg |
1824		    (lf << block->lfshift), cfg & ~0x7FFULL);
1825
1826	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
1827
1828	/* Update the mapping */
1829	for (vec = 0; vec < nvecs; vec++)
1830		pfvf->msix_lfmap[offset + vec] = 0;
1831
1832	/* Free the same in MSIX bitmap */
1833	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
1834}
1835
1836int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
1837				 struct msix_offset_rsp *rsp)
1838{
1839	struct rvu_hwinfo *hw = rvu->hw;
1840	u16 pcifunc = req->hdr.pcifunc;
1841	struct rvu_pfvf *pfvf;
1842	int lf, slot, blkaddr;
1843
1844	pfvf = rvu_get_pfvf(rvu, pcifunc);
1845	if (!pfvf->msix.bmap)
1846		return 0;
1847
	/* Get MSIX offsets for each block's LFs attached to this PF/VF */
1849	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
1850	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
1851
1852	/* Get BLKADDR from which LFs are attached to pcifunc */
1853	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1854	if (blkaddr < 0) {
1855		rsp->nix_msixoff = MSIX_VECTOR_INVALID;
1856	} else {
1857		lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1858		rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
1859	}
1860
1861	rsp->sso = pfvf->sso;
1862	for (slot = 0; slot < rsp->sso; slot++) {
1863		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
1864		rsp->sso_msixoff[slot] =
1865			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
1866	}
1867
1868	rsp->ssow = pfvf->ssow;
1869	for (slot = 0; slot < rsp->ssow; slot++) {
1870		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
1871		rsp->ssow_msixoff[slot] =
1872			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
1873	}
1874
1875	rsp->timlfs = pfvf->timlfs;
1876	for (slot = 0; slot < rsp->timlfs; slot++) {
1877		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
1878		rsp->timlf_msixoff[slot] =
1879			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
1880	}
1881
1882	rsp->cptlfs = pfvf->cptlfs;
1883	for (slot = 0; slot < rsp->cptlfs; slot++) {
1884		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
1885		rsp->cptlf_msixoff[slot] =
1886			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
1887	}
1888
1889	rsp->cpt1_lfs = pfvf->cpt1_lfs;
1890	for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
1891		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
1892		rsp->cpt1_lf_msixoff[slot] =
1893			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
1894	}
1895
1896	return 0;
1897}
1898
1899int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
1900				   struct free_rsrcs_rsp *rsp)
1901{
1902	struct rvu_hwinfo *hw = rvu->hw;
1903	struct rvu_block *block;
1904	struct nix_txsch *txsch;
1905	struct nix_hw *nix_hw;
1906
1907	mutex_lock(&rvu->rsrc_lock);
1908
1909	block = &hw->block[BLKADDR_NPA];
1910	rsp->npa = rvu_rsrc_free_count(&block->lf);
1911
1912	block = &hw->block[BLKADDR_NIX0];
1913	rsp->nix = rvu_rsrc_free_count(&block->lf);
1914
1915	block = &hw->block[BLKADDR_NIX1];
1916	rsp->nix1 = rvu_rsrc_free_count(&block->lf);
1917
1918	block = &hw->block[BLKADDR_SSO];
1919	rsp->sso = rvu_rsrc_free_count(&block->lf);
1920
1921	block = &hw->block[BLKADDR_SSOW];
1922	rsp->ssow = rvu_rsrc_free_count(&block->lf);
1923
1924	block = &hw->block[BLKADDR_TIM];
1925	rsp->tim = rvu_rsrc_free_count(&block->lf);
1926
1927	block = &hw->block[BLKADDR_CPT0];
1928	rsp->cpt = rvu_rsrc_free_count(&block->lf);
1929
1930	block = &hw->block[BLKADDR_CPT1];
1931	rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
1932
1933	if (rvu->hw->cap.nix_fixed_txschq_mapping) {
1934		rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
1935		rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
1936		rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
1937		rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
1938		/* NIX1 */
1939		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1940			goto out;
1941		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
1942		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
1943		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
1944		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
1945	} else {
1946		nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
1947		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1948		rsp->schq[NIX_TXSCH_LVL_SMQ] =
1949				rvu_rsrc_free_count(&txsch->schq);
1950
1951		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
1952		rsp->schq[NIX_TXSCH_LVL_TL4] =
1953				rvu_rsrc_free_count(&txsch->schq);
1954
1955		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
1956		rsp->schq[NIX_TXSCH_LVL_TL3] =
1957				rvu_rsrc_free_count(&txsch->schq);
1958
1959		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
1960		rsp->schq[NIX_TXSCH_LVL_TL2] =
1961				rvu_rsrc_free_count(&txsch->schq);
1962
1963		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1964			goto out;
1965
1966		nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
1967		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1968		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
1969				rvu_rsrc_free_count(&txsch->schq);
1970
1971		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
1972		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
1973				rvu_rsrc_free_count(&txsch->schq);
1974
1975		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
1976		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
1977				rvu_rsrc_free_count(&txsch->schq);
1978
1979		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
1980		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
1981				rvu_rsrc_free_count(&txsch->schq);
1982	}
1983
1984	rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
1985out:
1986	rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
1987	mutex_unlock(&rvu->rsrc_lock);
1988
1989	return 0;
1990}
1991
1992int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
1993			    struct msg_rsp *rsp)
1994{
1995	u16 pcifunc = req->hdr.pcifunc;
1996	u16 vf, numvfs;
1997	u64 cfg;
1998
1999	vf = pcifunc & RVU_PFVF_FUNC_MASK;
2000	cfg = rvu_read64(rvu, BLKADDR_RVUM,
2001			 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
2002	numvfs = (cfg >> 12) & 0xFF;
2003
2004	if (vf && vf <= numvfs)
2005		__rvu_flr_handler(rvu, pcifunc);
2006	else
2007		return RVU_INVALID_VF_ID;
2008
2009	return 0;
2010}
2011
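/* Mbox handler reporting a subset of hardware capabilities (fixed TX
 * scheduler mapping, shaping, NPC hash extract) to the requester.
 */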
2012int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
2013				struct get_hw_cap_rsp *rsp)
2014{
2015	struct rvu_hwinfo *hw = rvu->hw;
2016
2017	rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
2018	rsp->nix_shaping = hw->cap.nix_shaping;
2019	rsp->npc_hash_extract = hw->cap.npc_hash_extract;
2020
2021	return 0;
2022}
2023
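/* Mbox handler letting a PF reset or update a VF's permission flags.
 * Revoking the trusted flag disables the VF's default multicast and
 * promisc entries.
 */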
2024int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
2025				 struct msg_rsp *rsp)
2026{
2027	struct rvu_hwinfo *hw = rvu->hw;
2028	u16 pcifunc = req->hdr.pcifunc;
2029	struct rvu_pfvf *pfvf;
2030	int blkaddr, nixlf;
2031	u16 target;
2032
2033	/* Only PF can add VF permissions */
2034	if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
2035		return -EOPNOTSUPP;
2036
2037	target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
2038	pfvf = rvu_get_pfvf(rvu, target);
2039
2040	if (req->flags & RESET_VF_PERM) {
2041		pfvf->flags &= RVU_CLEAR_VF_PERM;
2042	} else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
2043		 (req->flags & VF_TRUSTED)) {
2044		change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
2045		/* disable multicast and promisc entries */
2046		if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
2047			blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
2048			if (blkaddr < 0)
2049				return 0;
2050			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2051					   target, 0);
2052			if (nixlf < 0)
2053				return 0;
2054			npc_enadis_default_mce_entry(rvu, target, nixlf,
2055						     NIXLF_ALLMULTI_ENTRY,
2056						     false);
2057			npc_enadis_default_mce_entry(rvu, target, nixlf,
2058						     NIXLF_PROMISC_ENTRY,
2059						     false);
2060		}
2061	}
2062
2063	return 0;
2064}
2065
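/* Validate the request signature and dispatch it to the corresponding
 * rvu_mbox_handler_*() callback, allocating and pre-filling the response
 * header first.
 */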
2066static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
2067				struct mbox_msghdr *req)
2068{
2069	struct rvu *rvu = pci_get_drvdata(mbox->pdev);
2070
	/* Check if the request is valid, if not reply with an invalid msg */
2072	if (req->sig != OTX2_MBOX_REQ_SIG)
2073		goto bad_message;
2074
2075	switch (req->id) {
2076#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
2077	case _id: {							\
2078		struct _rsp_type *rsp;					\
2079		int err;						\
2080									\
2081		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
2082			mbox, devid,					\
2083			sizeof(struct _rsp_type));			\
		/* some handlers should complete even if the reply */	\
		/* message could not be allocated */			\
2086		if (!rsp &&						\
2087		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
2088		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
2089		    _id != MBOX_MSG_VF_FLR)				\
2090			return -ENOMEM;					\
2091		if (rsp) {						\
2092			rsp->hdr.id = _id;				\
2093			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
2094			rsp->hdr.pcifunc = req->pcifunc;		\
2095			rsp->hdr.rc = 0;				\
2096		}							\
2097									\
2098		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
2099						    (struct _req_type *)req, \
2100						    rsp);		\
2101		if (rsp && err)						\
2102			rsp->hdr.rc = err;				\
2103									\
2104		trace_otx2_msg_process(mbox->pdev, _id, err);		\
2105		return rsp ? err : -ENOMEM;				\
2106	}
2107MBOX_MESSAGES
2108#undef M
2109
2110bad_message:
2111	default:
2112		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
2113		return -ENODEV;
2114	}
2115}
2116
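/* Common mailbox bottom half for the AF <-> PF and AF <-> VF directions:
 * stamps each message's pcifunc with the sender derived from the mbox IRQ,
 * processes all pending messages of one device and sends the responses back.
 */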
2117static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
2118{
2119	struct rvu *rvu = mwork->rvu;
2120	int offset, err, id, devid;
2121	struct otx2_mbox_dev *mdev;
2122	struct mbox_hdr *req_hdr;
2123	struct mbox_msghdr *msg;
2124	struct mbox_wq_info *mw;
2125	struct otx2_mbox *mbox;
2126
2127	switch (type) {
2128	case TYPE_AFPF:
2129		mw = &rvu->afpf_wq_info;
2130		break;
2131	case TYPE_AFVF:
2132		mw = &rvu->afvf_wq_info;
2133		break;
2134	default:
2135		return;
2136	}
2137
2138	devid = mwork - mw->mbox_wrk;
2139	mbox = &mw->mbox;
2140	mdev = &mbox->dev[devid];
2141
2142	/* Process received mbox messages */
2143	req_hdr = mdev->mbase + mbox->rx_start;
2144	if (mw->mbox_wrk[devid].num_msgs == 0)
2145		return;
2146
2147	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
2148
2149	for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
2150		msg = mdev->mbase + offset;
2151
2152		/* Set which PF/VF sent this message based on mbox IRQ */
2153		switch (type) {
2154		case TYPE_AFPF:
2155			msg->pcifunc &=
2156				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
2157			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
2158			break;
2159		case TYPE_AFVF:
2160			msg->pcifunc &=
2161				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
2162			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
2163			break;
2164		}
2165
2166		err = rvu_process_mbox_msg(mbox, devid, msg);
2167		if (!err) {
2168			offset = mbox->rx_start + msg->next_msgoff;
2169			continue;
2170		}
2171
2172		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
2173			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
2174				 err, otx2_mbox_id2name(msg->id),
2175				 msg->id, rvu_get_pf(msg->pcifunc),
2176				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2177		else
2178			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
2179				 err, otx2_mbox_id2name(msg->id),
2180				 msg->id, devid);
2181	}
2182	mw->mbox_wrk[devid].num_msgs = 0;
2183
2184	if (poll)
2185		otx2_mbox_wait_for_zero(mbox, devid);
2186
2187	/* Send mbox responses to VF/PF */
2188	otx2_mbox_msg_send(mbox, devid);
2189}
2190
2191static inline void rvu_afpf_mbox_handler(struct work_struct *work)
2192{
2193	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2194	struct rvu *rvu = mwork->rvu;
2195
2196	mutex_lock(&rvu->mbox_lock);
2197	__rvu_mbox_handler(mwork, TYPE_AFPF, true);
2198	mutex_unlock(&rvu->mbox_lock);
2199}
2200
2201static inline void rvu_afvf_mbox_handler(struct work_struct *work)
2202{
2203	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2204
2205	__rvu_mbox_handler(mwork, TYPE_AFVF, false);
2206}
2207
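/* Common bottom half for 'up' mailbox responses: validates the signature of
 * each response received from a PF/VF and logs any error code before
 * acknowledging it.
 */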
2208static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
2209{
2210	struct rvu *rvu = mwork->rvu;
2211	struct otx2_mbox_dev *mdev;
2212	struct mbox_hdr *rsp_hdr;
2213	struct mbox_msghdr *msg;
2214	struct mbox_wq_info *mw;
2215	struct otx2_mbox *mbox;
2216	int offset, id, devid;
2217
2218	switch (type) {
2219	case TYPE_AFPF:
2220		mw = &rvu->afpf_wq_info;
2221		break;
2222	case TYPE_AFVF:
2223		mw = &rvu->afvf_wq_info;
2224		break;
2225	default:
2226		return;
2227	}
2228
2229	devid = mwork - mw->mbox_wrk_up;
2230	mbox = &mw->mbox_up;
2231	mdev = &mbox->dev[devid];
2232
2233	rsp_hdr = mdev->mbase + mbox->rx_start;
2234	if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
2235		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
2236		return;
2237	}
2238
2239	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
2240
2241	for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
2242		msg = mdev->mbase + offset;
2243
2244		if (msg->id >= MBOX_MSG_MAX) {
2245			dev_err(rvu->dev,
2246				"Mbox msg with unknown ID 0x%x\n", msg->id);
2247			goto end;
2248		}
2249
2250		if (msg->sig != OTX2_MBOX_RSP_SIG) {
2251			dev_err(rvu->dev,
2252				"Mbox msg with wrong signature %x, ID 0x%x\n",
2253				msg->sig, msg->id);
2254			goto end;
2255		}
2256
2257		switch (msg->id) {
2258		case MBOX_MSG_CGX_LINK_EVENT:
2259			break;
2260		default:
2261			if (msg->rc)
2262				dev_err(rvu->dev,
2263					"Mbox msg response has err %d, ID 0x%x\n",
2264					msg->rc, msg->id);
2265			break;
2266		}
2267end:
2268		offset = mbox->rx_start + msg->next_msgoff;
2269		mdev->msgs_acked++;
2270	}
2271	mw->mbox_wrk_up[devid].up_num_msgs = 0;
2272
2273	otx2_mbox_reset(mbox, devid);
2274}
2275
2276static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
2277{
2278	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2279
2280	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
2281}
2282
2283static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
2284{
2285	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2286
2287	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
2288}
2289
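/* Map the mailbox memory region of every enabled PF (or AF VF) for the
 * requested mailbox type; on failure unmap everything mapped so far.
 */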
2290static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
2291				int num, int type, unsigned long *pf_bmap)
2292{
2293	struct rvu_hwinfo *hw = rvu->hw;
2294	int region;
2295	u64 bar4;
2296
	/* For the cn10k platform, the VF mailbox regions of a PF follow
	 * the PF <-> AF mailbox region, whereas for OcteonTx2 the base
	 * address is read from the RVU_PF_VF_BAR4_ADDR register.
	 */
2301	if (type == TYPE_AFVF) {
2302		for (region = 0; region < num; region++) {
2303			if (!test_bit(region, pf_bmap))
2304				continue;
2305
2306			if (hw->cap.per_pf_mbox_regs) {
2307				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2308						  RVU_AF_PFX_BAR4_ADDR(0)) +
2309						  MBOX_SIZE;
2310				bar4 += region * MBOX_SIZE;
2311			} else {
2312				bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
2313				bar4 += region * MBOX_SIZE;
2314			}
2315			mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2316			if (!mbox_addr[region])
2317				goto error;
2318		}
2319		return 0;
2320	}
2321
	/* For the cn10k platform, the AF <-> PF mailbox region of a PF is
	 * read from per-PF registers, whereas for OcteonTx2 it is derived
	 * from the common RVU_AF_PF_BAR4_ADDR register.
	 */
2326	for (region = 0; region < num; region++) {
2327		if (!test_bit(region, pf_bmap))
2328			continue;
2329
2330		if (hw->cap.per_pf_mbox_regs) {
2331			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2332					  RVU_AF_PFX_BAR4_ADDR(region));
2333		} else {
2334			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2335					  RVU_AF_PF_BAR4_ADDR);
2336			bar4 += region * MBOX_SIZE;
2337		}
2338		mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2339		if (!mbox_addr[region])
2340			goto error;
2341	}
2342	return 0;
2343
2344error:
2345	while (region--)
2346		iounmap((void __iomem *)mbox_addr[region]);
2347	return -ENOMEM;
2348}
2349
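/* Set up an AF <-> PF or AF <-> VF mailbox: map the per-device mailbox
 * regions, create the workqueue and initialize the per-device work structs
 * for the regular and 'up' directions.
 */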
2350static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
2351			 int type, int num,
2352			 void (mbox_handler)(struct work_struct *),
2353			 void (mbox_up_handler)(struct work_struct *))
2354{
2355	int err = -EINVAL, i, dir, dir_up;
2356	void __iomem *reg_base;
2357	struct rvu_work *mwork;
2358	unsigned long *pf_bmap;
2359	void **mbox_regions;
2360	const char *name;
2361	u64 cfg;
2362
2363	pf_bmap = bitmap_zalloc(num, GFP_KERNEL);
2364	if (!pf_bmap)
2365		return -ENOMEM;
2366
2367	/* RVU VFs */
2368	if (type == TYPE_AFVF)
2369		bitmap_set(pf_bmap, 0, num);
2370
2371	if (type == TYPE_AFPF) {
2372		/* Mark enabled PFs in bitmap */
2373		for (i = 0; i < num; i++) {
2374			cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
2375			if (cfg & BIT_ULL(20))
2376				set_bit(i, pf_bmap);
2377		}
2378	}
2379
2380	mutex_init(&rvu->mbox_lock);
2381
2382	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
2383	if (!mbox_regions) {
2384		err = -ENOMEM;
2385		goto free_bitmap;
2386	}
2387
2388	switch (type) {
2389	case TYPE_AFPF:
2390		name = "rvu_afpf_mailbox";
2391		dir = MBOX_DIR_AFPF;
2392		dir_up = MBOX_DIR_AFPF_UP;
2393		reg_base = rvu->afreg_base;
2394		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap);
2395		if (err)
2396			goto free_regions;
2397		break;
2398	case TYPE_AFVF:
2399		name = "rvu_afvf_mailbox";
2400		dir = MBOX_DIR_PFVF;
2401		dir_up = MBOX_DIR_PFVF_UP;
2402		reg_base = rvu->pfreg_base;
2403		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap);
2404		if (err)
2405			goto free_regions;
2406		break;
2407	default:
2408		goto free_regions;
2409	}
2410
2411	mw->mbox_wq = alloc_workqueue(name,
2412				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2413				      num);
2414	if (!mw->mbox_wq) {
2415		err = -ENOMEM;
2416		goto unmap_regions;
2417	}
2418
2419	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
2420				    sizeof(struct rvu_work), GFP_KERNEL);
2421	if (!mw->mbox_wrk) {
2422		err = -ENOMEM;
2423		goto exit;
2424	}
2425
2426	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
2427				       sizeof(struct rvu_work), GFP_KERNEL);
2428	if (!mw->mbox_wrk_up) {
2429		err = -ENOMEM;
2430		goto exit;
2431	}
2432
2433	err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
2434				     reg_base, dir, num, pf_bmap);
2435	if (err)
2436		goto exit;
2437
2438	err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
2439				     reg_base, dir_up, num, pf_bmap);
2440	if (err)
2441		goto exit;
2442
2443	for (i = 0; i < num; i++) {
2444		if (!test_bit(i, pf_bmap))
2445			continue;
2446
2447		mwork = &mw->mbox_wrk[i];
2448		mwork->rvu = rvu;
2449		INIT_WORK(&mwork->work, mbox_handler);
2450
2451		mwork = &mw->mbox_wrk_up[i];
2452		mwork->rvu = rvu;
2453		INIT_WORK(&mwork->work, mbox_up_handler);
2454	}
2455	goto free_regions;
2456
2457exit:
2458	destroy_workqueue(mw->mbox_wq);
2459unmap_regions:
2460	while (num--)
2461		iounmap((void __iomem *)mbox_regions[num]);
2462free_regions:
2463	kfree(mbox_regions);
2464free_bitmap:
2465	bitmap_free(pf_bmap);
2466	return err;
2467}
2468
2469static void rvu_mbox_destroy(struct mbox_wq_info *mw)
2470{
2471	struct otx2_mbox *mbox = &mw->mbox;
2472	struct otx2_mbox_dev *mdev;
2473	int devid;
2474
2475	if (mw->mbox_wq) {
2476		destroy_workqueue(mw->mbox_wq);
2477		mw->mbox_wq = NULL;
2478	}
2479
2480	for (devid = 0; devid < mbox->ndevs; devid++) {
2481		mdev = &mbox->dev[devid];
2482		if (mdev->hwbase)
2483			iounmap((void __iomem *)mdev->hwbase);
2484	}
2485
2486	otx2_mbox_destroy(&mw->mbox);
2487	otx2_mbox_destroy(&mw->mbox_up);
2488}
2489
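/* Queue mailbox (and mailbox 'up') work for every device whose interrupt
 * bit is set, latching the message count from the shared header before
 * clearing it.
 */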
2490static void rvu_queue_work(struct mbox_wq_info *mw, int first,
2491			   int mdevs, u64 intr)
2492{
2493	struct otx2_mbox_dev *mdev;
2494	struct otx2_mbox *mbox;
2495	struct mbox_hdr *hdr;
2496	int i;
2497
2498	for (i = first; i < mdevs; i++) {
		/* Interrupt bit position starts from 0 for the first device */
2500		if (!(intr & BIT_ULL(i - first)))
2501			continue;
2502
2503		mbox = &mw->mbox;
2504		mdev = &mbox->dev[i];
2505		hdr = mdev->mbase + mbox->rx_start;
2506
		/* The hdr->num_msgs is set to zero immediately in the
		 * interrupt handler to ensure that it holds a correct value
		 * the next time the interrupt handler is called.
		 * mbox_wrk[i].num_msgs holds the count for use in the mbox
		 * handler and mbox_wrk_up[i].up_num_msgs holds the count
		 * for use in the mbox up handler.
		 */
2514
2515		if (hdr->num_msgs) {
2516			mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
2517			hdr->num_msgs = 0;
2518			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
2519		}
2520		mbox = &mw->mbox_up;
2521		mdev = &mbox->dev[i];
2522		hdr = mdev->mbase + mbox->rx_start;
2523		if (hdr->num_msgs) {
2524			mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
2525			hdr->num_msgs = 0;
2526			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
2527		}
2528	}
2529}
2530
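/* Handles mailbox interrupts raised by PFs towards the AF */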
2531static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq)
2532{
2533	struct rvu *rvu = (struct rvu *)rvu_irq;
2534	u64 intr;
2535
2536	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
2537	/* Clear interrupts */
2538	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
2539	if (intr)
2540		trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
2541
2542	/* Sync with mbox memory region */
2543	rmb();
2544
2545	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
2546
2547	return IRQ_HANDLED;
2548}
2549
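/* Handles mailbox interrupts raised by the AF's own VFs */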
2550static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
2551{
2552	struct rvu *rvu = (struct rvu *)rvu_irq;
2553	int vfs = rvu->vfs;
2554	u64 intr;
2555
2556	/* Sync with mbox memory region */
2557	rmb();
2558
2559	/* Handle VF interrupts */
2560	if (vfs > 64) {
2561		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
2562		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
2563
2564		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
2565		vfs -= 64;
2566	}
2567
2568	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
2569	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
2570	if (intr)
2571		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
2572
2573	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
2574
2575	return IRQ_HANDLED;
2576}
2577
2578static void rvu_enable_mbox_intr(struct rvu *rvu)
2579{
2580	struct rvu_hwinfo *hw = rvu->hw;
2581
2582	/* Clear spurious irqs, if any */
2583	rvu_write64(rvu, BLKADDR_RVUM,
2584		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
2585
	/* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
2587	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
2588		    INTR_MASK(hw->total_pfs) & ~1ULL);
2589}
2590
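/* Tear down and reset all LFs of the given block that are attached to
 * 'pcifunc'.
 */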
2591static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
2592{
2593	struct rvu_block *block;
2594	int slot, lf, num_lfs;
2595	int err;
2596
2597	block = &rvu->hw->block[blkaddr];
2598	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
2599					block->addr);
2600	if (!num_lfs)
2601		return;
2602	for (slot = 0; slot < num_lfs; slot++) {
2603		lf = rvu_get_lf(rvu, block, pcifunc, slot);
2604		if (lf < 0)
2605			continue;
2606
2607		/* Cleanup LF and reset it */
2608		if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
2609			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
2610		else if (block->addr == BLKADDR_NPA)
2611			rvu_npa_lf_teardown(rvu, pcifunc, lf);
2612		else if ((block->addr == BLKADDR_CPT0) ||
2613			 (block->addr == BLKADDR_CPT1))
2614			rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
2615					    slot);
2616
2617		err = rvu_lf_reset(rvu, block, lf);
2618		if (err) {
2619			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
2620				block->addr, lf);
2621		}
2622	}
2623}
2624
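/* Perform FLR cleanup for 'pcifunc': tear down its LFs in inter-block
 * dependency order, detach its resources and free its MCAM entries.
 */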
2625static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
2626{
2627	if (rvu_npc_exact_has_match_table(rvu))
2628		rvu_npc_exact_reset(rvu, pcifunc);
2629
2630	mutex_lock(&rvu->flr_lock);
2631	/* Reset order should reflect inter-block dependencies:
2632	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
2633	 * 2. Flush and reset SSO/SSOW
2634	 * 3. Cleanup pools (NPA)
2635	 */
2636	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
2637	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
2638	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
2639	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
2640	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
2641	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
2642	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
2643	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
2644	rvu_reset_lmt_map_tbl(rvu, pcifunc);
2645	rvu_detach_rsrcs(rvu, NULL, pcifunc);
	/* In scenarios where PF/VF drivers detach the NIXLF without freeing
	 * MCAM entries, check and free the MCAM entries explicitly to avoid
	 * a leak. Since the LF is already detached, pass -1 as the LF number.
	 */
2650	rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
2651	rvu_mac_reset(rvu, pcifunc);
2652
2653	if (rvu->mcs_blk_cnt)
2654		rvu_mcs_flr_handler(rvu, pcifunc);
2655
2656	mutex_unlock(&rvu->flr_lock);
2657}
2658
2659static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
2660{
2661	int reg = 0;
2662
2663	/* pcifunc = 0(PF0) | (vf + 1) */
2664	__rvu_flr_handler(rvu, vf + 1);
2665
2666	if (vf >= 64) {
2667		reg = 1;
2668		vf = vf - 64;
2669	}
2670
2671	/* Signal FLR finish and enable IRQ */
2672	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
2673	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
2674}
2675
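/* FLR work handler: cleans up a PF (and all of its VFs) or one of the AF's
 * own VFs, then signals FLR completion and re-enables the FLR interrupt.
 */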
2676static void rvu_flr_handler(struct work_struct *work)
2677{
2678	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
2679	struct rvu *rvu = flrwork->rvu;
2680	u16 pcifunc, numvfs, vf;
2681	u64 cfg;
2682	int pf;
2683
2684	pf = flrwork - rvu->flr_wrk;
2685	if (pf >= rvu->hw->total_pfs) {
2686		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
2687		return;
2688	}
2689
2690	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2691	numvfs = (cfg >> 12) & 0xFF;
2692	pcifunc  = pf << RVU_PFVF_PF_SHIFT;
2693
2694	for (vf = 0; vf < numvfs; vf++)
2695		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
2696
2697	__rvu_flr_handler(rvu, pcifunc);
2698
2699	/* Signal FLR finish */
2700	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
2701
2702	/* Enable interrupt */
2703	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,  BIT_ULL(pf));
2704}
2705
2706static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
2707{
2708	int dev, vf, reg = 0;
2709	u64 intr;
2710
2711	if (start_vf >= 64)
2712		reg = 1;
2713
2714	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
2715	if (!intr)
2716		return;
2717
2718	for (vf = 0; vf < numvfs; vf++) {
2719		if (!(intr & BIT_ULL(vf)))
2720			continue;
2721		/* Clear and disable the interrupt */
2722		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
2723		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
2724
2725		dev = vf + start_vf + rvu->hw->total_pfs;
2726		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
2727	}
2728}
2729
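/* Handles FLR interrupts from PFs and from the AF's own VFs */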
2730static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
2731{
2732	struct rvu *rvu = (struct rvu *)rvu_irq;
2733	u64 intr;
2734	u8  pf;
2735
2736	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
2737	if (!intr)
2738		goto afvf_flr;
2739
2740	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2741		if (intr & (1ULL << pf)) {
2742			/* clear interrupt */
2743			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
2744				    BIT_ULL(pf));
2745			/* Disable the interrupt */
2746			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2747				    BIT_ULL(pf));
			/* PF is already dead, do only AF related operations */
2749			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
2750		}
2751	}
2752
2753afvf_flr:
2754	rvu_afvf_queue_flr_work(rvu, 0, 64);
2755	if (rvu->vfs > 64)
2756		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
2757
2758	return IRQ_HANDLED;
2759}
2760
2761static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
2762{
2763	int vf;
2764
2765	/* Nothing to be done here other than clearing the
2766	 * TRPEND bit.
2767	 */
2768	for (vf = 0; vf < 64; vf++) {
2769		if (intr & (1ULL << vf)) {
			/* clear the TRPEND bit set due to ME (master enable) */
2771			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
2772			/* clear interrupt */
2773			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
2774		}
2775	}
2776}
2777
2778/* Handles ME interrupts from VFs of AF */
2779static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
2780{
2781	struct rvu *rvu = (struct rvu *)rvu_irq;
2782	int vfset;
2783	u64 intr;
2784
2785	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2786
2787	for (vfset = 0; vfset <= 1; vfset++) {
2788		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
2789		if (intr)
2790			rvu_me_handle_vfset(rvu, vfset, intr);
2791	}
2792
2793	return IRQ_HANDLED;
2794}
2795
2796/* Handles ME interrupts from PFs */
2797static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
2798{
2799	struct rvu *rvu = (struct rvu *)rvu_irq;
2800	u64 intr;
2801	u8  pf;
2802
2803	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2804
2805	/* Nothing to be done here other than clearing the
2806	 * TRPEND bit.
2807	 */
2808	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2809		if (intr & (1ULL << pf)) {
			/* clear the TRPEND bit set due to ME (master enable) */
2811			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
2812				    BIT_ULL(pf));
2813			/* clear interrupt */
2814			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
2815				    BIT_ULL(pf));
2816		}
2817	}
2818
2819	return IRQ_HANDLED;
2820}
2821
2822static void rvu_unregister_interrupts(struct rvu *rvu)
2823{
2824	int irq;
2825
2826	rvu_cpt_unregister_interrupts(rvu);
2827
2828	/* Disable the Mbox interrupt */
2829	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
2830		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2831
2832	/* Disable the PF FLR interrupt */
2833	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2834		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2835
2836	/* Disable the PF ME interrupt */
2837	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
2838		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2839
2840	for (irq = 0; irq < rvu->num_vec; irq++) {
2841		if (rvu->irq_allocated[irq]) {
2842			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
2843			rvu->irq_allocated[irq] = false;
2844		}
2845	}
2846
2847	pci_free_irq_vectors(rvu->pdev);
2848	rvu->num_vec = 0;
2849}
2850
2851static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
2852{
	struct rvu_pfvf *pfvf = &rvu->pf[0];
	int offset;

	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2858
	/* Make sure there are enough MSIX vectors configured so that
	 * VF interrupts can be handled. An offset of zero means that the
	 * PF vectors are not configured and would overlap the AF vectors.
	 */
2863	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
2864	       offset;
2865}
2866
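/* Allocate MSI-X vectors and register the AF mailbox, FLR and ME interrupt
 * handlers; the AF-VF mailbox/FLR/ME handlers are registered as well when
 * enough vectors are available.
 */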
2867static int rvu_register_interrupts(struct rvu *rvu)
2868{
2869	int ret, offset, pf_vec_start;
2870
2871	rvu->num_vec = pci_msix_vec_count(rvu->pdev);
2872
2873	rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
2874					   NAME_SIZE, GFP_KERNEL);
2875	if (!rvu->irq_name)
2876		return -ENOMEM;
2877
2878	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
2879					  sizeof(bool), GFP_KERNEL);
2880	if (!rvu->irq_allocated)
2881		return -ENOMEM;
2882
2883	/* Enable MSI-X */
2884	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
2885				    rvu->num_vec, PCI_IRQ_MSIX);
2886	if (ret < 0) {
2887		dev_err(rvu->dev,
2888			"RVUAF: Request for %d msix vectors failed, ret %d\n",
2889			rvu->num_vec, ret);
2890		return ret;
2891	}
2892
2893	/* Register mailbox interrupt handler */
2894	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
2895	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
2896			  rvu_mbox_pf_intr_handler, 0,
2897			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
2898	if (ret) {
2899		dev_err(rvu->dev,
2900			"RVUAF: IRQ registration failed for mbox irq\n");
2901		goto fail;
2902	}
2903
2904	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
2905
2906	/* Enable mailbox interrupts from all PFs */
2907	rvu_enable_mbox_intr(rvu);
2908
2909	/* Register FLR interrupt handler */
2910	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2911		"RVUAF FLR");
2912	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
2913			  rvu_flr_intr_handler, 0,
2914			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2915			  rvu);
2916	if (ret) {
2917		dev_err(rvu->dev,
2918			"RVUAF: IRQ registration failed for FLR\n");
2919		goto fail;
2920	}
2921	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
2922
2923	/* Enable FLR interrupt for all PFs*/
2924	rvu_write64(rvu, BLKADDR_RVUM,
2925		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
2926
2927	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
2928		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2929
2930	/* Register ME interrupt handler */
2931	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2932		"RVUAF ME");
2933	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
2934			  rvu_me_pf_intr_handler, 0,
2935			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2936			  rvu);
2937	if (ret) {
2938		dev_err(rvu->dev,
2939			"RVUAF: IRQ registration failed for ME\n");
2940	}
2941	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
2942
2943	/* Clear TRPEND bit for all PF */
2944	rvu_write64(rvu, BLKADDR_RVUM,
2945		    RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
2946	/* Enable ME interrupt for all PFs*/
2947	rvu_write64(rvu, BLKADDR_RVUM,
2948		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
2949
2950	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
2951		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2952
2953	if (!rvu_afvf_msix_vectors_num_ok(rvu))
2954		return 0;
2955
2956	/* Get PF MSIX vectors offset. */
2957	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
2958				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2959
2960	/* Register MBOX0 interrupt. */
2961	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
2962	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
2963	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2964			  rvu_mbox_intr_handler, 0,
2965			  &rvu->irq_name[offset * NAME_SIZE],
2966			  rvu);
2967	if (ret)
2968		dev_err(rvu->dev,
2969			"RVUAF: IRQ registration failed for Mbox0\n");
2970
2971	rvu->irq_allocated[offset] = true;
2972
2973	/* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
2974	 * simply increment current offset by 1.
2975	 */
2976	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
2977	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
2978	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2979			  rvu_mbox_intr_handler, 0,
2980			  &rvu->irq_name[offset * NAME_SIZE],
2981			  rvu);
2982	if (ret)
2983		dev_err(rvu->dev,
2984			"RVUAF: IRQ registration failed for Mbox1\n");
2985
2986	rvu->irq_allocated[offset] = true;
2987
2988	/* Register FLR interrupt handler for AF's VFs */
2989	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
2990	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
2991	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2992			  rvu_flr_intr_handler, 0,
2993			  &rvu->irq_name[offset * NAME_SIZE], rvu);
2994	if (ret) {
2995		dev_err(rvu->dev,
2996			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
2997		goto fail;
2998	}
2999	rvu->irq_allocated[offset] = true;
3000
3001	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
3002	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
3003	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
3004			  rvu_flr_intr_handler, 0,
3005			  &rvu->irq_name[offset * NAME_SIZE], rvu);
3006	if (ret) {
3007		dev_err(rvu->dev,
3008			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
3009		goto fail;
3010	}
3011	rvu->irq_allocated[offset] = true;
3012
3013	/* Register ME interrupt handler for AF's VFs */
3014	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
3015	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
3016	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
3017			  rvu_me_vf_intr_handler, 0,
3018			  &rvu->irq_name[offset * NAME_SIZE], rvu);
3019	if (ret) {
3020		dev_err(rvu->dev,
3021			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
3022		goto fail;
3023	}
3024	rvu->irq_allocated[offset] = true;
3025
3026	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
3027	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
3028	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
3029			  rvu_me_vf_intr_handler, 0,
3030			  &rvu->irq_name[offset * NAME_SIZE], rvu);
3031	if (ret) {
3032		dev_err(rvu->dev,
3033			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
3034		goto fail;
3035	}
3036	rvu->irq_allocated[offset] = true;
3037
3038	ret = rvu_cpt_register_interrupts(rvu);
3039	if (ret)
3040		goto fail;
3041
3042	return 0;
3043
3044fail:
3045	rvu_unregister_interrupts(rvu);
3046	return ret;
3047}
3048
3049static void rvu_flr_wq_destroy(struct rvu *rvu)
3050{
3051	if (rvu->flr_wq) {
3052		destroy_workqueue(rvu->flr_wq);
3053		rvu->flr_wq = NULL;
3054	}
3055}
3056
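/* Enable FLR on all PFs and allocate the workqueue and per-device work
 * structs used to process PF and VF FLRs.
 */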
3057static int rvu_flr_init(struct rvu *rvu)
3058{
3059	int dev, num_devs;
3060	u64 cfg;
3061	int pf;
3062
3063	/* Enable FLR for all PFs*/
3064	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
3065		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3066		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
3067			    cfg | BIT_ULL(22));
3068	}
3069
3070	rvu->flr_wq = alloc_ordered_workqueue("rvu_afpf_flr",
3071					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
3072	if (!rvu->flr_wq)
3073		return -ENOMEM;
3074
3075	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
3076	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
3077				    sizeof(struct rvu_work), GFP_KERNEL);
3078	if (!rvu->flr_wrk) {
3079		destroy_workqueue(rvu->flr_wq);
3080		return -ENOMEM;
3081	}
3082
3083	for (dev = 0; dev < num_devs; dev++) {
3084		rvu->flr_wrk[dev].rvu = rvu;
3085		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
3086	}
3087
3088	mutex_init(&rvu->flr_lock);
3089
3090	return 0;
3091}
3092
3093static void rvu_disable_afvf_intr(struct rvu *rvu)
3094{
3095	int vfs = rvu->vfs;
3096
3097	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
3098	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
3099	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
3100	if (vfs <= 64)
3101		return;
3102
3103	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
3104		      INTR_MASK(vfs - 64));
3105	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
3106	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
3107}
3108
3109static void rvu_enable_afvf_intr(struct rvu *rvu)
3110{
3111	int vfs = rvu->vfs;
3112
3113	/* Clear any pending interrupts and enable AF VF interrupts for
3114	 * the first 64 VFs.
3115	 */
3116	/* Mbox */
3117	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
3118	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
3119
3120	/* FLR */
3121	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
3122	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
3123	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
3124
3125	/* Same for remaining VFs, if any. */
3126	if (vfs <= 64)
3127		return;
3128
3129	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
3130	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
3131		      INTR_MASK(vfs - 64));
3132
3133	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
3134	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
3135	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
3136}
3137
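/* Return the number of LBK channels read from the LBK(0)_CONST register,
 * or a negative error if the LBK device cannot be accessed.
 */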
3138int rvu_get_num_lbk_chans(void)
3139{
3140	struct pci_dev *pdev;
3141	void __iomem *base;
3142	int ret = -EIO;
3143
3144	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
3145			      NULL);
3146	if (!pdev)
3147		goto err;
3148
3149	base = pci_ioremap_bar(pdev, 0);
3150	if (!base)
3151		goto err_put;
3152
3153	/* Read number of available LBK channels from LBK(0)_CONST register. */
3154	ret = (readq(base + 0x10) >> 32) & 0xffff;
3155	iounmap(base);
3156err_put:
3157	pci_dev_put(pdev);
3158err:
3159	return ret;
3160}
3161
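/* Enable the AF's own VFs: limit the VF count to the available LBK
 * channels, set up the AF <-> VF mailbox and interrupts, then enable
 * SR-IOV on the AF device.
 */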
3162static int rvu_enable_sriov(struct rvu *rvu)
3163{
3164	struct pci_dev *pdev = rvu->pdev;
3165	int err, chans, vfs;
3166
3167	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
3168		dev_warn(&pdev->dev,
3169			 "Skipping SRIOV enablement since not enough IRQs are available\n");
3170		return 0;
3171	}
3172
3173	chans = rvu_get_num_lbk_chans();
3174	if (chans < 0)
3175		return chans;
3176
3177	vfs = pci_sriov_get_totalvfs(pdev);
3178
3179	/* Limit VFs in case we have more VFs than LBK channels available. */
3180	if (vfs > chans)
3181		vfs = chans;
3182
3183	if (!vfs)
3184		return 0;
3185
	/* LBK channel number 63 is used for switching packets between
	 * CGX mapped VFs. Hence limit the number of LBK pairs to 62.
	 */
3189	if (vfs > 62)
3190		vfs = 62;
3191
	/* Save the number of VFs for reference in the VF interrupt handlers.
	 * Since interrupts might start arriving during SR-IOV enablement,
	 * the ordinary API cannot be used to get the number of enabled VFs.
	 */
3196	rvu->vfs = vfs;
3197
3198	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
3199			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
3200	if (err)
3201		return err;
3202
3203	rvu_enable_afvf_intr(rvu);
3204	/* Make sure IRQs are enabled before SRIOV. */
3205	mb();
3206
3207	err = pci_enable_sriov(pdev, vfs);
3208	if (err) {
3209		rvu_disable_afvf_intr(rvu);
3210		rvu_mbox_destroy(&rvu->afvf_wq_info);
3211		return err;
3212	}
3213
3214	return 0;
3215}
3216
3217static void rvu_disable_sriov(struct rvu *rvu)
3218{
3219	rvu_disable_afvf_intr(rvu);
3220	rvu_mbox_destroy(&rvu->afvf_wq_info);
3221	pci_disable_sriov(rvu->pdev);
3222}
3223
3224static void rvu_update_module_params(struct rvu *rvu)
3225{
3226	const char *default_pfl_name = "default";
3227
3228	strscpy(rvu->mkex_pfl_name,
3229		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
3230	strscpy(rvu->kpu_pfl_name,
3231		kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
3232}
3233
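/* PCI probe routine: maps the AF and PF CSR regions, discovers and
 * initializes HW blocks and resources, then sets up the AF <-> PF
 * mailboxes, FLR handling, interrupts, devlink, debugfs and the AF's VFs.
 */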
3234static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3235{
3236	struct device *dev = &pdev->dev;
3237	struct rvu *rvu;
3238	int    err;
3239
3240	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
3241	if (!rvu)
3242		return -ENOMEM;
3243
3244	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
3245	if (!rvu->hw) {
3246		devm_kfree(dev, rvu);
3247		return -ENOMEM;
3248	}
3249
3250	pci_set_drvdata(pdev, rvu);
3251	rvu->pdev = pdev;
3252	rvu->dev = &pdev->dev;
3253
3254	err = pci_enable_device(pdev);
3255	if (err) {
3256		dev_err(dev, "Failed to enable PCI device\n");
3257		goto err_freemem;
3258	}
3259
3260	err = pci_request_regions(pdev, DRV_NAME);
3261	if (err) {
3262		dev_err(dev, "PCI request regions failed 0x%x\n", err);
3263		goto err_disable_device;
3264	}
3265
3266	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
3267	if (err) {
3268		dev_err(dev, "DMA mask config failed, abort\n");
3269		goto err_release_regions;
3270	}
3271
3272	pci_set_master(pdev);
3273
3274	rvu->ptp = ptp_get();
3275	if (IS_ERR(rvu->ptp)) {
3276		err = PTR_ERR(rvu->ptp);
3277		if (err)
3278			goto err_release_regions;
3279		rvu->ptp = NULL;
3280	}
3281
3282	/* Map Admin function CSRs */
3283	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
3284	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
3285	if (!rvu->afreg_base || !rvu->pfreg_base) {
3286		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
3287		err = -ENOMEM;
3288		goto err_put_ptp;
3289	}
3290
3291	/* Store module params in rvu structure */
3292	rvu_update_module_params(rvu);
3293
3294	/* Check which blocks the HW supports */
3295	rvu_check_block_implemented(rvu);
3296
3297	rvu_reset_all_blocks(rvu);
3298
3299	rvu_setup_hw_capabilities(rvu);
3300
3301	err = rvu_setup_hw_resources(rvu);
3302	if (err)
3303		goto err_put_ptp;
3304
3305	/* Init mailbox btw AF and PFs */
3306	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
3307			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
3308			    rvu_afpf_mbox_up_handler);
3309	if (err) {
3310		dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
3311		goto err_hwsetup;
3312	}
3313
3314	err = rvu_flr_init(rvu);
3315	if (err) {
3316		dev_err(dev, "%s: Failed to initialize flr\n", __func__);
3317		goto err_mbox;
3318	}
3319
3320	err = rvu_register_interrupts(rvu);
3321	if (err) {
3322		dev_err(dev, "%s: Failed to register interrupts\n", __func__);
3323		goto err_flr;
3324	}
3325
3326	err = rvu_register_dl(rvu);
3327	if (err) {
3328		dev_err(dev, "%s: Failed to register devlink\n", __func__);
3329		goto err_irq;
3330	}
3331
3332	rvu_setup_rvum_blk_revid(rvu);
3333
3334	/* Enable AF's VFs (if any) */
3335	err = rvu_enable_sriov(rvu);
3336	if (err) {
3337		dev_err(dev, "%s: Failed to enable sriov\n", __func__);
3338		goto err_dl;
3339	}
3340
3341	/* Initialize debugfs */
3342	rvu_dbg_init(rvu);
3343
3344	mutex_init(&rvu->rswitch.switch_lock);
3345
3346	if (rvu->fwdata)
3347		ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
3348			  rvu->fwdata->ptp_ext_tstamp);
3349
3350	return 0;
3351err_dl:
3352	rvu_unregister_dl(rvu);
3353err_irq:
3354	rvu_unregister_interrupts(rvu);
3355err_flr:
3356	rvu_flr_wq_destroy(rvu);
3357err_mbox:
3358	rvu_mbox_destroy(&rvu->afpf_wq_info);
3359err_hwsetup:
3360	rvu_cgx_exit(rvu);
3361	rvu_fwdata_exit(rvu);
3362	rvu_mcs_exit(rvu);
3363	rvu_reset_all_blocks(rvu);
3364	rvu_free_hw_resources(rvu);
3365	rvu_clear_rvum_blk_revid(rvu);
3366err_put_ptp:
3367	ptp_put(rvu->ptp);
3368err_release_regions:
3369	pci_release_regions(pdev);
3370err_disable_device:
3371	pci_disable_device(pdev);
3372err_freemem:
3373	pci_set_drvdata(pdev, NULL);
3374	devm_kfree(&pdev->dev, rvu->hw);
3375	devm_kfree(dev, rvu);
3376	return err;
3377}
3378
3379static void rvu_remove(struct pci_dev *pdev)
3380{
3381	struct rvu *rvu = pci_get_drvdata(pdev);
3382
3383	rvu_dbg_exit(rvu);
3384	rvu_unregister_dl(rvu);
3385	rvu_unregister_interrupts(rvu);
3386	rvu_flr_wq_destroy(rvu);
3387	rvu_cgx_exit(rvu);
3388	rvu_fwdata_exit(rvu);
3389	rvu_mcs_exit(rvu);
3390	rvu_mbox_destroy(&rvu->afpf_wq_info);
3391	rvu_disable_sriov(rvu);
3392	rvu_reset_all_blocks(rvu);
3393	rvu_free_hw_resources(rvu);
3394	rvu_clear_rvum_blk_revid(rvu);
3395	ptp_put(rvu->ptp);
3396	pci_release_regions(pdev);
3397	pci_disable_device(pdev);
3398	pci_set_drvdata(pdev, NULL);
3399
3400	devm_kfree(&pdev->dev, rvu->hw);
3401	devm_kfree(&pdev->dev, rvu);
3402}
3403
3404static struct pci_driver rvu_driver = {
3405	.name = DRV_NAME,
3406	.id_table = rvu_id_table,
3407	.probe = rvu_probe,
3408	.remove = rvu_remove,
3409};
3410
3411static int __init rvu_init_module(void)
3412{
3413	int err;
3414
3415	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
3416
3417	err = pci_register_driver(&cgx_driver);
3418	if (err < 0)
3419		return err;
3420
3421	err = pci_register_driver(&ptp_driver);
3422	if (err < 0)
3423		goto ptp_err;
3424
3425	err = pci_register_driver(&mcs_driver);
3426	if (err < 0)
3427		goto mcs_err;
3428
	err = pci_register_driver(&rvu_driver);
3430	if (err < 0)
3431		goto rvu_err;
3432
3433	return 0;
3434rvu_err:
3435	pci_unregister_driver(&mcs_driver);
3436mcs_err:
3437	pci_unregister_driver(&ptp_driver);
3438ptp_err:
3439	pci_unregister_driver(&cgx_driver);
3440
3441	return err;
3442}
3443
3444static void __exit rvu_cleanup_module(void)
3445{
3446	pci_unregister_driver(&rvu_driver);
3447	pci_unregister_driver(&mcs_driver);
3448	pci_unregister_driver(&ptp_driver);
3449	pci_unregister_driver(&cgx_driver);
3450}
3451
3452module_init(rvu_init_module);
3453module_exit(rvu_cleanup_module);
3454