1// SPDX-License-Identifier: GPL-2.0
2#include <linux/delay.h>
3
4#include "nitrox_dev.h"
5#include "nitrox_csr.h"
6
7#define PLL_REF_CLK 50
8#define MAX_CSR_RETRIES 10
9
10/**
11 * emu_enable_cores - Enable EMU cluster cores.
12 * @ndev: NITROX device
13 */
14static void emu_enable_cores(struct nitrox_device *ndev)
15{
16	union emu_se_enable emu_se;
17	union emu_ae_enable emu_ae;
18	int i;
19
20	/* AE cores 20 per cluster */
21	emu_ae.value = 0;
22	emu_ae.s.enable = 0xfffff;
23
24	/* SE cores 16 per cluster */
25	emu_se.value = 0;
26	emu_se.s.enable = 0xffff;
27
28	/* enable per cluster cores */
29	for (i = 0; i < NR_CLUSTERS; i++) {
30		nitrox_write_csr(ndev, EMU_AE_ENABLEX(i), emu_ae.value);
31		nitrox_write_csr(ndev, EMU_SE_ENABLEX(i), emu_se.value);
32	}
33}
34
35/**
36 * nitrox_config_emu_unit - configure EMU unit.
37 * @ndev: NITROX device
38 */
39void nitrox_config_emu_unit(struct nitrox_device *ndev)
40{
41	union emu_wd_int_ena_w1s emu_wd_int;
42	union emu_ge_int_ena_w1s emu_ge_int;
43	u64 offset;
44	int i;
45
46	/* enable cores */
47	emu_enable_cores(ndev);
48
49	/* enable general error and watch dog interrupts */
50	emu_ge_int.value = 0;
51	emu_ge_int.s.se_ge = 0xffff;
52	emu_ge_int.s.ae_ge = 0xfffff;
53	emu_wd_int.value = 0;
54	emu_wd_int.s.se_wd = 1;
55
56	for (i = 0; i < NR_CLUSTERS; i++) {
57		offset = EMU_WD_INT_ENA_W1SX(i);
58		nitrox_write_csr(ndev, offset, emu_wd_int.value);
59		offset = EMU_GE_INT_ENA_W1SX(i);
60		nitrox_write_csr(ndev, offset, emu_ge_int.value);
61	}
62}
63
/**
 * reset_pkt_input_ring - quiesce and reset one packet input ring.
 * @ndev: NITROX device
 * @ring: input ring index
 *
 * Hardware reset sequence: clear the ring enable bit, poll (bounded by
 * MAX_CSR_RETRIES) until the hardware reflects [ENB] == 0, then clear
 * the done counters by writing back the value just read (write-1-to-clear
 * semantics assumed — confirm against the CSR spec). Note the loop gives
 * up silently after the retries are exhausted.
 */
static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
	union nps_pkt_in_instr_ctl pkt_in_ctl;
	union nps_pkt_in_done_cnts pkt_in_cnts;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* step 1: disable the ring, clear enable bit */
	offset = NPS_PKT_IN_INSTR_CTLX(ring);
	pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_in_ctl.s.enb = 0;
	nitrox_write_csr(ndev, offset, pkt_in_ctl.value);

	/* step 2: wait to clear [ENB] */
	usleep_range(100, 150);
	do {
		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
		if (!pkt_in_ctl.s.enb)
			break;
		udelay(50);
	} while (max_retries--);

	/* step 3: clear done counts */
	offset = NPS_PKT_IN_DONE_CNTSX(ring);
	pkt_in_cnts.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, pkt_in_cnts.value);
	usleep_range(50, 100);
}
92
/**
 * enable_pkt_input_ring - enable one packet input ring.
 * @ndev: NITROX device
 * @ring: input ring index
 *
 * Selects 64-byte instruction size, sets the enable bit, then polls
 * (bounded by MAX_CSR_RETRIES) until the hardware reflects [ENB] == 1.
 * Gives up silently if the bit never sets.
 */
void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
	union nps_pkt_in_instr_ctl pkt_in_ctl;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* 64-byte instruction size */
	offset = NPS_PKT_IN_INSTR_CTLX(ring);
	pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_in_ctl.s.is64b = 1;
	pkt_in_ctl.s.enb = 1;
	nitrox_write_csr(ndev, offset, pkt_in_ctl.value);

	/* wait for set [ENB] */
	do {
		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
		if (pkt_in_ctl.s.enb)
			break;
		udelay(50);
	} while (max_retries--);
}
114
/**
 * nitrox_config_pkt_input_rings - configure Packet Input Rings
 * @ndev: NITROX device
 *
 * For every queue: resets the ring (steps 1-3), programs its DMA base
 * address, size and interrupt threshold (step 4), clears the doorbell
 * count (step 5) and finally re-enables the ring.
 */
void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
		union nps_pkt_in_instr_rsize pkt_in_rsize;
		union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
		u64 offset;

		/* steps 1 - 3: quiesce the ring */
		reset_pkt_input_ring(ndev, i);

		/**
		 * step 4:
		 * configure ring base address 16-byte aligned,
		 * size and interrupt threshold.
		 */
		offset = NPS_PKT_IN_INSTR_BADDRX(i);
		nitrox_write_csr(ndev, offset, cmdq->dma);

		/* configure ring size */
		offset = NPS_PKT_IN_INSTR_RSIZEX(i);
		pkt_in_rsize.value = 0;
		pkt_in_rsize.s.rsize = ndev->qlen;
		nitrox_write_csr(ndev, offset, pkt_in_rsize.value);

		/* set high threshold for pkt input ring interrupts */
		offset = NPS_PKT_IN_INT_LEVELSX(i);
		nitrox_write_csr(ndev, offset, 0xffffffff);

		/* step 5: clear off door bell counts */
		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
		pkt_in_dbell.value = 0;
		pkt_in_dbell.s.dbell = 0xffffffff;
		nitrox_write_csr(ndev, offset, pkt_in_dbell.value);

		/* enable the ring */
		enable_pkt_input_ring(ndev, i);
	}
}
159
/**
 * reset_pkt_solicit_port - quiesce and reset one solicit (output) port.
 * @ndev: NITROX device
 * @port: solicit port index
 *
 * Disables the port, polls (bounded by MAX_CSR_RETRIES) until the
 * hardware clears [ENB], then clears the port counters by writing back
 * the value just read (write-1-to-clear semantics assumed — confirm
 * against the CSR spec).
 */
static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	union nps_pkt_slc_cnts pkt_slc_cnts;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* step 1: disable slc port */
	offset = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_slc_ctl.s.enb = 0;
	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);

	/* step 2 */
	usleep_range(100, 150);
	/* wait to clear [ENB] */
	do {
		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
		if (!pkt_slc_ctl.s.enb)
			break;
		udelay(50);
	} while (max_retries--);

	/* step 3: clear slc counters */
	offset = NPS_PKT_SLC_CNTSX(port);
	pkt_slc_cnts.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, pkt_slc_cnts.value);
	usleep_range(50, 100);
}
189
/**
 * enable_pkt_solicit_port - enable one solicit (output) port.
 * @ndev: NITROX device
 * @port: solicit port index
 *
 * Programs the port control register from scratch (enable, trailing
 * zero padding, response header) and polls until [ENB] is reflected by
 * the hardware, bounded by MAX_CSR_RETRIES.
 */
void enable_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	offset = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.value = 0;
	pkt_slc_ctl.s.enb = 1;
	/*
	 * 8 trailing 0x00 bytes will be added
	 * to the end of the outgoing packet.
	 */
	pkt_slc_ctl.s.z = 1;
	/* enable response header */
	pkt_slc_ctl.s.rh = 1;
	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);

	/* wait to set [ENB] */
	do {
		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
		if (pkt_slc_ctl.s.enb)
			break;
		udelay(50);
	} while (max_retries--);
}
216
217static void config_pkt_solicit_port(struct nitrox_device *ndev, int port)
218{
219	union nps_pkt_slc_int_levels pkt_slc_int;
220	u64 offset;
221
222	reset_pkt_solicit_port(ndev, port);
223
224	/* step 4: configure interrupt levels */
225	offset = NPS_PKT_SLC_INT_LEVELSX(port);
226	pkt_slc_int.value = 0;
227	/* time interrupt threshold */
228	pkt_slc_int.s.timet = 0x3fffff;
229	nitrox_write_csr(ndev, offset, pkt_slc_int.value);
230
231	/* enable the solicit port */
232	enable_pkt_solicit_port(ndev, port);
233}
234
235void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev)
236{
237	int i;
238
239	for (i = 0; i < ndev->nr_queues; i++)
240		config_pkt_solicit_port(ndev, i);
241}
242
/**
 * enable_nps_core_interrupts - enable NPS core interrupts
 * @ndev: NITROX device.
 *
 * Unmasks the NPS core error conditions (host/exec write errors and
 * timeouts, malformed DMA) via the write-1-to-set enable register.
 */
static void enable_nps_core_interrupts(struct nitrox_device *ndev)
{
	union nps_core_int_ena_w1s core_int;

	/* NPS core error interrupts */
	core_int.value = 0;
	core_int.s.host_wr_err = 1;
	core_int.s.host_wr_timeout = 1;
	core_int.s.exec_wr_timeout = 1;
	core_int.s.npco_dma_malform = 1;
	core_int.s.host_nps_wr_err = 1;
	nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value);
}
262
263void nitrox_config_nps_core_unit(struct nitrox_device *ndev)
264{
265	union nps_core_gbl_vfcfg core_gbl_vfcfg;
266
267	/* endian control information */
268	nitrox_write_csr(ndev, NPS_CORE_CONTROL, 1ULL);
269
270	/* disable ILK interface */
271	core_gbl_vfcfg.value = 0;
272	core_gbl_vfcfg.s.ilk_disable = 1;
273	core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
274	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
275
276	/* enable nps core interrupts */
277	enable_nps_core_interrupts(ndev);
278}
279
280/**
281 * enable_nps_pkt_interrupts - enable NPS packet interrutps
282 * @ndev: NITROX device.
283 *
284 * This includes NPS packet in and slc interrupts.
285 */
286static void enable_nps_pkt_interrupts(struct nitrox_device *ndev)
287{
288	/* NPS packet in ring interrupts */
289	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL));
290	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL));
291	nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL));
292	/* NPS packet slc port interrupts */
293	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL));
294	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL));
295	nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL));
296}
297
/**
 * nitrox_config_nps_pkt_unit - configure the NPS packet unit.
 * @ndev: NITROX device
 */
void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev)
{
	/* bring up input rings and solicit ports first */
	nitrox_config_pkt_input_rings(ndev);
	nitrox_config_pkt_solicit_ports(ndev);

	/* then unmask their error interrupts */
	enable_nps_pkt_interrupts(ndev);
}
307
/**
 * reset_aqm_ring - quiesce and reset one AQM command queue.
 * @ndev: NITROX device
 * @ring: AQM queue index
 *
 * Disables the queue, polls (bounded by MAX_CSR_RETRIES) until the
 * queue-active status clears, then clears the commands-completed count
 * by writing back the value just read (write-1-to-clear semantics
 * assumed — confirm against the CSR spec).
 */
static void reset_aqm_ring(struct nitrox_device *ndev, int ring)
{
	union aqmq_en aqmq_en_reg;
	union aqmq_activity_stat activity_stat;
	union aqmq_cmp_cnt cmp_cnt;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* step 1: disable the queue */
	offset = AQMQ_ENX(ring);
	aqmq_en_reg.value = 0;
	aqmq_en_reg.queue_enable = 0;
	nitrox_write_csr(ndev, offset, aqmq_en_reg.value);

	/* step 2: wait for AQMQ_ACTIVITY_STATX[QUEUE_ACTIVE] to clear */
	usleep_range(100, 150);
	offset = AQMQ_ACTIVITY_STATX(ring);
	do {
		activity_stat.value = nitrox_read_csr(ndev, offset);
		if (!activity_stat.queue_active)
			break;
		udelay(50);
	} while (max_retries--);

	/* step 3: clear commands completed count */
	offset = AQMQ_CMP_CNTX(ring);
	cmp_cnt.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, cmp_cnt.value);
	usleep_range(50, 100);
}
338
339void enable_aqm_ring(struct nitrox_device *ndev, int ring)
340{
341	union aqmq_en aqmq_en_reg;
342	u64 offset;
343
344	offset = AQMQ_ENX(ring);
345	aqmq_en_reg.value = 0;
346	aqmq_en_reg.queue_enable = 1;
347	nitrox_write_csr(ndev, offset, aqmq_en_reg.value);
348	usleep_range(50, 100);
349}
350
/**
 * nitrox_config_aqm_rings - configure every AQM command queue in use.
 * @ndev: NITROX device
 *
 * For each queue: reset it (steps 1-3), clear the doorbell (step 4),
 * program host-side ring details — next-command pointer, DMA base,
 * size, completion threshold — (step 5), then enable it (step 6).
 */
void nitrox_config_aqm_rings(struct nitrox_device *ndev)
{
	int ring;

	for (ring = 0; ring < ndev->nr_queues; ring++) {
		struct nitrox_cmdq *cmdq = ndev->aqmq[ring];
		union aqmq_drbl drbl;
		union aqmq_qsz qsize;
		union aqmq_cmp_thr cmp_thr;
		u64 offset;

		/* steps 1 - 3 */
		reset_aqm_ring(ndev, ring);

		/* step 4: clear doorbell count of ring */
		offset = AQMQ_DRBLX(ring);
		drbl.value = 0;
		drbl.dbell_count = 0xFFFFFFFF;
		nitrox_write_csr(ndev, offset, drbl.value);

		/* step 5: configure host ring details */

		/* set host address for next command of ring */
		offset = AQMQ_NXT_CMDX(ring);
		nitrox_write_csr(ndev, offset, 0ULL);

		/* set host address of ring base */
		offset = AQMQ_BADRX(ring);
		nitrox_write_csr(ndev, offset, cmdq->dma);

		/* set ring size */
		offset = AQMQ_QSZX(ring);
		qsize.value = 0;
		qsize.host_queue_size = ndev->qlen;
		nitrox_write_csr(ndev, offset, qsize.value);

		/* set command completion threshold: interrupt after
		 * every completed command
		 */
		offset = AQMQ_CMP_THRX(ring);
		cmp_thr.value = 0;
		cmp_thr.commands_completed_threshold = 1;
		nitrox_write_csr(ndev, offset, cmp_thr.value);

		/* step 6: enable the queue */
		enable_aqm_ring(ndev, ring);
	}
}
397
/**
 * enable_aqm_interrupts - unmask all AQM error interrupts.
 * @ndev: NITROX device
 */
static void enable_aqm_interrupts(struct nitrox_device *ndev)
{
	/* set interrupt enable bits (these are write-1-to-set registers,
	 * so writing all-ones enables every source)
	 */
	nitrox_write_csr(ndev, AQM_DBELL_OVF_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_DBELL_OVF_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_DMA_RD_ERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_DMA_RD_ERR_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_EXEC_NA_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_EXEC_NA_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_EXEC_ERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_EXEC_ERR_HI_ENA_W1S, (~0ULL));
}
410
/**
 * nitrox_config_aqm_unit - configure the AQM unit.
 * @ndev: NITROX device
 */
void nitrox_config_aqm_unit(struct nitrox_device *ndev)
{
	/* bring up the aqm command queues first */
	nitrox_config_aqm_rings(ndev);

	/* then unmask the aqm error interrupts */
	enable_aqm_interrupts(ndev);
}
419
/**
 * nitrox_config_pom_unit - configure the POM unit.
 * @ndev: NITROX device
 *
 * Unmasks the illegal-dport interrupt and enables the SE performance
 * counters.
 */
void nitrox_config_pom_unit(struct nitrox_device *ndev)
{
	union pom_int_ena_w1s pom_int;
	int i;

	/* enable pom interrupts */
	pom_int.value = 0;
	pom_int.s.illegal_dport = 1;
	nitrox_write_csr(ndev, POM_INT_ENA_W1S, pom_int.value);

	/* enable perf counters */
	/* NOTE(review): each iteration writes POM_PERF_CTL with a single
	 * bit, overwriting the previous write — presumably the register
	 * accumulates per-bit enables; verify against the CSR spec.
	 */
	for (i = 0; i < ndev->hw.se_cores; i++)
		nitrox_write_csr(ndev, POM_PERF_CTL, BIT_ULL(i));
}
434
435/**
436 * nitrox_config_rand_unit - enable NITROX random number unit
437 * @ndev: NITROX device
438 */
439void nitrox_config_rand_unit(struct nitrox_device *ndev)
440{
441	union efl_rnm_ctl_status efl_rnm_ctl;
442	u64 offset;
443
444	offset = EFL_RNM_CTL_STATUS;
445	efl_rnm_ctl.value = nitrox_read_csr(ndev, offset);
446	efl_rnm_ctl.s.ent_en = 1;
447	efl_rnm_ctl.s.rng_en = 1;
448	nitrox_write_csr(ndev, offset, efl_rnm_ctl.value);
449}
450
451void nitrox_config_efl_unit(struct nitrox_device *ndev)
452{
453	int i;
454
455	for (i = 0; i < NR_CLUSTERS; i++) {
456		union efl_core_int_ena_w1s efl_core_int;
457		u64 offset;
458
459		/* EFL core interrupts */
460		offset = EFL_CORE_INT_ENA_W1SX(i);
461		efl_core_int.value = 0;
462		efl_core_int.s.len_ovr = 1;
463		efl_core_int.s.d_left = 1;
464		efl_core_int.s.epci_decode_err = 1;
465		nitrox_write_csr(ndev, offset, efl_core_int.value);
466
467		offset = EFL_CORE_VF_ERR_INT0_ENA_W1SX(i);
468		nitrox_write_csr(ndev, offset, (~0ULL));
469		offset = EFL_CORE_VF_ERR_INT1_ENA_W1SX(i);
470		nitrox_write_csr(ndev, offset, (~0ULL));
471	}
472}
473
474void nitrox_config_bmi_unit(struct nitrox_device *ndev)
475{
476	union bmi_ctl bmi_ctl;
477	union bmi_int_ena_w1s bmi_int_ena;
478	u64 offset;
479
480	/* no threshold limits for PCIe */
481	offset = BMI_CTL;
482	bmi_ctl.value = nitrox_read_csr(ndev, offset);
483	bmi_ctl.s.max_pkt_len = 0xff;
484	bmi_ctl.s.nps_free_thrsh = 0xff;
485	bmi_ctl.s.nps_hdrq_thrsh = 0x7a;
486	nitrox_write_csr(ndev, offset, bmi_ctl.value);
487
488	/* enable interrupts */
489	offset = BMI_INT_ENA_W1S;
490	bmi_int_ena.value = 0;
491	bmi_int_ena.s.max_len_err_nps = 1;
492	bmi_int_ena.s.pkt_rcv_err_nps = 1;
493	bmi_int_ena.s.fpf_undrrn = 1;
494	nitrox_write_csr(ndev, offset, bmi_int_ena.value);
495}
496
497void nitrox_config_bmo_unit(struct nitrox_device *ndev)
498{
499	union bmo_ctl2 bmo_ctl2;
500	u64 offset;
501
502	/* no threshold limits for PCIe */
503	offset = BMO_CTL2;
504	bmo_ctl2.value = nitrox_read_csr(ndev, offset);
505	bmo_ctl2.s.nps_slc_buf_thrsh = 0xff;
506	nitrox_write_csr(ndev, offset, bmo_ctl2.value);
507}
508
/**
 * invalidate_lbc - start an LBC CAM invalidation and wait for it.
 * @ndev: NITROX device
 *
 * Kicks off the invalidation via the control register, then polls the
 * status register (bounded by MAX_CSR_RETRIES) for the done bit. Gives
 * up silently if the operation never completes.
 */
void invalidate_lbc(struct nitrox_device *ndev)
{
	union lbc_inval_ctl lbc_ctl;
	union lbc_inval_status lbc_stat;
	int max_retries = MAX_CSR_RETRIES;
	u64 offset;

	/* invalidate LBC */
	offset = LBC_INVAL_CTL;
	lbc_ctl.value = nitrox_read_csr(ndev, offset);
	lbc_ctl.s.cam_inval_start = 1;
	nitrox_write_csr(ndev, offset, lbc_ctl.value);

	offset = LBC_INVAL_STATUS;
	do {
		lbc_stat.value = nitrox_read_csr(ndev, offset);
		if (lbc_stat.s.done)
			break;
		udelay(50);
	} while (max_retries--);
}
530
531void nitrox_config_lbc_unit(struct nitrox_device *ndev)
532{
533	union lbc_int_ena_w1s lbc_int_ena;
534	u64 offset;
535
536	invalidate_lbc(ndev);
537
538	/* enable interrupts */
539	offset = LBC_INT_ENA_W1S;
540	lbc_int_ena.value = 0;
541	lbc_int_ena.s.dma_rd_err = 1;
542	lbc_int_ena.s.over_fetch_err = 1;
543	lbc_int_ena.s.cam_inval_abort = 1;
544	lbc_int_ena.s.cam_hard_err = 1;
545	nitrox_write_csr(ndev, offset, lbc_int_ena.value);
546
547	offset = LBC_PLM_VF1_64_INT_ENA_W1S;
548	nitrox_write_csr(ndev, offset, (~0ULL));
549	offset = LBC_PLM_VF65_128_INT_ENA_W1S;
550	nitrox_write_csr(ndev, offset, (~0ULL));
551
552	offset = LBC_ELM_VF1_64_INT_ENA_W1S;
553	nitrox_write_csr(ndev, offset, (~0ULL));
554	offset = LBC_ELM_VF65_128_INT_ENA_W1S;
555	nitrox_write_csr(ndev, offset, (~0ULL));
556}
557
558void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
559{
560	union nps_core_gbl_vfcfg vfcfg;
561
562	vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
563	vfcfg.s.cfg = mode & 0x7;
564
565	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
566}
567
568static const char *get_core_option(u8 se_cores, u8 ae_cores)
569{
570	const char *option = "";
571
572	if (ae_cores == AE_MAX_CORES) {
573		switch (se_cores) {
574		case SE_MAX_CORES:
575			option = "60";
576			break;
577		case 40:
578			option = "60s";
579			break;
580		}
581	} else if (ae_cores == (AE_MAX_CORES / 2)) {
582		option = "30";
583	} else {
584		option = "60i";
585	}
586
587	return option;
588}
589
590static const char *get_feature_option(u8 zip_cores, int core_freq)
591{
592	if (zip_cores == 0)
593		return "";
594	else if (zip_cores < ZIP_MAX_CORES)
595		return "-C15";
596
597	if (core_freq >= 850)
598		return "-C45";
599	else if (core_freq >= 750)
600		return "-C35";
601	else if (core_freq >= 550)
602		return "-C25";
603
604	return "";
605}
606
607void nitrox_get_hwinfo(struct nitrox_device *ndev)
608{
609	union emu_fuse_map emu_fuse;
610	union rst_boot rst_boot;
611	union fus_dat1 fus_dat1;
612	unsigned char name[IFNAMSIZ * 2] = {};
613	int i, dead_cores;
614	u64 offset;
615
616	/* get core frequency */
617	offset = RST_BOOT;
618	rst_boot.value = nitrox_read_csr(ndev, offset);
619	ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK;
620
621	for (i = 0; i < NR_CLUSTERS; i++) {
622		offset = EMU_FUSE_MAPX(i);
623		emu_fuse.value = nitrox_read_csr(ndev, offset);
624		if (emu_fuse.s.valid) {
625			dead_cores = hweight32(emu_fuse.s.ae_fuse);
626			ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
627			dead_cores = hweight16(emu_fuse.s.se_fuse);
628			ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
629		}
630	}
631	/* find zip hardware availability */
632	offset = FUS_DAT1;
633	fus_dat1.value = nitrox_read_csr(ndev, offset);
634	if (!fus_dat1.nozip) {
635		dead_cores = hweight8(fus_dat1.zip_info);
636		ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
637	}
638
639	/* determine the partname
640	 * CNN55<core option>-<freq><pincount>-<feature option>-<rev>
641	 */
642	snprintf(name, sizeof(name), "CNN55%s-%3dBG676%s-1.%u",
643		 get_core_option(ndev->hw.se_cores, ndev->hw.ae_cores),
644		 ndev->hw.freq,
645		 get_feature_option(ndev->hw.zip_cores, ndev->hw.freq),
646		 ndev->hw.revision_id);
647
648	/* copy partname */
649	strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
650}
651
652void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
653{
654	u64 value = ~0ULL;
655	u64 reg_addr;
656
657	/* Mailbox interrupt low enable set register */
658	reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1S;
659	nitrox_write_csr(ndev, reg_addr, value);
660
661	/* Mailbox interrupt high enable set register */
662	reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1S;
663	nitrox_write_csr(ndev, reg_addr, value);
664}
665
666void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
667{
668	u64 value = ~0ULL;
669	u64 reg_addr;
670
671	/* Mailbox interrupt low enable clear register */
672	reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1C;
673	nitrox_write_csr(ndev, reg_addr, value);
674
675	/* Mailbox interrupt high enable clear register */
676	reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1C;
677	nitrox_write_csr(ndev, reg_addr, value);
678}
679