// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/ctype.h>
#include <linux/firmware.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "otx2_cptlf.h"
#include "otx2_cpt_reqmgr.h"
#include "rvu_reg.h"

#define CSR_DELAY 30

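/*
 * LOAD_FVC is the microcode operation (major op 0x01, minor op 0x08) used
 * by otx2_cpt_discover_eng_capabilities() below to query an engine group
 * for its capability flags; the response is LOADFVC_RLEN (8) bytes.
 */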
#define LOADFVC_RLEN 8
#define LOADFVC_MAJOR_OP 0x01
#define LOADFVC_MINOR_OP 0x08

#define CTX_FLUSH_TIMER_CNT 0xFFFFFF

struct fw_info_t {
	struct list_head ucodes;
};

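/*
 * Build a bitmap of all engines reserved by the given engine group
 * across its engine types. bmap.size is left at zero on failure, which
 * callers use as the error indication.
 */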
static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
					struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_bitmap bmap = { {0} };
	bool found = false;
	int i;

	if (eng_grp->g->engs_num < 0 ||
	    eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(dev, "unsupported number of engines %d on octeontx2\n",
			eng_grp->g->engs_num);
		return bmap;
	}

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (eng_grp->engs[i].type) {
			bitmap_or(bmap.bits, bmap.bits,
				  eng_grp->engs[i].bmap,
				  eng_grp->g->engs_num);
			bmap.size = eng_grp->g->engs_num;
			found = true;
		}
	}

	if (!found)
		dev_err(dev, "No engines reserved for engine group %d\n",
			eng_grp->idx);
	return bmap;
}

static int is_eng_type(int val, int eng_type)
{
	return val & (1 << eng_type);
}

static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
{
	if (eng_grp->ucode[1].type)
		return true;
	else
		return false;
}

static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
			       const char *filename)
{
	strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
}

static char *get_eng_type_str(int eng_type)
{
	char *str = "unknown";

	switch (eng_type) {
	case OTX2_CPT_SE_TYPES:
		str = "SE";
		break;

	case OTX2_CPT_IE_TYPES:
		str = "IE";
		break;

	case OTX2_CPT_AE_TYPES:
		str = "AE";
		break;
	}
	return str;
}

static char *get_ucode_type_str(int ucode_type)
{
	char *str = "unknown";

	switch (ucode_type) {
	case (1 << OTX2_CPT_SE_TYPES):
		str = "SE";
		break;

	case (1 << OTX2_CPT_IE_TYPES):
		str = "IE";
		break;

	case (1 << OTX2_CPT_AE_TYPES):
		str = "AE";
		break;

	case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
		str = "SE+IPSEC";
		break;
	}
	return str;
}

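/*
 * Derive the engine types a microcode image supports from its header:
 * the version string must carry the "ocpt-<revision>" prefix matching
 * this chip and an "se-"/"ie-"/"ae" tag, which is cross-checked against
 * the microcode type in ver_num.nn.
 */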
static int get_ucode_type(struct device *dev,
			  struct otx2_cpt_ucode_hdr *ucode_hdr,
			  int *ucode_type)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
	char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
	struct pci_dev *pdev = cptpf->pdev;
	int i, val = 0;
	u8 nn;

	strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
	for (i = 0; i < strlen(tmp_ver_str); i++)
		tmp_ver_str[i] = tolower(tmp_ver_str[i]);

	sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision);
	if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
		return -EINVAL;

	nn = ucode_hdr->ver_num.nn;
	if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
	     nn == OTX2_CPT_SE_UC_TYPE3))
		val |= 1 << OTX2_CPT_SE_TYPES;
	if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
	     nn == OTX2_CPT_IE_UC_TYPE3))
		val |= 1 << OTX2_CPT_IE_TYPES;
	if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    nn == OTX2_CPT_AE_UC_TYPE)
		val |= 1 << OTX2_CPT_AE_TYPES;

	*ucode_type = val;

	if (!val)
		return -EINVAL;

	return 0;
}

static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
			      dma_addr_t dma_addr, int blkaddr)
{
	return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				     CPT_AF_EXEX_UCODE_BASE(eng),
				     (u64)dma_addr, blkaddr);
}

static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
			       struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	struct otx2_cpt_engs_rsvd *engs;
	dma_addr_t dma_addr;
	int i, bit, ret;

	/* Set PF number for microcode fetches */
	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_PF_FUNC,
				    cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
	if (ret)
		return ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		dma_addr = engs->ucode->dma;

		/*
		 * Set UCODE_BASE only for the cores which are not in use;
		 * other cores should already have a valid UCODE_BASE set.
		 */
		for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
			if (!eng_grp->g->eng_ref_cnt[bit]) {
				ret = __write_ucode_base(cptpf, bit, dma_addr,
							 blkaddr);
				if (ret)
					return ret;
			}
	}
	return 0;
}

static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	int ret;

	if (cptpf->has_cpt1) {
		ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
}

static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					 struct otx2_cptpf_dev *cptpf,
					 struct otx2_cpt_bitmap bmap,
					 int blkaddr)
{
	int i, timeout = 10;
	int busy, ret;
	u64 reg = 0;

	/* Detach the cores from group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		if (reg & (1ull << eng_grp->idx)) {
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << eng_grp->idx);

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for_each_set_bit(i, bmap.bits, bmap.size) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores only if they are not used anymore */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!eng_grp->g->eng_ref_cnt[i]) {
			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL(i), 0x0,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	int ret;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (cptpf->has_cpt1) {
		ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
						    BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
					     BLKADDR_CPT0);
}

static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					struct otx2_cptpf_dev *cptpf,
					struct otx2_cpt_bitmap bmap,
					int blkaddr)
{
	u64 reg = 0;
	int i, ret;

	/* Attach the cores to the group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		if (!(reg & (1ull << eng_grp->idx))) {
			eng_grp->g->eng_ref_cnt[i]++;
			reg |= 1ull << eng_grp->idx;

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Enable the cores */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x1,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
				       void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	int ret;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (cptpf->has_cpt1) {
		ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
						   BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
}

static int load_fw(struct device *dev, struct fw_info_t *fw_info,
		   char *filename)
{
	struct otx2_cpt_ucode_hdr *ucode_hdr;
	struct otx2_cpt_uc_info_t *uc_info;
	int ucode_type, ucode_size;
	int ret;

	uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
	if (!uc_info)
		return -ENOMEM;

	ret = request_firmware(&uc_info->fw, filename, dev);
	if (ret)
		goto free_uc_info;

	ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
	ret = get_ucode_type(dev, ucode_hdr, &ucode_type);
	if (ret)
		goto release_fw;

	ucode_size = ntohl(ucode_hdr->code_length) * 2;
	if (!ucode_size) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		ret = -EINVAL;
		goto release_fw;
	}

	set_ucode_filename(&uc_info->ucode, filename);
	memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
	       OTX2_CPT_UCODE_VER_STR_SZ);
	uc_info->ucode.ver_num = ucode_hdr->ver_num;
	uc_info->ucode.type = ucode_type;
	uc_info->ucode.size = ucode_size;
	list_add_tail(&uc_info->list, &fw_info->ucodes);

	return 0;

release_fw:
	release_firmware(uc_info->fw);
free_uc_info:
	kfree(uc_info);
	return ret;
}

static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr, *temp;

	if (!fw_info)
		return;

	list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
		list_del(&curr->list);
		release_firmware(curr->fw);
		kfree(curr);
	}
}

static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
					    int ucode_type)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		if (!is_eng_type(curr->ucode.type, ucode_type))
			continue;

		return curr;
	}
	return NULL;
}

static void print_uc_info(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		pr_debug("Ucode filename %s\n", curr->ucode.filename);
		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
		pr_debug("Ucode version %d.%d.%d.%d\n",
			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
			 get_ucode_type_str(curr->ucode.type));
		pr_debug("Ucode size %d\n", curr->ucode.size);
		pr_debug("Ucode ptr %p\n", curr->fw->data);
	}
}

static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
{
	char filename[OTX2_CPT_NAME_LENGTH];
	char eng_type[8] = {0};
	int ret, e, i;

	INIT_LIST_HEAD(&fw_info->ucodes);

	for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
		strcpy(eng_type, get_eng_type_str(e));
		for (i = 0; i < strlen(eng_type); i++)
			eng_type[i] = tolower(eng_type[i]);

		snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
			 pdev->revision, eng_type);
		/* Request firmware for each engine type */
		ret = load_fw(&pdev->dev, fw_info, filename);
		if (ret)
			goto release_fw;
	}
	print_uc_info(fw_info);
	return 0;

release_fw:
	cpt_ucode_release_fw(fw_info);
	return ret;
}

struct otx2_cpt_engs_rsvd *find_engines_by_type(
					struct otx2_cpt_eng_grp_info *eng_grp,
					int eng_type)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		if (eng_grp->engs[i].type == eng_type)
			return &eng_grp->engs[i];
	}
	return NULL;
}

static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
				int eng_type)
{
	struct otx2_cpt_engs_rsvd *engs;

	engs = find_engines_by_type(eng_grp, eng_type);

	return (engs != NULL ? 1 : 0);
}

static int update_engines_avail_count(struct device *dev,
				      struct otx2_cpt_engs_available *avail,
				      struct otx2_cpt_engs_rsvd *engs, int val)
{
	switch (engs->type) {
	case OTX2_CPT_SE_TYPES:
		avail->se_cnt += val;
		break;

	case OTX2_CPT_IE_TYPES:
		avail->ie_cnt += val;
		break;

	case OTX2_CPT_AE_TYPES:
		avail->ae_cnt += val;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}
	return 0;
}

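/*
 * Engines are numbered globally in a fixed layout: SE engines start at
 * offset 0, IE engines follow at max_se_cnt and AE engines follow at
 * max_se_cnt + max_ie_cnt.
 */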
static int update_engines_offset(struct device *dev,
				 struct otx2_cpt_engs_available *avail,
				 struct otx2_cpt_engs_rsvd *engs)
{
	switch (engs->type) {
	case OTX2_CPT_SE_TYPES:
		engs->offset = 0;
		break;

	case OTX2_CPT_IE_TYPES:
		engs->offset = avail->max_se_cnt;
		break;

	case OTX2_CPT_AE_TYPES:
		engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}
	return 0;
}

static int release_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp)
{
	int i, ret = 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type)
			continue;

		if (grp->engs[i].count > 0) {
			ret = update_engines_avail_count(dev, &grp->g->avail,
							 &grp->engs[i],
							 grp->engs[i].count);
			if (ret)
				return ret;
		}

		grp->engs[i].type = 0;
		grp->engs[i].count = 0;
		grp->engs[i].offset = 0;
		grp->engs[i].ucode = NULL;
		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
	}
	return 0;
}

static int do_reserve_engines(struct device *dev,
			      struct otx2_cpt_eng_grp_info *grp,
			      struct otx2_cpt_engines *req_engs)
{
	struct otx2_cpt_engs_rsvd *engs = NULL;
	int i, ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type) {
			engs = &grp->engs[i];
			break;
		}
	}

	if (!engs)
		return -ENOMEM;

	engs->type = req_engs->type;
	engs->count = req_engs->count;

	ret = update_engines_offset(dev, &grp->g->avail, engs);
	if (ret)
		return ret;

	if (engs->count > 0) {
		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
						 -engs->count);
		if (ret)
			return ret;
	}

	return 0;
}

static int check_engines_availability(struct device *dev,
				      struct otx2_cpt_eng_grp_info *grp,
				      struct otx2_cpt_engines *req_eng)
{
	int avail_cnt = 0;

	switch (req_eng->type) {
	case OTX2_CPT_SE_TYPES:
		avail_cnt = grp->g->avail.se_cnt;
		break;

	case OTX2_CPT_IE_TYPES:
		avail_cnt = grp->g->avail.ie_cnt;
		break;

	case OTX2_CPT_AE_TYPES:
		avail_cnt = grp->g->avail.ae_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
		return -EINVAL;
	}

	if (avail_cnt < req_eng->count) {
		dev_err(dev,
			"Error: available %s engines %d < requested %d\n",
			get_eng_type_str(req_eng->type),
			avail_cnt, req_eng->count);
		return -EBUSY;
	}
	return 0;
}

static int reserve_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp,
			   struct otx2_cpt_engines *req_engs, int ucodes_cnt)
{
	int i, ret = 0;

	/* Validate that the requested number of engines is available */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = check_engines_availability(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}

	/* Reserve requested engines for this engine group */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = do_reserve_engines(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
{
	if (ucode->va) {
		dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
				  ucode->dma);
		ucode->va = NULL;
		ucode->dma = 0;
		ucode->size = 0;
	}

	memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
	memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
	set_ucode_filename(ucode, "");
	ucode->type = 0;
}

static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx2_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/* Allocate DMAable space */
	ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
				       GFP_KERNEL);
	if (!ucode->va)
		return -ENOMEM;

	memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
	       ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		cpu_to_be64s(&((u64 *)ucode->va)[i]);
	/* Ucode needs 16-bit swap */
	for (i = 0; i < (ucode->size / 2); i++)
		cpu_to_be16s(&((u16 *)ucode->va)[i]);
	return 0;
}

static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	/* Point microcode to each core of the group */
	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	/* Attach the cores to the group and enable them */
	ret = cpt_attach_and_enable_cores(eng_grp, obj);

	return ret;
}

static int disable_eng_grp(struct device *dev,
			   struct otx2_cpt_eng_grp_info *eng_grp,
			   void *obj)
{
	int i, ret;

	/* Disable all engines used by this group */
	ret = cpt_detach_and_disable_cores(eng_grp, obj);
	if (ret)
		return ret;

	/* Unload ucode used by this engine group */
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
	}

	/* Clear UCODE_BASE register for each engine used by this group */
	ret = cpt_set_ucode_base(eng_grp, obj);

	return ret;
}

static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
				    struct otx2_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}

static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
{
	struct otx2_cpt_eng_grp_info *src_grp;

	if (!dst_grp->mirror.is_ena)
		return;

	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

	src_grp->mirror.ref_count--;
	dst_grp->mirror.is_ena = false;
	dst_grp->mirror.idx = 0;
	dst_grp->mirror.ref_count = 0;
}

static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
				  struct otx2_cpt_engines *engs, int engs_cnt)
{
	struct otx2_cpt_engs_rsvd *mirrored_engs;
	int i;

	for (i = 0; i < engs_cnt; i++) {
		mirrored_engs = find_engines_by_type(mirror_eng_grp,
						     engs[i].type);
		if (!mirrored_engs)
			continue;

		/*
		 * If the mirrored group has this type of engines attached then
		 * there are 3 possible scenarios:
		 * 1) mirrored_engs.count == engs[i].count: all engines from
		 * the mirrored engine group will be shared with this engine
		 * group
		 * 2) mirrored_engs.count > engs[i].count: only a subset of
		 * engines from the mirrored engine group will be shared with
		 * this engine group
		 * 3) mirrored_engs.count < engs[i].count: all engines from
		 * the mirrored engine group will be shared with this group
		 * and additional engines will be reserved for exclusive use
		 * by this engine group
		 */
		engs[i].count -= mirrored_engs->count;
	}
}

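/*
 * Find an enabled engine group that runs the same microcode (matching
 * version string) and uses only a single microcode image. If one exists,
 * the new group can mirror it and share its engines instead of loading a
 * second copy of the same microcode.
 */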
static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
					struct otx2_cpt_eng_grp_info *grp)
{
	struct otx2_cpt_eng_grps *eng_grps = grp->g;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			continue;
		if (eng_grps->grp[i].ucode[0].type &&
		    eng_grps->grp[i].ucode[1].type)
			continue;
		if (grp->idx == i)
			continue;
		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
				 grp->ucode[0].ver_str,
				 OTX2_CPT_UCODE_VER_STR_SZ))
			return &eng_grps->grp[i];
	}

	return NULL;
}

static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
					struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			return &eng_grps->grp[i];
	}
	return NULL;
}

static int eng_grp_update_masks(struct device *dev,
				struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx2_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX2_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX2_CPT_IE_TYPES:
			max_cnt = eng_grp->g->avail.max_ie_cnt;
			break;

		case OTX2_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	if (!eng_grp->mirror.is_ena)
		return 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}

static int delete_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grp_info *eng_grp)
{
	int ret;

	if (!eng_grp->is_enabled)
		return 0;

	if (eng_grp->mirror.ref_count)
		return -EINVAL;

	/* Remove engine group mirroring if enabled */
	remove_eng_grp_mirroring(eng_grp);

	/* Disable engine group */
	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
	if (ret)
		return ret;

	/* Release all engines held by this engine group */
	ret = release_engines(dev, eng_grp);
	if (ret)
		return ret;

	eng_grp->is_enabled = false;

	return 0;
}

static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_ucode *ucode;

	if (eng_grp->mirror.is_ena)
		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
	else
		ucode = &eng_grp->ucode[0];
	WARN_ON(!eng_grp->engs[0].type);
	eng_grp->engs[0].ucode = ucode;

	if (eng_grp->engs[1].type) {
		if (is_2nd_ucode_used(eng_grp))
			eng_grp->engs[1].ucode = &eng_grp->ucode[1];
		else
			eng_grp->engs[1].ucode = ucode;
	}
}

static int create_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grps *eng_grps,
			       struct otx2_cpt_engines *engs, int ucodes_cnt,
			       void *ucode_data[], int is_print)
{
	struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
	struct otx2_cpt_eng_grp_info *eng_grp;
	struct otx2_cpt_uc_info_t *uc_info;
	int i, ret = 0;

	/* Find engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error: all engine groups are in use\n");
		return -ENOSPC;
	}
	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
		uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
		eng_grp->ucode[i] = uc_info->ucode;
		ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
					    uc_info->fw->data);
		if (ret)
			goto unload_ucode;
	}

	/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update count of requested engines because some
		 * of them might be shared with mirrored group
		 */
		update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
	}
	ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
	if (ret)
		goto unload_ucode;

	/* Update ucode pointers used by engines */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto release_engs;

	/*
	 * If this engine group mirrors another engine group
	 * then we need to unload ucode as we will use ucode
	 * from mirrored engine group
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;

	if (!is_print)
		return 0;

	if (mirrored_eng_grp)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);
	if (is_2nd_ucode_used(eng_grp))
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[1].ver_str);

	return 0;

release_engs:
	release_engines(dev, eng_grp);
unload_ucode:
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);
	return ret;
}

static void delete_engine_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	/* First delete all mirroring engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].mirror.is_ena)
			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

	/* Delete remaining engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
}

#define PCI_DEVID_CN10K_RNM 0xA098
#define RNM_ENTROPY_STATUS  0x8

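/*
 * Hardware errata workaround: poll (for up to ~5ms) until the RNM block
 * reports RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40, i.e. it is producing
 * entropy, before CPT is configured to request random numbers from it.
 */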
static void rnm_to_cpt_errata_fixup(struct device *dev)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int timeout = 5000;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
	if (!pdev)
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto put_pdev;

	while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout) {
			dev_warn(dev, "RNM is not producing entropy\n");
			break;
		}
	}

	iounmap(base);

put_pdev:
	pci_dev_put(pdev);
}

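/*
 * Return the index of an enabled engine group containing the requested
 * engine type, or OTX2_CPT_INVALID_CRYPTO_ENG_GRP if there is none. For
 * SE requests a group with SE engines only is preferred, so the shared
 * SE+IE group is skipped.
 */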
int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
{
	int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	struct otx2_cpt_eng_grp_info *grp;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		if (!grp->is_enabled)
			continue;

		if (eng_type == OTX2_CPT_SE_TYPES) {
			if (eng_grp_has_eng_type(grp, eng_type) &&
			    !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
				eng_grp_num = i;
				break;
			}
		} else {
			if (eng_grp_has_eng_type(grp, eng_type)) {
				eng_grp_num = i;
				break;
			}
		}
	}
	return eng_grp_num;
}

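/*
 * Create the three default engine groups: SE only (symmetric kernel
 * crypto), SE+IE for IPsec (sharing the SE engines with group 0) and AE
 * (asymmetric crypto). On non-OcteonTX2 (CN10K) parts this also applies
 * the RNM errata fixup and the CPT_AF_CTL/CPT_AF_CTX_FLUSH_TIMER/
 * CPT_AF_DIAG register setup done below.
 */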
int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
			     struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct pci_dev *pdev = cptpf->pdev;
	struct fw_info_t fw_info;
	u64 reg_val;
	int ret = 0;

	mutex_lock(&eng_grps->lock);
	/*
	 * Don't create engine groups if they were already created
	 * (when the user enabled VFs for the first time).
	 */
	if (eng_grps->is_grps_created)
		goto unlock;

	ret = cpt_ucode_load_fw(pdev, &fw_info);
	if (ret)
		goto unlock;

	/*
	 * Create engine group with SE engines for kernel
	 * crypto functionality (symmetric crypto)
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto release_fw;

	/*
	 * Create engine group with SE+IE engines for IPSec.
	 * All SE engines will be shared with engine group 0.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);

	if (uc_info[1] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;
	engs[1].type = OTX2_CPT_IE_TYPES;
	engs[1].count = eng_grps->avail.max_ie_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	/*
	 * Create engine group with AE engines for asymmetric
	 * crypto functionality.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = eng_grps->avail.max_ae_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	eng_grps->is_grps_created = true;

	cpt_ucode_release_fw(&fw_info);

	if (is_dev_otx2(pdev))
		goto unlock;

	/*
	 * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
	 * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
	 */
	rnm_to_cpt_errata_fixup(&pdev->dev);

	/*
	 * Configure engine group mask to allow context prefetching
	 * for the groups and enable random number request, to enable
	 * CPT to request random numbers from RNM.
	 */
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
			      OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
			      BLKADDR_CPT0);
	/*
	 * Set interval to periodically flush dirty data for the next
	 * CTX cache entry. Set the interval count to maximum supported
	 * value.
	 */
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
			      CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);

	/*
	 * Set CPT_AF_DIAG[FLT_DIS], as a workaround for HW errata, when
	 * CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM
	 * encounters a fault/poison, a rare case may result in
	 * unpredictable data being delivered to a CPT engine.
	 */
	otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, &reg_val,
			     BLKADDR_CPT0);
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
			      reg_val | BIT_ULL(24), BLKADDR_CPT0);

	mutex_unlock(&eng_grps->lock);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
unlock:
	mutex_unlock(&eng_grps->lock);
	return ret;
}

static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
				  int blkaddr)
{
	int timeout = 10, ret;
	int i, busy;
	u64 reg;

	/* Disengage the cores from groups */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL2(i), 0x0,
						blkaddr);
		if (ret)
			return ret;

		cptpf->eng_grps.eng_ref_cnt[i] = 0;
	}
	ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret)
		return ret;

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for (i = 0; i < total_cores; i++) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x0,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
{
	int total_cores, ret;

	total_cores = cptpf->eng_grps.avail.max_se_cnt +
		      cptpf->eng_grps.avail.max_ie_cnt +
		      cptpf->eng_grps.avail.max_ae_cnt;

	if (cptpf->has_cpt1) {
		ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
}

void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j;

	mutex_lock(&eng_grps->lock);
	delete_engine_grps(pdev, eng_grps);
	/* Release memory */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			kfree(grp->engs[j].bmap);
			grp->engs[j].bmap = NULL;
		}
	}
	mutex_unlock(&eng_grps->lock);
}

int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
			   struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j, ret;

	mutex_init(&eng_grps->lock);
	eng_grps->obj = pci_get_drvdata(pdev);
	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
	eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ie_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d exceeds max supported %d\n",
			eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto cleanup_eng_grps;
	}

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto cleanup_eng_grps;
			}
		}
	}
	return 0;

cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}

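/*
 * Create temporary engine groups with two engines of each type. They
 * exist only so that otx2_cpt_discover_eng_capabilities() can submit one
 * LOAD_FVC instruction per engine type; the groups are deleted again once
 * capability discovery is done.
 */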
static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
					  struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct fw_info_t fw_info;
	int ret;

	mutex_lock(&eng_grps->lock);
	ret = cpt_ucode_load_fw(pdev, &fw_info);
	if (ret) {
		mutex_unlock(&eng_grps->lock);
		return ret;
	}

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto release_fw;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_IE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	cpt_ucode_release_fw(&fw_info);
	mutex_unlock(&eng_grps->lock);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
	mutex_unlock(&eng_grps->lock);
	return ret;
}

/*
 * Get CPT HW capabilities using LOAD_FVC operation.
 */
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct otx2_cpt_iq_command iq_cmd;
	union otx2_cpt_opcode opcode;
	union otx2_cpt_res_s *result;
	union otx2_cpt_inst_s inst;
	dma_addr_t rptr_baddr;
	struct pci_dev *pdev;
	u32 len, compl_rlen;
	int ret, etype;
	void *rptr;

	/*
	 * Don't rediscover capabilities if this was already done
	 * (when the user enabled VFs for the first time).
	 */
	if (cptpf->is_eng_caps_discovered)
		return 0;

	pdev = cptpf->pdev;
	/*
	 * Create engine groups for each type to submit LOAD_FVC op and
	 * get engine's capabilities.
	 */
	ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
	if (ret)
		goto delete_grps;

	otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base,
				&cptpf->afpf_mbox, BLKADDR_CPT0);
	ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
			      OTX2_CPT_QUEUE_HI_PRIO, 1);
	if (ret)
		goto delete_grps;

	compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
	len = compl_rlen + LOADFVC_RLEN;

	result = kzalloc(len, GFP_KERNEL);
	if (!result) {
		ret = -ENOMEM;
		goto lf_cleanup;
	}
	rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
		dev_err(&pdev->dev, "DMA mapping failed\n");
		ret = -EFAULT;
		goto free_result;
	}
	rptr = (u8 *)result + compl_rlen;

	/* Fill in the command */
	opcode.s.major = LOADFVC_MAJOR_OP;
	opcode.s.minor = LOADFVC_MINOR_OP;

	iq_cmd.cmd.u = 0;
	iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);

	/* 64-bit swap for microcode data reads, not needed for addresses */
	cpu_to_be64s(&iq_cmd.cmd.u);
	iq_cmd.dptr = 0;
	iq_cmd.rptr = rptr_baddr + compl_rlen;
	iq_cmd.cptr.u = 0;

	for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
		result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
		iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
							 etype);
		otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
		lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);

		while (lfs->ops->cpt_get_compcode(result) ==
						OTX2_CPT_COMPLETION_CODE_INIT)
			cpu_relax();

		cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
	}
	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
	cptpf->is_eng_caps_discovered = true;

free_result:
	kfree(result);
lf_cleanup:
	otx2_cptlf_shutdown(lfs);
delete_grps:
	delete_engine_grps(pdev, &cptpf->eng_grps);

	return ret;
}

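/*
 * Create a custom engine group from a devlink parameter string. As parsed
 * below, the string is a semicolon-separated list of one or two engine
 * specs ("se:<count>", "ie:<count>" or "ae:<count>") followed by one or
 * two microcode file names, e.g. "se:32;ie:16;se_ie.out" (the file name
 * here is illustrative only).
 */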
int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
				   struct devlink_param_gset_ctx *ctx)
{
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { { 0 } };
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
	char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
	struct device *dev = &cptpf->pdev->dev;
	char *start, *val, *err_msg, *tmp;
	int grp_idx = 0, ret = -EINVAL;
	bool has_se, has_ie, has_ae;
	struct fw_info_t fw_info;
	int ucode_idx = 0;

	if (!eng_grps->is_grps_created) {
		dev_err(dev, "Not allowed before creating the default groups\n");
		return -EINVAL;
	}
	err_msg = "Invalid engine group format";
	strscpy(tmp_buf, ctx->val.vstr, sizeof(tmp_buf));
	start = tmp_buf;

	has_se = has_ie = has_ae = false;

	for (;;) {
		val = strsep(&start, ";");
		if (!val)
			break;
		val = strim(val);
		if (!*val)
			continue;

		if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
			if (has_se || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
			has_se = true;
		} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
			if (has_ae || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
			has_ae = true;
		} else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
			if (has_ie || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
			has_ie = true;
		} else {
			if (ucode_idx > 1)
				goto err_print;
			if (!strlen(val))
				goto err_print;
			if (strnstr(val, " ", strlen(val)))
				goto err_print;
			ucode_filename[ucode_idx++] = val;
		}
	}

	/* Validate input parameters */
	if (!(grp_idx && ucode_idx))
		goto err_print;

	if (ucode_idx > 1 && grp_idx < 2)
		goto err_print;

	if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
		err_msg = "Error max 2 engine types can be attached";
		goto err_print;
	}

	if (grp_idx > 1) {
		if ((engs[0].type + engs[1].type) !=
		    (OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
			err_msg = "Only combination of SE+IE engines is allowed";
			goto err_print;
		}
		/* Keep SE engines at zero index */
		if (engs[1].type == OTX2_CPT_SE_TYPES)
			swap(engs[0], engs[1]);
	}
	mutex_lock(&eng_grps->lock);

	if (cptpf->enabled_vfs) {
		dev_err(dev, "Disable VFs before modifying engine groups\n");
		ret = -EACCES;
		goto err_unlock;
	}
	INIT_LIST_HEAD(&fw_info.ucodes);
	ret = load_fw(dev, &fw_info, ucode_filename[0]);
	if (ret) {
		dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
		goto err_unlock;
	}
	if (ucode_idx > 1) {
		ret = load_fw(dev, &fw_info, ucode_filename[1]);
		if (ret) {
			dev_err(dev, "Unable to load firmware %s\n",
				ucode_filename[1]);
			goto release_fw;
		}
	}
	uc_info[0] = get_ucode(&fw_info, engs[0].type);
	if (uc_info[0] == NULL) {
		dev_err(dev, "Unable to find firmware for %s\n",
			get_eng_type_str(engs[0].type));
		ret = -EINVAL;
		goto release_fw;
	}
	if (ucode_idx > 1) {
		uc_info[1] = get_ucode(&fw_info, engs[1].type);
		if (uc_info[1] == NULL) {
			dev_err(dev, "Unable to find firmware for %s\n",
				get_eng_type_str(engs[1].type));
			ret = -EINVAL;
			goto release_fw;
		}
	}
	ret = create_engine_group(dev, eng_grps, engs, grp_idx,
				  (void **)uc_info, 1);

release_fw:
	cpt_ucode_release_fw(&fw_info);
err_unlock:
	mutex_unlock(&eng_grps->lock);
	return ret;
err_print:
	dev_err(dev, "%s\n", err_msg);
	return ret;
}

int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
				   struct devlink_param_gset_ctx *ctx)
{
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	struct device *dev = &cptpf->pdev->dev;
	char *tmp, *err_msg;
	int egrp;
	int ret;

	err_msg = "Invalid input string format (ex: egrp:0)";
	if (strncasecmp(ctx->val.vstr, "egrp", 4))
		goto err_print;
	tmp = ctx->val.vstr;
	strsep(&tmp, ":");
	if (!tmp)
		goto err_print;
	if (kstrtoint(tmp, 10, &egrp))
		goto err_print;

	if (egrp < 0 || egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Invalid engine group %d\n", egrp);
		return -EINVAL;
	}
	if (!eng_grps->grp[egrp].is_enabled) {
		dev_err(dev, "Error engine_group%d is not configured\n", egrp);
		return -EINVAL;
	}
	mutex_lock(&eng_grps->lock);
	ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
	mutex_unlock(&eng_grps->lock);

	return ret;

err_print:
	dev_err(dev, "%s\n", err_msg);
	return -EINVAL;
}

static void get_engs_info(struct otx2_cpt_eng_grp_info *eng_grp, char *buf,
			  int size, int idx)
{
	struct otx2_cpt_engs_rsvd *mirrored_engs = NULL;
	struct otx2_cpt_engs_rsvd *engs;
	int len, i;

	buf[0] = '\0';
	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (idx != -1 && idx != i)
			continue;

		if (eng_grp->mirror.is_ena)
			mirrored_engs = find_engines_by_type(
				&eng_grp->g->grp[eng_grp->mirror.idx],
				engs->type);
		if (i > 0 && idx == -1) {
			len = strlen(buf);
			scnprintf(buf + len, size - len, ", ");
		}

		len = strlen(buf);
		scnprintf(buf + len, size - len, "%d %s ",
			  mirrored_engs ? engs->count + mirrored_engs->count :
					  engs->count,
			  get_eng_type_str(engs->type));
		if (mirrored_engs) {
			len = strlen(buf);
			scnprintf(buf + len, size - len,
				  "(%d shared with engine_group%d) ",
				  engs->count <= 0 ?
					  engs->count + mirrored_engs->count :
					  mirrored_engs->count,
				  eng_grp->mirror.idx);
		}
	}
}

void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	struct otx2_cpt_eng_grp_info *mirrored_grp;
	char engs_info[2 * OTX2_CPT_NAME_LENGTH];
	struct otx2_cpt_eng_grp_info *grp;
	struct otx2_cpt_engs_rsvd *engs;
	int i, j;

	pr_debug("Engine groups global info");
	pr_debug("max SE %d, max IE %d, max AE %d", eng_grps->avail.max_se_cnt,
		 eng_grps->avail.max_ie_cnt, eng_grps->avail.max_ae_cnt);
	pr_debug("free SE %d", eng_grps->avail.se_cnt);
	pr_debug("free IE %d", eng_grps->avail.ie_cnt);
	pr_debug("free AE %d", eng_grps->avail.ae_cnt);

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		pr_debug("engine_group%d, state %s", i,
			 grp->is_enabled ? "enabled" : "disabled");
		if (grp->is_enabled) {
			mirrored_grp = &eng_grps->grp[grp->mirror.idx];
			pr_debug("Ucode0 filename %s, version %s",
				 grp->mirror.is_ena ?
					 mirrored_grp->ucode[0].filename :
					 grp->ucode[0].filename,
				 grp->mirror.is_ena ?
					 mirrored_grp->ucode[0].ver_str :
					 grp->ucode[0].ver_str);
			if (is_2nd_ucode_used(grp))
				pr_debug("Ucode1 filename %s, version %s",
					 grp->ucode[1].filename,
					 grp->ucode[1].ver_str);
		}

		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			engs = &grp->engs[j];
			if (engs->type) {
				u32 mask[5] = { };

				get_engs_info(grp, engs_info,
					      2 * OTX2_CPT_NAME_LENGTH, j);
				pr_debug("Slot%d: %s", j, engs_info);
				bitmap_to_arr32(mask, engs->bmap,
						eng_grps->engs_num);
				if (is_dev_otx2(cptpf->pdev))
					pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
						 mask[3], mask[2], mask[1],
						 mask[0]);
				else
					pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x",
						 mask[4], mask[3], mask[2], mask[1],
						 mask[0]);
			}
		}
	}
}