// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ctype.h>
#include <linux/firmware.h>
#include "otx_cpt_common.h"
#include "otx_cptpf_ucode.h"
#include "otx_cptpf.h"

#define CSR_DELAY 30
/* Tar archive defines */
#define TAR_MAGIC		"ustar"
#define TAR_MAGIC_LEN		6
#define TAR_BLOCK_LEN		512
#define REGTYPE			'0'
#define AREGTYPE		'\0'

/* tar header as defined in POSIX 1003.1-1990. */
struct tar_hdr_t {
	char name[100];
	char mode[8];
	char uid[8];
	char gid[8];
	char size[12];
	char mtime[12];
	char chksum[8];
	char typeflag;
	char linkname[100];
	char magic[6];
	char version[2];
	char uname[32];
	char gname[32];
	char devmajor[8];
	char devminor[8];
	char prefix[155];
};
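
/*
 * Note: the numeric header fields above (mode, uid, gid, size, mtime,
 * chksum) hold NUL- or space-terminated octal ASCII, not binary values.
 * An illustrative example: a size field of "00000001750" decodes as
 * octal 1750, i.e. 1000 bytes; load_tar_archive() below parses it with
 * kstrtouint(..., 8, ...).
 */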

struct tar_blk_t {
	union {
		struct tar_hdr_t hdr;
		char block[TAR_BLOCK_LEN];
	};
};

struct tar_arch_info_t {
	struct list_head ucodes;
	const struct firmware *fw;
};

static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
					    struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_bitmap bmap = { {0} };
	bool found = false;
	int i;

	if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
		dev_err(dev, "unsupported number of engines %d on octeontx\n",
			eng_grp->g->engs_num);
		return bmap;
	}

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (eng_grp->engs[i].type) {
			bitmap_or(bmap.bits, bmap.bits,
				  eng_grp->engs[i].bmap,
				  eng_grp->g->engs_num);
			bmap.size = eng_grp->g->engs_num;
			found = true;
		}
	}

	if (!found)
		dev_err(dev, "No engines reserved for engine group %d\n",
			eng_grp->idx);
	return bmap;
}

static int is_eng_type(int val, int eng_type)
{
	return val & (1 << eng_type);
}

static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
				 int eng_type)
{
	return is_eng_type(eng_grps->eng_types_supported, eng_type);
}

static void set_ucode_filename(struct otx_cpt_ucode *ucode,
			       const char *filename)
{
	strscpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
}

static char *get_eng_type_str(int eng_type)
{
	char *str = "unknown";

	switch (eng_type) {
	case OTX_CPT_SE_TYPES:
		str = "SE";
		break;

	case OTX_CPT_AE_TYPES:
		str = "AE";
		break;
	}
	return str;
}

static char *get_ucode_type_str(int ucode_type)
{
	char *str = "unknown";

	switch (ucode_type) {
	case (1 << OTX_CPT_SE_TYPES):
		str = "SE";
		break;

	case (1 << OTX_CPT_AE_TYPES):
		str = "AE";
		break;
	}
	return str;
}

static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
{
	char tmp_ver_str[OTX_CPT_UCODE_VER_STR_SZ];
	u32 i, val = 0;
	u8 nn;

	strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
	for (i = 0; i < strlen(tmp_ver_str); i++)
		tmp_ver_str[i] = tolower(tmp_ver_str[i]);

	nn = ucode_hdr->ver_num.nn;
	if (strnstr(tmp_ver_str, "se-", OTX_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX_CPT_SE_UC_TYPE1 || nn == OTX_CPT_SE_UC_TYPE2 ||
	     nn == OTX_CPT_SE_UC_TYPE3))
		val |= 1 << OTX_CPT_SE_TYPES;
	if (strnstr(tmp_ver_str, "ae", OTX_CPT_UCODE_VER_STR_SZ) &&
	    nn == OTX_CPT_AE_UC_TYPE)
		val |= 1 << OTX_CPT_AE_TYPES;

	*ucode_type = val;

	if (!val)
		return -EINVAL;
	if (is_eng_type(val, OTX_CPT_AE_TYPES) &&
	    is_eng_type(val, OTX_CPT_SE_TYPES))
		return -EINVAL;
	return 0;
}

static int is_mem_zero(const char *ptr, int size)
{
	int i;

	for (i = 0; i < size; i++) {
		if (ptr[i])
			return 0;
	}
	return 1;
}

static int cpt_set_ucode_base(struct otx_cpt_eng_grp_info *eng_grp, void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	dma_addr_t dma_addr;
	struct otx_cpt_bitmap bmap;
	int i;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (eng_grp->mirror.is_ena)
		dma_addr =
		       eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].align_dma;
	else
		dma_addr = eng_grp->ucode[0].align_dma;

	/*
	 * Set UCODE_BASE only for the cores which are not used;
	 * the other cores should already have a valid UCODE_BASE set.
	 */
	for_each_set_bit(i, bmap.bits, bmap.size)
		if (!eng_grp->g->eng_ref_cnt[i])
			writeq((u64) dma_addr, cpt->reg_base +
				OTX_CPT_PF_ENGX_UCODE_BASE(i));
	return 0;
}

static int cpt_detach_and_disable_cores(struct otx_cpt_eng_grp_info *eng_grp,
					void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	struct otx_cpt_bitmap bmap = { {0} };
	int timeout = 10;
	int i, busy;
	u64 reg;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	/* Detach the cores from group */
	reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (reg & (1ull << i)) {
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << i);
		}
	}
	writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
		for_each_set_bit(i, bmap.bits, bmap.size)
			if (reg & (1ull << i)) {
				busy = 1;
				break;
			}
	} while (busy);

	/* Disable the cores only if they are not used anymore */
	reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
	for_each_set_bit(i, bmap.bits, bmap.size)
		if (!eng_grp->g->eng_ref_cnt[i])
			reg &= ~(1ull << i);
	writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);

	return 0;
}

static int cpt_attach_and_enable_cores(struct otx_cpt_eng_grp_info *eng_grp,
				       void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	struct otx_cpt_bitmap bmap;
	u64 reg;
	int i;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	/* Attach the cores to the group */
	reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!(reg & (1ull << i))) {
			eng_grp->g->eng_ref_cnt[i]++;
			reg |= 1ull << i;
		}
	}
	writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));

	/* Enable the cores */
	reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
	for_each_set_bit(i, bmap.bits, bmap.size)
		reg |= 1ull << i;
	writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);

	return 0;
}

static int process_tar_file(struct device *dev,
			    struct tar_arch_info_t *tar_arch, char *filename,
			    const u8 *data, u32 size)
{
	struct tar_ucode_info_t *tar_info;
	struct otx_cpt_ucode_hdr *ucode_hdr;
	int ucode_type, ucode_size;
	unsigned int code_length;

	/*
	 * If size is less than the microcode header size then don't report
	 * an error because it might not be a microcode file; just process
	 * the next file from the archive.
	 */
	if (size < sizeof(struct otx_cpt_ucode_hdr))
		return 0;

	ucode_hdr = (struct otx_cpt_ucode_hdr *) data;
	/*
	 * If the microcode version can't be found don't report an error
	 * because it might not be a microcode file; just process the next
	 * file.
	 */
	if (get_ucode_type(ucode_hdr, &ucode_type))
		return 0;

	code_length = ntohl(ucode_hdr->code_length);
	if (code_length >= INT_MAX / 2) {
		dev_err(dev, "Invalid code_length %u\n", code_length);
		return -EINVAL;
	}

	ucode_size = code_length * 2;
	if (!ucode_size || (size < round_up(ucode_size, 16) +
	    sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		return -EINVAL;
	}

	tar_info = kzalloc(sizeof(struct tar_ucode_info_t), GFP_KERNEL);
	if (!tar_info)
		return -ENOMEM;

	tar_info->ucode_ptr = data;
	set_ucode_filename(&tar_info->ucode, filename);
	memcpy(tar_info->ucode.ver_str, ucode_hdr->ver_str,
	       OTX_CPT_UCODE_VER_STR_SZ);
	tar_info->ucode.ver_num = ucode_hdr->ver_num;
	tar_info->ucode.type = ucode_type;
	tar_info->ucode.size = ucode_size;
	list_add_tail(&tar_info->list, &tar_arch->ucodes);

	return 0;
}

static void release_tar_archive(struct tar_arch_info_t *tar_arch)
{
	struct tar_ucode_info_t *curr, *temp;

	if (!tar_arch)
		return;

	list_for_each_entry_safe(curr, temp, &tar_arch->ucodes, list) {
		list_del(&curr->list);
		kfree(curr);
	}

	release_firmware(tar_arch->fw);
	kfree(tar_arch);
}

static struct tar_ucode_info_t *get_uc_from_tar_archive(
					struct tar_arch_info_t *tar_arch,
					int ucode_type)
{
	struct tar_ucode_info_t *curr, *uc_found = NULL;

	list_for_each_entry(curr, &tar_arch->ucodes, list) {
		if (!is_eng_type(curr->ucode.type, ucode_type))
			continue;

		if (!uc_found) {
			uc_found = curr;
			continue;
		}

		switch (ucode_type) {
		case OTX_CPT_AE_TYPES:
			break;

		case OTX_CPT_SE_TYPES:
			if (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE2 ||
			    (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE3
			     && curr->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE1))
				uc_found = curr;
			break;
		}
	}

	return uc_found;
}

static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
			       char *tar_filename)
{
	struct tar_ucode_info_t *curr;

	pr_debug("Tar archive filename %s\n", tar_filename);
	pr_debug("Tar archive pointer %p, size %zu\n", tar_arch->fw->data,
		 tar_arch->fw->size);
	list_for_each_entry(curr, &tar_arch->ucodes, list) {
		pr_debug("Ucode filename %s\n", curr->ucode.filename);
		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
		pr_debug("Ucode version %d.%d.%d.%d\n",
			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
			 get_ucode_type_str(curr->ucode.type));
		pr_debug("Ucode size %d\n", curr->ucode.size);
		pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
	}
}

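/*
 * Layout assumed by load_tar_archive() below (standard ustar framing):
 * each member is a 512-byte header block followed by its data rounded
 * up to a multiple of 512 bytes, and the archive ends with two all-zero
 * blocks. A sketch for an archive holding a single 1000-byte microcode
 * file:
 *
 *   [512 B header][1000 B data + 24 B padding][512 B zero][512 B zero]
 */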
static struct tar_arch_info_t *load_tar_archive(struct device *dev,
						char *tar_filename)
{
	struct tar_arch_info_t *tar_arch = NULL;
	struct tar_blk_t *tar_blk;
	unsigned int cur_size;
	size_t tar_offs = 0;
	size_t tar_size;
	int ret;

	tar_arch = kzalloc(sizeof(struct tar_arch_info_t), GFP_KERNEL);
	if (!tar_arch)
		return NULL;

	INIT_LIST_HEAD(&tar_arch->ucodes);

	/* Load tar archive */
	ret = request_firmware(&tar_arch->fw, tar_filename, dev);
	if (ret)
		goto release_tar_arch;

	if (tar_arch->fw->size < TAR_BLOCK_LEN) {
		dev_err(dev, "Invalid tar archive %s\n", tar_filename);
		goto release_tar_arch;
	}

	tar_size = tar_arch->fw->size;
	tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
	if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
		dev_err(dev, "Unsupported format of tar archive %s\n",
			tar_filename);
		goto release_tar_arch;
	}

	while (1) {
		/* Read current file size */
		ret = kstrtouint(tar_blk->hdr.size, 8, &cur_size);
		if (ret)
			goto release_tar_arch;

		if (tar_offs + cur_size > tar_size ||
		    tar_offs + 2 * TAR_BLOCK_LEN > tar_size) {
			dev_err(dev, "Invalid tar archive %s\n", tar_filename);
			goto release_tar_arch;
		}

		tar_offs += TAR_BLOCK_LEN;
		if (tar_blk->hdr.typeflag == REGTYPE ||
		    tar_blk->hdr.typeflag == AREGTYPE) {
			ret = process_tar_file(dev, tar_arch,
					       tar_blk->hdr.name,
					       &tar_arch->fw->data[tar_offs],
					       cur_size);
			if (ret)
				goto release_tar_arch;
		}

		tar_offs += (cur_size / TAR_BLOCK_LEN) * TAR_BLOCK_LEN;
		if (cur_size % TAR_BLOCK_LEN)
			tar_offs += TAR_BLOCK_LEN;

		/* Check for the end of the archive */
		if (tar_offs + 2 * TAR_BLOCK_LEN > tar_size) {
			dev_err(dev, "Invalid tar archive %s\n", tar_filename);
			goto release_tar_arch;
		}

		if (is_mem_zero(&tar_arch->fw->data[tar_offs],
		    2 * TAR_BLOCK_LEN))
			break;

		/* Read next block from tar archive */
		tar_blk = (struct tar_blk_t *) &tar_arch->fw->data[tar_offs];
	}

	print_tar_dbg_info(tar_arch, tar_filename);
	return tar_arch;
release_tar_arch:
	release_tar_archive(tar_arch);
	return NULL;
}

static struct otx_cpt_engs_rsvd *find_engines_by_type(
					struct otx_cpt_eng_grp_info *eng_grp,
					int eng_type)
{
	int i;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		if (eng_grp->engs[i].type == eng_type)
			return &eng_grp->engs[i];
	}
	return NULL;
}

int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type)
{
	return is_eng_type(ucode->type, eng_type);
}
EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);

int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
				 int eng_type)
{
	struct otx_cpt_engs_rsvd *engs;

	engs = find_engines_by_type(eng_grp, eng_type);

	return (engs != NULL ? 1 : 0);
}
EXPORT_SYMBOL_GPL(otx_cpt_eng_grp_has_eng_type);

static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
			     char *buf, int size)
{
	if (eng_grp->mirror.is_ena) {
		scnprintf(buf, size, "%s (shared with engine_group%d)",
			  eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].ver_str,
			  eng_grp->mirror.idx);
	} else {
		scnprintf(buf, size, "%s", eng_grp->ucode[0].ver_str);
	}
}

static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
			    char *buf, int size, int idx)
{
	struct otx_cpt_engs_rsvd *mirrored_engs = NULL;
	struct otx_cpt_engs_rsvd *engs;
	int len, i;

	buf[0] = '\0';
	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (idx != -1 && idx != i)
			continue;

		if (eng_grp->mirror.is_ena)
			mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		if (i > 0 && idx == -1) {
			len = strlen(buf);
			scnprintf(buf + len, size - len, ", ");
		}

		len = strlen(buf);
		scnprintf(buf + len, size - len, "%d %s ", mirrored_engs ?
			  engs->count + mirrored_engs->count : engs->count,
			  get_eng_type_str(engs->type));
		if (mirrored_engs) {
			len = strlen(buf);
			scnprintf(buf + len, size - len,
				  "(%d shared with engine_group%d) ",
				  engs->count <= 0 ? engs->count +
				  mirrored_engs->count : mirrored_engs->count,
				  eng_grp->mirror.idx);
		}
	}
}

static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
{
	pr_debug("Ucode info\n");
	pr_debug("Ucode version string %s\n", ucode->ver_str);
	pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn,
		 ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
	pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type));
	pr_debug("Ucode size %d\n", ucode->size);
	pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va);
	pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
}

static void cpt_print_engines_mask(struct otx_cpt_eng_grp_info *eng_grp,
				   struct device *dev, char *buf, int size)
{
	struct otx_cpt_bitmap bmap;
	u32 mask[2];

	bmap = get_cores_bmap(dev, eng_grp);
	if (!bmap.size) {
		scnprintf(buf, size, "unknown");
		return;
	}
	bitmap_to_arr32(mask, bmap.bits, bmap.size);
	scnprintf(buf, size, "%8.8x %8.8x", mask[1], mask[0]);
}

static void print_dbg_info(struct device *dev,
			   struct otx_cpt_eng_grps *eng_grps)
{
	char engs_info[2 * OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *mirrored_grp;
	char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *grp;
	struct otx_cpt_engs_rsvd *engs;
	u32 mask[4];
	int i, j;

	pr_debug("Engine groups global info\n");
	pr_debug("max SE %d, max AE %d\n",
		 eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
	pr_debug("free SE %d\n", eng_grps->avail.se_cnt);
	pr_debug("free AE %d\n", eng_grps->avail.ae_cnt);

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
			 "enabled" : "disabled");
		if (grp->is_enabled) {
			mirrored_grp = &eng_grps->grp[grp->mirror.idx];
			pr_debug("Ucode0 filename %s, version %s\n",
				 grp->mirror.is_ena ?
				 mirrored_grp->ucode[0].filename :
				 grp->ucode[0].filename,
				 grp->mirror.is_ena ?
				 mirrored_grp->ucode[0].ver_str :
				 grp->ucode[0].ver_str);
		}

		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			engs = &grp->engs[j];
			if (engs->type) {
				print_engs_info(grp, engs_info,
						2 * OTX_CPT_UCODE_NAME_LENGTH,
						j);
				pr_debug("Slot%d: %s\n", j, engs_info);
				bitmap_to_arr32(mask, engs->bmap,
						eng_grps->engs_num);
				pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n",
					 mask[3], mask[2], mask[1], mask[0]);
			} else {
				pr_debug("Slot%d not used\n", j);
			}
		}
		if (grp->is_enabled) {
			cpt_print_engines_mask(grp, dev, engs_mask,
					       OTX_CPT_UCODE_NAME_LENGTH);
			pr_debug("Cmask: %s\n", engs_mask);
		}
	}
}

static int update_engines_avail_count(struct device *dev,
				      struct otx_cpt_engs_available *avail,
				      struct otx_cpt_engs_rsvd *engs, int val)
{
	switch (engs->type) {
	case OTX_CPT_SE_TYPES:
		avail->se_cnt += val;
		break;

	case OTX_CPT_AE_TYPES:
		avail->ae_cnt += val;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}

	return 0;
}

static int update_engines_offset(struct device *dev,
				 struct otx_cpt_engs_available *avail,
				 struct otx_cpt_engs_rsvd *engs)
{
	switch (engs->type) {
	case OTX_CPT_SE_TYPES:
		engs->offset = 0;
		break;

	case OTX_CPT_AE_TYPES:
		engs->offset = avail->max_se_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}

	return 0;
}

static int release_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp)
{
	int i, ret = 0;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type)
			continue;

		if (grp->engs[i].count > 0) {
			ret = update_engines_avail_count(dev, &grp->g->avail,
							 &grp->engs[i],
							 grp->engs[i].count);
			if (ret)
				return ret;
		}

		grp->engs[i].type = 0;
		grp->engs[i].count = 0;
		grp->engs[i].offset = 0;
		grp->engs[i].ucode = NULL;
		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
	}

	return 0;
}

static int do_reserve_engines(struct device *dev,
			      struct otx_cpt_eng_grp_info *grp,
			      struct otx_cpt_engines *req_engs)
{
	struct otx_cpt_engs_rsvd *engs = NULL;
	int i, ret;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type) {
			engs = &grp->engs[i];
			break;
		}
	}

	if (!engs)
		return -ENOMEM;

	engs->type = req_engs->type;
	engs->count = req_engs->count;

	ret = update_engines_offset(dev, &grp->g->avail, engs);
	if (ret)
		return ret;

	if (engs->count > 0) {
		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
						 -engs->count);
		if (ret)
			return ret;
	}

	return 0;
}

static int check_engines_availability(struct device *dev,
				      struct otx_cpt_eng_grp_info *grp,
				      struct otx_cpt_engines *req_eng)
{
	int avail_cnt = 0;

	switch (req_eng->type) {
	case OTX_CPT_SE_TYPES:
		avail_cnt = grp->g->avail.se_cnt;
		break;

	case OTX_CPT_AE_TYPES:
		avail_cnt = grp->g->avail.ae_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
		return -EINVAL;
	}

	if (avail_cnt < req_eng->count) {
		dev_err(dev,
			"Error: available %s engines %d < requested %d\n",
			get_eng_type_str(req_eng->type),
			avail_cnt, req_eng->count);
		return -EBUSY;
	}

	return 0;
}

static int reserve_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp,
			   struct otx_cpt_engines *req_engs, int req_cnt)
{
	int i, ret;

	/* Validate that the requested number of engines is available */
	for (i = 0; i < req_cnt; i++) {
		ret = check_engines_availability(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}

	/* Reserve the requested engines for this engine group */
	for (i = 0; i < req_cnt; i++) {
		ret = do_reserve_engines(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t eng_grp_info_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	char ucode_info[2 * OTX_CPT_UCODE_NAME_LENGTH];
	char engs_info[2 * OTX_CPT_UCODE_NAME_LENGTH];
	char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *eng_grp;
	int ret;

	eng_grp = container_of(attr, struct otx_cpt_eng_grp_info, info_attr);
	mutex_lock(&eng_grp->g->lock);

	print_engs_info(eng_grp, engs_info, 2 * OTX_CPT_UCODE_NAME_LENGTH, -1);
	print_ucode_info(eng_grp, ucode_info, 2 * OTX_CPT_UCODE_NAME_LENGTH);
	cpt_print_engines_mask(eng_grp, dev, engs_mask,
			       OTX_CPT_UCODE_NAME_LENGTH);
	ret = scnprintf(buf, PAGE_SIZE,
			"Microcode : %s\nEngines: %s\nEngines mask: %s\n",
			ucode_info, engs_info, engs_mask);

	mutex_unlock(&eng_grp->g->lock);
	return ret;
}
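
/*
 * Reading the per-group sysfs file created below returns something like
 * the following (all values are illustrative, not taken from real
 * hardware):
 *
 *   Microcode : CPT-MC-SE-1.0 (shared with engine_group0)
 *   Engines: 48 SE
 *   Engines mask: 00000000 0000ffff
 */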

static int create_sysfs_eng_grps_info(struct device *dev,
				      struct otx_cpt_eng_grp_info *eng_grp)
{
	eng_grp->info_attr.show = eng_grp_info_show;
	eng_grp->info_attr.store = NULL;
	eng_grp->info_attr.attr.name = eng_grp->sysfs_info_name;
	eng_grp->info_attr.attr.mode = 0440;
	sysfs_attr_init(&eng_grp->info_attr.attr);
	return device_create_file(dev, &eng_grp->info_attr);
}

static void ucode_unload(struct device *dev, struct otx_cpt_ucode *ucode)
{
	if (ucode->va) {
		dma_free_coherent(dev, ucode->size + OTX_CPT_UCODE_ALIGNMENT,
				  ucode->va, ucode->dma);
		ucode->va = NULL;
		ucode->align_va = NULL;
		ucode->dma = 0;
		ucode->align_dma = 0;
		ucode->size = 0;
	}

	memset(&ucode->ver_str, 0, OTX_CPT_UCODE_VER_STR_SZ);
	memset(&ucode->ver_num, 0, sizeof(struct otx_cpt_ucode_ver_num));
	set_ucode_filename(ucode, "");
	ucode->type = 0;
}

static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/* Allocate DMAable space */
	ucode->va = dma_alloc_coherent(dev, ucode->size +
				       OTX_CPT_UCODE_ALIGNMENT,
				       &ucode->dma, GFP_KERNEL);
	if (!ucode->va) {
		dev_err(dev, "Unable to allocate space for microcode\n");
		return -ENOMEM;
	}
	ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
	ucode->align_dma = PTR_ALIGN(ucode->dma, OTX_CPT_UCODE_ALIGNMENT);

	memcpy((void *) ucode->align_va, (void *) ucode_data +
	       sizeof(struct otx_cpt_ucode_hdr), ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		((__be64 *)ucode->align_va)[i] =
				cpu_to_be64(((u64 *)ucode->align_va)[i]);
	/* Ucode needs 16-bit swap */
	for (i = 0; i < (ucode->size / 2); i++)
		((__be16 *)ucode->align_va)[i] =
				cpu_to_be16(((u16 *)ucode->align_va)[i]);
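	/*
	 * Net effect of the two swaps above on one 64-bit word, assuming a
	 * little-endian host (on big-endian the cpu_to_be*() calls are
	 * no-ops):
	 *   input bytes:       b0 b1 b2 b3 b4 b5 b6 b7
	 *   after 64-bit swap: b7 b6 b5 b4 b3 b2 b1 b0
	 *   after 16-bit swap: b6 b7 b4 b5 b2 b3 b0 b1
	 */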
	return 0;
}

static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
		      const char *ucode_filename)
{
	struct otx_cpt_ucode_hdr *ucode_hdr;
	const struct firmware *fw;
	unsigned int code_length;
	int ret;

	set_ucode_filename(ucode, ucode_filename);
	ret = request_firmware(&fw, ucode->filename, dev);
	if (ret)
		return ret;

	ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
	memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
	ucode->ver_num = ucode_hdr->ver_num;
	code_length = ntohl(ucode_hdr->code_length);
	if (code_length >= INT_MAX / 2) {
		dev_err(dev, "Ucode invalid code_length %u\n", code_length);
		ret = -EINVAL;
		goto release_fw;
	}
	ucode->size = code_length * 2;
	if (!ucode->size || (fw->size < round_up(ucode->size, 16)
	    + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
		dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
		ret = -EINVAL;
		goto release_fw;
	}

	ret = get_ucode_type(ucode_hdr, &ucode->type);
	if (ret) {
		dev_err(dev, "Microcode %s unknown type 0x%x\n",
			ucode->filename, ucode->type);
		goto release_fw;
	}

	ret = copy_ucode_to_dma_mem(dev, ucode, fw->data);
	if (ret)
		goto release_fw;

	print_ucode_dbg_info(ucode);
release_fw:
	release_firmware(fw);
	return ret;
}

static int enable_eng_grp(struct otx_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	ret = cpt_attach_and_enable_cores(eng_grp, obj);
	return ret;
}

static int disable_eng_grp(struct device *dev,
			   struct otx_cpt_eng_grp_info *eng_grp,
			   void *obj)
{
	int i, ret;

	ret = cpt_detach_and_disable_cores(eng_grp, obj);
	if (ret)
		return ret;

	/* Unload ucode used by this engine group */
	ucode_unload(dev, &eng_grp->ucode[0]);

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
	}

	ret = cpt_set_ucode_base(eng_grp, obj);

	return ret;
}

static void setup_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp,
				    struct otx_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}

static void remove_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp)
{
	struct otx_cpt_eng_grp_info *src_grp;

	if (!dst_grp->mirror.is_ena)
		return;

	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

	src_grp->mirror.ref_count--;
	dst_grp->mirror.is_ena = false;
	dst_grp->mirror.idx = 0;
	dst_grp->mirror.ref_count = 0;
}

static void update_requested_engs(struct otx_cpt_eng_grp_info *mirrored_eng_grp,
				  struct otx_cpt_engines *engs, int engs_cnt)
{
	struct otx_cpt_engs_rsvd *mirrored_engs;
	int i;

	for (i = 0; i < engs_cnt; i++) {
		mirrored_engs = find_engines_by_type(mirrored_eng_grp,
						     engs[i].type);
		if (!mirrored_engs)
			continue;

		/*
		 * If the mirrored group has this type of engines attached then
		 * three scenarios are possible:
		 * 1) mirrored_engs.count == engs[i].count: all engines from
		 * the mirrored engine group will be shared with this engine
		 * group
		 * 2) mirrored_engs.count > engs[i].count: only a subset of
		 * the engines from the mirrored engine group will be shared
		 * with this engine group
		 * 3) mirrored_engs.count < engs[i].count: all engines from
		 * the mirrored engine group will be shared with this group
		 * and additional engines will be reserved for exclusive use
		 * by this engine group (e.g. mirrored count 10, requested 12:
		 * count below becomes 2, so 2 extra engines get reserved)
		 */
		engs[i].count -= mirrored_engs->count;
	}
}

static struct otx_cpt_eng_grp_info *find_mirrored_eng_grp(
					struct otx_cpt_eng_grp_info *grp)
{
	struct otx_cpt_eng_grps *eng_grps = grp->g;
	int i;

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			continue;
		if (eng_grps->grp[i].ucode[0].type)
			continue;
		if (grp->idx == i)
			continue;
		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
				 grp->ucode[0].ver_str,
				 OTX_CPT_UCODE_VER_STR_SZ))
			return &eng_grps->grp[i];
	}

	return NULL;
}

static struct otx_cpt_eng_grp_info *find_unused_eng_grp(
					struct otx_cpt_eng_grps *eng_grps)
{
	int i;

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			return &eng_grps->grp[i];
	}
	return NULL;
}

static int eng_grp_update_masks(struct device *dev,
				struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	if (!eng_grp->mirror.is_ena)
		return 0;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}
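
/*
 * A worked example of the mirrored-mask arithmetic above (numbers are
 * illustrative): if the mirrored group holds 10 SE engines and this group
 * requested 8, update_requested_engs() left engs->count == -2, so the
 * second loop copies the mirrored bitmap and clears 2 bits starting at its
 * first set bit before OR-ing it in; the group ends up sharing 8 of the 10
 * mirrored engines and reserves none of its own.
 */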

static int delete_engine_group(struct device *dev,
			       struct otx_cpt_eng_grp_info *eng_grp)
{
	int i, ret;

	if (!eng_grp->is_enabled)
		return -EINVAL;

	if (eng_grp->mirror.ref_count) {
		dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
			eng_grp->idx);
		for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
			if (eng_grp->g->grp[i].mirror.is_ena &&
			    eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
				pr_cont(" %d", i);
		}
		pr_cont("\n");
		return -EINVAL;
	}

	/* Remove engine group mirroring if enabled */
	remove_eng_grp_mirroring(eng_grp);

	/* Disable engine group */
	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
	if (ret)
		return ret;

	/* Release all engines held by this engine group */
	ret = release_engines(dev, eng_grp);
	if (ret)
		return ret;

	device_remove_file(dev, &eng_grp->info_attr);
	eng_grp->is_enabled = false;

	return 0;
}

static int validate_1_ucode_scenario(struct device *dev,
				     struct otx_cpt_eng_grp_info *eng_grp,
				     struct otx_cpt_engines *engs, int engs_cnt)
{
	int i;

	/* Verify that the loaded ucode supports the requested engine types */
	for (i = 0; i < engs_cnt; i++) {
		if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
						  engs[i].type)) {
			dev_err(dev,
				"Microcode %s does not support %s engines\n",
				eng_grp->ucode[0].filename,
				get_eng_type_str(engs[i].type));
			return -EINVAL;
		}
	}
	return 0;
}

static void update_ucode_ptrs(struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_ucode *ucode;

	if (eng_grp->mirror.is_ena)
		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
	else
		ucode = &eng_grp->ucode[0];
	WARN_ON(!eng_grp->engs[0].type);
	eng_grp->engs[0].ucode = ucode;
}

static int create_engine_group(struct device *dev,
			       struct otx_cpt_eng_grps *eng_grps,
			       struct otx_cpt_engines *engs, int engs_cnt,
			       void *ucode_data[], int ucodes_cnt,
			       bool use_uc_from_tar_arch)
{
	struct otx_cpt_eng_grp_info *mirrored_eng_grp;
	struct tar_ucode_info_t *tar_info;
	struct otx_cpt_eng_grp_info *eng_grp;
	int i, ret = 0;

	if (ucodes_cnt > OTX_CPT_MAX_ETYPES_PER_GRP)
		return -EINVAL;

	/* Validate if requested engine types are supported by this device */
	for (i = 0; i < engs_cnt; i++)
		if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
			dev_err(dev, "Device does not support %s engines\n",
				get_eng_type_str(engs[i].type));
			return -EPERM;
		}

	/* Find engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error: all engine groups are in use\n");
		return -ENOSPC;
	}

	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
		if (use_uc_from_tar_arch) {
			tar_info = (struct tar_ucode_info_t *) ucode_data[i];
			eng_grp->ucode[i] = tar_info->ucode;
			ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
						    tar_info->ucode_ptr);
		} else {
			ret = ucode_load(dev, &eng_grp->ucode[i],
					 (char *) ucode_data[i]);
		}
		if (ret)
			goto err_ucode_unload;
	}

	/* Validate scenario where 1 ucode is used */
	ret = validate_1_ucode_scenario(dev, eng_grp, engs, engs_cnt);
	if (ret)
		goto err_ucode_unload;

	/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update count of requested engines because some
		 * of them might be shared with mirrored group
		 */
		update_requested_engs(mirrored_eng_grp, engs, engs_cnt);
	}

	/* Reserve engines */
	ret = reserve_engines(dev, eng_grp, engs, engs_cnt);
	if (ret)
		goto err_ucode_unload;

	/* Update ucode pointers used by engines */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto err_release_engs;

	/* Create sysfs entry for engine group info */
	ret = create_sysfs_eng_grps_info(dev, eng_grp);
	if (ret)
		goto err_release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto err_release_engs;

	/*
	 * If this engine group mirrors another engine group then unload
	 * its own ucode; the ucode from the mirrored engine group will be
	 * used instead.
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;
	if (eng_grp->mirror.is_ena)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);

	return 0;

err_release_engs:
	release_engines(dev, eng_grp);
err_ucode_unload:
	ucode_unload(dev, &eng_grp->ucode[0]);
	return ret;
}

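/*
 * Format accepted by the ucode_load sysfs attribute handled below (a sketch
 * reconstructed from the parser; file and group names are illustrative).
 * Tokens are separated by ';':
 *
 *   create a group:  "se:<count>;<se-ucode-file>"
 *                    "ae:<count>;<ae-ucode-file>"
 *                    "se:<count>;ae:<count>;<se-file>;<ae-file>"
 *   delete a group:  "engine_group<N>:null"
 *
 * For example (from a shell; the path and file names are hypothetical):
 *   echo "se:48;ucode-se.out" > /sys/.../ucode_load
 *   echo "engine_group0:null" > /sys/.../ucode_load
 */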
static ssize_t ucode_load_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	char *ucode_filename[OTX_CPT_MAX_ETYPES_PER_GRP];
	char tmp_buf[OTX_CPT_UCODE_NAME_LENGTH] = { 0 };
	char *start, *val, *err_msg, *tmp;
	struct otx_cpt_eng_grps *eng_grps;
	int grp_idx = 0, ret = -EINVAL;
	bool has_se, has_ae;
	int del_grp_idx = -1;
	int ucode_idx = 0;

	if (strlen(buf) >= OTX_CPT_UCODE_NAME_LENGTH)
		return -EINVAL;

	eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
	err_msg = "Invalid engine group format";
	strscpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
	start = tmp_buf;

	has_se = has_ae = false;

	for (;;) {
		val = strsep(&start, ";");
		if (!val)
			break;
		val = strim(val);
		if (!*val)
			continue;

		if (!strncasecmp(val, "engine_group", 12)) {
			if (del_grp_idx != -1)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 13)
				goto err_print;
			if (kstrtoint((tmp + 12), 10, &del_grp_idx))
				goto err_print;
			val = strim(val);
			if (strncasecmp(val, "null", 4))
				goto err_print;
			if (strlen(val) != 4)
				goto err_print;
		} else if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
			if (has_se || ucode_idx)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX_CPT_SE_TYPES;
			has_se = true;
		} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
			if (has_ae || ucode_idx)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX_CPT_AE_TYPES;
			has_ae = true;
		} else {
			if (ucode_idx > 1)
				goto err_print;
			if (!strlen(val))
				goto err_print;
			if (strnstr(val, " ", strlen(val)))
				goto err_print;
			ucode_filename[ucode_idx++] = val;
		}
	}

	/* Validate input parameters */
	if (del_grp_idx == -1) {
		if (!(grp_idx && ucode_idx))
			goto err_print;

		if (ucode_idx > 1 && grp_idx < 2)
			goto err_print;

		if (grp_idx > OTX_CPT_MAX_ETYPES_PER_GRP) {
			err_msg = "Error: max 2 engine types can be attached";
			goto err_print;
		}
	} else {
		if (del_grp_idx < 0 ||
		    del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
			dev_err(dev, "Invalid engine group index %d\n",
				del_grp_idx);
			return -EINVAL;
		}

		if (!eng_grps->grp[del_grp_idx].is_enabled) {
			dev_err(dev, "Error: engine_group%d is not configured\n",
				del_grp_idx);
			return -EINVAL;
		}

		if (grp_idx || ucode_idx)
			goto err_print;
	}

	mutex_lock(&eng_grps->lock);

	if (eng_grps->is_rdonly) {
		dev_err(dev, "Disable VFs before modifying engine groups\n");
		ret = -EACCES;
		goto err_unlock;
	}

	if (del_grp_idx == -1)
		/* Create engine group */
		ret = create_engine_group(dev, eng_grps, engs, grp_idx,
					  (void **) ucode_filename,
					  ucode_idx, false);
	else
		/* Delete engine group */
		ret = delete_engine_group(dev, &eng_grps->grp[del_grp_idx]);
	if (ret)
		goto err_unlock;

	print_dbg_info(dev, eng_grps);
err_unlock:
	mutex_unlock(&eng_grps->lock);
	return ret ? ret : count;
err_print:
	dev_err(dev, "%s\n", err_msg);

	return ret;
}

int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
					struct otx_cpt_eng_grps *eng_grps,
					int pf_type)
{
	struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
	struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
	struct tar_arch_info_t *tar_arch = NULL;
	char *tar_filename;
	int i, ret = 0;

	mutex_lock(&eng_grps->lock);

	/*
	 * We don't create an engine group for kernel crypto if an attempt to
	 * create it was already made (when the user enabled VFs for the first
	 * time).
	 */
	if (eng_grps->is_first_try)
		goto unlock_mutex;
	eng_grps->is_first_try = true;

	/* We create a group for kcrypto only if no groups are configured */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].is_enabled)
			goto unlock_mutex;

	switch (pf_type) {
	case OTX_CPT_AE:
	case OTX_CPT_SE:
		tar_filename = OTX_CPT_UCODE_TAR_FILE_NAME;
		break;

	default:
		dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	tar_arch = load_tar_archive(&pdev->dev, tar_filename);
	if (!tar_arch)
		goto unlock_mutex;

	/*
	 * If the device supports SE engines and there is SE microcode in the
	 * tar archive, try to create an engine group with SE engines for
	 * kernel crypto functionality (symmetric crypto).
	 */
	tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_SE_TYPES);
	if (tar_info[0] &&
	    dev_supports_eng_type(eng_grps, OTX_CPT_SE_TYPES)) {

		engs[0].type = OTX_CPT_SE_TYPES;
		engs[0].count = eng_grps->avail.max_se_cnt;

		ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
					  (void **) tar_info, 1, true);
		if (ret)
			goto release_tar_arch;
	}
	/*
	 * If the device supports AE engines and there is AE microcode in the
	 * tar archive, try to create an engine group with AE engines for
	 * asymmetric crypto functionality.
	 */
	tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_AE_TYPES);
	if (tar_info[0] &&
	    dev_supports_eng_type(eng_grps, OTX_CPT_AE_TYPES)) {

		engs[0].type = OTX_CPT_AE_TYPES;
		engs[0].count = eng_grps->avail.max_ae_cnt;

		ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
					  (void **) tar_info, 1, true);
		if (ret)
			goto release_tar_arch;
	}

	print_dbg_info(&pdev->dev, eng_grps);
release_tar_arch:
	release_tar_archive(tar_arch);
unlock_mutex:
	mutex_unlock(&eng_grps->lock);
	return ret;
}

void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
				    bool is_rdonly)
{
	mutex_lock(&eng_grps->lock);

	eng_grps->is_rdonly = is_rdonly;

	mutex_unlock(&eng_grps->lock);
}

void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
{
	int grp, timeout = 100;
	u64 reg;

	/* Disengage the cores from groups */
	for (grp = 0; grp < OTX_CPT_MAX_ENGINE_GROUPS; grp++) {
		writeq(0, cpt->reg_base + OTX_CPT_PF_GX_EN(grp));
		udelay(CSR_DELAY);
	}

	/* Wait for the cores to become idle, but not forever */
	reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
	while (reg) {
		udelay(CSR_DELAY);
		reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
		if (!timeout--) {
			dev_warn(&cpt->pdev->dev, "Cores still busy\n");
			break;
		}
	}

	/* Disable the cores */
	writeq(0, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
}

void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
			      struct otx_cpt_eng_grps *eng_grps)
{
	struct otx_cpt_eng_grp_info *grp;
	int i, j;

	mutex_lock(&eng_grps->lock);
	if (eng_grps->is_ucode_load_created) {
		device_remove_file(&pdev->dev,
				   &eng_grps->ucode_load_attr);
		eng_grps->is_ucode_load_created = false;
	}

	/* First delete all mirroring engine groups */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].mirror.is_ena)
			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

	/* Delete remaining engine groups */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

	/* Release memory */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			kfree(grp->engs[j].bmap);
			grp->engs[j].bmap = NULL;
		}
	}

	mutex_unlock(&eng_grps->lock);
}

int otx_cpt_init_eng_grps(struct pci_dev *pdev,
			  struct otx_cpt_eng_grps *eng_grps, int pf_type)
{
	struct otx_cpt_eng_grp_info *grp;
	int i, j, ret = 0;

	mutex_init(&eng_grps->lock);
	eng_grps->obj = pci_get_drvdata(pdev);
	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d > max supported %d\n",
			eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		snprintf(grp->sysfs_info_name, OTX_CPT_UCODE_NAME_LENGTH,
			 "engine_group%d", i);
		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto err;
			}
		}
	}

	switch (pf_type) {
	case OTX_CPT_SE:
		/* OcteonTX 83XX SE CPT PF has only SE engines attached */
		eng_grps->eng_types_supported = 1 << OTX_CPT_SE_TYPES;
		break;

	case OTX_CPT_AE:
		/* OcteonTX 83XX AE CPT PF has only AE engines attached */
		eng_grps->eng_types_supported = 1 << OTX_CPT_AE_TYPES;
		break;

	default:
		dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
		ret = -EINVAL;
		goto err;
	}

	eng_grps->ucode_load_attr.show = NULL;
	eng_grps->ucode_load_attr.store = ucode_load_store;
	eng_grps->ucode_load_attr.attr.name = "ucode_load";
	eng_grps->ucode_load_attr.attr.mode = 0220;
	sysfs_attr_init(&eng_grps->ucode_load_attr.attr);
	ret = device_create_file(&pdev->dev,
				 &eng_grps->ucode_load_attr);
	if (ret)
		goto err;
	eng_grps->is_ucode_load_created = true;

	print_dbg_info(&pdev->dev, eng_grps);
	return ret;
err:
	otx_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}