1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTX CPT driver
3  *
4  * Copyright (C) 2019 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/ctype.h>
12 #include <linux/firmware.h>
13 #include "otx_cpt_common.h"
14 #include "otx_cptpf_ucode.h"
15 #include "otx_cptpf.h"
16 
17 #define CSR_DELAY 30
18 /* Tar archive defines */
19 #define TAR_MAGIC		"ustar"
20 #define TAR_MAGIC_LEN		6
21 #define TAR_BLOCK_LEN		512
22 #define REGTYPE			'0'
23 #define AREGTYPE		'\0'
24 
25 /* tar header as defined in POSIX 1003.1-1990. */
26 struct tar_hdr_t {
27 	char name[100];
28 	char mode[8];
29 	char uid[8];
30 	char gid[8];
31 	char size[12];
32 	char mtime[12];
33 	char chksum[8];
34 	char typeflag;
35 	char linkname[100];
36 	char magic[6];
37 	char version[2];
38 	char uname[32];
39 	char gname[32];
40 	char devmajor[8];
41 	char devminor[8];
42 	char prefix[155];
43 };
44 
45 struct tar_blk_t {
46 	union {
47 		struct tar_hdr_t hdr;
48 		char block[TAR_BLOCK_LEN];
49 	};
50 };
51 
52 struct tar_arch_info_t {
53 	struct list_head ucodes;
54 	const struct firmware *fw;
55 };
56 
get_cores_bmap(struct device * dev,struct otx_cpt_eng_grp_info * eng_grp)57 static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
58 					   struct otx_cpt_eng_grp_info *eng_grp)
59 {
60 	struct otx_cpt_bitmap bmap = { {0} };
61 	bool found = false;
62 	int i;
63 
64 	if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
65 		dev_err(dev, "unsupported number of engines %d on octeontx\n",
66 			eng_grp->g->engs_num);
67 		return bmap;
68 	}
69 
70 	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
71 		if (eng_grp->engs[i].type) {
72 			bitmap_or(bmap.bits, bmap.bits,
73 				  eng_grp->engs[i].bmap,
74 				  eng_grp->g->engs_num);
75 			bmap.size = eng_grp->g->engs_num;
76 			found = true;
77 		}
78 	}
79 
80 	if (!found)
81 		dev_err(dev, "No engines reserved for engine group %d\n",
82 			eng_grp->idx);
83 	return bmap;
84 }
85 
/* Test whether the bit for @eng_type is set in the type mask @val. */
static int is_eng_type(int val, int eng_type)
{
	int type_bit = 1 << eng_type;

	return val & type_bit;
}
90 
dev_supports_eng_type(struct otx_cpt_eng_grps * eng_grps,int eng_type)91 static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
92 				 int eng_type)
93 {
94 	return is_eng_type(eng_grps->eng_types_supported, eng_type);
95 }
96 
set_ucode_filename(struct otx_cpt_ucode * ucode,const char * filename)97 static void set_ucode_filename(struct otx_cpt_ucode *ucode,
98 			       const char *filename)
99 {
100 	strlcpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
101 }
102 
get_eng_type_str(int eng_type)103 static char *get_eng_type_str(int eng_type)
104 {
105 	char *str = "unknown";
106 
107 	switch (eng_type) {
108 	case OTX_CPT_SE_TYPES:
109 		str = "SE";
110 		break;
111 
112 	case OTX_CPT_AE_TYPES:
113 		str = "AE";
114 		break;
115 	}
116 	return str;
117 }
118 
get_ucode_type_str(int ucode_type)119 static char *get_ucode_type_str(int ucode_type)
120 {
121 	char *str = "unknown";
122 
123 	switch (ucode_type) {
124 	case (1 << OTX_CPT_SE_TYPES):
125 		str = "SE";
126 		break;
127 
128 	case (1 << OTX_CPT_AE_TYPES):
129 		str = "AE";
130 		break;
131 	}
132 	return str;
133 }
134 
get_ucode_type(struct otx_cpt_ucode_hdr * ucode_hdr,int * ucode_type)135 static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
136 {
137 	char tmp_ver_str[OTX_CPT_UCODE_VER_STR_SZ];
138 	u32 i, val = 0;
139 	u8 nn;
140 
141 	strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
142 	for (i = 0; i < strlen(tmp_ver_str); i++)
143 		tmp_ver_str[i] = tolower(tmp_ver_str[i]);
144 
145 	nn = ucode_hdr->ver_num.nn;
146 	if (strnstr(tmp_ver_str, "se-", OTX_CPT_UCODE_VER_STR_SZ) &&
147 	    (nn == OTX_CPT_SE_UC_TYPE1 || nn == OTX_CPT_SE_UC_TYPE2 ||
148 	     nn == OTX_CPT_SE_UC_TYPE3))
149 		val |= 1 << OTX_CPT_SE_TYPES;
150 	if (strnstr(tmp_ver_str, "ae", OTX_CPT_UCODE_VER_STR_SZ) &&
151 	    nn == OTX_CPT_AE_UC_TYPE)
152 		val |= 1 << OTX_CPT_AE_TYPES;
153 
154 	*ucode_type = val;
155 
156 	if (!val)
157 		return -EINVAL;
158 	if (is_eng_type(val, OTX_CPT_AE_TYPES) &&
159 	    is_eng_type(val, OTX_CPT_SE_TYPES))
160 		return -EINVAL;
161 	return 0;
162 }
163 
/* Return 1 if the first @size bytes at @ptr are all zero, else 0. */
static int is_mem_zero(const char *ptr, int size)
{
	const char *end = ptr + size;

	while (ptr < end) {
		if (*ptr++)
			return 0;
	}
	return 1;
}
174 
cpt_set_ucode_base(struct otx_cpt_eng_grp_info * eng_grp,void * obj)175 static int cpt_set_ucode_base(struct otx_cpt_eng_grp_info *eng_grp, void *obj)
176 {
177 	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
178 	dma_addr_t dma_addr;
179 	struct otx_cpt_bitmap bmap;
180 	int i;
181 
182 	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
183 	if (!bmap.size)
184 		return -EINVAL;
185 
186 	if (eng_grp->mirror.is_ena)
187 		dma_addr =
188 		       eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].align_dma;
189 	else
190 		dma_addr = eng_grp->ucode[0].align_dma;
191 
192 	/*
193 	 * Set UCODE_BASE only for the cores which are not used,
194 	 * other cores should have already valid UCODE_BASE set
195 	 */
196 	for_each_set_bit(i, bmap.bits, bmap.size)
197 		if (!eng_grp->g->eng_ref_cnt[i])
198 			writeq((u64) dma_addr, cpt->reg_base +
199 				OTX_CPT_PF_ENGX_UCODE_BASE(i));
200 	return 0;
201 }
202 
/*
 * Detach the group's cores from the group, wait for them to drain, then
 * disable any core no longer referenced by another group.
 *
 * The detach -> wait-idle -> disable ordering is deliberate: cores must
 * finish in-flight work before their EXE_CTL bits may be cleared.
 *
 * Returns 0 on success, -EINVAL if the group has no cores reserved,
 * -EBUSY if the cores do not go idle within the polling budget.
 */
static int cpt_detach_and_disable_cores(struct otx_cpt_eng_grp_info *eng_grp,
					void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	struct otx_cpt_bitmap bmap = { {0} };
	int timeout = 10;	/* ~10 poll intervals of 10-20 ms each */
	int i, busy;
	u64 reg;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	/* Detach the cores from group */
	reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (reg & (1ull << i)) {
			/* Core was attached: drop its refcount and detach */
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << i);
		}
	}
	writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		/* Any still-busy core in the mask forces another poll */
		reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
		for_each_set_bit(i, bmap.bits, bmap.size)
			if (reg & (1ull << i)) {
				busy = 1;
				break;
			}
	} while (busy);

	/* Disable the cores only if they are not used anymore */
	reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
	for_each_set_bit(i, bmap.bits, bmap.size)
		if (!eng_grp->g->eng_ref_cnt[i])
			reg &= ~(1ull << i);
	writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);

	return 0;
}
250 
cpt_attach_and_enable_cores(struct otx_cpt_eng_grp_info * eng_grp,void * obj)251 static int cpt_attach_and_enable_cores(struct otx_cpt_eng_grp_info *eng_grp,
252 				       void *obj)
253 {
254 	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
255 	struct otx_cpt_bitmap bmap;
256 	u64 reg;
257 	int i;
258 
259 	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
260 	if (!bmap.size)
261 		return -EINVAL;
262 
263 	/* Attach the cores to the group */
264 	reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
265 	for_each_set_bit(i, bmap.bits, bmap.size) {
266 		if (!(reg & (1ull << i))) {
267 			eng_grp->g->eng_ref_cnt[i]++;
268 			reg |= 1ull << i;
269 		}
270 	}
271 	writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
272 
273 	/* Enable the cores */
274 	reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
275 	for_each_set_bit(i, bmap.bits, bmap.size)
276 		reg |= 1ull << i;
277 	writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
278 
279 	return 0;
280 }
281 
/*
 * Examine one file from the tar archive; if it is a valid microcode
 * image, queue a descriptor for it on tar_arch->ucodes.
 *
 * Non-microcode files are silently skipped (return 0).  Returns -EINVAL
 * for a recognizable but malformed image, -ENOMEM on allocation failure.
 */
static int process_tar_file(struct device *dev,
			    struct tar_arch_info_t *tar_arch, char *filename,
			    const u8 *data, u32 size)
{
	struct otx_cpt_ucode_hdr *ucode_hdr;
	struct tar_ucode_info_t *tar_info;
	u32 code_length, ucode_size;
	int ucode_type;

	/*
	 * If size is less than microcode header size then don't report
	 * an error because it might not be microcode file, just process
	 * next file from archive
	 */
	if (size < sizeof(struct otx_cpt_ucode_hdr))
		return 0;

	ucode_hdr = (struct otx_cpt_ucode_hdr *) data;
	/*
	 * If microcode version can't be found don't report an error
	 * because it might not be microcode file, just process next file
	 */
	if (get_ucode_type(ucode_hdr, &ucode_type))
		return 0;

	code_length = ntohl(ucode_hdr->code_length);
	/*
	 * Guard the doubling below: the previous code stored
	 * "ntohl(code_length) * 2" into a signed int, which could wrap
	 * for a corrupt header.  Byte size appears to be twice the
	 * header's length field (presumably 16-bit words) — rejecting
	 * values >= INT_MAX / 2 keeps the arithmetic well defined.
	 */
	if (!code_length || code_length >= INT_MAX / 2) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		return -EINVAL;
	}
	ucode_size = code_length * 2;
	if (size < round_up(ucode_size, 16) +
	    sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		return -EINVAL;
	}

	tar_info = kzalloc(sizeof(*tar_info), GFP_KERNEL);
	if (!tar_info)
		return -ENOMEM;

	/* Fill the descriptor and queue it for later selection */
	tar_info->ucode_ptr = data;
	set_ucode_filename(&tar_info->ucode, filename);
	memcpy(tar_info->ucode.ver_str, ucode_hdr->ver_str,
	       OTX_CPT_UCODE_VER_STR_SZ);
	tar_info->ucode.ver_num = ucode_hdr->ver_num;
	tar_info->ucode.type = ucode_type;
	tar_info->ucode.size = ucode_size;
	list_add_tail(&tar_info->list, &tar_arch->ucodes);

	return 0;
}
328 
release_tar_archive(struct tar_arch_info_t * tar_arch)329 static void release_tar_archive(struct tar_arch_info_t *tar_arch)
330 {
331 	struct tar_ucode_info_t *curr, *temp;
332 
333 	if (!tar_arch)
334 		return;
335 
336 	list_for_each_entry_safe(curr, temp, &tar_arch->ucodes, list) {
337 		list_del(&curr->list);
338 		kfree(curr);
339 	}
340 
341 	if (tar_arch->fw)
342 		release_firmware(tar_arch->fw);
343 	kfree(tar_arch);
344 }
345 
/*
 * Pick the best microcode image of @ucode_type from the images collected
 * out of the tar archive.
 *
 * For AE the first matching image is kept.  For SE the 'nn' version field
 * selects among ucode flavours: a TYPE2 candidate is always replaced by a
 * later match, a TYPE3 candidate only by a TYPE1 one — i.e. the code
 * appears to prefer OTX_CPT_SE_UC_TYPE1 images (NOTE(review): confirm the
 * intended flavour priority against the firmware release notes).
 *
 * Returns NULL when the archive holds no image of the requested type.
 */
static struct tar_ucode_info_t *get_uc_from_tar_archive(
					struct tar_arch_info_t *tar_arch,
					int ucode_type)
{
	struct tar_ucode_info_t *curr, *uc_found = NULL;

	list_for_each_entry(curr, &tar_arch->ucodes, list) {
		/* Skip images that do not support the requested type */
		if (!is_eng_type(curr->ucode.type, ucode_type))
			continue;

		/* First match becomes the initial candidate */
		if (!uc_found) {
			uc_found = curr;
			continue;
		}

		switch (ucode_type) {
		case OTX_CPT_AE_TYPES:
			break;

		case OTX_CPT_SE_TYPES:
			if (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE2 ||
			    (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE3
			     && curr->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE1))
				uc_found = curr;
			break;
		}
	}

	return uc_found;
}
376 
print_tar_dbg_info(struct tar_arch_info_t * tar_arch,char * tar_filename)377 static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
378 			       char *tar_filename)
379 {
380 	struct tar_ucode_info_t *curr;
381 
382 	pr_debug("Tar archive filename %s\n", tar_filename);
383 	pr_debug("Tar archive pointer %p, size %ld\n", tar_arch->fw->data,
384 		 tar_arch->fw->size);
385 	list_for_each_entry(curr, &tar_arch->ucodes, list) {
386 		pr_debug("Ucode filename %s\n", curr->ucode.filename);
387 		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
388 		pr_debug("Ucode version %d.%d.%d.%d\n",
389 			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
390 			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
391 		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
392 			 get_ucode_type_str(curr->ucode.type));
393 		pr_debug("Ucode size %d\n", curr->ucode.size);
394 		pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
395 	}
396 }
397 
/*
 * Load a tar archive via request_firmware() and walk its POSIX ustar
 * blocks, collecting every microcode image into tar_arch->ucodes.
 *
 * The walk assumes 512-byte blocks: one header block per file followed
 * by the file data rounded up to a block, with two all-zero blocks
 * marking the end of the archive.  Any malformed structure aborts the
 * whole load.
 *
 * Returns the populated archive descriptor, or NULL on any failure
 * (the partially built archive is released internally).
 */
static struct tar_arch_info_t *load_tar_archive(struct device *dev,
						char *tar_filename)
{
	struct tar_arch_info_t *tar_arch = NULL;
	struct tar_blk_t *tar_blk;
	unsigned int cur_size;
	size_t tar_offs = 0;
	size_t tar_size;
	int ret;

	tar_arch = kzalloc(sizeof(struct tar_arch_info_t), GFP_KERNEL);
	if (!tar_arch)
		return NULL;

	INIT_LIST_HEAD(&tar_arch->ucodes);

	/* Load tar archive */
	ret = request_firmware(&tar_arch->fw, tar_filename, dev);
	if (ret)
		goto release_tar_arch;

	/* Must hold at least one header block */
	if (tar_arch->fw->size < TAR_BLOCK_LEN) {
		dev_err(dev, "Invalid tar archive %s\n", tar_filename);
		goto release_tar_arch;
	}

	/* Check the "ustar" magic of the first header */
	tar_size = tar_arch->fw->size;
	tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
	if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
		dev_err(dev, "Unsupported format of tar archive %s\n",
			tar_filename);
		goto release_tar_arch;
	}

	while (1) {
		/* Read current file size (octal ASCII per the tar format) */
		ret = kstrtouint(tar_blk->hdr.size, 8, &cur_size);
		if (ret)
			goto release_tar_arch;

		/* Header + data + end-of-archive marker must fit */
		if (tar_offs + cur_size > tar_size ||
		    tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
			dev_err(dev, "Invalid tar archive %s\n", tar_filename);
			goto release_tar_arch;
		}

		/* Step past the header block to the file data */
		tar_offs += TAR_BLOCK_LEN;
		if (tar_blk->hdr.typeflag == REGTYPE ||
		    tar_blk->hdr.typeflag == AREGTYPE) {
			/* Regular file: try to parse it as a ucode image */
			ret = process_tar_file(dev, tar_arch,
					       tar_blk->hdr.name,
					       &tar_arch->fw->data[tar_offs],
					       cur_size);
			if (ret)
				goto release_tar_arch;
		}

		/* Advance over the data, padded to a whole block */
		tar_offs += (cur_size/TAR_BLOCK_LEN) * TAR_BLOCK_LEN;
		if (cur_size % TAR_BLOCK_LEN)
			tar_offs += TAR_BLOCK_LEN;

		/* Check for the end of the archive */
		if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
			dev_err(dev, "Invalid tar archive %s\n", tar_filename);
			goto release_tar_arch;
		}

		/* Two consecutive zero blocks terminate the archive */
		if (is_mem_zero(&tar_arch->fw->data[tar_offs],
		    2*TAR_BLOCK_LEN))
			break;

		/* Read next block from tar archive */
		tar_blk = (struct tar_blk_t *) &tar_arch->fw->data[tar_offs];
	}

	print_tar_dbg_info(tar_arch, tar_filename);
	return tar_arch;
release_tar_arch:
	release_tar_archive(tar_arch);
	return NULL;
}
479 
find_engines_by_type(struct otx_cpt_eng_grp_info * eng_grp,int eng_type)480 static struct otx_cpt_engs_rsvd *find_engines_by_type(
481 					struct otx_cpt_eng_grp_info *eng_grp,
482 					int eng_type)
483 {
484 	int i;
485 
486 	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
487 		if (!eng_grp->engs[i].type)
488 			continue;
489 
490 		if (eng_grp->engs[i].type == eng_type)
491 			return &eng_grp->engs[i];
492 	}
493 	return NULL;
494 }
495 
/*
 * Check whether a microcode image supports the given engine type.
 * Returns non-zero (the matching bit) when @eng_type's bit is set in
 * ucode->type, zero otherwise.
 */
int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type)
{
	return is_eng_type(ucode->type, eng_type);
}
EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);
501 
otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info * eng_grp,int eng_type)502 int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
503 				 int eng_type)
504 {
505 	struct otx_cpt_engs_rsvd *engs;
506 
507 	engs = find_engines_by_type(eng_grp, eng_type);
508 
509 	return (engs != NULL ? 1 : 0);
510 }
511 EXPORT_SYMBOL_GPL(otx_cpt_eng_grp_has_eng_type);
512 
print_ucode_info(struct otx_cpt_eng_grp_info * eng_grp,char * buf,int size)513 static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
514 			     char *buf, int size)
515 {
516 	if (eng_grp->mirror.is_ena) {
517 		scnprintf(buf, size, "%s (shared with engine_group%d)",
518 			  eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].ver_str,
519 			  eng_grp->mirror.idx);
520 	} else {
521 		scnprintf(buf, size, "%s", eng_grp->ucode[0].ver_str);
522 	}
523 }
524 
/*
 * Format the group's engine counts into @buf, e.g. "8 SE , 2 AE ".
 *
 * @idx selects a single slot, or -1 for all slots (comma separated).
 * When mirroring is enabled, counts borrowed from the mirrored group are
 * folded in and annotated with "(N shared with engine_groupM)".
 */
static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
			    char *buf, int size, int idx)
{
	struct otx_cpt_engs_rsvd *mirrored_engs = NULL;
	struct otx_cpt_engs_rsvd *engs;
	int len, i;

	buf[0] = '\0';
	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (idx != -1 && idx != i)
			continue;

		/* Look up the matching slot in the mirrored group, if any */
		if (eng_grp->mirror.is_ena)
			mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		/* Separator between slots when printing them all */
		if (i > 0 && idx == -1) {
			len = strlen(buf);
			scnprintf(buf+len, size-len, ", ");
		}

		/* Total count includes engines shared from the mirror */
		len = strlen(buf);
		scnprintf(buf+len, size-len, "%d %s ", mirrored_engs ?
			  engs->count + mirrored_engs->count : engs->count,
			  get_eng_type_str(engs->type));
		if (mirrored_engs) {
			/* How many of the total are shared, not exclusive */
			len = strlen(buf);
			scnprintf(buf+len, size-len,
				  "(%d shared with engine_group%d) ",
				  engs->count <= 0 ? engs->count +
				  mirrored_engs->count : mirrored_engs->count,
				  eng_grp->mirror.idx);
		}
	}
}
563 
/* Dump a loaded microcode descriptor, including its DMA placement
 * (debug builds only — pr_debug).
 */
static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
{
	pr_debug("Ucode info\n");
	pr_debug("Ucode version string %s\n", ucode->ver_str);
	pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn,
		 ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
	pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type));
	pr_debug("Ucode size %d\n", ucode->size);
	pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va);
	pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
}
575 
cpt_print_engines_mask(struct otx_cpt_eng_grp_info * eng_grp,struct device * dev,char * buf,int size)576 static void cpt_print_engines_mask(struct otx_cpt_eng_grp_info *eng_grp,
577 				   struct device *dev, char *buf, int size)
578 {
579 	struct otx_cpt_bitmap bmap;
580 	u32 mask[2];
581 
582 	bmap = get_cores_bmap(dev, eng_grp);
583 	if (!bmap.size) {
584 		scnprintf(buf, size, "unknown");
585 		return;
586 	}
587 	bitmap_to_arr32(mask, bmap.bits, bmap.size);
588 	scnprintf(buf, size, "%8.8x %8.8x", mask[1], mask[0]);
589 }
590 
591 
/*
 * Dump the global engine-group state: free/max engine counts, then for
 * each group its ucode, per-slot engine info and core masks (pr_debug).
 */
static void print_dbg_info(struct device *dev,
			   struct otx_cpt_eng_grps *eng_grps)
{
	char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *mirrored_grp;
	char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *grp;
	struct otx_cpt_engs_rsvd *engs;
	u32 mask[4];
	int i, j;

	pr_debug("Engine groups global info\n");
	pr_debug("max SE %d, max AE %d\n",
		 eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
	pr_debug("free SE %d\n", eng_grps->avail.se_cnt);
	pr_debug("free AE %d\n", eng_grps->avail.ae_cnt);

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
			 "enabled" : "disabled");
		if (grp->is_enabled) {
			/* A mirroring group reports the mirrored ucode */
			mirrored_grp = &eng_grps->grp[grp->mirror.idx];
			pr_debug("Ucode0 filename %s, version %s\n",
				 grp->mirror.is_ena ?
				 mirrored_grp->ucode[0].filename :
				 grp->ucode[0].filename,
				 grp->mirror.is_ena ?
				 mirrored_grp->ucode[0].ver_str :
				 grp->ucode[0].ver_str);
		}

		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			engs = &grp->engs[j];
			if (engs->type) {
				print_engs_info(grp, engs_info,
						2*OTX_CPT_UCODE_NAME_LENGTH, j);
				pr_debug("Slot%d: %s\n", j, engs_info);
				bitmap_to_arr32(mask, engs->bmap,
						eng_grps->engs_num);
				pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n",
					 mask[3], mask[2], mask[1], mask[0]);
			} else
				pr_debug("Slot%d not used\n", j);
		}
		if (grp->is_enabled) {
			cpt_print_engines_mask(grp, dev, engs_mask,
					       OTX_CPT_UCODE_NAME_LENGTH);
			pr_debug("Cmask: %s\n", engs_mask);
		}
	}
}
644 
update_engines_avail_count(struct device * dev,struct otx_cpt_engs_available * avail,struct otx_cpt_engs_rsvd * engs,int val)645 static int update_engines_avail_count(struct device *dev,
646 				      struct otx_cpt_engs_available *avail,
647 				      struct otx_cpt_engs_rsvd *engs, int val)
648 {
649 	switch (engs->type) {
650 	case OTX_CPT_SE_TYPES:
651 		avail->se_cnt += val;
652 		break;
653 
654 	case OTX_CPT_AE_TYPES:
655 		avail->ae_cnt += val;
656 		break;
657 
658 	default:
659 		dev_err(dev, "Invalid engine type %d\n", engs->type);
660 		return -EINVAL;
661 	}
662 
663 	return 0;
664 }
665 
update_engines_offset(struct device * dev,struct otx_cpt_engs_available * avail,struct otx_cpt_engs_rsvd * engs)666 static int update_engines_offset(struct device *dev,
667 				 struct otx_cpt_engs_available *avail,
668 				 struct otx_cpt_engs_rsvd *engs)
669 {
670 	switch (engs->type) {
671 	case OTX_CPT_SE_TYPES:
672 		engs->offset = 0;
673 		break;
674 
675 	case OTX_CPT_AE_TYPES:
676 		engs->offset = avail->max_se_cnt;
677 		break;
678 
679 	default:
680 		dev_err(dev, "Invalid engine type %d\n", engs->type);
681 		return -EINVAL;
682 	}
683 
684 	return 0;
685 }
686 
release_engines(struct device * dev,struct otx_cpt_eng_grp_info * grp)687 static int release_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp)
688 {
689 	int i, ret = 0;
690 
691 	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
692 		if (!grp->engs[i].type)
693 			continue;
694 
695 		if (grp->engs[i].count > 0) {
696 			ret = update_engines_avail_count(dev, &grp->g->avail,
697 							 &grp->engs[i],
698 							 grp->engs[i].count);
699 			if (ret)
700 				return ret;
701 		}
702 
703 		grp->engs[i].type = 0;
704 		grp->engs[i].count = 0;
705 		grp->engs[i].offset = 0;
706 		grp->engs[i].ucode = NULL;
707 		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
708 	}
709 
710 	return 0;
711 }
712 
do_reserve_engines(struct device * dev,struct otx_cpt_eng_grp_info * grp,struct otx_cpt_engines * req_engs)713 static int do_reserve_engines(struct device *dev,
714 			      struct otx_cpt_eng_grp_info *grp,
715 			      struct otx_cpt_engines *req_engs)
716 {
717 	struct otx_cpt_engs_rsvd *engs = NULL;
718 	int i, ret;
719 
720 	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
721 		if (!grp->engs[i].type) {
722 			engs = &grp->engs[i];
723 			break;
724 		}
725 	}
726 
727 	if (!engs)
728 		return -ENOMEM;
729 
730 	engs->type = req_engs->type;
731 	engs->count = req_engs->count;
732 
733 	ret = update_engines_offset(dev, &grp->g->avail, engs);
734 	if (ret)
735 		return ret;
736 
737 	if (engs->count > 0) {
738 		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
739 						 -engs->count);
740 		if (ret)
741 			return ret;
742 	}
743 
744 	return 0;
745 }
746 
check_engines_availability(struct device * dev,struct otx_cpt_eng_grp_info * grp,struct otx_cpt_engines * req_eng)747 static int check_engines_availability(struct device *dev,
748 				      struct otx_cpt_eng_grp_info *grp,
749 				      struct otx_cpt_engines *req_eng)
750 {
751 	int avail_cnt = 0;
752 
753 	switch (req_eng->type) {
754 	case OTX_CPT_SE_TYPES:
755 		avail_cnt = grp->g->avail.se_cnt;
756 		break;
757 
758 	case OTX_CPT_AE_TYPES:
759 		avail_cnt = grp->g->avail.ae_cnt;
760 		break;
761 
762 	default:
763 		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
764 		return -EINVAL;
765 	}
766 
767 	if (avail_cnt < req_eng->count) {
768 		dev_err(dev,
769 			"Error available %s engines %d < than requested %d\n",
770 			get_eng_type_str(req_eng->type),
771 			avail_cnt, req_eng->count);
772 		return -EBUSY;
773 	}
774 
775 	return 0;
776 }
777 
reserve_engines(struct device * dev,struct otx_cpt_eng_grp_info * grp,struct otx_cpt_engines * req_engs,int req_cnt)778 static int reserve_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp,
779 			   struct otx_cpt_engines *req_engs, int req_cnt)
780 {
781 	int i, ret;
782 
783 	/* Validate if a number of requested engines is available */
784 	for (i = 0; i < req_cnt; i++) {
785 		ret = check_engines_availability(dev, grp, &req_engs[i]);
786 		if (ret)
787 			return ret;
788 	}
789 
790 	/* Reserve requested engines for this engine group */
791 	for (i = 0; i < req_cnt; i++) {
792 		ret = do_reserve_engines(dev, grp, &req_engs[i]);
793 		if (ret)
794 			return ret;
795 	}
796 	return 0;
797 }
798 
eng_grp_info_show(struct device * dev,struct device_attribute * attr,char * buf)799 static ssize_t eng_grp_info_show(struct device *dev,
800 				 struct device_attribute *attr,
801 				 char *buf)
802 {
803 	char ucode_info[2*OTX_CPT_UCODE_NAME_LENGTH];
804 	char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
805 	char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
806 	struct otx_cpt_eng_grp_info *eng_grp;
807 	int ret;
808 
809 	eng_grp = container_of(attr, struct otx_cpt_eng_grp_info, info_attr);
810 	mutex_lock(&eng_grp->g->lock);
811 
812 	print_engs_info(eng_grp, engs_info, 2*OTX_CPT_UCODE_NAME_LENGTH, -1);
813 	print_ucode_info(eng_grp, ucode_info, 2*OTX_CPT_UCODE_NAME_LENGTH);
814 	cpt_print_engines_mask(eng_grp, dev, engs_mask,
815 			       OTX_CPT_UCODE_NAME_LENGTH);
816 	ret = scnprintf(buf, PAGE_SIZE,
817 			"Microcode : %s\nEngines: %s\nEngines mask: %s\n",
818 			ucode_info, engs_info, engs_mask);
819 
820 	mutex_unlock(&eng_grp->g->lock);
821 	return ret;
822 }
823 
create_sysfs_eng_grps_info(struct device * dev,struct otx_cpt_eng_grp_info * eng_grp)824 static int create_sysfs_eng_grps_info(struct device *dev,
825 				      struct otx_cpt_eng_grp_info *eng_grp)
826 {
827 	eng_grp->info_attr.show = eng_grp_info_show;
828 	eng_grp->info_attr.store = NULL;
829 	eng_grp->info_attr.attr.name = eng_grp->sysfs_info_name;
830 	eng_grp->info_attr.attr.mode = 0440;
831 	sysfs_attr_init(&eng_grp->info_attr.attr);
832 	return device_create_file(dev, &eng_grp->info_attr);
833 }
834 
ucode_unload(struct device * dev,struct otx_cpt_ucode * ucode)835 static void ucode_unload(struct device *dev, struct otx_cpt_ucode *ucode)
836 {
837 	if (ucode->va) {
838 		dma_free_coherent(dev, ucode->size + OTX_CPT_UCODE_ALIGNMENT,
839 				  ucode->va, ucode->dma);
840 		ucode->va = NULL;
841 		ucode->align_va = NULL;
842 		ucode->dma = 0;
843 		ucode->align_dma = 0;
844 		ucode->size = 0;
845 	}
846 
847 	memset(&ucode->ver_str, 0, OTX_CPT_UCODE_VER_STR_SZ);
848 	memset(&ucode->ver_num, 0, sizeof(struct otx_cpt_ucode_ver_num));
849 	set_ucode_filename(ucode, "");
850 	ucode->type = 0;
851 }
852 
/*
 * Copy a microcode image (payload after the header) into a freshly
 * allocated, aligned DMA buffer and byte-swap it into the layout the
 * engines expect.
 *
 * The two swap loops together are deliberate: a 64-bit big-endian swap
 * followed by a 16-bit big-endian swap of the same buffer — do not
 * reorder or merge them.
 *
 * Returns 0 on success, -ENOMEM if the DMA allocation fails.
 */
static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/*  Allocate DMAable space */
	ucode->va = dma_alloc_coherent(dev, ucode->size +
				       OTX_CPT_UCODE_ALIGNMENT,
				       &ucode->dma, GFP_KERNEL);
	if (!ucode->va) {
		dev_err(dev, "Unable to allocate space for microcode\n");
		return -ENOMEM;
	}
	/* Over-allocated by ALIGNMENT so the aligned view fits inside */
	ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
	ucode->align_dma = PTR_ALIGN(ucode->dma, OTX_CPT_UCODE_ALIGNMENT);

	/* Copy only the payload: the header is skipped */
	memcpy((void *) ucode->align_va, (void *) ucode_data +
	       sizeof(struct otx_cpt_ucode_hdr), ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		((__be64 *)ucode->align_va)[i] =
				cpu_to_be64(((u64 *)ucode->align_va)[i]);
	/*  Ucode needs 16-bit swap */
	for (i = 0; i < (ucode->size / 2); i++)
		((__be16 *)ucode->align_va)[i] =
				cpu_to_be16(((u16 *)ucode->align_va)[i]);
	return 0;
}
883 
ucode_load(struct device * dev,struct otx_cpt_ucode * ucode,const char * ucode_filename)884 static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
885 		      const char *ucode_filename)
886 {
887 	struct otx_cpt_ucode_hdr *ucode_hdr;
888 	const struct firmware *fw;
889 	int ret;
890 
891 	set_ucode_filename(ucode, ucode_filename);
892 	ret = request_firmware(&fw, ucode->filename, dev);
893 	if (ret)
894 		return ret;
895 
896 	ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
897 	memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
898 	ucode->ver_num = ucode_hdr->ver_num;
899 	ucode->size = ntohl(ucode_hdr->code_length) * 2;
900 	if (!ucode->size || (fw->size < round_up(ucode->size, 16)
901 	    + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
902 		dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
903 		ret = -EINVAL;
904 		goto release_fw;
905 	}
906 
907 	ret = get_ucode_type(ucode_hdr, &ucode->type);
908 	if (ret) {
909 		dev_err(dev, "Microcode %s unknown type 0x%x\n",
910 			ucode->filename, ucode->type);
911 		goto release_fw;
912 	}
913 
914 	ret = copy_ucode_to_dma_mem(dev, ucode, fw->data);
915 	if (ret)
916 		goto release_fw;
917 
918 	print_ucode_dbg_info(ucode);
919 release_fw:
920 	release_firmware(fw);
921 	return ret;
922 }
923 
/* Point the group's engines at its microcode, then attach and enable them. */
static int enable_eng_grp(struct otx_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	return cpt_attach_and_enable_cores(eng_grp, obj);
}
936 
disable_eng_grp(struct device * dev,struct otx_cpt_eng_grp_info * eng_grp,void * obj)937 static int disable_eng_grp(struct device *dev,
938 			   struct otx_cpt_eng_grp_info *eng_grp,
939 			   void *obj)
940 {
941 	int i, ret;
942 
943 	ret = cpt_detach_and_disable_cores(eng_grp, obj);
944 	if (ret)
945 		return ret;
946 
947 	/* Unload ucode used by this engine group */
948 	ucode_unload(dev, &eng_grp->ucode[0]);
949 
950 	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
951 		if (!eng_grp->engs[i].type)
952 			continue;
953 
954 		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
955 	}
956 
957 	ret = cpt_set_ucode_base(eng_grp, obj);
958 
959 	return ret;
960 }
961 
/*
 * Link @dst_grp to @src_grp so the destination shares the source's
 * engines/ucode.  The source gains a reference; the destination records
 * which group it mirrors.
 */
static void setup_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp,
				    struct otx_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}
975 
/*
 * Undo setup_eng_grp_mirroring(): drop the reference held on the
 * mirrored group and clear @dst_grp's mirroring state.  No-op when the
 * group is not mirroring.
 */
static void remove_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp)
{
	struct otx_cpt_eng_grp_info *src_grp;

	if (!dst_grp->mirror.is_ena)
		return;

	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

	src_grp->mirror.ref_count--;
	dst_grp->mirror.is_ena = false;
	dst_grp->mirror.idx = 0;
	dst_grp->mirror.ref_count = 0;
}
990 
update_requested_engs(struct otx_cpt_eng_grp_info * mirrored_eng_grp,struct otx_cpt_engines * engs,int engs_cnt)991 static void update_requested_engs(struct otx_cpt_eng_grp_info *mirrored_eng_grp,
992 				  struct otx_cpt_engines *engs, int engs_cnt)
993 {
994 	struct otx_cpt_engs_rsvd *mirrored_engs;
995 	int i;
996 
997 	for (i = 0; i < engs_cnt; i++) {
998 		mirrored_engs = find_engines_by_type(mirrored_eng_grp,
999 						     engs[i].type);
1000 		if (!mirrored_engs)
1001 			continue;
1002 
1003 		/*
1004 		 * If mirrored group has this type of engines attached then
1005 		 * there are 3 scenarios possible:
1006 		 * 1) mirrored_engs.count == engs[i].count then all engines
1007 		 * from mirrored engine group will be shared with this engine
1008 		 * group
1009 		 * 2) mirrored_engs.count > engs[i].count then only a subset of
1010 		 * engines from mirrored engine group will be shared with this
1011 		 * engine group
1012 		 * 3) mirrored_engs.count < engs[i].count then all engines
1013 		 * from mirrored engine group will be shared with this group
1014 		 * and additional engines will be reserved for exclusively use
1015 		 * by this engine group
1016 		 */
1017 		engs[i].count -= mirrored_engs->count;
1018 	}
1019 }
1020 
find_mirrored_eng_grp(struct otx_cpt_eng_grp_info * grp)1021 static struct otx_cpt_eng_grp_info *find_mirrored_eng_grp(
1022 					struct otx_cpt_eng_grp_info *grp)
1023 {
1024 	struct otx_cpt_eng_grps *eng_grps = grp->g;
1025 	int i;
1026 
1027 	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
1028 		if (!eng_grps->grp[i].is_enabled)
1029 			continue;
1030 		if (eng_grps->grp[i].ucode[0].type)
1031 			continue;
1032 		if (grp->idx == i)
1033 			continue;
1034 		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
1035 				 grp->ucode[0].ver_str,
1036 				 OTX_CPT_UCODE_VER_STR_SZ))
1037 			return &eng_grps->grp[i];
1038 	}
1039 
1040 	return NULL;
1041 }
1042 
find_unused_eng_grp(struct otx_cpt_eng_grps * eng_grps)1043 static struct otx_cpt_eng_grp_info *find_unused_eng_grp(
1044 					struct otx_cpt_eng_grps *eng_grps)
1045 {
1046 	int i;
1047 
1048 	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
1049 		if (!eng_grps->grp[i].is_enabled)
1050 			return &eng_grps->grp[i];
1051 	}
1052 	return NULL;
1053 }
1054 
/*
 * eng_grp_update_masks() - build the per-engine-type engine bitmaps.
 * @dev: device used for error reporting.
 * @eng_grp: engine group whose engine masks are (re)computed.
 *
 * For every engine type attached to the group, claim engs->count
 * engines that no other group currently references, scanning from the
 * type's fixed offset.  When the group mirrors another group, the
 * mirrored group's bitmap is OR-ed in afterwards (trimmed when this
 * group needs fewer shared engines than the mirrored group provides).
 *
 * Return: 0 on success, -EINVAL for an unknown engine type, -ENOSPC
 * when not enough unused engines are available.
 */
static int eng_grp_update_masks(struct device *dev,
				struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		/* count <= 0: every needed engine is shared via mirroring */
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		/* Claim the first 'cnt' unreferenced engines of this type */
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	if (!eng_grp->mirror.is_ena)
		return 0;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		/*
		 * Share the mirrored group's engines.  A negative count
		 * means this group needs -count fewer engines than the
		 * mirrored group provides, so drop that many bits from
		 * the front of the shared bitmap before merging.
		 */
		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}
1129 
/*
 * delete_engine_group() - tear down an enabled engine group.
 * @dev: device used for diagnostics and sysfs file removal.
 * @eng_grp: group to delete.
 *
 * Refuses to delete a group that other groups still mirror
 * (mirror.ref_count != 0) - those must be removed first.  Otherwise
 * detaches this group's own mirroring, disables the group in hardware,
 * releases its engines and removes its sysfs info file.
 *
 * Return: 0 on success or a negative errno.
 */
static int delete_engine_group(struct device *dev,
			       struct otx_cpt_eng_grp_info *eng_grp)
{
	int i, ret;

	if (!eng_grp->is_enabled)
		return -EINVAL;

	/* List the groups still mirroring us, then refuse the delete */
	if (eng_grp->mirror.ref_count) {
		dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
			eng_grp->idx);
		for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
			if (eng_grp->g->grp[i].mirror.is_ena &&
			    eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
				pr_cont(" %d", i);
		}
		pr_cont("\n");
		return -EINVAL;
	}

	/* Removing engine group mirroring if enabled */
	remove_eng_grp_mirroring(eng_grp);

	/* Disable engine group */
	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
	if (ret)
		return ret;

	/* Release all engines held by this engine group */
	ret = release_engines(dev, eng_grp);
	if (ret)
		return ret;

	device_remove_file(dev, &eng_grp->info_attr);
	eng_grp->is_enabled = false;

	return 0;
}
1168 
validate_1_ucode_scenario(struct device * dev,struct otx_cpt_eng_grp_info * eng_grp,struct otx_cpt_engines * engs,int engs_cnt)1169 static int validate_1_ucode_scenario(struct device *dev,
1170 				     struct otx_cpt_eng_grp_info *eng_grp,
1171 				     struct otx_cpt_engines *engs, int engs_cnt)
1172 {
1173 	int i;
1174 
1175 	/* Verify that ucode loaded supports requested engine types */
1176 	for (i = 0; i < engs_cnt; i++) {
1177 		if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
1178 						  engs[i].type)) {
1179 			dev_err(dev,
1180 				"Microcode %s does not support %s engines\n",
1181 				eng_grp->ucode[0].filename,
1182 				get_eng_type_str(engs[i].type));
1183 			return -EINVAL;
1184 		}
1185 	}
1186 	return 0;
1187 }
1188 
update_ucode_ptrs(struct otx_cpt_eng_grp_info * eng_grp)1189 static void update_ucode_ptrs(struct otx_cpt_eng_grp_info *eng_grp)
1190 {
1191 	struct otx_cpt_ucode *ucode;
1192 
1193 	if (eng_grp->mirror.is_ena)
1194 		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
1195 	else
1196 		ucode = &eng_grp->ucode[0];
1197 	WARN_ON(!eng_grp->engs[0].type);
1198 	eng_grp->engs[0].ucode = ucode;
1199 }
1200 
/*
 * create_engine_group() - set up a new engine group.
 * @dev: device used for diagnostics and sysfs file creation.
 * @eng_grps: container holding all engine groups of this device.
 * @engs: requested engine types and counts.
 * @engs_cnt: number of entries in @engs.
 * @ucode_data: microcode sources; tar-archive entries or file names
 *              depending on @use_uc_from_tar_arch.
 * @ucodes_cnt: number of entries in @ucode_data.
 * @use_uc_from_tar_arch: true when @ucode_data holds tar_ucode_info_t
 *                        pointers, false when it holds file-name strings.
 *
 * Finds a free group slot, loads the microcode, sets up mirroring when
 * an enabled group already runs identical microcode, reserves engines,
 * programs the engine masks and finally enables the group in hardware.
 * On any failure the partially-built state is unwound via the goto
 * labels at the bottom.
 *
 * Return: 0 on success or a negative errno.
 */
static int create_engine_group(struct device *dev,
			       struct otx_cpt_eng_grps *eng_grps,
			       struct otx_cpt_engines *engs, int engs_cnt,
			       void *ucode_data[], int ucodes_cnt,
			       bool use_uc_from_tar_arch)
{
	struct otx_cpt_eng_grp_info *mirrored_eng_grp;
	struct tar_ucode_info_t *tar_info;
	struct otx_cpt_eng_grp_info *eng_grp;
	int i, ret = 0;

	if (ucodes_cnt > OTX_CPT_MAX_ETYPES_PER_GRP)
		return -EINVAL;

	/* Validate if requested engine types are supported by this device */
	for (i = 0; i < engs_cnt; i++)
		if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
			dev_err(dev, "Device does not support %s engines\n",
				get_eng_type_str(engs[i].type));
			return -EPERM;
		}

	/* Find engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error all engine groups are being used\n");
		return -ENOSPC;
	}

	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
		if (use_uc_from_tar_arch) {
			/* Microcode already parsed from the tar archive */
			tar_info = (struct tar_ucode_info_t *) ucode_data[i];
			eng_grp->ucode[i] = tar_info->ucode;
			ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
						    tar_info->ucode_ptr);
		} else
			/* Microcode requested by file name via sysfs */
			ret = ucode_load(dev, &eng_grp->ucode[i],
					 (char *) ucode_data[i]);
		if (ret)
			goto err_ucode_unload;
	}

	/* Validate scenario where 1 ucode is used */
	ret = validate_1_ucode_scenario(dev, eng_grp, engs, engs_cnt);
	if (ret)
		goto err_ucode_unload;

	/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update count of requested engines because some
		 * of them might be shared with mirrored group
		 */
		update_requested_engs(mirrored_eng_grp, engs, engs_cnt);
	}

	/* Reserve engines */
	ret = reserve_engines(dev, eng_grp, engs, engs_cnt);
	if (ret)
		goto err_ucode_unload;

	/* Update ucode pointers used by engines */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto err_release_engs;

	/* Create sysfs entry for engine group info */
	ret = create_sysfs_eng_grps_info(dev, eng_grp);
	if (ret)
		goto err_release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto err_release_engs;

	/*
	 * If this engine group mirrors another engine group
	 * then we need to unload ucode as we will use ucode
	 * from mirrored engine group
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;
	if (eng_grp->mirror.is_ena)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);

	return 0;

err_release_engs:
	release_engines(dev, eng_grp);
err_ucode_unload:
	ucode_unload(dev, &eng_grp->ucode[0]);
	return ret;
}
1311 
/*
 * ucode_load_store() - sysfs store handler for the "ucode_load" file.
 *
 * Parses a ';'-separated command string.  Two forms are accepted:
 *   - deletion: "engine_group<N>:null" removes engine group N;
 *   - creation: one or two microcode file names combined with engine
 *     count fields "se:<count>" and/or "ae:<count>" create a new group.
 * Engine-count fields must precede the file names, and each engine
 * type may appear at most once.
 *
 * Return: @count on success, a negative errno on failure.
 */
static ssize_t ucode_load_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	char *ucode_filename[OTX_CPT_MAX_ETYPES_PER_GRP];
	char tmp_buf[OTX_CPT_UCODE_NAME_LENGTH] = { 0 };
	char *start, *val, *err_msg, *tmp;
	struct otx_cpt_eng_grps *eng_grps;
	int grp_idx = 0, ret = -EINVAL;
	/* NOTE(review): has_ie is set but never read on octeontx */
	bool has_se, has_ie, has_ae;
	int del_grp_idx = -1;
	int ucode_idx = 0;

	if (strlen(buf) > OTX_CPT_UCODE_NAME_LENGTH)
		return -EINVAL;

	eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
	err_msg = "Invalid engine group format";
	strlcpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
	start = tmp_buf;

	has_se = has_ie = has_ae = false;

	/* Tokenize the command on ';' and classify each field */
	for (;;) {
		val = strsep(&start, ";");
		if (!val)
			break;
		val = strim(val);
		if (!*val)
			continue;

		if (!strncasecmp(val, "engine_group", 12)) {
			/* Deletion request: "engine_group<N>:null" */
			if (del_grp_idx != -1)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 13)
				goto err_print;
			if (kstrtoint((tmp + 12), 10, &del_grp_idx))
				goto err_print;
			val = strim(val);
			if (strncasecmp(val, "null", 4))
				goto err_print;
			if (strlen(val) != 4)
				goto err_print;
		} else if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
			/* "se:<count>" - only once, before any file name */
			if (has_se || ucode_idx)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX_CPT_SE_TYPES;
			has_se = true;
		} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
			/* "ae:<count>" - only once, before any file name */
			if (has_ae || ucode_idx)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX_CPT_AE_TYPES;
			has_ae = true;
		} else {
			/* Anything else is a microcode file name (max 2) */
			if (ucode_idx > 1)
				goto err_print;
			if (!strlen(val))
				goto err_print;
			if (strnstr(val, " ", strlen(val)))
				goto err_print;
			ucode_filename[ucode_idx++] = val;
		}
	}

	/* Validate input parameters */
	if (del_grp_idx == -1) {
		/* Creation needs at least one engine type and one ucode */
		if (!(grp_idx && ucode_idx))
			goto err_print;

		if (ucode_idx > 1 && grp_idx < 2)
			goto err_print;

		if (grp_idx > OTX_CPT_MAX_ETYPES_PER_GRP) {
			err_msg = "Error max 2 engine types can be attached";
			goto err_print;
		}

	} else {
		if (del_grp_idx < 0 ||
		    del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
			dev_err(dev, "Invalid engine group index %d\n",
				del_grp_idx);
			ret = -EINVAL;
			return ret;
		}

		if (!eng_grps->grp[del_grp_idx].is_enabled) {
			dev_err(dev, "Error engine_group%d is not configured\n",
				del_grp_idx);
			ret = -EINVAL;
			return ret;
		}

		/* Deletion and creation cannot be mixed in one command */
		if (grp_idx || ucode_idx)
			goto err_print;
	}

	mutex_lock(&eng_grps->lock);

	/* Groups become read-only while VFs are enabled */
	if (eng_grps->is_rdonly) {
		dev_err(dev, "Disable VFs before modifying engine groups\n");
		ret = -EACCES;
		goto err_unlock;
	}

	if (del_grp_idx == -1)
		/* create engine group */
		ret = create_engine_group(dev, eng_grps, engs, grp_idx,
					  (void **) ucode_filename,
					  ucode_idx, false);
	else
		/* delete engine group */
		ret = delete_engine_group(dev, &eng_grps->grp[del_grp_idx]);
	if (ret)
		goto err_unlock;

	print_dbg_info(dev, eng_grps);
err_unlock:
	mutex_unlock(&eng_grps->lock);
	return ret ? ret : count;
err_print:
	dev_err(dev, "%s\n", err_msg);

	return ret;
}
1455 
/*
 * otx_cpt_try_create_default_eng_grps() - create default groups for
 * kernel crypto.
 * @pdev: the CPT PF PCI device.
 * @eng_grps: engine-group container of this device.
 * @pf_type: OTX_CPT_SE or OTX_CPT_AE.
 *
 * Runs at most once (guarded by is_first_try) and only when no engine
 * group has been configured yet.  Loads the microcode tar archive and,
 * depending on what the device and the archive support, creates an SE
 * group (symmetric crypto) and/or an AE group (asymmetric crypto).
 * A missing/unloadable tar archive is treated as best-effort and is
 * not an error.
 *
 * Return: 0 on success or when nothing had to be done, negative errno
 * on failure.
 */
int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
					struct otx_cpt_eng_grps *eng_grps,
					int pf_type)
{
	struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
	struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
	struct tar_arch_info_t *tar_arch = NULL;
	char *tar_filename;
	int i, ret = 0;

	mutex_lock(&eng_grps->lock);

	/*
	 * We don't create engine group for kernel crypto if attempt to create
	 * it was already made (when user enabled VFs for the first time)
	 */
	if (eng_grps->is_first_try)
		goto unlock_mutex;
	eng_grps->is_first_try = true;

	/* We create group for kcrypto only if no groups are configured */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].is_enabled)
			goto unlock_mutex;

	switch (pf_type) {
	case OTX_CPT_AE:
	case OTX_CPT_SE:
		tar_filename = OTX_CPT_UCODE_TAR_FILE_NAME;
		break;

	default:
		dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* Missing firmware is not fatal here - leave ret at 0 */
	tar_arch = load_tar_archive(&pdev->dev, tar_filename);
	if (!tar_arch)
		goto unlock_mutex;

	/*
	 * If device supports SE engines and there is SE microcode in tar
	 * archive try to create engine group with SE engines for kernel
	 * crypto functionality (symmetric crypto)
	 */
	tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_SE_TYPES);
	if (tar_info[0] &&
	    dev_supports_eng_type(eng_grps, OTX_CPT_SE_TYPES)) {

		engs[0].type = OTX_CPT_SE_TYPES;
		engs[0].count = eng_grps->avail.max_se_cnt;

		ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
					  (void **) tar_info, 1, true);
		if (ret)
			goto release_tar_arch;
	}
	/*
	 * If device supports AE engines and there is AE microcode in tar
	 * archive try to create engine group with AE engines for asymmetric
	 * crypto functionality.
	 */
	tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_AE_TYPES);
	if (tar_info[0] &&
	    dev_supports_eng_type(eng_grps, OTX_CPT_AE_TYPES)) {

		engs[0].type = OTX_CPT_AE_TYPES;
		engs[0].count = eng_grps->avail.max_ae_cnt;

		ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
					  (void **) tar_info, 1, true);
		if (ret)
			goto release_tar_arch;
	}

	print_dbg_info(&pdev->dev, eng_grps);
release_tar_arch:
	release_tar_archive(tar_arch);
unlock_mutex:
	mutex_unlock(&eng_grps->lock);
	return ret;
}
1539 
/*
 * Toggle whether engine groups may be modified; taken under the group
 * lock so it serializes against concurrent create/delete operations.
 */
void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
				    bool is_rdonly)
{
	mutex_lock(&eng_grps->lock);
	eng_grps->is_rdonly = is_rdonly;
	mutex_unlock(&eng_grps->lock);
}
1549 
otx_cpt_disable_all_cores(struct otx_cpt_device * cpt)1550 void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
1551 {
1552 	int grp, timeout = 100;
1553 	u64 reg;
1554 
1555 	/* Disengage the cores from groups */
1556 	for (grp = 0; grp < OTX_CPT_MAX_ENGINE_GROUPS; grp++) {
1557 		writeq(0, cpt->reg_base + OTX_CPT_PF_GX_EN(grp));
1558 		udelay(CSR_DELAY);
1559 	}
1560 
1561 	reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
1562 	while (reg) {
1563 		udelay(CSR_DELAY);
1564 		reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
1565 		if (timeout--) {
1566 			dev_warn(&cpt->pdev->dev, "Cores still busy\n");
1567 			break;
1568 		}
1569 	}
1570 
1571 	/* Disable the cores */
1572 	writeq(0, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
1573 }
1574 
otx_cpt_cleanup_eng_grps(struct pci_dev * pdev,struct otx_cpt_eng_grps * eng_grps)1575 void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
1576 			      struct otx_cpt_eng_grps *eng_grps)
1577 {
1578 	struct otx_cpt_eng_grp_info *grp;
1579 	int i, j;
1580 
1581 	mutex_lock(&eng_grps->lock);
1582 	if (eng_grps->is_ucode_load_created) {
1583 		device_remove_file(&pdev->dev,
1584 				   &eng_grps->ucode_load_attr);
1585 		eng_grps->is_ucode_load_created = false;
1586 	}
1587 
1588 	/* First delete all mirroring engine groups */
1589 	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
1590 		if (eng_grps->grp[i].mirror.is_ena)
1591 			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1592 
1593 	/* Delete remaining engine groups */
1594 	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
1595 		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1596 
1597 	/* Release memory */
1598 	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
1599 		grp = &eng_grps->grp[i];
1600 		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
1601 			kfree(grp->engs[j].bmap);
1602 			grp->engs[j].bmap = NULL;
1603 		}
1604 	}
1605 
1606 	mutex_unlock(&eng_grps->lock);
1607 }
1608 
/*
 * otx_cpt_init_eng_grps() - initialize engine-group bookkeeping for a PF.
 * @pdev: the CPT PF PCI device.
 * @eng_grps: engine-group container to initialize.
 * @pf_type: OTX_CPT_SE or OTX_CPT_AE; selects supported engine types.
 *
 * Sets up locking, available-engine counters (avail.max_se_cnt /
 * max_ae_cnt are assumed to be filled in by the caller before this
 * runs - TODO confirm), allocates the per-engine-type bitmaps for each
 * group slot and creates the "ucode_load" sysfs file.
 *
 * Return: 0 on success, negative errno otherwise; partially allocated
 * state is released via otx_cpt_cleanup_eng_grps() on failure.
 */
int otx_cpt_init_eng_grps(struct pci_dev *pdev,
			  struct otx_cpt_eng_grps *eng_grps, int pf_type)
{
	struct otx_cpt_eng_grp_info *grp;
	int i, j, ret = 0;

	mutex_init(&eng_grps->lock);
	eng_grps->obj = pci_get_drvdata(pdev);
	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d > than max supported %d\n",
			eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		snprintf(grp->sysfs_info_name, OTX_CPT_UCODE_NAME_LENGTH,
			 "engine_group%d", i);
		/* One engine bitmap per attachable engine type */
		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto err;
			}
		}
	}

	switch (pf_type) {
	case OTX_CPT_SE:
		/* OcteonTX 83XX SE CPT PF has only SE engines attached */
		eng_grps->eng_types_supported = 1 << OTX_CPT_SE_TYPES;
		break;

	case OTX_CPT_AE:
		/* OcteonTX 83XX AE CPT PF has only AE engines attached */
		eng_grps->eng_types_supported = 1 << OTX_CPT_AE_TYPES;
		break;

	default:
		dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
		ret = -EINVAL;
		goto err;
	}

	/* Expose the ucode_load control file (write-only, root) */
	eng_grps->ucode_load_attr.show = NULL;
	eng_grps->ucode_load_attr.store = ucode_load_store;
	eng_grps->ucode_load_attr.attr.name = "ucode_load";
	eng_grps->ucode_load_attr.attr.mode = 0220;
	sysfs_attr_init(&eng_grps->ucode_load_attr.attr);
	ret = device_create_file(&pdev->dev,
				 &eng_grps->ucode_load_attr);
	if (ret)
		goto err;
	eng_grps->is_ucode_load_created = true;

	print_dbg_info(&pdev->dev, eng_grps);
	return ret;
err:
	otx_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}
1682