// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

static const char * const hl_glbl_error_cause[HL_MAX_NUM_OF_GLBL_ERR_CAUSE] = {
	"Error due to un-priv read",
	"Error due to un-secure read",
	"Error due to read from unmapped reg",
	"Error due to un-priv write",
	"Error due to un-secure write",
	"Error due to write to unmapped reg",
	"External I/F write sec violation",
	"External I/F write to un-mapped reg",
	"Read to write only",
	"Write to read only"
};

/**
 * hl_get_pb_block - return the index of the block containing a given register
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address in the desired block
 * @pb_blocks: blocks array
 * @array_size: blocks array size
 *
 * Return: index of the relevant block, or -EDOM if the address falls within
 *         no block.
 */
static int hl_get_pb_block(struct hl_device *hdev, u32 mm_reg_addr,
		const u32 pb_blocks[], int array_size)
{
	int i;
	u32 start_addr, end_addr;

	for (i = 0 ; i < array_size ; i++) {
		start_addr = pb_blocks[i];
		end_addr = start_addr + HL_BLOCK_SIZE;

		if ((mm_reg_addr >= start_addr) && (mm_reg_addr < end_addr))
			return i;
	}

	dev_err(hdev->dev, "No protection domain was found for 0x%x\n",
			mm_reg_addr);
	return -EDOM;
}

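/*
 * For example (illustrative values, not from any real ASIC map): with
 * pb_blocks[] = { 0x1000, 0x2000 } and an HL_BLOCK_SIZE of 0x1000, address
 * 0x2004 falls in [0x2000, 0x3000) so hl_get_pb_block() returns 1, while
 * address 0x3004 matches no block and yields -EDOM.
 */
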
/**
 * hl_unset_pb_in_block - clear a specific protection bit in a block
 *
 * @hdev: pointer to hl_device structure
 * @reg_offset: register offset, converted to a bit offset within the pb block
 * @sgs_entry: pb array
 *
 * Return: 0 on success, -EINVAL if the offset is outside the block or not
 *         4-byte aligned.
 */
static int hl_unset_pb_in_block(struct hl_device *hdev, u32 reg_offset,
				struct hl_block_glbl_sec *sgs_entry)
{
	if ((reg_offset >= HL_BLOCK_SIZE) || (reg_offset & 0x3)) {
		dev_err(hdev->dev,
			"Register offset(%d) is out of range(%d) or invalid\n",
			reg_offset, HL_BLOCK_SIZE);
		return -EINVAL;
	}

	/* Each 4-byte register is represented by one bit in the pb bitmap */
	UNSET_GLBL_SEC_BIT(sgs_entry->sec_array,
			 (reg_offset & (HL_BLOCK_SIZE - 1)) >> 2);

	return 0;
}

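/*
 * As an illustration: a register at byte offset 0x8 within its block maps to
 * bit (0x8 >> 2) == 2 of the block's bitmap, so unsecuring it flips bit 2 of
 * sec_array[0] to the "unsecured" state.
 */
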
/**
 * hl_unsecure_register - locate the relevant block for this register and
 *                        clear the corresponding protection bit
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address to unsecure
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @array_size: blocks array size
 *
 * Return: 0 on success, negative errno on failure.
 */
int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
		const u32 pb_blocks[], struct hl_block_glbl_sec sgs_array[],
		int array_size)
{
	u32 reg_offset;
	int block_num;

	block_num = hl_get_pb_block(hdev, mm_reg_addr + offset, pb_blocks,
			array_size);
	if (block_num < 0)
		return block_num;

	reg_offset = (mm_reg_addr + offset) - pb_blocks[block_num];

	return hl_unset_pb_in_block(hdev, reg_offset, &sgs_array[block_num]);
}

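/*
 * Typical (hypothetical) usage from an ASIC-specific init flow; mmFOO_QM_REG,
 * foo_pb_blocks and foo_glbl_sec are placeholders rather than real driver
 * symbols:
 *
 *	rc = hl_unsecure_register(hdev, mmFOO_QM_REG, 0, foo_pb_blocks,
 *			foo_glbl_sec, ARRAY_SIZE(foo_pb_blocks));
 */
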
/**
 * hl_unsecure_register_range - locate the relevant block for this register
 *                              range and clear the corresponding protection
 *                              bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_range: register address range to unsecure
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @array_size: blocks array size
 *
 * The whole range is expected to reside within a single block; offsets that
 * spill past the block are rejected by hl_unset_pb_in_block().
 *
 * Return: 0 on success, non-zero otherwise.
 */
static int hl_unsecure_register_range(struct hl_device *hdev,
		struct range mm_reg_range, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[],
		int array_size)
{
	u32 reg_offset;
	int i, block_num, rc = 0;

	block_num = hl_get_pb_block(hdev,
			mm_reg_range.start + offset, pb_blocks,
			array_size);
	if (block_num < 0)
		return block_num;

	/* Walk the range in 4-byte (register) strides */
	for (i = mm_reg_range.start ; i <= mm_reg_range.end ; i += 4) {
		reg_offset = (i + offset) - pb_blocks[block_num];
		rc |= hl_unset_pb_in_block(hdev, reg_offset,
					&sgs_array[block_num]);
	}

	return rc;
}

/**
 * hl_unsecure_registers - locate the relevant block for all registers and
 *                         clear the corresponding protection bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_array: register address array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 * Return: 0 on success, the error of the first failing register otherwise.
 */
int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
		int mm_array_size, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int i, rc = 0;

	for (i = 0 ; i < mm_array_size ; i++) {
		rc = hl_unsecure_register(hdev, mm_reg_array[i], offset,
				pb_blocks, sgs_array, blocks_array_size);

		if (rc)
			return rc;
	}

	return rc;
}

/**
 * hl_unsecure_registers_range - locate the relevant block for all register
 *                               ranges and clear the corresponding protection
 *                               bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_range_array: register address range array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 * Return: 0 on success, the error of the first failing range otherwise.
 */
static int hl_unsecure_registers_range(struct hl_device *hdev,
		const struct range mm_reg_range_array[], int mm_array_size,
		int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int i, rc = 0;

	for (i = 0 ; i < mm_array_size ; i++) {
		rc = hl_unsecure_register_range(hdev, mm_reg_range_array[i],
			offset, pb_blocks, sgs_array, blocks_array_size);

		if (rc)
			return rc;
	}

	return rc;
}

/**
 * hl_ack_pb_security_violations - ack security violations
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 */
static void hl_ack_pb_security_violations(struct hl_device *hdev,
		const u32 pb_blocks[], u32 block_offset, int array_size)
{
	int i;
	u32 cause, addr, block_base;

	for (i = 0 ; i < array_size ; i++) {
		block_base = pb_blocks[i] + block_offset;
		cause = RREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE);
		if (cause) {
			addr = RREG32(block_base + HL_BLOCK_GLBL_ERR_ADDR);
			hdev->asic_funcs->pb_print_security_errors(hdev,
					block_base, cause, addr);
			/* Write the cause bits back to acknowledge them */
			WREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE, cause);
		}
	}
}

/**
 * hl_config_glbl_sec - set pb in HW according to given pb array
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 */
void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], u32 block_offset,
		int array_size)
{
	int i, j;
	u32 sgs_base;

	/* Allow extra settling time on the Palladium emulation platform */
	if (hdev->pldm)
		usleep_range(100, 1000);

	for (i = 0 ; i < array_size ; i++) {
		sgs_base = block_offset + pb_blocks[i] +
				HL_BLOCK_GLBL_SEC_OFFS;

		for (j = 0 ; j < HL_BLOCK_GLBL_SEC_LEN ; j++)
			WREG32(sgs_base + j * sizeof(u32),
				sgs_array[i].sec_array[j]);
	}
}

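/*
 * A sketch of the resulting MMIO traffic, assuming purely illustrative values
 * of HL_BLOCK_GLBL_SEC_OFFS == 0xF80 and HL_BLOCK_GLBL_SEC_LEN == 2 for a
 * block at 0x1000 with block_offset 0: the loop writes sec_array[0] to
 * 0x1F80 and sec_array[1] to 0x1F84, each bit controlling the secure state
 * of one 4-byte register in the block.
 */
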
/**
 * hl_secure_block - locally memset a block's pb array to 0, marking all of
 *                   its registers as secured
 *
 * @hdev: pointer to hl_device structure
 * @sgs_array: pb array to clear
 * @array_size: blocks array size
 */
void hl_secure_block(struct hl_device *hdev,
		struct hl_block_glbl_sec sgs_array[], int array_size)
{
	int i;

	for (i = 0 ; i < array_size ; i++)
		memset(sgs_array[i].sec_array, 0, HL_BLOCK_GLBL_SEC_SIZE);
}

/**
 * hl_init_pb_with_mask - set selected pb instances with mask in HW according
 *                        to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_array: unsecured register array
 * @user_regs_array_size: unsecured register array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 *
 * Return: 0 on success, negative errno otherwise.
 */
int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *user_regs_array, u32 user_regs_array_size, u64 mask)
{
	int i, j, rc;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size,
			0, pb_blocks, glbl_sec, blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

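/*
 * Hypothetical usage, applying the protection configuration once (shared
 * dcore) but only to the first two engine instances; all names except the
 * API itself are placeholders:
 *
 *	rc = hl_init_pb_with_mask(hdev, HL_PB_SHARED, 0, NUM_OF_FOO_ENGINES,
 *			FOO_ENGINE_OFFSET, foo_pb_blocks,
 *			ARRAY_SIZE(foo_pb_blocks), foo_user_regs,
 *			ARRAY_SIZE(foo_user_regs), BIT_ULL(0) | BIT_ULL(1));
 */
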
/**
 * hl_init_pb - set pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_array: unsecured register array
 * @user_regs_array_size: unsecured register array size
 *
 * Return: 0 on success, negative errno otherwise.
 */
int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *user_regs_array, u32 user_regs_array_size)
{
	return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, user_regs_array,
			user_regs_array_size, ULLONG_MAX);
}

/**
 * hl_init_pb_ranges_with_mask - set pb instances using mask in HW according
 *                               to given configuration, unsecuring register
 *                               ranges instead of specific registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_range_array: unsecured register range array
 * @user_regs_range_array_size: unsecured register range array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 *
 * Return: 0 on success, negative errno otherwise.
 */
int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *user_regs_range_array,
		u32 user_regs_range_array_size, u64 mask)
{
	int i, j, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers_range(hdev, user_regs_range_array,
			user_regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb_ranges - set pb in HW according to given configuration,
 *                     unsecuring register ranges instead of specific
 *                     registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_range_array: unsecured register range array
 * @user_regs_range_array_size: unsecured register range array size
 *
 * Return: 0 on success, negative errno otherwise.
 */
int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *user_regs_range_array,
		u32 user_regs_range_array_size)
{
	return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, user_regs_range_array,
			user_regs_range_array_size, ULLONG_MAX);
}

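/*
 * Hypothetical usage, unsecuring a whole register window per instance rather
 * than listing every register; all names except the API itself are
 * placeholders:
 *
 *	static const struct range foo_user_ranges[] = {
 *		{ mmFOO_CFG_BASE, mmFOO_CFG_BASE + 0xFC },
 *	};
 *
 *	rc = hl_init_pb_ranges(hdev, HL_PB_SHARED, 0, NUM_OF_FOO_ENGINES,
 *			FOO_ENGINE_OFFSET, foo_pb_blocks,
 *			ARRAY_SIZE(foo_pb_blocks), foo_user_ranges,
 *			ARRAY_SIZE(foo_user_ranges));
 */
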
/**
 * hl_init_pb_single_dcore - set pb for a single dcore in HW
 * according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_array: unsecured register array
 * @user_regs_array_size: unsecured register array size
 *
 * Return: 0 on success, negative errno otherwise.
 */
int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *user_regs_array, u32 user_regs_array_size)
{
	int i, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers(hdev, user_regs_array, user_regs_array_size,
			0, pb_blocks, glbl_sec, blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb_ranges_single_dcore - set pb for a single dcore in HW according
 *                                  to given configuration, unsecuring
 *                                  register ranges instead of specific
 *                                  registers
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @user_regs_range_array: unsecured register range array
 * @user_regs_range_array_size: unsecured register range array size
 *
 * Return: 0 on success, negative errno otherwise.
 */
int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *user_regs_range_array, u32 user_regs_range_array_size)
{
	int i, rc;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers_range(hdev, user_regs_range_array,
			user_regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_ack_pb_with_mask - ack pb with mask in HW according to given
 *                       configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 */
void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size, u64 mask)
{
	int i, j;

	/* ack all blocks */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_ack_pb_security_violations(hdev, pb_blocks,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}
}

/**
 * hl_ack_pb - ack pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 */
void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	hl_ack_pb_with_mask(hdev, num_dcores, dcore_offset, num_instances,
			instance_offset, pb_blocks, blocks_array_size,
			ULLONG_MAX);
}

/**
 * hl_ack_pb_single_dcore - ack pb for a single dcore in HW
 * according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 */
void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	int i;

	/* ack all blocks */
	for (i = 0 ; i < num_instances ; i++)
		hl_ack_pb_security_violations(hdev, pb_blocks,
				dcore_offset + i * instance_offset,
				blocks_array_size);
}

static u32 hl_automated_get_block_base_addr(struct hl_device *hdev,
		struct hl_special_block_info *block_info,
		u32 major, u32 minor, u32 sub_minor)
{
	u32 fw_block_base_address = block_info->base_addr +
			major * block_info->major_offset +
			minor * block_info->minor_offset +
			sub_minor * block_info->sub_minor_offset;
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	/* The calculation above yields an address in the FW view of the
	 * configuration space, so convert it to the driver view.
	 */
	return (fw_block_base_address - lower_32_bits(prop->cfg_base_address));
}

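/*
 * With illustrative numbers only: base_addr 0x4A00000, major_offset 0x200000,
 * major == 1 and minor == sub_minor == 0 give an FW-view address of
 * 0x4C00000; if the lower 32 bits of cfg_base_address are 0x4000000, the
 * driver-view base returned is 0xC00000.
 */
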
static bool hl_check_block_type_exclusion(struct hl_skip_blocks_cfg *skip_blocks_cfg,
		int block_type)
{
	int i;

	/* Check if block type is listed in the exclusion list of block types */
	for (i = 0 ; i < skip_blocks_cfg->block_types_len ; i++)
		if (block_type == skip_blocks_cfg->block_types[i])
			return true;

	return false;
}

static bool hl_check_block_range_exclusion(struct hl_device *hdev,
		struct hl_skip_blocks_cfg *skip_blocks_cfg,
		struct hl_special_block_info *block_info,
		u32 major, u32 minor, u32 sub_minor)
{
	u32 blocks_in_range, block_base_addr_in_range, block_base_addr;
	int i, j;

	block_base_addr = hl_automated_get_block_base_addr(hdev, block_info,
			major, minor, sub_minor);

	for (i = 0 ; i < skip_blocks_cfg->block_ranges_len ; i++) {
		blocks_in_range = (skip_blocks_cfg->block_ranges[i].end -
				skip_blocks_cfg->block_ranges[i].start) /
				HL_BLOCK_SIZE + 1;
		for (j = 0 ; j < blocks_in_range ; j++) {
			block_base_addr_in_range = skip_blocks_cfg->block_ranges[i].start +
					j * HL_BLOCK_SIZE;
			if (block_base_addr == block_base_addr_in_range)
				return true;
		}
	}

	return false;
}

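/*
 * For instance (made-up numbers): a skip range of { .start = 0x1000,
 * .end = 0x3000 } with an HL_BLOCK_SIZE of 0x1000 covers
 * (0x3000 - 0x1000) / 0x1000 + 1 == 3 block bases (0x1000, 0x2000, 0x3000),
 * and any block whose computed base address matches one of them is skipped.
 */
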
static int hl_read_glbl_errors(struct hl_device *hdev,
		u32 blk_idx, u32 major, u32 minor, u32 sub_minor, void *data)
{
	struct hl_special_block_info *special_blocks = hdev->asic_prop.special_blocks;
	struct hl_special_block_info *current_block = &special_blocks[blk_idx];
	u32 glbl_err_addr, glbl_err_cause, addr_val, cause_val, block_base,
		base = current_block->base_addr - lower_32_bits(hdev->asic_prop.cfg_base_address);
	int i;

	block_base = base + major * current_block->major_offset +
			minor * current_block->minor_offset +
			sub_minor * current_block->sub_minor_offset;

	glbl_err_cause = block_base + HL_GLBL_ERR_CAUSE_OFFSET;
	cause_val = RREG32(glbl_err_cause);
	if (!cause_val)
		return 0;

	glbl_err_addr = block_base + HL_GLBL_ERR_ADDR_OFFSET;
	addr_val = RREG32(glbl_err_addr);

	for (i = 0 ; i < hdev->asic_prop.glbl_err_cause_num ; i++) {
		if (cause_val & BIT(i))
			dev_err_ratelimited(hdev->dev,
				"%s, addr %#llx\n",
				hl_glbl_error_cause[i],
				hdev->asic_prop.cfg_base_address + block_base +
				FIELD_GET(HL_GLBL_ERR_ADDRESS_MASK, addr_val));
	}

	/* Ack the errors that were just reported */
	WREG32(glbl_err_cause, cause_val);

	return 0;
}

void hl_check_for_glbl_errors(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_special_blocks_cfg special_blocks_cfg;
	struct iterate_special_ctx glbl_err_iter;
	int rc;

	memset(&special_blocks_cfg, 0, sizeof(special_blocks_cfg));
	special_blocks_cfg.skip_blocks_cfg = &prop->skip_special_blocks_cfg;

	glbl_err_iter.fn = &hl_read_glbl_errors;
	glbl_err_iter.data = &special_blocks_cfg;

	rc = hl_iterate_special_blocks(hdev, &glbl_err_iter);
	if (rc)
		dev_err_ratelimited(hdev->dev,
			"Could not iterate special blocks, glbl error check failed\n");
}

int hl_iterate_special_blocks(struct hl_device *hdev, struct iterate_special_ctx *ctx)
{
	struct hl_special_blocks_cfg *special_blocks_cfg =
			(struct hl_special_blocks_cfg *)ctx->data;
	struct hl_skip_blocks_cfg *skip_blocks_cfg =
			special_blocks_cfg->skip_blocks_cfg;
	u32 major, minor, sub_minor, blk_idx, num_blocks;
	struct hl_special_block_info *block_info_arr;
	int rc;

	block_info_arr = hdev->asic_prop.special_blocks;
	if (!block_info_arr)
		return -EINVAL;

	num_blocks = hdev->asic_prop.num_of_special_blocks;

	for (blk_idx = 0 ; blk_idx < num_blocks ; blk_idx++, block_info_arr++) {
		if (hl_check_block_type_exclusion(skip_blocks_cfg, block_info_arr->block_type))
			continue;

		/* do-while is used because every block is expected to have at
		 * least one minor and one sub_minor instance.
		 */
		for (major = 0 ; major < block_info_arr->major ; major++) {
			minor = 0;
			do {
				sub_minor = 0;
				do {
					if ((hl_check_block_range_exclusion(hdev,
							skip_blocks_cfg, block_info_arr,
							major, minor, sub_minor)) ||
						(skip_blocks_cfg->skip_block_hook &&
						skip_blocks_cfg->skip_block_hook(hdev,
							special_blocks_cfg,
							blk_idx, major, minor, sub_minor))) {
						sub_minor++;
						continue;
					}

					rc = ctx->fn(hdev, blk_idx, major, minor,
								sub_minor, ctx->data);
					if (rc)
						return rc;

					sub_minor++;
				} while (sub_minor < block_info_arr->sub_minor);

				minor++;
			} while (minor < block_info_arr->minor);
		}
	}

	return 0;
}

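/*
 * A minimal caller sketch (hl_check_for_glbl_errors() above is the in-tree
 * user): the iterator invokes ctx->fn() once per (block, major, minor,
 * sub_minor) instance that survives the exclusion checks. my_block_cb and
 * my_special_blocks_cfg are hypothetical names:
 *
 *	struct iterate_special_ctx ctx = {
 *		.fn = &my_block_cb,
 *		.data = &my_special_blocks_cfg,
 *	};
 *
 *	rc = hl_iterate_special_blocks(hdev, &ctx);
 */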