1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2022 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "habanalabs.h"
9#include "../include/common/hl_boot_if.h"
10
11#include <linux/firmware.h>
12#include <linux/crc32.h>
13#include <linux/slab.h>
14#include <linux/ctype.h>
15#include <linux/vmalloc.h>
16
17#include <trace/events/habanalabs.h>
18
19#define FW_FILE_MAX_SIZE		0x1400000 /* maximum size of 20MB */
20
21static char *comms_cmd_str_arr[COMMS_INVLD_LAST] = {
22	[COMMS_NOOP] = __stringify(COMMS_NOOP),
23	[COMMS_CLR_STS] = __stringify(COMMS_CLR_STS),
24	[COMMS_RST_STATE] = __stringify(COMMS_RST_STATE),
25	[COMMS_PREP_DESC] = __stringify(COMMS_PREP_DESC),
26	[COMMS_DATA_RDY] = __stringify(COMMS_DATA_RDY),
27	[COMMS_EXEC] = __stringify(COMMS_EXEC),
28	[COMMS_RST_DEV] = __stringify(COMMS_RST_DEV),
29	[COMMS_GOTO_WFE] = __stringify(COMMS_GOTO_WFE),
30	[COMMS_SKIP_BMC] = __stringify(COMMS_SKIP_BMC),
31	[COMMS_PREP_DESC_ELBI] = __stringify(COMMS_PREP_DESC_ELBI),
32};
33
34static char *comms_sts_str_arr[COMMS_STS_INVLD_LAST] = {
35	[COMMS_STS_NOOP] = __stringify(COMMS_STS_NOOP),
36	[COMMS_STS_ACK] = __stringify(COMMS_STS_ACK),
37	[COMMS_STS_OK] = __stringify(COMMS_STS_OK),
38	[COMMS_STS_ERR] = __stringify(COMMS_STS_ERR),
39	[COMMS_STS_VALID_ERR] = __stringify(COMMS_STS_VALID_ERR),
40	[COMMS_STS_TIMEOUT_ERR] = __stringify(COMMS_STS_TIMEOUT_ERR),
41};
42
43static char *extract_fw_ver_from_str(const char *fw_str)
44{
45	char *str, *fw_ver, *whitespace;
46	u32 ver_offset;
47
48	fw_ver = kmalloc(VERSION_MAX_LEN, GFP_KERNEL);
49	if (!fw_ver)
50		return NULL;
51
52	str = strnstr(fw_str, "fw-", VERSION_MAX_LEN);
53	if (!str)
54		goto free_fw_ver;
55
56	/* Skip the fw- part */
57	str += 3;
58	ver_offset = str - fw_str;
59
60	/* Copy until the next whitespace */
61	whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
62	if (!whitespace)
63		goto free_fw_ver;
64
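	/* Note: the size passed to strscpy() below is (whitespace - str + 1), so
	 * that exactly the characters preceding the whitespace are copied and
	 * the result is NUL-terminated.
	 */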
65	strscpy(fw_ver, str, whitespace - str + 1);
66
67	return fw_ver;
68
69free_fw_ver:
70	kfree(fw_ver);
71	return NULL;
72}
73
74/**
75 * extract_u32_until_given_char() - given a string of the format "<u32><char>*", extract the u32.
76 * @str: the given string
77 * @ver_num: the pointer to the extracted u32 to be returned to the caller.
78 * @given_char: the given char at the end of the u32 in the string
79 *
80 * Return: Upon success, return a pointer to the given_char in the string. Upon failure, return NULL
81 */
82static char *extract_u32_until_given_char(char *str, u32 *ver_num, char given_char)
83{
84	char num_str[8] = {}, *ch;
85
86	ch = strchrnul(str, given_char);
87	if (*ch == '\0' || ch == str || ch - str >= sizeof(num_str))
88		return NULL;
89
90	memcpy(num_str, str, ch - str);
91	if (kstrtou32(num_str, 10, ver_num))
92		return NULL;
93	return ch;
94}
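/*
 * Example: given the string "42.0.1-sec-3", extract_u32_until_given_char(str, &num, '.')
 * sets num to 42 and returns a pointer to the first '.'; the callers below advance past
 * the returned character and repeat the call for the next field.
 */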
95
96/**
97 * hl_get_sw_major_minor_subminor() - extract the FW's SW version major, minor, sub-minor
98 *				      from the version string
99 * @hdev: pointer to the hl_device
100 * @fw_str: the FW's version string
101 *
102 * The extracted version is set in the hdev fields: fw_sw_{major/minor/sub_minor}_ver.
103 *
104 * fw_str is expected to have one of two possible formats, examples:
105 * 1) 'Preboot version hl-gaudi2-1.9.0-fw-42.0.1-sec-3'
106 * 2) 'Preboot version hl-gaudi2-1.9.0-rc-fw-42.0.1-sec-3'
 * In those examples, the SW major, minor and sub-minor versions are 1, 9 and 0, respectively.
108 *
109 * Return: 0 for success or a negative error code for failure.
110 */
111static int hl_get_sw_major_minor_subminor(struct hl_device *hdev, const char *fw_str)
112{
113	char *end, *start;
114
115	end = strnstr(fw_str, "-rc-", VERSION_MAX_LEN);
116	if (end == fw_str)
117		return -EINVAL;
118
119	if (!end)
120		end = strnstr(fw_str, "-fw-", VERSION_MAX_LEN);
121
122	if (end == fw_str)
123		return -EINVAL;
124
125	if (!end)
126		return -EINVAL;
127
128	for (start = end - 1; start != fw_str; start--) {
129		if (*start == '-')
130			break;
131	}
132
133	if (start == fw_str)
134		return -EINVAL;
135
136	/* start/end point each to the starting and ending hyphen of the sw version e.g. -1.9.0- */
137	start++;
138	start = extract_u32_until_given_char(start, &hdev->fw_sw_major_ver, '.');
139	if (!start)
140		goto err_zero_ver;
141
142	start++;
143	start = extract_u32_until_given_char(start, &hdev->fw_sw_minor_ver, '.');
144	if (!start)
145		goto err_zero_ver;
146
147	start++;
148	start = extract_u32_until_given_char(start, &hdev->fw_sw_sub_minor_ver, '-');
149	if (!start)
150		goto err_zero_ver;
151
152	return 0;
153
154err_zero_ver:
155	hdev->fw_sw_major_ver = 0;
156	hdev->fw_sw_minor_ver = 0;
157	hdev->fw_sw_sub_minor_ver = 0;
158	return -EINVAL;
159}
160
161/**
162 * hl_get_preboot_major_minor() - extract the FW's version major, minor from the version string.
163 * @hdev: pointer to the hl_device
164 * @preboot_ver: the FW's version string
165 *
 * preboot_ver is expected to be in the format <major>.<minor>.<sub-minor>*, e.g.: 42.0.1-sec-3
167 * The extracted version is set in the hdev fields: fw_inner_{major/minor}_ver.
168 *
169 * Return: 0 on success, negative error code for failure.
170 */
171static int hl_get_preboot_major_minor(struct hl_device *hdev, char *preboot_ver)
172{
173	preboot_ver = extract_u32_until_given_char(preboot_ver, &hdev->fw_inner_major_ver, '.');
174	if (!preboot_ver) {
175		dev_err(hdev->dev, "Error parsing preboot major version\n");
176		goto err_zero_ver;
177	}
178
179	preboot_ver++;
180
181	preboot_ver = extract_u32_until_given_char(preboot_ver, &hdev->fw_inner_minor_ver, '.');
182	if (!preboot_ver) {
183		dev_err(hdev->dev, "Error parsing preboot minor version\n");
184		goto err_zero_ver;
185	}
186	return 0;
187
188err_zero_ver:
189	hdev->fw_inner_major_ver = 0;
190	hdev->fw_inner_minor_ver = 0;
191	return -EINVAL;
192}
193
194static int hl_request_fw(struct hl_device *hdev,
195				const struct firmware **firmware_p,
196				const char *fw_name)
197{
198	size_t fw_size;
199	int rc;
200
201	rc = request_firmware(firmware_p, fw_name, hdev->dev);
202	if (rc) {
203		dev_err(hdev->dev, "Firmware file %s is not found! (error %d)\n",
204				fw_name, rc);
205		goto out;
206	}
207
208	fw_size = (*firmware_p)->size;
209	if ((fw_size % 4) != 0) {
210		dev_err(hdev->dev, "Illegal %s firmware size %zu\n",
211				fw_name, fw_size);
212		rc = -EINVAL;
213		goto release_fw;
214	}
215
216	dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
217
218	if (fw_size > FW_FILE_MAX_SIZE) {
219		dev_err(hdev->dev,
220			"FW file size %zu exceeds maximum of %u bytes\n",
221			fw_size, FW_FILE_MAX_SIZE);
222		rc = -EINVAL;
223		goto release_fw;
224	}
225
226	return 0;
227
228release_fw:
229	release_firmware(*firmware_p);
230out:
231	return rc;
232}
233
234/**
235 * hl_release_firmware() - release FW
236 *
237 * @fw: fw descriptor
238 *
 * note: this inline function was added to serve as a symmetric counterpart to
 *       the hl_request_fw function.
241 */
242static inline void hl_release_firmware(const struct firmware *fw)
243{
244	release_firmware(fw);
245}
246
247/**
248 * hl_fw_copy_fw_to_device() - copy FW to device
249 *
250 * @hdev: pointer to hl_device structure.
251 * @fw: fw descriptor
252 * @dst: IO memory mapped address space to copy firmware to
253 * @src_offset: offset in src FW to copy from
254 * @size: amount of bytes to copy (0 to copy the whole binary)
255 *
256 * actual copy of FW binary data to device, shared by static and dynamic loaders
257 */
258static int hl_fw_copy_fw_to_device(struct hl_device *hdev,
259				const struct firmware *fw, void __iomem *dst,
260				u32 src_offset, u32 size)
261{
262	const void *fw_data;
263
264	/* size 0 indicates to copy the whole file */
265	if (!size)
266		size = fw->size;
267
268	if (src_offset + size > fw->size) {
269		dev_err(hdev->dev,
270			"size to copy(%u) and offset(%u) are invalid\n",
271			size, src_offset);
272		return -EINVAL;
273	}
274
275	fw_data = (const void *) fw->data;
276
277	memcpy_toio(dst, fw_data + src_offset, size);
278	return 0;
279}
280
281/**
282 * hl_fw_copy_msg_to_device() - copy message to device
283 *
284 * @hdev: pointer to hl_device structure.
285 * @msg: message
286 * @dst: IO memory mapped address space to copy firmware to
287 * @src_offset: offset in src message to copy from
288 * @size: amount of bytes to copy (0 to copy the whole binary)
289 *
290 * actual copy of message data to device.
291 */
292static int hl_fw_copy_msg_to_device(struct hl_device *hdev,
293		struct lkd_msg_comms *msg, void __iomem *dst,
294		u32 src_offset, u32 size)
295{
296	void *msg_data;
297
298	/* size 0 indicates to copy the whole file */
299	if (!size)
300		size = sizeof(struct lkd_msg_comms);
301
302	if (src_offset + size > sizeof(struct lkd_msg_comms)) {
303		dev_err(hdev->dev,
304			"size to copy(%u) and offset(%u) are invalid\n",
305			size, src_offset);
306		return -EINVAL;
307	}
308
309	msg_data = (void *) msg;
310
311	memcpy_toio(dst, msg_data + src_offset, size);
312
313	return 0;
314}
315
316/**
317 * hl_fw_load_fw_to_device() - Load F/W code to device's memory.
318 *
319 * @hdev: pointer to hl_device structure.
320 * @fw_name: the firmware image name
321 * @dst: IO memory mapped address space to copy firmware to
322 * @src_offset: offset in src FW to copy from
323 * @size: amount of bytes to copy (0 to copy the whole binary)
324 *
325 * Copy fw code from firmware file to device memory.
326 *
327 * Return: 0 on success, non-zero for failure.
328 */
329int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
330				void __iomem *dst, u32 src_offset, u32 size)
331{
332	const struct firmware *fw;
333	int rc;
334
335	rc = hl_request_fw(hdev, &fw, fw_name);
336	if (rc)
337		return rc;
338
339	rc = hl_fw_copy_fw_to_device(hdev, fw, dst, src_offset, size);
340
341	hl_release_firmware(fw);
342	return rc;
343}
344
345int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value)
346{
347	struct cpucp_packet pkt = {};
348
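	/* As with all CPU-CP packets, the opcode is packed into the 'ctl' word;
	 * the FW extracts it using CPUCP_PKT_CTL_OPCODE_MASK.
	 */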
349	pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
350	pkt.value = cpu_to_le64(value);
351
352	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
353}
354
355int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
356				u16 len, u32 timeout, u64 *result)
357{
358	struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id];
359	struct asic_fixed_properties *prop = &hdev->asic_prop;
360	struct cpucp_packet *pkt;
361	dma_addr_t pkt_dma_addr;
362	struct hl_bd *sent_bd;
363	u32 tmp, expected_ack_val, pi, opcode;
364	int rc;
365
366	pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
367	if (!pkt) {
368		dev_err(hdev->dev,
369			"Failed to allocate DMA memory for packet to CPU\n");
370		return -ENOMEM;
371	}
372
373	memcpy(pkt, msg, len);
374
375	mutex_lock(&hdev->send_cpu_message_lock);
376
377	/* CPU-CP messages can be sent during soft-reset */
378	if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
379		rc = 0;
380		goto out;
381	}
382
383	if (hdev->device_cpu_disabled) {
384		rc = -EIO;
385		goto out;
386	}
387
388	/* set fence to a non valid value */
389	pkt->fence = cpu_to_le32(UINT_MAX);
390	pi = queue->pi;
391
	/*
	 * The CPU queue is a synchronous queue with an effective depth of
	 * a single entry (although it is allocated with room for multiple
	 * entries). We lock on it using 'send_cpu_message_lock', which
	 * serializes accesses to the CPU queue, so we don't need to lock the
	 * entire H/W queues module when submitting a JOB to the CPU queue.
	 */
400	hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr);
401
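	/* The FW acknowledges the packet by overwriting the fence field: F/W
	 * versions that set the PKT_PI_ACK capability bit are expected to write
	 * back the new PI value, while older versions write
	 * CPUCP_PACKET_FENCE_VAL.
	 */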
402	if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
403		expected_ack_val = queue->pi;
404	else
405		expected_ack_val = CPUCP_PACKET_FENCE_VAL;
406
407	rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
408				(tmp == expected_ack_val), 1000,
409				timeout, true);
410
411	hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
412
413	if (rc == -ETIMEDOUT) {
		/* If the FW performed a reset just before we sent it a packet, we will get
		 * a timeout. This is expected behavior, hence no need for an error message.
		 */
417		if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset)
418			dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
419					tmp);
420		else
421			dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n", tmp);
422		hdev->device_cpu_disabled = true;
423		goto out;
424	}
425
426	tmp = le32_to_cpu(pkt->ctl);
427
428	rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
429	if (rc) {
430		opcode = (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT;
431
432		if (!prop->supports_advanced_cpucp_rc) {
433			dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode);
434			rc = -EIO;
435			goto scrub_descriptor;
436		}
437
438		switch (rc) {
439		case cpucp_packet_invalid:
440			dev_err(hdev->dev,
441				"CPU packet %d is not supported by F/W\n", opcode);
442			break;
443		case cpucp_packet_fault:
444			dev_err(hdev->dev,
445				"F/W failed processing CPU packet %d\n", opcode);
446			break;
447		case cpucp_packet_invalid_pkt:
448			dev_dbg(hdev->dev,
449				"CPU packet %d is not supported by F/W\n", opcode);
450			break;
451		case cpucp_packet_invalid_params:
452			dev_err(hdev->dev,
453				"F/W reports invalid parameters for CPU packet %d\n", opcode);
454			break;
455
456		default:
457			dev_err(hdev->dev,
458				"Unknown F/W ERROR %d for CPU packet %d\n", rc, opcode);
459		}
460
461		/* propagate the return code from the f/w to the callers who want to check it */
462		if (result)
463			*result = rc;
464
465		rc = -EIO;
466
467	} else if (result) {
468		*result = le64_to_cpu(pkt->result);
469	}
470
471scrub_descriptor:
472	/* Scrub previous buffer descriptor 'ctl' field which contains the
473	 * previous PI value written during packet submission.
474	 * We must do this or else F/W can read an old value upon queue wraparound.
475	 */
476	sent_bd = queue->kernel_address;
477	sent_bd += hl_pi_2_offset(pi);
478	sent_bd->ctl = cpu_to_le32(UINT_MAX);
479
480out:
481	mutex_unlock(&hdev->send_cpu_message_lock);
482
483	hl_cpu_accessible_dma_pool_free(hdev, len, pkt);
484
485	return rc;
486}
487
488int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
489{
490	struct cpucp_packet pkt;
491	u64 result;
492	int rc;
493
494	memset(&pkt, 0, sizeof(pkt));
495
496	pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
497				CPUCP_PKT_CTL_OPCODE_SHIFT);
498	pkt.value = cpu_to_le64(event_type);
499
500	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
501						0, &result);
502
503	if (rc)
504		dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
505
506	return rc;
507}
508
509int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
510		size_t irq_arr_size)
511{
512	struct cpucp_unmask_irq_arr_packet *pkt;
513	size_t total_pkt_size;
514	u64 result;
515	int rc;
516
517	total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
518			irq_arr_size;
519
	/* data should be aligned to 8 bytes in order for CPU-CP to copy it */
521	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
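	/* e.g. a total size of 26 bytes is rounded up to 32 */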
522
	/* total_pkt_size is cast to u16 later on */
524	if (total_pkt_size > USHRT_MAX) {
525		dev_err(hdev->dev, "too many elements in IRQ array\n");
526		return -EINVAL;
527	}
528
529	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
530	if (!pkt)
531		return -ENOMEM;
532
533	pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
534	memcpy(&pkt->irqs, irq_arr, irq_arr_size);
535
536	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
537						CPUCP_PKT_CTL_OPCODE_SHIFT);
538
539	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
540						total_pkt_size, 0, &result);
541
542	if (rc)
543		dev_err(hdev->dev, "failed to unmask IRQ array\n");
544
545	kfree(pkt);
546
547	return rc;
548}
549
550int hl_fw_test_cpu_queue(struct hl_device *hdev)
551{
552	struct cpucp_packet test_pkt = {};
553	u64 result;
554	int rc;
555
556	test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
557					CPUCP_PKT_CTL_OPCODE_SHIFT);
558	test_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
559
560	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
561						sizeof(test_pkt), 0, &result);
562
563	if (!rc) {
564		if (result != CPUCP_PACKET_FENCE_VAL)
565			dev_err(hdev->dev,
566				"CPU queue test failed (%#08llx)\n", result);
567	} else {
568		dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
569	}
570
571	return rc;
572}
573
574void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
575						dma_addr_t *dma_handle)
576{
577	u64 kernel_addr;
578
579	kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
580
581	*dma_handle = hdev->cpu_accessible_dma_address +
582		(kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
583
584	return (void *) (uintptr_t) kernel_addr;
585}
586
587void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
588					void *vaddr)
589{
590	gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
591			size);
592}
593
594int hl_fw_send_soft_reset(struct hl_device *hdev)
595{
596	struct cpucp_packet pkt;
597	int rc;
598
599	memset(&pkt, 0, sizeof(pkt));
600	pkt.ctl = cpu_to_le32(CPUCP_PACKET_SOFT_RESET << CPUCP_PKT_CTL_OPCODE_SHIFT);
601	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
602	if (rc)
603		dev_err(hdev->dev, "failed to send soft-reset msg (err = %d)\n", rc);
604
605	return rc;
606}
607
608int hl_fw_send_device_activity(struct hl_device *hdev, bool open)
609{
610	struct cpucp_packet pkt;
611	int rc;
612
613	memset(&pkt, 0, sizeof(pkt));
	pkt.ctl = cpu_to_le32(CPUCP_PACKET_ACTIVE_STATUS_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
615	pkt.value = cpu_to_le64(open);
616	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
617	if (rc)
618		dev_err(hdev->dev, "failed to send device activity msg(%u)\n", open);
619
620	return rc;
621}
622
623int hl_fw_send_heartbeat(struct hl_device *hdev)
624{
625	struct cpucp_packet hb_pkt;
626	u64 result;
627	int rc;
628
629	memset(&hb_pkt, 0, sizeof(hb_pkt));
630	hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
631					CPUCP_PKT_CTL_OPCODE_SHIFT);
632	hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
633
634	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
635						sizeof(hb_pkt), 0, &result);
636
637	if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
638		return -EIO;
639
640	if (le32_to_cpu(hb_pkt.status_mask) &
641					CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK) {
642		dev_warn(hdev->dev, "FW reported EQ fault during heartbeat\n");
643		rc = -EIO;
644	}
645
646	return rc;
647}
648
649static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
650								u32 sts_val)
651{
652	bool err_exists = false;
653
654	if (!(err_val & CPU_BOOT_ERR0_ENABLED))
655		return false;
656
657	if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL) {
658		dev_err(hdev->dev,
659			"Device boot error - DRAM initialization failed\n");
660		err_exists = true;
661	}
662
663	if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED) {
664		dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
665		err_exists = true;
666	}
667
668	if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL) {
669		dev_err(hdev->dev,
670			"Device boot error - Thermal Sensor initialization failed\n");
671		err_exists = true;
672	}
673
674	if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
675		if (hdev->bmc_enable) {
676			dev_err(hdev->dev,
677				"Device boot error - Skipped waiting for BMC\n");
678			err_exists = true;
679		} else {
680			dev_info(hdev->dev,
681				"Device boot message - Skipped waiting for BMC\n");
			/* This is only informational, so we don't want it to
			 * disable the device
			 */
685			err_val &= ~CPU_BOOT_ERR0_BMC_WAIT_SKIPPED;
686		}
687	}
688
689	if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY) {
690		dev_err(hdev->dev,
691			"Device boot error - Serdes data from BMC not available\n");
692		err_exists = true;
693	}
694
695	if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) {
696		dev_err(hdev->dev,
697			"Device boot error - NIC F/W initialization failed\n");
698		err_exists = true;
699	}
700
701	if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) {
702		dev_err(hdev->dev,
703			"Device boot warning - security not ready\n");
704		err_exists = true;
705	}
706
707	if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) {
708		dev_err(hdev->dev, "Device boot error - security failure\n");
709		err_exists = true;
710	}
711
712	if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL) {
713		dev_err(hdev->dev, "Device boot error - eFuse failure\n");
714		err_exists = true;
715	}
716
717	if (err_val & CPU_BOOT_ERR0_SEC_IMG_VER_FAIL) {
718		dev_err(hdev->dev, "Device boot error - Failed to load preboot secondary image\n");
719		err_exists = true;
720	}
721
722	if (err_val & CPU_BOOT_ERR0_PLL_FAIL) {
723		dev_err(hdev->dev, "Device boot error - PLL failure\n");
724		err_exists = true;
725	}
726
727	if (err_val & CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL) {
728		/* Ignore this bit, don't prevent driver loading */
729		dev_dbg(hdev->dev, "device unusable status is set\n");
730		err_val &= ~CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL;
731	}
732
733	if (err_val & CPU_BOOT_ERR0_BINNING_FAIL) {
734		dev_err(hdev->dev, "Device boot error - binning failure\n");
735		err_exists = true;
736	}
737
738	if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
739		dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
740
741	if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) {
742		dev_err(hdev->dev, "Device boot error - EEPROM failure detected\n");
743		err_exists = true;
744	}
745
	/* All warnings should be handled here so they don't reach the unknown-error check below */
747	if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
748		dev_warn(hdev->dev,
749			"Device boot warning - Skipped DRAM initialization\n");
750		/* This is a warning so we don't want it to disable the
751		 * device
752		 */
753		err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED;
754	}
755
756	if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) {
757		dev_warn(hdev->dev,
758			"Device boot warning - Failed to load preboot primary image\n");
759		/* This is a warning so we don't want it to disable the
760		 * device as we have a secondary preboot image
761		 */
762		err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL;
763	}
764
765	if (err_val & CPU_BOOT_ERR0_TPM_FAIL) {
766		dev_warn(hdev->dev,
767			"Device boot warning - TPM failure\n");
768		/* This is a warning so we don't want it to disable the
769		 * device
770		 */
771		err_val &= ~CPU_BOOT_ERR0_TPM_FAIL;
772	}
773
774	if (!err_exists && (err_val & ~CPU_BOOT_ERR0_ENABLED)) {
775		dev_err(hdev->dev,
776			"Device boot error - unknown ERR0 error 0x%08x\n", err_val);
777		err_exists = true;
778	}
779
780	/* return error only if it's in the predefined mask */
781	if (err_exists && ((err_val & ~CPU_BOOT_ERR0_ENABLED) &
782				lower_32_bits(hdev->boot_error_status_mask)))
783		return true;
784
785	return false;
786}
787
788/* placeholder for ERR1 as no errors defined there yet */
789static bool fw_report_boot_dev1(struct hl_device *hdev, u32 err_val,
790								u32 sts_val)
791{
	/*
	 * Keep this variable to preserve the logic of the function.
	 * This way, fewer modifications will be required when errors are
	 * added to DEV_ERR1.
	 */
797	bool err_exists = false;
798
799	if (!(err_val & CPU_BOOT_ERR1_ENABLED))
800		return false;
801
802	if (sts_val & CPU_BOOT_DEV_STS1_ENABLED)
803		dev_dbg(hdev->dev, "Device status1 %#x\n", sts_val);
804
805	if (!err_exists && (err_val & ~CPU_BOOT_ERR1_ENABLED)) {
806		dev_err(hdev->dev,
807			"Device boot error - unknown ERR1 error 0x%08x\n",
808								err_val);
809		err_exists = true;
810	}
811
812	/* return error only if it's in the predefined mask */
813	if (err_exists && ((err_val & ~CPU_BOOT_ERR1_ENABLED) &
814				upper_32_bits(hdev->boot_error_status_mask)))
815		return true;
816
817	return false;
818}
819
820static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
821				u32 boot_err1_reg, u32 cpu_boot_dev_status0_reg,
822				u32 cpu_boot_dev_status1_reg)
823{
824	u32 err_val, status_val;
825	bool err_exists = false;
826
	/* Some of the firmware status codes are deprecated in newer f/w
	 * versions. In those versions, the errors are reported
	 * in different registers. Therefore, we need to check those
	 * registers and print the exact errors. Moreover, there
	 * may be multiple errors, so we need to report on each error
	 * separately. Some of the error codes might indicate a state
	 * that is not an error per se, but it is an error in a production
	 * environment.
	 */
836	err_val = RREG32(boot_err0_reg);
837	status_val = RREG32(cpu_boot_dev_status0_reg);
838	err_exists = fw_report_boot_dev0(hdev, err_val, status_val);
839
840	err_val = RREG32(boot_err1_reg);
841	status_val = RREG32(cpu_boot_dev_status1_reg);
842	err_exists |= fw_report_boot_dev1(hdev, err_val, status_val);
843
844	if (err_exists)
845		return -EIO;
846
847	return 0;
848}
849
850int hl_fw_cpucp_info_get(struct hl_device *hdev,
851				u32 sts_boot_dev_sts0_reg,
852				u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
853				u32 boot_err1_reg)
854{
855	struct asic_fixed_properties *prop = &hdev->asic_prop;
856	struct cpucp_packet pkt = {};
857	dma_addr_t cpucp_info_dma_addr;
858	void *cpucp_info_cpu_addr;
859	char *kernel_ver;
860	u64 result;
861	int rc;
862
863	cpucp_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, sizeof(struct cpucp_info),
864								&cpucp_info_dma_addr);
865	if (!cpucp_info_cpu_addr) {
866		dev_err(hdev->dev,
867			"Failed to allocate DMA memory for CPU-CP info packet\n");
868		return -ENOMEM;
869	}
870
871	memset(cpucp_info_cpu_addr, 0, sizeof(struct cpucp_info));
872
873	pkt.ctl = cpu_to_le32(CPUCP_PACKET_INFO_GET <<
874				CPUCP_PKT_CTL_OPCODE_SHIFT);
875	pkt.addr = cpu_to_le64(cpucp_info_dma_addr);
876	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_info));
877
878	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
879					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
880	if (rc) {
881		dev_err(hdev->dev,
882			"Failed to handle CPU-CP info pkt, error %d\n", rc);
883		goto out;
884	}
885
886	rc = fw_read_errors(hdev, boot_err0_reg, boot_err1_reg,
887				sts_boot_dev_sts0_reg, sts_boot_dev_sts1_reg);
888	if (rc) {
889		dev_err(hdev->dev, "Errors in device boot\n");
890		goto out;
891	}
892
893	memcpy(&prop->cpucp_info, cpucp_info_cpu_addr,
894			sizeof(prop->cpucp_info));
895
896	rc = hl_build_hwmon_channel_info(hdev, prop->cpucp_info.sensors);
897	if (rc) {
898		dev_err(hdev->dev,
899			"Failed to build hwmon channel info, error %d\n", rc);
900		rc = -EFAULT;
901		goto out;
902	}
903
904	kernel_ver = extract_fw_ver_from_str(prop->cpucp_info.kernel_version);
905	if (kernel_ver) {
906		dev_info(hdev->dev, "Linux version %s", kernel_ver);
907		kfree(kernel_ver);
908	}
909
910	/* assume EQ code doesn't need to check eqe index */
911	hdev->event_queue.check_eqe_index = false;
912
913	/* Read FW application security bits again */
914	if (prop->fw_cpu_boot_dev_sts0_valid) {
915		prop->fw_app_cpu_boot_dev_sts0 = RREG32(sts_boot_dev_sts0_reg);
916		if (prop->fw_app_cpu_boot_dev_sts0 &
917				CPU_BOOT_DEV_STS0_EQ_INDEX_EN)
918			hdev->event_queue.check_eqe_index = true;
919	}
920
921	if (prop->fw_cpu_boot_dev_sts1_valid)
922		prop->fw_app_cpu_boot_dev_sts1 = RREG32(sts_boot_dev_sts1_reg);
923
924out:
925	hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_info), cpucp_info_cpu_addr);
926
927	return rc;
928}
929
930static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
931{
932	struct cpucp_array_data_packet *pkt;
933	size_t total_pkt_size, data_size;
934	u64 result;
935	int rc;
936
937	/* skip sending this info for unsupported ASICs */
938	if (!hdev->asic_funcs->get_msi_info)
939		return 0;
940
941	data_size = CPUCP_NUM_OF_MSI_TYPES * sizeof(u32);
942	total_pkt_size = sizeof(struct cpucp_array_data_packet) + data_size;
943
	/* data should be aligned to 8 bytes in order for CPU-CP to copy it */
945	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
946
	/* total_pkt_size is cast to u16 later on */
948	if (total_pkt_size > USHRT_MAX) {
949		dev_err(hdev->dev, "CPUCP array data is too big\n");
950		return -EINVAL;
951	}
952
953	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
954	if (!pkt)
955		return -ENOMEM;
956
957	pkt->length = cpu_to_le32(CPUCP_NUM_OF_MSI_TYPES);
958
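	/* Pre-fill the array with 0xFF (presumably marking unused/invalid
	 * entries) before the ASIC-specific callback fills in the real MSI
	 * values.
	 */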
959	memset((void *) &pkt->data, 0xFF, data_size);
960	hdev->asic_funcs->get_msi_info(pkt->data);
961
962	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_MSI_INFO_SET <<
963						CPUCP_PKT_CTL_OPCODE_SHIFT);
964
965	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)pkt,
966						total_pkt_size, 0, &result);
967
	/*
	 * If the packet result is invalid, the FW does not support this
	 * feature and will use default/hard-coded MSI values. There is no
	 * reason to stop the boot in that case.
	 */
973	if (rc && result == cpucp_packet_invalid)
974		rc = 0;
975
976	if (rc)
977		dev_err(hdev->dev, "failed to send CPUCP array data\n");
978
979	kfree(pkt);
980
981	return rc;
982}
983
984int hl_fw_cpucp_handshake(struct hl_device *hdev,
985				u32 sts_boot_dev_sts0_reg,
986				u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
987				u32 boot_err1_reg)
988{
989	int rc;
990
991	rc = hl_fw_cpucp_info_get(hdev, sts_boot_dev_sts0_reg,
992					sts_boot_dev_sts1_reg, boot_err0_reg,
993					boot_err1_reg);
994	if (rc)
995		return rc;
996
997	return hl_fw_send_msi_info_msg(hdev);
998}
999
1000int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
1001{
1002	struct cpucp_packet pkt = {};
1003	void *eeprom_info_cpu_addr;
1004	dma_addr_t eeprom_info_dma_addr;
1005	u64 result;
1006	int rc;
1007
1008	eeprom_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, max_size,
1009									&eeprom_info_dma_addr);
1010	if (!eeprom_info_cpu_addr) {
1011		dev_err(hdev->dev,
1012			"Failed to allocate DMA memory for CPU-CP EEPROM packet\n");
1013		return -ENOMEM;
1014	}
1015
1016	memset(eeprom_info_cpu_addr, 0, max_size);
1017
1018	pkt.ctl = cpu_to_le32(CPUCP_PACKET_EEPROM_DATA_GET <<
1019				CPUCP_PKT_CTL_OPCODE_SHIFT);
1020	pkt.addr = cpu_to_le64(eeprom_info_dma_addr);
1021	pkt.data_max_size = cpu_to_le32(max_size);
1022
1023	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1024			HL_CPUCP_EEPROM_TIMEOUT_USEC, &result);
1025
1026	if (rc) {
1027		dev_err(hdev->dev,
1028			"Failed to handle CPU-CP EEPROM packet, error %d\n",
1029			rc);
1030		goto out;
1031	}
1032
1033	/* result contains the actual size */
1034	memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
1035
1036out:
1037	hl_cpu_accessible_dma_pool_free(hdev, max_size, eeprom_info_cpu_addr);
1038
1039	return rc;
1040}
1041
1042int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
1043{
1044	struct cpucp_monitor_dump *mon_dump_cpu_addr;
1045	dma_addr_t mon_dump_dma_addr;
1046	struct cpucp_packet pkt = {};
1047	size_t data_size;
1048	__le32 *src_ptr;
1049	u32 *dst_ptr;
1050	u64 result;
1051	int i, rc;
1052
1053	data_size = sizeof(struct cpucp_monitor_dump);
1054	mon_dump_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, data_size, &mon_dump_dma_addr);
1055	if (!mon_dump_cpu_addr) {
1056		dev_err(hdev->dev,
1057			"Failed to allocate DMA memory for CPU-CP monitor-dump packet\n");
1058		return -ENOMEM;
1059	}
1060
1061	memset(mon_dump_cpu_addr, 0, data_size);
1062
1063	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MONITOR_DUMP_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
1064	pkt.addr = cpu_to_le64(mon_dump_dma_addr);
1065	pkt.data_max_size = cpu_to_le32(data_size);
1066
1067	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1068							HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result);
1069	if (rc) {
1070		dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
1071		goto out;
1072	}
1073
1074	/* result contains the actual size */
1075	src_ptr = (__le32 *) mon_dump_cpu_addr;
1076	dst_ptr = data;
1077	for (i = 0; i < (data_size / sizeof(u32)); i++) {
1078		*dst_ptr = le32_to_cpu(*src_ptr);
1079		src_ptr++;
1080		dst_ptr++;
1081	}
1082
1083out:
1084	hl_cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
1085
1086	return rc;
1087}
1088
1089int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
1090		struct hl_info_pci_counters *counters)
1091{
1092	struct cpucp_packet pkt = {};
1093	u64 result;
1094	int rc;
1095
1096	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
1097			CPUCP_PKT_CTL_OPCODE_SHIFT);
1098
1099	/* Fetch PCI rx counter */
1100	pkt.index = cpu_to_le32(cpucp_pcie_throughput_rx);
1101	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1102					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1103	if (rc) {
1104		dev_err(hdev->dev,
1105			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1106		return rc;
1107	}
1108	counters->rx_throughput = result;
1109
1110	memset(&pkt, 0, sizeof(pkt));
1111	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
1112			CPUCP_PKT_CTL_OPCODE_SHIFT);
1113
1114	/* Fetch PCI tx counter */
1115	pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
1116	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1117					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1118	if (rc) {
1119		dev_err(hdev->dev,
1120			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1121		return rc;
1122	}
1123	counters->tx_throughput = result;
1124
1125	/* Fetch PCI replay counter */
1126	memset(&pkt, 0, sizeof(pkt));
1127	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
1128			CPUCP_PKT_CTL_OPCODE_SHIFT);
1129
1130	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1131			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1132	if (rc) {
1133		dev_err(hdev->dev,
1134			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1135		return rc;
1136	}
1137	counters->replay_cnt = (u32) result;
1138
1139	return rc;
1140}
1141
1142int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
1143{
1144	struct cpucp_packet pkt = {};
1145	u64 result;
1146	int rc;
1147
1148	pkt.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET <<
1149				CPUCP_PKT_CTL_OPCODE_SHIFT);
1150
1151	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1152					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1153	if (rc) {
1154		dev_err(hdev->dev,
1155			"Failed to handle CpuCP total energy pkt, error %d\n",
1156				rc);
1157		return rc;
1158	}
1159
1160	*total_energy = result;
1161
1162	return rc;
1163}
1164
1165int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
1166						enum pll_index *pll_index)
1167{
1168	struct asic_fixed_properties *prop = &hdev->asic_prop;
1169	u8 pll_byte, pll_bit_off;
1170	bool dynamic_pll;
1171	int fw_pll_idx;
1172
1173	dynamic_pll = !!(prop->fw_app_cpu_boot_dev_sts0 &
1174						CPU_BOOT_DEV_STS0_DYN_PLL_EN);
1175
1176	if (!dynamic_pll) {
		/*
		 * When working with legacy FW (each ASIC has its own unique
		 * PLL numbering), use the driver-based index as it is
		 * aligned with the FW's legacy numbering.
		 */
1182		*pll_index = input_pll_index;
1183		return 0;
1184	}
1185
1186	/* retrieve a FW compatible PLL index based on
1187	 * ASIC specific user request
1188	 */
1189	fw_pll_idx = hdev->asic_funcs->map_pll_idx_to_fw_idx(input_pll_index);
1190	if (fw_pll_idx < 0) {
1191		dev_err(hdev->dev, "Invalid PLL index (%u) error %d\n",
1192			input_pll_index, fw_pll_idx);
1193		return -EINVAL;
1194	}
1195
1196	/* PLL map is a u8 array */
1197	pll_byte = prop->cpucp_info.pll_map[fw_pll_idx >> 3];
1198	pll_bit_off = fw_pll_idx & 0x7;
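	/* e.g. fw_pll_idx 10 maps to byte 1, bit 2 of the PLL map */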
1199
1200	if (!(pll_byte & BIT(pll_bit_off))) {
1201		dev_err(hdev->dev, "PLL index %d is not supported\n",
1202			fw_pll_idx);
1203		return -EINVAL;
1204	}
1205
1206	*pll_index = fw_pll_idx;
1207
1208	return 0;
1209}
1210
1211int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
1212		u16 *pll_freq_arr)
1213{
1214	struct cpucp_packet pkt;
1215	enum pll_index used_pll_idx;
1216	u64 result;
1217	int rc;
1218
1219	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
1220	if (rc)
1221		return rc;
1222
1223	memset(&pkt, 0, sizeof(pkt));
1224
1225	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_INFO_GET <<
1226				CPUCP_PKT_CTL_OPCODE_SHIFT);
1227	pkt.pll_type = __cpu_to_le16((u16)used_pll_idx);
1228
1229	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1230			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1231	if (rc) {
1232		dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
1233		return rc;
1234	}
1235
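	/* The 64-bit result packs the four PLL output frequencies, 16 bits each */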
1236	pll_freq_arr[0] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT0_MASK, result);
1237	pll_freq_arr[1] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT1_MASK, result);
1238	pll_freq_arr[2] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT2_MASK, result);
1239	pll_freq_arr[3] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT3_MASK, result);
1240
1241	return 0;
1242}
1243
1244int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
1245{
1246	struct cpucp_packet pkt;
1247	u64 result;
1248	int rc;
1249
1250	memset(&pkt, 0, sizeof(pkt));
1251
1252	pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
1253				CPUCP_PKT_CTL_OPCODE_SHIFT);
1254	pkt.type = cpu_to_le16(CPUCP_POWER_INPUT);
1255
1256	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1257			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1258	if (rc) {
1259		dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
1260		return rc;
1261	}
1262
1263	*power = result;
1264
1265	return rc;
1266}
1267
1268int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
1269				struct cpucp_hbm_row_info *info)
1270{
1271	struct cpucp_hbm_row_info *cpucp_repl_rows_info_cpu_addr;
1272	dma_addr_t cpucp_repl_rows_info_dma_addr;
1273	struct cpucp_packet pkt = {};
1274	u64 result;
1275	int rc;
1276
1277	cpucp_repl_rows_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev,
1278							sizeof(struct cpucp_hbm_row_info),
1279							&cpucp_repl_rows_info_dma_addr);
1280	if (!cpucp_repl_rows_info_cpu_addr) {
1281		dev_err(hdev->dev,
1282			"Failed to allocate DMA memory for CPU-CP replaced rows info packet\n");
1283		return -ENOMEM;
1284	}
1285
1286	memset(cpucp_repl_rows_info_cpu_addr, 0, sizeof(struct cpucp_hbm_row_info));
1287
1288	pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET <<
1289					CPUCP_PKT_CTL_OPCODE_SHIFT);
1290	pkt.addr = cpu_to_le64(cpucp_repl_rows_info_dma_addr);
1291	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_hbm_row_info));
1292
1293	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1294					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1295	if (rc) {
1296		dev_err(hdev->dev,
1297			"Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
1298		goto out;
1299	}
1300
1301	memcpy(info, cpucp_repl_rows_info_cpu_addr, sizeof(*info));
1302
1303out:
1304	hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_hbm_row_info),
1305						cpucp_repl_rows_info_cpu_addr);
1306
1307	return rc;
1308}
1309
1310int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num)
1311{
1312	struct cpucp_packet pkt;
1313	u64 result;
1314	int rc;
1315
1316	memset(&pkt, 0, sizeof(pkt));
1317
1318	pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_PENDING_ROWS_STATUS << CPUCP_PKT_CTL_OPCODE_SHIFT);
1319
1320	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
1321	if (rc) {
1322		dev_err(hdev->dev,
1323				"Failed to handle CPU-CP pending rows info pkt, error %d\n", rc);
1324		goto out;
1325	}
1326
1327	*pend_rows_num = (u32) result;
1328out:
1329	return rc;
1330}
1331
1332int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid)
1333{
1334	struct cpucp_packet pkt;
1335	int rc;
1336
1337	memset(&pkt, 0, sizeof(pkt));
1338
1339	pkt.ctl = cpu_to_le32(CPUCP_PACKET_ENGINE_CORE_ASID_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
1340	pkt.value = cpu_to_le64(asid);
1341
1342	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1343						HL_CPUCP_INFO_TIMEOUT_USEC, NULL);
1344	if (rc)
1345		dev_err(hdev->dev,
1346			"Failed on ASID configuration request for engine core, error %d\n",
1347			rc);
1348
1349	return rc;
1350}
1351
1352void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev)
1353{
1354	struct static_fw_load_mgr *static_loader =
1355			&hdev->fw_loader.static_loader;
1356	int rc;
1357
1358	if (hdev->asic_prop.dynamic_fw_load) {
1359		rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
1360				COMMS_RST_DEV, 0, false,
1361				hdev->fw_loader.cpu_timeout);
1362		if (rc)
1363			dev_err(hdev->dev, "Failed sending COMMS_RST_DEV\n");
1364	} else {
1365		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_RST_DEV);
1366	}
1367}
1368
1369void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
1370{
1371	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
1372	u32 status, cpu_boot_status_reg, cpu_timeout;
1373	struct static_fw_load_mgr *static_loader;
1374	struct pre_fw_load_props *pre_fw_load;
1375	int rc;
1376
1377	if (hdev->device_cpu_is_halted)
1378		return;
1379
1380	/* Stop device CPU to make sure nothing bad happens */
1381	if (hdev->asic_prop.dynamic_fw_load) {
1382		pre_fw_load = &fw_loader->pre_fw_load;
1383		cpu_timeout = fw_loader->cpu_timeout;
1384		cpu_boot_status_reg = pre_fw_load->cpu_boot_status_reg;
1385
1386		rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
1387				COMMS_GOTO_WFE, 0, false, cpu_timeout);
1388		if (rc) {
1389			dev_err(hdev->dev, "Failed sending COMMS_GOTO_WFE\n");
1390		} else {
1391			rc = hl_poll_timeout(
1392				hdev,
1393				cpu_boot_status_reg,
1394				status,
1395				status == CPU_BOOT_STATUS_IN_WFE,
1396				hdev->fw_poll_interval_usec,
1397				cpu_timeout);
1398			if (rc)
1399				dev_err(hdev->dev, "Current status=%u. Timed-out updating to WFE\n",
1400						status);
1401		}
1402	} else {
1403		static_loader = &hdev->fw_loader.static_loader;
1404		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
1405		msleep(static_loader->cpu_reset_wait_msec);
1406
1407		/* Must clear this register in order to prevent preboot
1408		 * from reading WFE after reboot
1409		 */
1410		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_NA);
1411	}
1412
1413	hdev->device_cpu_is_halted = true;
1414}
1415
1416static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
1417{
1418	/* Some of the status codes below are deprecated in newer f/w
1419	 * versions but we keep them here for backward compatibility
1420	 */
1421	switch (status) {
1422	case CPU_BOOT_STATUS_NA:
1423		dev_err(hdev->dev,
1424			"Device boot progress - BTL/ROM did NOT run\n");
1425		break;
1426	case CPU_BOOT_STATUS_IN_WFE:
1427		dev_err(hdev->dev,
1428			"Device boot progress - Stuck inside WFE loop\n");
1429		break;
1430	case CPU_BOOT_STATUS_IN_BTL:
1431		dev_err(hdev->dev,
1432			"Device boot progress - Stuck in BTL\n");
1433		break;
1434	case CPU_BOOT_STATUS_IN_PREBOOT:
1435		dev_err(hdev->dev,
1436			"Device boot progress - Stuck in Preboot\n");
1437		break;
1438	case CPU_BOOT_STATUS_IN_SPL:
1439		dev_err(hdev->dev,
1440			"Device boot progress - Stuck in SPL\n");
1441		break;
1442	case CPU_BOOT_STATUS_IN_UBOOT:
1443		dev_err(hdev->dev,
1444			"Device boot progress - Stuck in u-boot\n");
1445		break;
1446	case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
1447		dev_err(hdev->dev,
1448			"Device boot progress - DRAM initialization failed\n");
1449		break;
1450	case CPU_BOOT_STATUS_UBOOT_NOT_READY:
1451		dev_err(hdev->dev,
1452			"Device boot progress - Cannot boot\n");
1453		break;
1454	case CPU_BOOT_STATUS_TS_INIT_FAIL:
1455		dev_err(hdev->dev,
1456			"Device boot progress - Thermal Sensor initialization failed\n");
1457		break;
1458	case CPU_BOOT_STATUS_SECURITY_READY:
1459		dev_err(hdev->dev,
1460			"Device boot progress - Stuck in preboot after security initialization\n");
1461		break;
1462	default:
1463		dev_err(hdev->dev,
1464			"Device boot progress - Invalid or unexpected status code %d\n", status);
1465		break;
1466	}
1467}
1468
1469int hl_fw_wait_preboot_ready(struct hl_device *hdev)
1470{
1471	struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
1472	u32 status;
1473	int rc;
1474
1475	/* Need to check two possible scenarios:
1476	 *
1477	 * CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT - for newer firmwares where
1478	 * the preboot is waiting for the boot fit
1479	 *
1480	 * All other status values - for older firmwares where the uboot was
1481	 * loaded from the FLASH
1482	 */
1483	rc = hl_poll_timeout(
1484		hdev,
1485		pre_fw_load->cpu_boot_status_reg,
1486		status,
1487		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
1488		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
1489		(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
1490		hdev->fw_poll_interval_usec,
1491		pre_fw_load->wait_for_preboot_timeout);
1492
1493	if (rc) {
1494		detect_cpu_boot_status(hdev, status);
1495		dev_err(hdev->dev, "CPU boot ready timeout (status = %d)\n", status);
1496
		/* If we read all FF, then something is totally wrong, so there is
		 * no point in reading specific errors
		 */
1500		if (status != -1)
1501			fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
1502						pre_fw_load->boot_err1_reg,
1503						pre_fw_load->sts_boot_dev_sts0_reg,
1504						pre_fw_load->sts_boot_dev_sts1_reg);
1505		return -EIO;
1506	}
1507
1508	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_PREBOOT_CPU;
1509
1510	return 0;
1511}
1512
1513static int hl_fw_read_preboot_caps(struct hl_device *hdev)
1514{
1515	struct pre_fw_load_props *pre_fw_load;
1516	struct asic_fixed_properties *prop;
1517	u32 reg_val;
1518	int rc;
1519
1520	prop = &hdev->asic_prop;
1521	pre_fw_load = &hdev->fw_loader.pre_fw_load;
1522
1523	rc = hl_fw_wait_preboot_ready(hdev);
1524	if (rc)
1525		return rc;
1526
	/*
	 * The DEV_STS* registers contain FW capabilities/features.
	 * We can rely on these registers only if bit CPU_BOOT_DEV_STS*_ENABLED
	 * is set.
	 * In the first read of such a register we store its value ONLY if the
	 * register is enabled (which will be propagated to the next stages)
	 * and also mark the register as valid.
	 * If it is not enabled, the stored value is left at 0, i.e. all
	 * caps/features are off.
	 */
1537	reg_val = RREG32(pre_fw_load->sts_boot_dev_sts0_reg);
1538	if (reg_val & CPU_BOOT_DEV_STS0_ENABLED) {
1539		prop->fw_cpu_boot_dev_sts0_valid = true;
1540		prop->fw_preboot_cpu_boot_dev_sts0 = reg_val;
1541	}
1542
1543	reg_val = RREG32(pre_fw_load->sts_boot_dev_sts1_reg);
1544	if (reg_val & CPU_BOOT_DEV_STS1_ENABLED) {
1545		prop->fw_cpu_boot_dev_sts1_valid = true;
1546		prop->fw_preboot_cpu_boot_dev_sts1 = reg_val;
1547	}
1548
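	/* The FW_LD_COM_EN bit advertises support for the COMMS (dynamic) load
	 * protocol; if it is clear, the legacy (static) load flow is used.
	 */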
1549	prop->dynamic_fw_load = !!(prop->fw_preboot_cpu_boot_dev_sts0 &
1550						CPU_BOOT_DEV_STS0_FW_LD_COM_EN);
1551
1552	/* initialize FW loader once we know what load protocol is used */
1553	hdev->asic_funcs->init_firmware_loader(hdev);
1554
1555	dev_dbg(hdev->dev, "Attempting %s FW load\n",
1556			prop->dynamic_fw_load ? "dynamic" : "legacy");
1557	return 0;
1558}
1559
1560static int hl_fw_static_read_device_fw_version(struct hl_device *hdev,
1561					enum hl_fw_component fwc)
1562{
1563	struct asic_fixed_properties *prop = &hdev->asic_prop;
1564	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
1565	struct static_fw_load_mgr *static_loader;
1566	char *dest, *boot_ver, *preboot_ver;
1567	u32 ver_off, limit;
1568	const char *name;
1569	char btl_ver[32];
1570
1571	static_loader = &hdev->fw_loader.static_loader;
1572
1573	switch (fwc) {
1574	case FW_COMP_BOOT_FIT:
1575		ver_off = RREG32(static_loader->boot_fit_version_offset_reg);
1576		dest = prop->uboot_ver;
1577		name = "Boot-fit";
1578		limit = static_loader->boot_fit_version_max_off;
1579		break;
1580	case FW_COMP_PREBOOT:
1581		ver_off = RREG32(static_loader->preboot_version_offset_reg);
1582		dest = prop->preboot_ver;
1583		name = "Preboot";
1584		limit = static_loader->preboot_version_max_off;
1585		break;
1586	default:
1587		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
1588		return -EIO;
1589	}
1590
1591	ver_off &= static_loader->sram_offset_mask;
1592
1593	if (ver_off < limit) {
1594		memcpy_fromio(dest,
1595			hdev->pcie_bar[fw_loader->sram_bar_id] + ver_off,
1596			VERSION_MAX_LEN);
1597	} else {
1598		dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
1599								name, ver_off);
1600		strscpy(dest, "unavailable", VERSION_MAX_LEN);
1601		return -EIO;
1602	}
1603
1604	if (fwc == FW_COMP_BOOT_FIT) {
1605		boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
1606		if (boot_ver) {
1607			dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
1608			kfree(boot_ver);
1609		}
1610	} else if (fwc == FW_COMP_PREBOOT) {
1611		preboot_ver = strnstr(prop->preboot_ver, "Preboot",
1612						VERSION_MAX_LEN);
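		/* Any text preceding the "Preboot" marker is assumed to be the
		 * boot loader (BTL) version banner, printed separately below.
		 */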
1613		if (preboot_ver && preboot_ver != prop->preboot_ver) {
1614			strscpy(btl_ver, prop->preboot_ver,
1615				min((int) (preboot_ver - prop->preboot_ver),
1616									31));
1617			dev_info(hdev->dev, "%s\n", btl_ver);
1618		}
1619
1620		preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
1621		if (preboot_ver) {
1622			dev_info(hdev->dev, "preboot version %s\n",
1623								preboot_ver);
1624			kfree(preboot_ver);
1625		}
1626	}
1627
1628	return 0;
1629}
1630
/**
 * hl_fw_preboot_update_state - update internal data structures during
 *                              handshake with preboot
 *
 * @hdev: pointer to the habanalabs device structure
 */
1640static void hl_fw_preboot_update_state(struct hl_device *hdev)
1641{
1642	struct asic_fixed_properties *prop = &hdev->asic_prop;
1643	u32 cpu_boot_dev_sts0, cpu_boot_dev_sts1;
1644
1645	cpu_boot_dev_sts0 = prop->fw_preboot_cpu_boot_dev_sts0;
1646	cpu_boot_dev_sts1 = prop->fw_preboot_cpu_boot_dev_sts1;
1647
1648	/* We read boot_dev_sts registers multiple times during boot:
1649	 * 1. preboot - a. Check whether the security status bits are valid
1650	 *              b. Check whether fw security is enabled
1651	 *              c. Check whether hard reset is done by preboot
1652	 * 2. boot cpu - a. Fetch boot cpu security status
1653	 *               b. Check whether hard reset is done by boot cpu
1654	 * 3. FW application - a. Fetch fw application security status
1655	 *                     b. Check whether hard reset is done by fw app
1656	 */
1657	prop->hard_reset_done_by_fw = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
1658
1659	prop->fw_security_enabled = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_SECURITY_EN);
1660
1661	dev_dbg(hdev->dev, "Firmware preboot boot device status0 %#x\n",
1662							cpu_boot_dev_sts0);
1663
1664	dev_dbg(hdev->dev, "Firmware preboot boot device status1 %#x\n",
1665							cpu_boot_dev_sts1);
1666
1667	dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
1668			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
1669
1670	dev_dbg(hdev->dev, "firmware-level security is %s\n",
1671			prop->fw_security_enabled ? "enabled" : "disabled");
1672
1673	dev_dbg(hdev->dev, "GIC controller is %s\n",
1674			prop->gic_interrupts_enable ? "enabled" : "disabled");
1675}
1676
1677static int hl_fw_static_read_preboot_status(struct hl_device *hdev)
1678{
1679	int rc;
1680
1681	rc = hl_fw_static_read_device_fw_version(hdev, FW_COMP_PREBOOT);
1682	if (rc)
1683		return rc;
1684
1685	return 0;
1686}
1687
1688int hl_fw_read_preboot_status(struct hl_device *hdev)
1689{
1690	int rc;
1691
1692	if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
1693		return 0;
1694
1695	/* get FW pre-load parameters  */
1696	hdev->asic_funcs->init_firmware_preload_params(hdev);
1697
	/*
	 * In order to determine the boot method (static vs. dynamic) we need
	 * to read the boot caps register
	 */
1702	rc = hl_fw_read_preboot_caps(hdev);
1703	if (rc)
1704		return rc;
1705
1706	hl_fw_preboot_update_state(hdev);
1707
1708	/* no need to read preboot status in dynamic load */
1709	if (hdev->asic_prop.dynamic_fw_load)
1710		return 0;
1711
1712	return hl_fw_static_read_preboot_status(hdev);
1713}
1714
1715/* associate string with COMM status */
1716static char *hl_dynamic_fw_status_str[COMMS_STS_INVLD_LAST] = {
1717	[COMMS_STS_NOOP] = "NOOP",
1718	[COMMS_STS_ACK] = "ACK",
1719	[COMMS_STS_OK] = "OK",
1720	[COMMS_STS_ERR] = "ERR",
1721	[COMMS_STS_VALID_ERR] = "VALID_ERR",
1722	[COMMS_STS_TIMEOUT_ERR] = "TIMEOUT_ERR",
1723};
1724
1725/**
1726 * hl_fw_dynamic_report_error_status - report error status
1727 *
1728 * @hdev: pointer to the habanalabs device structure
1729 * @status: value of FW status register
1730 * @expected_status: the expected status
1731 */
1732static void hl_fw_dynamic_report_error_status(struct hl_device *hdev,
1733						u32 status,
1734						enum comms_sts expected_status)
1735{
1736	enum comms_sts comm_status =
1737				FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
1738
1739	if (comm_status < COMMS_STS_INVLD_LAST)
1740		dev_err(hdev->dev, "Device status %s, expected status: %s\n",
1741				hl_dynamic_fw_status_str[comm_status],
1742				hl_dynamic_fw_status_str[expected_status]);
1743	else
1744		dev_err(hdev->dev, "Device status unknown %d, expected status: %s\n",
1745				comm_status,
1746				hl_dynamic_fw_status_str[expected_status]);
1747}
1748
1749/**
1750 * hl_fw_dynamic_send_cmd - send LKD to FW cmd
1751 *
1752 * @hdev: pointer to the habanalabs device structure
1753 * @fw_loader: managing structure for loading device's FW
1754 * @cmd: LKD to FW cmd code
1755 * @size: size of next FW component to be loaded (0 if not necessary)
1756 *
 * The exact LKD to FW command layout is defined at struct comms_command.
 * note: the size argument is used only when the next FW component should be
 *       loaded, otherwise it shall be 0. The size is used by the FW in later
 *       protocol stages; when sent here it only indicates the amount of memory
 *       the FW should allocate for receiving the next boot component.
1762 */
1763static void hl_fw_dynamic_send_cmd(struct hl_device *hdev,
1764				struct fw_load_mgr *fw_loader,
1765				enum comms_cmd cmd, unsigned int size)
1766{
1767	struct cpu_dyn_regs *dyn_regs;
1768	u32 val;
1769
1770	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
1771
1772	val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd);
1773	val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size);
1774
1775	trace_habanalabs_comms_send_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
1776	WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val);
1777}
1778
1779/**
1780 * hl_fw_dynamic_extract_fw_response - update the FW response
1781 *
1782 * @hdev: pointer to the habanalabs device structure
1783 * @fw_loader: managing structure for loading device's FW
1784 * @response: FW response
1785 * @status: the status read from CPU status register
1786 *
1787 * @return 0 on success, otherwise non-zero error code
1788 */
1789static int hl_fw_dynamic_extract_fw_response(struct hl_device *hdev,
1790						struct fw_load_mgr *fw_loader,
1791						struct fw_response *response,
1792						u32 status)
1793{
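	/* The single 32-bit status register packs the status, RAM offset and
	 * RAM type fields; extract each one with its mask.
	 */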
1794	response->status = FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
1795	response->ram_offset = FIELD_GET(COMMS_STATUS_OFFSET_MASK, status) <<
1796						COMMS_STATUS_OFFSET_ALIGN_SHIFT;
1797	response->ram_type = FIELD_GET(COMMS_STATUS_RAM_TYPE_MASK, status);
1798
1799	if ((response->ram_type != COMMS_SRAM) &&
1800					(response->ram_type != COMMS_DRAM)) {
1801		dev_err(hdev->dev, "FW status: invalid RAM type %u\n",
1802							response->ram_type);
1803		return -EIO;
1804	}
1805
1806	return 0;
1807}
1808
1809/**
1810 * hl_fw_dynamic_wait_for_status - wait for status in dynamic FW load
1811 *
1812 * @hdev: pointer to the habanalabs device structure
1813 * @fw_loader: managing structure for loading device's FW
1814 * @expected_status: expected status to wait for
1815 * @timeout: timeout for status wait
1816 *
1817 * @return 0 on success, otherwise non-zero error code
1818 *
 * Waiting for status from the FW consists of polling the FW status register
 * until the expected status is received or a timeout occurs (whichever occurs first).
1821 */
1822static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
1823						struct fw_load_mgr *fw_loader,
1824						enum comms_sts expected_status,
1825						u32 timeout)
1826{
1827	struct cpu_dyn_regs *dyn_regs;
1828	u32 status;
1829	int rc;
1830
1831	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
1832
1833	trace_habanalabs_comms_wait_status(hdev->dev, comms_sts_str_arr[expected_status]);
1834
1835	/* Wait for expected status */
1836	rc = hl_poll_timeout(
1837		hdev,
1838		le32_to_cpu(dyn_regs->cpu_cmd_status_to_host),
1839		status,
1840		FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status,
1841		hdev->fw_comms_poll_interval_usec,
1842		timeout);
1843
1844	if (rc) {
1845		hl_fw_dynamic_report_error_status(hdev, status,
1846							expected_status);
1847		return -EIO;
1848	}
1849
1850	trace_habanalabs_comms_wait_status_done(hdev->dev, comms_sts_str_arr[expected_status]);
1851
1852	/*
1853	 * skip storing FW response for NOOP to preserve the actual desired
1854	 * FW status
1855	 */
1856	if (expected_status == COMMS_STS_NOOP)
1857		return 0;
1858
1859	rc = hl_fw_dynamic_extract_fw_response(hdev, fw_loader,
1860					&fw_loader->dynamic_loader.response,
1861					status);
1862	return rc;
1863}
1864
1865/**
1866 * hl_fw_dynamic_send_clear_cmd - send clear command to FW
1867 *
1868 * @hdev: pointer to the habanalabs device structure
1869 * @fw_loader: managing structure for loading device's FW
1870 *
1871 * @return 0 on success, otherwise non-zero error code
1872 *
 * After a command cycle between LKD and FW CPU (i.e. LKD got the expected
 * status from FW) we need to clear the CPU status register in order to avoid
 * garbage between command cycles.
 * This is done by sending a clear command and polling the CPU to LKD status
 * register until it holds the NOOP status.
1878 */
1879static int hl_fw_dynamic_send_clear_cmd(struct hl_device *hdev,
1880						struct fw_load_mgr *fw_loader)
1881{
1882	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_CLR_STS, 0);
1883
1884	return hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_NOOP,
1885							fw_loader->cpu_timeout);
1886}
1887
1888/**
1889 * hl_fw_dynamic_send_protocol_cmd - send LKD to FW cmd and wait for ACK
1890 *
1891 * @hdev: pointer to the habanalabs device structure
1892 * @fw_loader: managing structure for loading device's FW
1893 * @cmd: LKD to FW cmd code
1894 * @size: size of next FW component to be loaded (0 if not necessary)
1895 * @wait_ok: if true also wait for OK response from FW
1896 * @timeout: timeout for status wait
1897 *
1898 * @return 0 on success, otherwise non-zero error code
1899 *
1900 * brief:
1901 * when sending protocol command we have the following steps:
1902 * - send clear (clear command and verify clear status register)
1903 * - send the actual protocol command
1904 * - wait for ACK on the protocol command
1905 * - send clear
1906 * - send NOOP
1907 * if, in addition, the specific protocol command should wait for OK then:
1908 * - wait for OK
1909 * - send clear
1910 * - send NOOP
1911 *
1912 * NOTES:
 * send clear: this is necessary in order to clear the status register and
 *             avoid leftovers between commands
 * NOOP command: necessary to avoid the FW looping on the clear command
1916 */
1917int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
1918				struct fw_load_mgr *fw_loader,
1919				enum comms_cmd cmd, unsigned int size,
1920				bool wait_ok, u32 timeout)
1921{
1922	int rc;
1923
1924	trace_habanalabs_comms_protocol_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
1925
1926	/* first send clear command to clean former commands */
1927	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
1928	if (rc)
1929		return rc;
1930
1931	/* send the actual command */
1932	hl_fw_dynamic_send_cmd(hdev, fw_loader, cmd, size);
1933
1934	/* wait for ACK for the command */
1935	rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_ACK,
1936								timeout);
1937	if (rc)
1938		return rc;
1939
1940	/* clear command to prepare for NOOP command */
1941	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
1942	if (rc)
1943		return rc;
1944
1945	/* send the actual NOOP command */
1946	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
1947
1948	if (!wait_ok)
1949		return 0;
1950
1951	rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_OK,
1952								timeout);
1953	if (rc)
1954		return rc;
1955
1956	/* clear command to prepare for NOOP command */
1957	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
1958	if (rc)
1959		return rc;
1960
1961	/* send the actual NOOP command */
1962	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
1963
1964	return 0;
1965}
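
/*
 * Usage sketch (illustrative only): a full "prepare descriptor" cycle - clear,
 * COMMS_PREP_DESC carrying the next image size, wait for ACK and then for OK,
 * with a clear + NOOP after each status - is a single call:
 *
 *	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_PREP_DESC,
 *						next_image_size, true,
 *						fw_loader->cpu_timeout);
 */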
1966
1967/**
1968 * hl_fw_compat_crc32 - CRC compatible with FW
1969 *
1970 * @data: pointer to the data
1971 * @size: size of the data
1972 *
1973 * @return the CRC32 result
1974 *
 * NOTE: the kernel's CRC32 differs from the standard CRC32 calculation.
 *       In order to be aligned we need to flip the bits of both the initial
 *       input CRC and the kernel's CRC32 result.
 *       In addition, both sides use an initial CRC of 0.
1979 */
1980static u32 hl_fw_compat_crc32(u8 *data, size_t size)
1981{
1982	return ~crc32_le(~((u32)0), data, size);
1983}
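
/*
 * For reference (an assumption based on the note above, not taken from the FW
 * sources): flipping both the seed and the result of crc32_le() yields the
 * conventional CRC32 form, i.e. the same value a zlib-style crc32() starting
 * from 0 would produce, which is what the FW side is expected to compute.
 */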
1984
1985/**
1986 * hl_fw_dynamic_validate_memory_bound - validate memory bounds for memory
1987 *                                        transfer (image or descriptor) between
1988 *                                        host and FW
1989 *
1990 * @hdev: pointer to the habanalabs device structure
1991 * @addr: device address of memory transfer
1992 * @size: memory transfer size
1993 * @region: PCI memory region
1994 *
1995 * @return 0 on success, otherwise non-zero error code
1996 */
1997static int hl_fw_dynamic_validate_memory_bound(struct hl_device *hdev,
1998						u64 addr, size_t size,
1999						struct pci_mem_region *region)
2000{
2001	u64 end_addr;
2002
2003	/* now make sure that the memory transfer is within region's bounds */
2004	end_addr = addr + size;
2005	if (end_addr >= region->region_base + region->region_size) {
2006		dev_err(hdev->dev,
2007			"dynamic FW load: memory transfer end address out of memory region bounds. addr: %llx\n",
2008							end_addr);
2009		return -EIO;
2010	}
2011
2012	/*
2013	 * now make sure memory transfer is within predefined BAR bounds.
2014	 * this is to make sure we do not need to set the bar (e.g. for DRAM
2015	 * memory transfers)
2016	 */
2017	if (end_addr >= region->region_base - region->offset_in_bar +
2018							region->bar_size) {
2019		dev_err(hdev->dev,
2020			"FW image beyond PCI BAR bounds\n");
2021		return -EIO;
2022	}
2023
2024	return 0;
2025}
2026
2027/**
2028 * hl_fw_dynamic_validate_descriptor - validate FW descriptor
2029 *
2030 * @hdev: pointer to the habanalabs device structure
2031 * @fw_loader: managing structure for loading device's FW
2032 * @fw_desc: the descriptor from FW
2033 *
2034 * @return 0 on success, otherwise non-zero error code
2035 */
2036static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
2037					struct fw_load_mgr *fw_loader,
2038					struct lkd_fw_comms_desc *fw_desc)
2039{
2040	struct pci_mem_region *region;
2041	enum pci_region region_id;
2042	size_t data_size;
2043	u32 data_crc32;
2044	u8 *data_ptr;
2045	u64 addr;
2046	int rc;
2047
2048	if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC)
2049		dev_dbg(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
				le32_to_cpu(fw_desc->header.magic));
2051
2052	if (fw_desc->header.version != HL_COMMS_DESC_VER)
2053		dev_dbg(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
2054				fw_desc->header.version);
2055
	/*
	 * Calc CRC32 of the data without the header. Use the descriptor size
	 * reported by the firmware, without calculating it ourselves, to allow
	 * adding more fields to the lkd_fw_comms_desc structure.
	 * Note that there are no alignment/stride address issues here as all
	 * structures are 64 bit padded.
	 */
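	/*
	 * Descriptor layout sketch (informational, derived from the fields used
	 * below):
	 *
	 *	+---------------------------+--------------------------------+
	 *	| struct comms_desc_header  | data (header.size bytes)       |
	 *	| magic/version/size/crc32  | <- the part covered by crc32   |
	 *	+---------------------------+--------------------------------+
	 */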
2063	data_ptr = (u8 *)fw_desc + sizeof(struct comms_desc_header);
2064	data_size = le16_to_cpu(fw_desc->header.size);
2065
2066	data_crc32 = hl_fw_compat_crc32(data_ptr, data_size);
2067	if (data_crc32 != le32_to_cpu(fw_desc->header.crc32)) {
2068		dev_err(hdev->dev, "CRC32 mismatch for dynamic FW descriptor (%x:%x)\n",
			data_crc32, le32_to_cpu(fw_desc->header.crc32));
2070		return -EIO;
2071	}
2072
2073	/* find memory region to which to copy the image */
2074	addr = le64_to_cpu(fw_desc->img_addr);
2075	region_id = hl_get_pci_memory_region(hdev, addr);
	if ((region_id != PCI_REGION_SRAM) && (region_id != PCI_REGION_DRAM)) {
2077		dev_err(hdev->dev, "Invalid region to copy FW image address=%llx\n", addr);
2078		return -EIO;
2079	}
2080
2081	region = &hdev->pci_mem_region[region_id];
2082
2083	/* store the region for the copy stage */
2084	fw_loader->dynamic_loader.image_region = region;
2085
2086	/*
2087	 * here we know that the start address is valid, now make sure that the
2088	 * image is within region's bounds
2089	 */
2090	rc = hl_fw_dynamic_validate_memory_bound(hdev, addr,
2091					fw_loader->dynamic_loader.fw_image_size,
2092					region);
2093	if (rc) {
2094		dev_err(hdev->dev, "invalid mem transfer request for FW image\n");
2095		return rc;
2096	}
2097
2098	/* here we can mark the descriptor as valid as the content has been validated */
2099	fw_loader->dynamic_loader.fw_desc_valid = true;
2100
2101	return 0;
2102}
2103
2104static int hl_fw_dynamic_validate_response(struct hl_device *hdev,
2105						struct fw_response *response,
2106						struct pci_mem_region *region)
2107{
2108	u64 device_addr;
2109	int rc;
2110
2111	device_addr = region->region_base + response->ram_offset;
2112
	/*
	 * Validate that the descriptor is within the region's bounds.
	 * Note that as the start address was supplied according to the RAM
	 * type, testing only the end address is enough.
	 */
2118	rc = hl_fw_dynamic_validate_memory_bound(hdev, device_addr,
2119					sizeof(struct lkd_fw_comms_desc),
2120					region);
2121	return rc;
2122}
2123
2124/*
 * hl_fw_dynamic_read_descriptor_msg - read and show the ASCII messages sent by the FW
2126 *
2127 * @hdev: pointer to the habanalabs device structure
2128 * @fw_desc: the descriptor from FW
2129 */
2130static void hl_fw_dynamic_read_descriptor_msg(struct hl_device *hdev,
2131					struct lkd_fw_comms_desc *fw_desc)
2132{
2133	int i;
2134	char *msg;
2135
2136	for (i = 0 ; i < LKD_FW_ASCII_MSG_MAX ; i++) {
2137		if (!fw_desc->ascii_msg[i].valid)
2138			return;
2139
2140		/* force NULL termination */
2141		msg = fw_desc->ascii_msg[i].msg;
2142		msg[LKD_FW_ASCII_MSG_MAX_LEN - 1] = '\0';
2143
2144		switch (fw_desc->ascii_msg[i].msg_lvl) {
2145		case LKD_FW_ASCII_MSG_ERR:
2146			dev_err(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2147			break;
2148		case LKD_FW_ASCII_MSG_WRN:
2149			dev_warn(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2150			break;
2151		case LKD_FW_ASCII_MSG_INF:
2152			dev_info(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2153			break;
2154		default:
2155			dev_dbg(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2156			break;
2157		}
2158	}
2159}
2160
2161/**
2162 * hl_fw_dynamic_read_and_validate_descriptor - read and validate FW descriptor
2163 *
2164 * @hdev: pointer to the habanalabs device structure
2165 * @fw_loader: managing structure for loading device's FW
2166 *
2167 * @return 0 on success, otherwise non-zero error code
2168 */
2169static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
2170						struct fw_load_mgr *fw_loader)
2171{
2172	struct lkd_fw_comms_desc *fw_desc;
2173	struct pci_mem_region *region;
2174	struct fw_response *response;
2175	void *temp_fw_desc;
2176	void __iomem *src;
2177	u16 fw_data_size;
2178	enum pci_region region_id;
2179	int rc;
2180
2181	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2182	response = &fw_loader->dynamic_loader.response;
2183
2184	region_id = (response->ram_type == COMMS_SRAM) ?
2185					PCI_REGION_SRAM : PCI_REGION_DRAM;
2186
2187	region = &hdev->pci_mem_region[region_id];
2188
2189	rc = hl_fw_dynamic_validate_response(hdev, response, region);
2190	if (rc) {
2191		dev_err(hdev->dev,
2192			"invalid mem transfer request for FW descriptor\n");
2193		return rc;
2194	}
2195
	/*
	 * Extract the address to copy the descriptor from.
	 * In addition, as the descriptor value is going to be overridden by new data, we mark it
	 * as invalid.
	 * It will be marked as valid again once validated.
	 */
2202	fw_loader->dynamic_loader.fw_desc_valid = false;
2203	src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2204							response->ram_offset;
2205
	/*
	 * We copy the fw descriptor in 2 phases:
	 * 1. Copy the header + data info according to our lkd_fw_comms_desc definition.
	 *    Then we are able to read the actual data size provided by the fw.
	 *    This is needed for cases where data in the descriptor was changed (added/removed)
	 *    in the embedded specs header file before updating the lkd copy of the header file.
	 * 2. Copy the descriptor to a temporary buffer with the aligned size and send it to validation.
	 */
2214	memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
2215	fw_data_size = le16_to_cpu(fw_desc->header.size);
2216
2217	temp_fw_desc = vzalloc(sizeof(struct comms_desc_header) + fw_data_size);
2218	if (!temp_fw_desc)
2219		return -ENOMEM;
2220
2221	memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_desc_header) + fw_data_size);
2222
2223	rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader,
2224					(struct lkd_fw_comms_desc *) temp_fw_desc);
2225
2226	if (!rc)
2227		hl_fw_dynamic_read_descriptor_msg(hdev, temp_fw_desc);
2228
2229	vfree(temp_fw_desc);
2230
2231	return rc;
2232}
2233
2234/**
2235 * hl_fw_dynamic_request_descriptor - handshake with CPU to get FW descriptor
2236 *
2237 * @hdev: pointer to the habanalabs device structure
2238 * @fw_loader: managing structure for loading device's FW
2239 * @next_image_size: size to allocate for next FW component
2240 *
2241 * @return 0 on success, otherwise non-zero error code
2242 */
2243static int hl_fw_dynamic_request_descriptor(struct hl_device *hdev,
2244						struct fw_load_mgr *fw_loader,
2245						size_t next_image_size)
2246{
2247	int rc;
2248
2249	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_PREP_DESC,
2250						next_image_size, true,
2251						fw_loader->cpu_timeout);
2252	if (rc)
2253		return rc;
2254
2255	return hl_fw_dynamic_read_and_validate_descriptor(hdev, fw_loader);
2256}
2257
2258/**
2259 * hl_fw_dynamic_read_device_fw_version - read FW version to exposed properties
2260 *
2261 * @hdev: pointer to the habanalabs device structure
2262 * @fwc: the firmware component
 * @fw_version: fw component's version string
 *
 * @return 0 on success, otherwise non-zero error code
 */
2265static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
2266					enum hl_fw_component fwc,
2267					const char *fw_version)
2268{
2269	struct asic_fixed_properties *prop = &hdev->asic_prop;
2270	char *preboot_ver, *boot_ver;
2271	char btl_ver[32];
2272	int rc;
2273
2274	switch (fwc) {
2275	case FW_COMP_BOOT_FIT:
2276		strscpy(prop->uboot_ver, fw_version, VERSION_MAX_LEN);
2277		boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
2278		if (boot_ver) {
2279			dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
2280			kfree(boot_ver);
2281		}
2282
2283		break;
2284	case FW_COMP_PREBOOT:
2285		strscpy(prop->preboot_ver, fw_version, VERSION_MAX_LEN);
2286		preboot_ver = strnstr(prop->preboot_ver, "Preboot", VERSION_MAX_LEN);
2287		dev_info(hdev->dev, "preboot full version: '%s'\n", preboot_ver);
2288
2289		if (preboot_ver && preboot_ver != prop->preboot_ver) {
2290			strscpy(btl_ver, prop->preboot_ver,
2291				min((int) (preboot_ver - prop->preboot_ver), 31));
2292			dev_info(hdev->dev, "%s\n", btl_ver);
2293		}
2294
2295		rc = hl_get_sw_major_minor_subminor(hdev, preboot_ver);
2296		if (rc)
2297			return rc;
2298		preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
2299		if (preboot_ver) {
2300			rc = hl_get_preboot_major_minor(hdev, preboot_ver);
2301			kfree(preboot_ver);
2302			if (rc)
2303				return rc;
2304		}
2305
2306		break;
2307	default:
2308		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2309		return -EINVAL;
2310	}
2311
2312	return 0;
2313}
2314
2315/**
2316 * hl_fw_dynamic_copy_image - copy image to memory allocated by the FW
2317 *
2318 * @hdev: pointer to the habanalabs device structure
 * @fw: firmware image to copy
 * @fw_loader: managing structure for loading device's FW
 *
 * @return 0 on success, otherwise non-zero error code
 */
2322static int hl_fw_dynamic_copy_image(struct hl_device *hdev,
2323						const struct firmware *fw,
2324						struct fw_load_mgr *fw_loader)
2325{
2326	struct lkd_fw_comms_desc *fw_desc;
2327	struct pci_mem_region *region;
2328	void __iomem *dest;
2329	u64 addr;
2330	int rc;
2331
2332	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2333	addr = le64_to_cpu(fw_desc->img_addr);
2334
2335	/* find memory region to which to copy the image */
2336	region = fw_loader->dynamic_loader.image_region;
2337
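	/*
	 * Translate the device address supplied by the FW into a host virtual
	 * address: start from the BAR's kernel mapping, add the region's fixed
	 * offset inside the BAR, then the image offset within the region.
	 */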
2338	dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2339					(addr - region->region_base);
2340
2341	rc = hl_fw_copy_fw_to_device(hdev, fw, dest,
2342					fw_loader->boot_fit_img.src_off,
2343					fw_loader->boot_fit_img.copy_size);
2344
2345	return rc;
2346}
2347
2348/**
2349 * hl_fw_dynamic_copy_msg - copy msg to memory allocated by the FW
2350 *
2351 * @hdev: pointer to the habanalabs device structure
2352 * @msg: message
2353 * @fw_loader: managing structure for loading device's FW
 *
 * @return 0 on success, otherwise non-zero error code
 */
2355static int hl_fw_dynamic_copy_msg(struct hl_device *hdev,
2356		struct lkd_msg_comms *msg, struct fw_load_mgr *fw_loader)
2357{
2358	struct lkd_fw_comms_desc *fw_desc;
2359	struct pci_mem_region *region;
2360	void __iomem *dest;
2361	u64 addr;
2362	int rc;
2363
2364	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2365	addr = le64_to_cpu(fw_desc->img_addr);
2366
	/* find memory region to which to copy the message */
2368	region = fw_loader->dynamic_loader.image_region;
2369
2370	dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2371					(addr - region->region_base);
2372
2373	rc = hl_fw_copy_msg_to_device(hdev, msg, dest, 0, 0);
2374
2375	return rc;
2376}
2377
2378/**
2379 * hl_fw_boot_fit_update_state - update internal data structures after boot-fit
2380 *                               is loaded
2381 *
2382 * @hdev: pointer to the habanalabs device structure
2383 * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
2384 * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
2387 */
2388static void hl_fw_boot_fit_update_state(struct hl_device *hdev,
2389						u32 cpu_boot_dev_sts0_reg,
2390						u32 cpu_boot_dev_sts1_reg)
2391{
2392	struct asic_fixed_properties *prop = &hdev->asic_prop;
2393
2394	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_BOOT_CPU;
2395
2396	/* Read boot_cpu status bits */
2397	if (prop->fw_preboot_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_ENABLED) {
2398		prop->fw_bootfit_cpu_boot_dev_sts0 =
2399				RREG32(cpu_boot_dev_sts0_reg);
2400
2401		prop->hard_reset_done_by_fw = !!(prop->fw_bootfit_cpu_boot_dev_sts0 &
2402							CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
2403
2404		dev_dbg(hdev->dev, "Firmware boot CPU status0 %#x\n",
2405					prop->fw_bootfit_cpu_boot_dev_sts0);
2406	}
2407
2408	if (prop->fw_cpu_boot_dev_sts1_valid) {
2409		prop->fw_bootfit_cpu_boot_dev_sts1 =
2410				RREG32(cpu_boot_dev_sts1_reg);
2411
2412		dev_dbg(hdev->dev, "Firmware boot CPU status1 %#x\n",
2413					prop->fw_bootfit_cpu_boot_dev_sts1);
2414	}
2415
2416	dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
2417			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
2418}
2419
2420static void hl_fw_dynamic_update_linux_interrupt_if(struct hl_device *hdev)
2421{
2422	struct cpu_dyn_regs *dyn_regs =
2423			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
2424
2425	/* Check whether all 3 interrupt interfaces are set, if not use a
2426	 * single interface
2427	 */
2428	if (!hdev->asic_prop.gic_interrupts_enable &&
2429			!(hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
2430				CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN)) {
2431		dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_pi_upd_irq;
2432		dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_pi_upd_irq;
2433
2434		dev_warn(hdev->dev,
			"Using a single interrupt interface towards cpucp\n");
2436	}
}

2438/**
2439 * hl_fw_dynamic_load_image - load FW image using dynamic protocol
2440 *
2441 * @hdev: pointer to the habanalabs device structure
2442 * @fw_loader: managing structure for loading device's FW
2443 * @load_fwc: the FW component to be loaded
2444 * @img_ld_timeout: image load timeout
2445 *
2446 * @return 0 on success, otherwise non-zero error code
2447 */
2448static int hl_fw_dynamic_load_image(struct hl_device *hdev,
2449						struct fw_load_mgr *fw_loader,
2450						enum hl_fw_component load_fwc,
2451						u32 img_ld_timeout)
2452{
2453	enum hl_fw_component cur_fwc;
2454	const struct firmware *fw;
2455	char *fw_name;
2456	int rc = 0;
2457
2458	/*
2459	 * when loading image we have one of 2 scenarios:
2460	 * 1. current FW component is preboot and we want to load boot-fit
2461	 * 2. current FW component is boot-fit and we want to load linux
2462	 */
2463	if (load_fwc == FW_COMP_BOOT_FIT) {
2464		cur_fwc = FW_COMP_PREBOOT;
2465		fw_name = fw_loader->boot_fit_img.image_name;
2466	} else {
2467		cur_fwc = FW_COMP_BOOT_FIT;
2468		fw_name = fw_loader->linux_img.image_name;
2469	}
2470
2471	/* request FW in order to communicate to FW the size to be allocated */
2472	rc = hl_request_fw(hdev, &fw, fw_name);
2473	if (rc)
2474		return rc;
2475
2476	/* store the image size for future validation */
2477	fw_loader->dynamic_loader.fw_image_size = fw->size;
2478
2479	rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, fw->size);
2480	if (rc)
2481		goto release_fw;
2482
	/* read the version of the currently running FW component */
2484	rc = hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
2485				fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
2486	if (rc)
2487		goto release_fw;
2488
	/* copy the image to the space allocated by the FW */
2490	rc = hl_fw_dynamic_copy_image(hdev, fw, fw_loader);
2491	if (rc)
2492		goto release_fw;
2493
2494	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
2495						0, true,
2496						fw_loader->cpu_timeout);
2497	if (rc)
2498		goto release_fw;
2499
2500	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
2501						0, false,
2502						img_ld_timeout);
2503
2504release_fw:
2505	hl_release_firmware(fw);
2506	return rc;
2507}
2508
2509static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev,
2510					struct fw_load_mgr *fw_loader)
2511{
2512	struct dynamic_fw_load_mgr *dyn_loader;
2513	u32 status;
2514	int rc;
2515
2516	dyn_loader = &fw_loader->dynamic_loader;
2517
2518	/*
2519	 * Make sure CPU boot-loader is running
2520	 * Note that the CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux
	 * yet there is a debug scenario in which we load U-Boot (without Linux),
	 * which at a later stage is relocated to DRAM. In this case we expect
	 * U-Boot to set the CPU_BOOT_STATUS_SRAM_AVAIL and so we add it to the
	 * poll flags.
2525	 */
2526	rc = hl_poll_timeout(
2527		hdev,
2528		le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
2529		status,
2530		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
2531		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2532		hdev->fw_poll_interval_usec,
2533		dyn_loader->wait_for_bl_timeout);
2534	if (rc) {
2535		dev_err(hdev->dev, "failed to wait for boot (status = %d)\n", status);
2536		return rc;
2537	}
2538
2539	dev_dbg(hdev->dev, "uboot status = %d\n", status);
2540	return 0;
2541}
2542
2543static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev,
2544						struct fw_load_mgr *fw_loader)
2545{
2546	struct dynamic_fw_load_mgr *dyn_loader;
2547	u32 status;
2548	int rc;
2549
2550	dyn_loader = &fw_loader->dynamic_loader;
2551
	/* Make sure CPU Linux is running */
2553
2554	rc = hl_poll_timeout(
2555		hdev,
2556		le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
2557		status,
2558		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2559		hdev->fw_poll_interval_usec,
2560		fw_loader->cpu_timeout);
2561	if (rc) {
2562		dev_err(hdev->dev, "failed to wait for Linux (status = %d)\n", status);
2563		return rc;
2564	}
2565
2566	dev_dbg(hdev->dev, "Boot status = %d\n", status);
2567	return 0;
2568}
2569
2570/**
2571 * hl_fw_linux_update_state -	update internal data structures after Linux
2572 *				is loaded.
2573 *				Note: Linux initialization is comprised mainly
2574 *				of two stages - loading kernel (SRAM_AVAIL)
2575 *				& loading ARMCP.
2576 *				Therefore reading boot device status in any of
2577 *				these stages might result in different values.
2578 *
2579 * @hdev: pointer to the habanalabs device structure
2580 * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
2581 * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
2584 */
2585static void hl_fw_linux_update_state(struct hl_device *hdev,
2586						u32 cpu_boot_dev_sts0_reg,
2587						u32 cpu_boot_dev_sts1_reg)
2588{
2589	struct asic_fixed_properties *prop = &hdev->asic_prop;
2590
2591	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_LINUX;
2592
2593	/* Read FW application security bits */
2594	if (prop->fw_cpu_boot_dev_sts0_valid) {
2595		prop->fw_app_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg);
2596
2597		prop->hard_reset_done_by_fw = !!(prop->fw_app_cpu_boot_dev_sts0 &
2598							CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
2599
2600		if (prop->fw_app_cpu_boot_dev_sts0 &
2601				CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN)
2602			prop->gic_interrupts_enable = false;
2603
2604		dev_dbg(hdev->dev,
2605			"Firmware application CPU status0 %#x\n",
2606			prop->fw_app_cpu_boot_dev_sts0);
2607
2608		dev_dbg(hdev->dev, "GIC controller is %s\n",
2609				prop->gic_interrupts_enable ?
2610						"enabled" : "disabled");
2611	}
2612
2613	if (prop->fw_cpu_boot_dev_sts1_valid) {
2614		prop->fw_app_cpu_boot_dev_sts1 = RREG32(cpu_boot_dev_sts1_reg);
2615
2616		dev_dbg(hdev->dev,
2617			"Firmware application CPU status1 %#x\n",
2618			prop->fw_app_cpu_boot_dev_sts1);
2619	}
2620
2621	dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
2622			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
2623
2624	dev_info(hdev->dev, "Successfully loaded firmware to device\n");
2625}
2626
2627/**
2628 * hl_fw_dynamic_send_msg - send a COMMS message with attached data
2629 *
2630 * @hdev: pointer to the habanalabs device structure
2631 * @fw_loader: managing structure for loading device's FW
2632 * @msg_type: message type
2633 * @data: data to be sent
2634 *
2635 * @return 0 on success, otherwise non-zero error code
2636 */
2637static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
2638		struct fw_load_mgr *fw_loader, u8 msg_type, void *data)
2639{
2640	struct lkd_msg_comms *msg;
2641	int rc;
2642
2643	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
2644	if (!msg)
2645		return -ENOMEM;
2646
2647	/* create message to be sent */
2648	msg->header.type = msg_type;
2649	msg->header.size = cpu_to_le16(sizeof(struct comms_msg_header));
2650	msg->header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
2651
2652	switch (msg_type) {
2653	case HL_COMMS_RESET_CAUSE_TYPE:
2654		msg->reset_cause = *(__u8 *) data;
2655		break;
2656
2657	default:
2658		dev_err(hdev->dev,
2659			"Send COMMS message - invalid message type %u\n",
2660			msg_type);
2661		rc = -EINVAL;
2662		goto out;
2663	}
2664
2665	rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
2666			sizeof(struct lkd_msg_comms));
2667	if (rc)
2668		goto out;
2669
2670	/* copy message to space allocated by FW */
2671	rc = hl_fw_dynamic_copy_msg(hdev, msg, fw_loader);
2672	if (rc)
2673		goto out;
2674
2675	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
2676						0, true,
2677						fw_loader->cpu_timeout);
2678	if (rc)
2679		goto out;
2680
2681	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
2682						0, true,
2683						fw_loader->cpu_timeout);
2684
2685out:
2686	kfree(msg);
2687	return rc;
2688}
2689
2690/**
2691 * hl_fw_dynamic_init_cpu - initialize the device CPU using dynamic protocol
2692 *
2693 * @hdev: pointer to the habanalabs device structure
2694 * @fw_loader: managing structure for loading device's FW
2695 *
2696 * @return 0 on success, otherwise non-zero error code
2697 *
 * brief: the dynamic protocol is a master (LKD) / slave (FW CPU) protocol.
 * The communication is done using registers:
 * - LKD command register
 * - FW status register
 * The protocol is race free. This goal is achieved by splitting the requests
 * and responses into known synchronization points between the LKD and the FW.
 * Each response to an LKD request is known and bound to a predefined timeout.
 * In case of timeout expiration without the desired status from the FW, the
 * protocol (and hence the boot) will fail.
2707 */
2708static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
2709					struct fw_load_mgr *fw_loader)
2710{
2711	struct cpu_dyn_regs *dyn_regs;
2712	int rc, fw_error_rc;
2713
2714	dev_info(hdev->dev,
2715		"Loading %sfirmware to device, may take some time...\n",
2716		hdev->asic_prop.fw_security_enabled ? "secured " : "");
2717
2718	/* initialize FW descriptor as invalid */
2719	fw_loader->dynamic_loader.fw_desc_valid = false;
2720
2721	/*
2722	 * In this stage, "cpu_dyn_regs" contains only LKD's hard coded values!
2723	 * It will be updated from FW after hl_fw_dynamic_request_descriptor().
2724	 */
2725	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
2726
2727	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
2728						0, true,
2729						fw_loader->cpu_timeout);
2730	if (rc)
2731		goto protocol_err;
2732
2733	if (hdev->reset_info.curr_reset_cause) {
2734		rc = hl_fw_dynamic_send_msg(hdev, fw_loader,
2735				HL_COMMS_RESET_CAUSE_TYPE, &hdev->reset_info.curr_reset_cause);
2736		if (rc)
2737			goto protocol_err;
2738
2739		/* Clear current reset cause */
2740		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
2741	}
2742
2743	if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
2744		struct lkd_fw_binning_info *binning_info;
2745
2746		rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, 0);
2747		if (rc)
2748			goto protocol_err;
2749
2750		/* read preboot version */
2751		rc = hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
2752				fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
2753
2754		if (rc)
2755			return rc;
2756
2757		/* read binning info from preboot */
2758		if (hdev->support_preboot_binning) {
2759			binning_info = &fw_loader->dynamic_loader.comm_desc.binning_info;
2760			hdev->tpc_binning = le64_to_cpu(binning_info->tpc_mask_l);
2761			hdev->dram_binning = le32_to_cpu(binning_info->dram_mask);
2762			hdev->edma_binning = le32_to_cpu(binning_info->edma_mask);
2763			hdev->decoder_binning = le32_to_cpu(binning_info->dec_mask);
2764			hdev->rotator_binning = le32_to_cpu(binning_info->rot_mask);
2765
2766			rc = hdev->asic_funcs->set_dram_properties(hdev);
2767			if (rc)
2768				return rc;
2769
2770			rc = hdev->asic_funcs->set_binning_masks(hdev);
2771			if (rc)
2772				return rc;
2773
2774			dev_dbg(hdev->dev,
2775				"Read binning masks: tpc: 0x%llx, dram: 0x%llx, edma: 0x%x, dec: 0x%x, rot:0x%x\n",
2776				hdev->tpc_binning, hdev->dram_binning, hdev->edma_binning,
2777				hdev->decoder_binning, hdev->rotator_binning);
2778		}
2779
2780		return 0;
2781	}
2782
2783	/* load boot fit to FW */
2784	rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_BOOT_FIT,
2785						fw_loader->boot_fit_timeout);
2786	if (rc) {
2787		dev_err(hdev->dev, "failed to load boot fit\n");
2788		goto protocol_err;
2789	}
2790
2791	rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader);
2792	if (rc)
2793		goto protocol_err;
2794
2795	hl_fw_boot_fit_update_state(hdev,
2796			le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2797			le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2798
2799	/*
2800	 * when testing FW load (without Linux) on PLDM we don't want to
2801	 * wait until boot fit is active as it may take several hours.
2802	 * instead, we load the bootfit and let it do all initialization in
2803	 * the background.
2804	 */
2805	if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX))
2806		return 0;
2807
2808	/* Enable DRAM scrambling before Linux boot and after successful
	 * U-Boot
2810	 */
2811	hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
2812
2813	if (!(hdev->fw_components & FW_TYPE_LINUX)) {
2814		dev_info(hdev->dev, "Skip loading Linux F/W\n");
2815		return 0;
2816	}
2817
2818	if (fw_loader->skip_bmc) {
2819		rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader,
2820							COMMS_SKIP_BMC, 0,
2821							true,
2822							fw_loader->cpu_timeout);
2823		if (rc) {
			dev_err(hdev->dev, "failed to send skip BMC command\n");
2825			goto protocol_err;
2826		}
2827	}
2828
2829	/* load Linux image to FW */
2830	rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_LINUX,
2831							fw_loader->cpu_timeout);
2832	if (rc) {
2833		dev_err(hdev->dev, "failed to load Linux\n");
2834		goto protocol_err;
2835	}
2836
2837	rc = hl_fw_dynamic_wait_for_linux_active(hdev, fw_loader);
2838	if (rc)
2839		goto protocol_err;
2840
2841	hl_fw_linux_update_state(hdev,
2842				le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2843				le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2844
2845	hl_fw_dynamic_update_linux_interrupt_if(hdev);
2846
2847protocol_err:
2848	if (fw_loader->dynamic_loader.fw_desc_valid) {
2849		fw_error_rc = fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
2850				le32_to_cpu(dyn_regs->cpu_boot_err1),
2851				le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2852				le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2853
2854		if (fw_error_rc)
2855			return fw_error_rc;
2856	}
2857
2858	return rc;
2859}
2860
2861/**
2862 * hl_fw_static_init_cpu - initialize the device CPU using static protocol
2863 *
2864 * @hdev: pointer to the habanalabs device structure
2865 * @fw_loader: managing structure for loading device's FW
2866 *
2867 * @return 0 on success, otherwise non-zero error code
2868 */
2869static int hl_fw_static_init_cpu(struct hl_device *hdev,
2870					struct fw_load_mgr *fw_loader)
2871{
2872	u32 cpu_msg_status_reg, cpu_timeout, msg_to_cpu_reg, status;
2873	u32 cpu_boot_dev_status0_reg, cpu_boot_dev_status1_reg;
2874	struct static_fw_load_mgr *static_loader;
2875	u32 cpu_boot_status_reg;
2876	int rc;
2877
2878	if (!(hdev->fw_components & FW_TYPE_BOOT_CPU))
2879		return 0;
2880
2881	/* init common loader parameters */
2882	cpu_timeout = fw_loader->cpu_timeout;
2883
2884	/* init static loader parameters */
2885	static_loader = &fw_loader->static_loader;
2886	cpu_msg_status_reg = static_loader->cpu_cmd_status_to_host_reg;
2887	msg_to_cpu_reg = static_loader->kmd_msg_to_cpu_reg;
2888	cpu_boot_dev_status0_reg = static_loader->cpu_boot_dev_status0_reg;
2889	cpu_boot_dev_status1_reg = static_loader->cpu_boot_dev_status1_reg;
2890	cpu_boot_status_reg = static_loader->cpu_boot_status_reg;
2891
2892	dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
2893		cpu_timeout / USEC_PER_SEC);
2894
2895	/* Wait for boot FIT request */
2896	rc = hl_poll_timeout(
2897		hdev,
2898		cpu_boot_status_reg,
2899		status,
2900		status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
2901		hdev->fw_poll_interval_usec,
2902		fw_loader->boot_fit_timeout);
2903
2904	if (rc) {
2905		dev_dbg(hdev->dev,
2906			"No boot fit request received (status = %d), resuming boot\n", status);
2907	} else {
2908		rc = hdev->asic_funcs->load_boot_fit_to_device(hdev);
2909		if (rc)
2910			goto out;
2911
2912		/* Clear device CPU message status */
2913		WREG32(cpu_msg_status_reg, CPU_MSG_CLR);
2914
2915		/* Signal device CPU that boot loader is ready */
2916		WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
2917
2918		/* Poll for CPU device ack */
2919		rc = hl_poll_timeout(
2920			hdev,
2921			cpu_msg_status_reg,
2922			status,
2923			status == CPU_MSG_OK,
2924			hdev->fw_poll_interval_usec,
2925			fw_loader->boot_fit_timeout);
2926
2927		if (rc) {
2928			dev_err(hdev->dev,
2929				"Timeout waiting for boot fit load ack (status = %d)\n", status);
2930			goto out;
2931		}
2932
2933		/* Clear message */
2934		WREG32(msg_to_cpu_reg, KMD_MSG_NA);
2935	}
2936
2937	/*
2938	 * Make sure CPU boot-loader is running
2939	 * Note that the CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux
	 * yet there is a debug scenario in which we load U-Boot (without Linux),
	 * which at a later stage is relocated to DRAM. In this case we expect
	 * U-Boot to set the CPU_BOOT_STATUS_SRAM_AVAIL and so we add it to the
	 * poll flags.
2944	 */
2945	rc = hl_poll_timeout(
2946		hdev,
2947		cpu_boot_status_reg,
2948		status,
2949		(status == CPU_BOOT_STATUS_DRAM_RDY) ||
2950		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
2951		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
2952		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2953		hdev->fw_poll_interval_usec,
2954		cpu_timeout);
2955
2956	dev_dbg(hdev->dev, "uboot status = %d\n", status);
2957
2958	/* Read U-Boot version now in case we will later fail */
2959	hl_fw_static_read_device_fw_version(hdev, FW_COMP_BOOT_FIT);
2960
2961	/* update state according to boot stage */
2962	hl_fw_boot_fit_update_state(hdev, cpu_boot_dev_status0_reg,
2963						cpu_boot_dev_status1_reg);
2964
2965	if (rc) {
2966		detect_cpu_boot_status(hdev, status);
2967		rc = -EIO;
2968		goto out;
2969	}
2970
2971	/* Enable DRAM scrambling before Linux boot and after successful
	 * U-Boot
2973	 */
2974	hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
2975
2976	if (!(hdev->fw_components & FW_TYPE_LINUX)) {
2977		dev_info(hdev->dev, "Skip loading Linux F/W\n");
2978		rc = 0;
2979		goto out;
2980	}
2981
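	/*
	 * If the CPU already reports SRAM_AVAIL (e.g. the relocated U-Boot
	 * debug scenario described above), there is no Linux image left to
	 * load here.
	 */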
2982	if (status == CPU_BOOT_STATUS_SRAM_AVAIL) {
2983		rc = 0;
2984		goto out;
2985	}
2986
2987	dev_info(hdev->dev,
2988		"Loading firmware to device, may take some time...\n");
2989
2990	rc = hdev->asic_funcs->load_firmware_to_device(hdev);
2991	if (rc)
2992		goto out;
2993
2994	if (fw_loader->skip_bmc) {
2995		WREG32(msg_to_cpu_reg, KMD_MSG_SKIP_BMC);
2996
2997		rc = hl_poll_timeout(
2998			hdev,
2999			cpu_boot_status_reg,
3000			status,
3001			(status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
3002			hdev->fw_poll_interval_usec,
3003			cpu_timeout);
3004
3005		if (rc) {
3006			dev_err(hdev->dev,
3007				"Failed to get ACK on skipping BMC (status = %d)\n",
3008				status);
3009			WREG32(msg_to_cpu_reg, KMD_MSG_NA);
3010			rc = -EIO;
3011			goto out;
3012		}
3013	}
3014
3015	WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
3016
3017	rc = hl_poll_timeout(
3018		hdev,
3019		cpu_boot_status_reg,
3020		status,
3021		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
3022		hdev->fw_poll_interval_usec,
3023		cpu_timeout);
3024
3025	/* Clear message */
3026	WREG32(msg_to_cpu_reg, KMD_MSG_NA);
3027
3028	if (rc) {
3029		if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
3030			dev_err(hdev->dev,
3031				"Device reports FIT image is corrupted\n");
3032		else
3033			dev_err(hdev->dev,
3034				"Failed to load firmware to device (status = %d)\n",
3035				status);
3036
3037		rc = -EIO;
3038		goto out;
3039	}
3040
3041	rc = fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
3042					fw_loader->static_loader.boot_err1_reg,
3043					cpu_boot_dev_status0_reg,
3044					cpu_boot_dev_status1_reg);
3045	if (rc)
3046		return rc;
3047
3048	hl_fw_linux_update_state(hdev, cpu_boot_dev_status0_reg,
3049						cpu_boot_dev_status1_reg);
3050
3051	return 0;
3052
3053out:
3054	fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
3055					fw_loader->static_loader.boot_err1_reg,
3056					cpu_boot_dev_status0_reg,
3057					cpu_boot_dev_status1_reg);
3058
3059	return rc;
3060}
3061
3062/**
3063 * hl_fw_init_cpu - initialize the device CPU
3064 *
3065 * @hdev: pointer to the habanalabs device structure
3066 *
3067 * @return 0 on success, otherwise non-zero error code
3068 *
 * Perform the necessary initializations for the device's CPU. Takes into
 * account whether the init protocol is static or dynamic.
3071 */
3072int hl_fw_init_cpu(struct hl_device *hdev)
3073{
3074	struct asic_fixed_properties *prop = &hdev->asic_prop;
3075	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
3076
	return prop->dynamic_fw_load ?
3078			hl_fw_dynamic_init_cpu(hdev, fw_loader) :
3079			hl_fw_static_init_cpu(hdev, fw_loader);
3080}
3081
3082void hl_fw_set_pll_profile(struct hl_device *hdev)
3083{
3084	hl_fw_set_frequency(hdev, hdev->asic_prop.clk_pll_index,
3085				hdev->asic_prop.max_freq_value);
3086}
3087
3088int hl_fw_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
3089{
3090	long value;
3091
3092	if (!hl_device_operational(hdev, NULL))
3093		return -ENODEV;
3094
3095	if (!hdev->pdev) {
3096		*cur_clk = 0;
3097		*max_clk = 0;
3098		return 0;
3099	}
3100
3101	value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, false);
3102
3103	if (value < 0) {
3104		dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n", value);
3105		return value;
3106	}
3107
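	/*
	 * hl_fw_get_frequency() reports the PLL frequency in Hz (hence the
	 * double division by 1000); e.g. an assumed reading of 1600000000 Hz
	 * is exposed to the caller as 1600 MHz.
	 */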
3108	*max_clk = (value / 1000 / 1000);
3109
3110	value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, true);
3111
3112	if (value < 0) {
3113		dev_err(hdev->dev, "Failed to retrieve device current clock %ld\n", value);
3114		return value;
3115	}
3116
3117	*cur_clk = (value / 1000 / 1000);
3118
3119	return 0;
3120}
3121
3122long hl_fw_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
3123{
3124	struct cpucp_packet pkt;
3125	u32 used_pll_idx;
3126	u64 result;
3127	int rc;
3128
3129	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
3130	if (rc)
3131		return rc;
3132
3133	memset(&pkt, 0, sizeof(pkt));
3134
3135	if (curr)
3136		pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_CURR_GET <<
3137						CPUCP_PKT_CTL_OPCODE_SHIFT);
3138	else
3139		pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3140
3141	pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
3142
3143	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
3144
3145	if (rc) {
3146		dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n",
3147			used_pll_idx, rc);
3148		return rc;
3149	}
3150
3151	return (long) result;
3152}
3153
3154void hl_fw_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
3155{
3156	struct cpucp_packet pkt;
3157	u32 used_pll_idx;
3158	int rc;
3159
3160	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
3161	if (rc)
3162		return;
3163
3164	memset(&pkt, 0, sizeof(pkt));
3165
3166	pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3167	pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
3168	pkt.value = cpu_to_le64(freq);
3169
3170	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
3171
3172	if (rc)
3173		dev_err(hdev->dev, "Failed to set frequency to PLL %d, error %d\n",
3174			used_pll_idx, rc);
3175}
3176
3177long hl_fw_get_max_power(struct hl_device *hdev)
3178{
3179	struct cpucp_packet pkt;
3180	u64 result;
3181	int rc;
3182
3183	memset(&pkt, 0, sizeof(pkt));
3184
3185	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3186
3187	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
3188
3189	if (rc) {
3190		dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
3191		return rc;
3192	}
3193
3194	return result;
3195}
3196
3197void hl_fw_set_max_power(struct hl_device *hdev)
3198{
3199	struct cpucp_packet pkt;
3200	int rc;
3201
3202	/* TODO: remove this after simulator supports this packet */
3203	if (!hdev->pdev)
3204		return;
3205
3206	memset(&pkt, 0, sizeof(pkt));
3207
3208	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3209	pkt.value = cpu_to_le64(hdev->max_power);
3210
3211	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
3212
3213	if (rc)
3214		dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
3215}
3216
3217static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void *data, u32 size,
3218					u32 nonce, u32 timeout)
3219{
3220	struct cpucp_packet pkt = {};
3221	dma_addr_t req_dma_addr;
3222	void *req_cpu_addr;
3223	int rc;
3224
3225	req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr);
3226	if (!req_cpu_addr) {
3227		dev_err(hdev->dev,
3228			"Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id);
3229		return -ENOMEM;
3230	}
3231
3232	memset(data, 0, size);
3233
3234	pkt.ctl = cpu_to_le32(packet_id << CPUCP_PKT_CTL_OPCODE_SHIFT);
3235	pkt.addr = cpu_to_le64(req_dma_addr);
3236	pkt.data_max_size = cpu_to_le32(size);
3237	pkt.nonce = cpu_to_le32(nonce);
3238
3239	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
3240					timeout, NULL);
3241	if (rc) {
3242		dev_err(hdev->dev,
3243			"Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
3244		goto out;
3245	}
3246
3247	memcpy(data, req_cpu_addr, size);
3248
3249out:
3250	hl_cpu_accessible_dma_pool_free(hdev, size, req_cpu_addr);
3251
3252	return rc;
3253}
3254
3255int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_info *sec_attest_info,
3256				u32 nonce)
3257{
3258	return hl_fw_get_sec_attest_data(hdev, CPUCP_PACKET_SEC_ATTEST_GET, sec_attest_info,
3259					sizeof(struct cpucp_sec_attest_info), nonce,
3260					HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC);
3261}
3262
3263int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode,
3264						dma_addr_t buff, u32 *size)
3265{
3266	struct cpucp_packet pkt = {};
3267	u64 result;
3268	int rc = 0;
3269
3270	pkt.ctl = cpu_to_le32(CPUCP_PACKET_GENERIC_PASSTHROUGH << CPUCP_PKT_CTL_OPCODE_SHIFT);
3271	pkt.addr = cpu_to_le64(buff);
3272	pkt.data_max_size = cpu_to_le32(*size);
3273	pkt.pkt_subidx = cpu_to_le32(sub_opcode);
3274
3275	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt, sizeof(pkt),
3276						HL_CPUCP_INFO_TIMEOUT_USEC, &result);
3277	if (rc)
3278		dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n");
3279	else
3280		dev_dbg(hdev->dev, "generic pkt was successful, result: 0x%llx\n", result);
3281
3282	*size = (u32)result;
3283
3284	return rc;
3285}
3286