// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>

#include "vpu_boot_api.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_pm.h"

#define FW_GLOBAL_MEM_START	(2ull * SZ_1G)
#define FW_GLOBAL_MEM_END	(3ull * SZ_1G)
#define FW_SHARED_MEM_SIZE	SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */
#define FW_SHARED_MEM_ALIGNMENT	SZ_128K /* VPU MTRR limitation */
#define FW_RUNTIME_MAX_SIZE	SZ_512M
#define FW_SHAVE_NN_MAX_SIZE	SZ_2M
#define FW_RUNTIME_MIN_ADDR	(FW_GLOBAL_MEM_START)
#define FW_RUNTIME_MAX_ADDR	(FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
#define FW_VERSION_HEADER_SIZE	SZ_4K
#define FW_FILE_IMAGE_OFFSET	(VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)

#define WATCHDOG_MSS_REDIRECT	32
#define WATCHDOG_NCE_REDIRECT	33

#define ADDR_TO_L2_CACHE_CFG(addr) ((addr) >> 31)

#define IVPU_FW_CHECK_API(vdev, fw_hdr, name, min_major) \
	ivpu_fw_check_api(vdev, fw_hdr, #name, \
			  VPU_##name##_API_VER_INDEX, \
			  VPU_##name##_API_VER_MAJOR, \
			  VPU_##name##_API_VER_MINOR, min_major)

static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
/* TODO: Remove mtl_vpu.bin from names after transition to generation based FW names */
static struct {
	int gen;
	const char *name;
} fw_names[] = {
	{ IVPU_HW_37XX, "vpu_37xx.bin" },
	{ IVPU_HW_37XX, "mtl_vpu.bin" },
	{ IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
	{ IVPU_HW_40XX, "vpu_40xx.bin" },
	{ IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
};

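/*
 * Pick the firmware image to load: honour the "firmware" module parameter
 * when set, otherwise try the generation-specific names from fw_names[]
 * in order until one is found.
 */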
static int ivpu_fw_request(struct ivpu_device *vdev)
{
	int ret = -ENOENT;
	int i;

	if (ivpu_firmware) {
		ret = request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);
		if (!ret)
			vdev->fw->name = ivpu_firmware;
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
		if (fw_names[i].gen != ivpu_hw_gen(vdev))
			continue;

		ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev);
		if (!ret) {
			vdev->fw->name = fw_names[i].name;
			return 0;
		}
	}

	ivpu_err(vdev, "Failed to request firmware: %d\n", ret);
	return ret;
}

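/*
 * Compare one firmware API version (major.minor taken from the FW header)
 * against the versions the driver was built with. Fail only when the major
 * version is below the required minimum; a plain major mismatch just warns.
 */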
static int
ivpu_fw_check_api(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr,
		  const char *str, int index, u16 expected_major, u16 expected_minor,
		  u16 min_major)
{
	u16 major = (u16)(fw_hdr->api_version[index] >> 16);
	u16 minor = (u16)(fw_hdr->api_version[index]);

	if (major < min_major) {
		ivpu_err(vdev, "Incompatible FW %s API version: %d.%d, required %d.0 or later\n",
			 str, major, minor, min_major);
		return -EINVAL;
	}
	if (major != expected_major) {
		ivpu_warn(vdev, "Major FW %s API version different: %d.%d (expected %d.%d)\n",
			  str, major, minor, expected_major, expected_minor);
	}
	ivpu_dbg(vdev, FW_BOOT, "FW %s API version: %d.%d (expected %d.%d)\n",
		 str, major, minor, expected_major, expected_minor);

	return 0;
}

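/*
 * Validate the firmware header (file size, header version, runtime and image
 * layout, SHAVE NN size, entry point) and cache the parsed layout and trace
 * defaults in vdev->fw for later use.
 */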
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data;
	u64 runtime_addr, image_load_addr, runtime_size, image_size;

	if (fw->file->size <= FW_FILE_IMAGE_OFFSET) {
		ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size);
		return -EINVAL;
	}

	if (fw_hdr->header_version != VPU_FW_HEADER_VERSION) {
		ivpu_err(vdev, "Invalid firmware header version: %u\n", fw_hdr->header_version);
		return -EINVAL;
	}

	runtime_addr = fw_hdr->boot_params_load_address;
	runtime_size = fw_hdr->runtime_size;
	image_load_addr = fw_hdr->image_load_address;
	image_size = fw_hdr->image_size;

	if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) {
		ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr);
		return -EINVAL;
	}

	if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) {
		ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size);
		return -EINVAL;
	}

	if (FW_FILE_IMAGE_OFFSET + image_size > fw->file->size) {
		ivpu_err(vdev, "Invalid image size: %llu\n", image_size);
		return -EINVAL;
	}

	if (image_load_addr < runtime_addr ||
	    image_load_addr + image_size > runtime_addr + runtime_size) {
		ivpu_err(vdev, "Invalid firmware load address 0x%llx or size %llu\n",
			 image_load_addr, image_size);
		return -EINVAL;
	}

	if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
		ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
		return -EINVAL;
	}

	if (fw_hdr->entry_point < image_load_addr ||
	    fw_hdr->entry_point >= image_load_addr + image_size) {
		ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
		return -EINVAL;
	}
	ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
		 fw_hdr->header_version, fw_hdr->image_format);

	ivpu_info(vdev, "Firmware: %s, version: %s\n", fw->name,
		  (const char *)fw_hdr + VPU_FW_HEADER_SIZE);

	if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3))
		return -EINVAL;
	if (IVPU_FW_CHECK_API(vdev, fw_hdr, JSM, 3))
		return -EINVAL;

	fw->runtime_addr = runtime_addr;
	fw->runtime_size = runtime_size;
	fw->image_load_offset = image_load_addr - runtime_addr;
	fw->image_size = image_size;
	fw->shave_nn_size = PAGE_ALIGN(fw_hdr->shave_nn_fw_size);

	fw->cold_boot_entry_point = fw_hdr->entry_point;
	fw->entry_point = fw->cold_boot_entry_point;

	fw->trace_level = min_t(u32, ivpu_log_level, IVPU_FW_LOG_FATAL);
	fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
	fw->trace_hw_component_mask = -1;

	ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
		 fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
	ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
		 fw->runtime_addr, image_load_addr, fw->entry_point);

	return 0;
}

static void ivpu_fw_release(struct ivpu_device *vdev)
{
	release_firmware(vdev->fw->file);
}

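/*
 * Place the shared (global) region directly after the firmware runtime
 * region, aligned to FW_SHARED_MEM_ALIGNMENT, and check that it still fits
 * below FW_GLOBAL_MEM_END.
 */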
static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT);
	u64 size = FW_SHARED_MEM_SIZE;

	if (start + size > FW_GLOBAL_MEM_END) {
		ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size);
		return -EINVAL;
	}

	ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
	return 0;
}

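/*
 * Allocate the buffers the firmware needs to boot: the runtime buffer at the
 * address requested by the FW header, the critical and verbose log buffers
 * and, if the image carries SHAVE NN code, a buffer in the SHAVE range.
 */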
static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	int log_verb_size;
	int ret;

	ret = ivpu_fw_update_global_range(vdev);
	if (ret)
		return ret;

	fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
	if (!fw->mem) {
		ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
		return -ENOMEM;
	}

	fw->mem_log_crit = ivpu_bo_alloc_internal(vdev, 0, IVPU_FW_CRITICAL_BUFFER_SIZE,
						  DRM_IVPU_BO_CACHED);
	if (!fw->mem_log_crit) {
		ivpu_err(vdev, "Failed to allocate critical log buffer\n");
		ret = -ENOMEM;
		goto err_free_fw_mem;
	}

	if (ivpu_log_level <= IVPU_FW_LOG_INFO)
		log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE;
	else
		log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;

	fw->mem_log_verb = ivpu_bo_alloc_internal(vdev, 0, log_verb_size, DRM_IVPU_BO_CACHED);
	if (!fw->mem_log_verb) {
		ivpu_err(vdev, "Failed to allocate verbose log buffer\n");
		ret = -ENOMEM;
		goto err_free_log_crit;
	}

	if (fw->shave_nn_size) {
		fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
							  fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
		if (!fw->mem_shave_nn) {
			ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
			ret = -ENOMEM;
			goto err_free_log_verb;
		}
	}

	return 0;

err_free_log_verb:
	ivpu_bo_free_internal(fw->mem_log_verb);
err_free_log_crit:
	ivpu_bo_free_internal(fw->mem_log_crit);
err_free_fw_mem:
	ivpu_bo_free_internal(fw->mem);
	return ret;
}

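/* Free the buffers allocated by ivpu_fw_mem_init() */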
static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;

	if (fw->mem_shave_nn) {
		ivpu_bo_free_internal(fw->mem_shave_nn);
		fw->mem_shave_nn = NULL;
	}

	ivpu_bo_free_internal(fw->mem_log_verb);
	ivpu_bo_free_internal(fw->mem_log_crit);
	ivpu_bo_free_internal(fw->mem);

	fw->mem_log_verb = NULL;
	fw->mem_log_crit = NULL;
	fw->mem = NULL;
}

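/* Request the firmware file, parse its header and allocate boot memory */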
int ivpu_fw_init(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_fw_request(vdev);
	if (ret)
		return ret;

	ret = ivpu_fw_parse(vdev);
	if (ret)
		goto err_fw_release;

	ret = ivpu_fw_mem_init(vdev);
	if (ret)
		goto err_fw_release;

	return 0;

err_fw_release:
	ivpu_fw_release(vdev);
	return ret;
}

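/* Undo ivpu_fw_init(): free firmware memory and release the firmware file */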
void ivpu_fw_fini(struct ivpu_device *vdev)
{
	ivpu_fw_mem_fini(vdev);
	ivpu_fw_release(vdev);
}

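/*
 * Copy the firmware image into the runtime buffer at image_load_offset,
 * zeroing the area in front of it (and, with the clear_runtime_mem
 * workaround, the area behind it), then flush the WC mapping.
 */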
int ivpu_fw_load(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	u64 image_end_offset = fw->image_load_offset + fw->image_size;

	memset(fw->mem->kvaddr, 0, fw->image_load_offset);
	memcpy(fw->mem->kvaddr + fw->image_load_offset,
	       fw->file->data + FW_FILE_IMAGE_OFFSET, fw->image_size);

	if (IVPU_WA(clear_runtime_mem)) {
		u8 *start = fw->mem->kvaddr + image_end_offset;
		u64 size = fw->mem->base.size - image_end_offset;

		memset(start, 0, size);
	}

	wmb(); /* Flush WC buffers after writing fw->mem */

	return 0;
}

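/* Dump the boot parameters passed to the firmware as FW_BOOT debug messages */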
static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
{
	ivpu_dbg(vdev, FW_BOOT, "boot_params.magic = 0x%x\n",
		 boot_params->magic);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_id = 0x%x\n",
		 boot_params->vpu_id);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_count = 0x%x\n",
		 boot_params->vpu_count);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.frequency = %u\n",
		 boot_params->frequency);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.perf_clk_frequency = %u\n",
		 boot_params->perf_clk_frequency);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_start = 0x%llx\n",
		 boot_params->ipc_header_area_start);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_size = 0x%x\n",
		 boot_params->ipc_header_area_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_base = 0x%llx\n",
		 boot_params->shared_region_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_size = 0x%x\n",
		 boot_params->shared_region_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_start = 0x%llx\n",
		 boot_params->ipc_payload_area_start);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_size = 0x%x\n",
		 boot_params->ipc_payload_area_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_base = 0x%llx\n",
		 boot_params->global_aliased_pio_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_size = 0x%x\n",
		 boot_params->global_aliased_pio_size);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.autoconfig = 0x%x\n",
		 boot_params->autoconfig);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 0x%x\n",
		 boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n",
		 boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n",
		 boot_params->global_memory_allocator_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n",
		 boot_params->global_memory_allocator_size);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n",
		 boot_params->shave_nn_fw_base);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_mss = 0x%x\n",
		 boot_params->watchdog_irq_mss);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n",
		 boot_params->watchdog_irq_nce);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n",
		 boot_params->host_to_vpu_irq);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n",
		 boot_params->job_done_irq);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n",
		 boot_params->host_version_id);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.si_stepping = 0x%x\n",
		 boot_params->si_stepping);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.device_id = 0x%llx\n",
		 boot_params->device_id);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.feature_exclusion = 0x%llx\n",
		 boot_params->feature_exclusion);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.sku = 0x%llx\n",
		 boot_params->sku);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.min_freq_pll_ratio = 0x%x\n",
		 boot_params->min_freq_pll_ratio);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.pn_freq_pll_ratio = 0x%x\n",
		 boot_params->pn_freq_pll_ratio);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.max_freq_pll_ratio = 0x%x\n",
		 boot_params->max_freq_pll_ratio);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.default_trace_level = 0x%x\n",
		 boot_params->default_trace_level);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.tracing_buff_message_format_mask = 0x%llx\n",
		 boot_params->tracing_buff_message_format_mask);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_destination_mask = 0x%x\n",
		 boot_params->trace_destination_mask);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_hw_component_mask = 0x%llx\n",
		 boot_params->trace_hw_component_mask);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.boot_type = 0x%x\n",
		 boot_params->boot_type);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_base = 0x%llx\n",
		 boot_params->punit_telemetry_sram_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_size = 0x%llx\n",
		 boot_params->punit_telemetry_sram_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n",
		 boot_params->vpu_telemetry_enable);
}

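/*
 * Fill in the boot parameters consumed by the firmware. On warm boot only
 * save_restore_ret_address is cleared; on cold boot the full set of memory
 * ranges, IRQ numbers, PLL ratios and tracing parameters is programmed.
 */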
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
{
	struct ivpu_bo *ipc_mem_rx = vdev->ipc->mem_rx;

	/* In case of warm boot we only have to reset the entrypoint addr */
	if (!ivpu_fw_is_cold_boot(vdev)) {
		boot_params->save_restore_ret_address = 0;
		vdev->pm->is_warmboot = true;
		wmb(); /* Flush WC buffers after writing save_restore_ret_address */
		return;
	}

	vdev->pm->is_warmboot = false;

	boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
	boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
	boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev);

	/*
	 * Uncached region of VPU address space, covers IPC buffers, job queues
	 * and log buffers, programmable to L2$ Uncached by VPU MTRR
	 */
	boot_params->shared_region_base = vdev->hw->ranges.global.start;
	boot_params->shared_region_size = vdev->hw->ranges.global.end -
					  vdev->hw->ranges.global.start;

	boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
	boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;

	boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
	boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;

	boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
	boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);

	/* Allow configuration for L2C_PAGE_TABLE with boot param value */
	boot_params->autoconfig = 1;

	/* Enable L2 cache for first 2GB of high memory */
	boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
	boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
		ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.shave.start);

	if (vdev->fw->mem_shave_nn)
		boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;

	boot_params->watchdog_irq_mss = WATCHDOG_MSS_REDIRECT;
	boot_params->watchdog_irq_nce = WATCHDOG_NCE_REDIRECT;
	boot_params->si_stepping = ivpu_revision(vdev);
	boot_params->device_id = ivpu_device_id(vdev);
	boot_params->feature_exclusion = vdev->hw->tile_fuse;
	boot_params->sku = vdev->hw->sku;

	boot_params->min_freq_pll_ratio = vdev->hw->pll.min_ratio;
	boot_params->pn_freq_pll_ratio = vdev->hw->pll.pn_ratio;
	boot_params->max_freq_pll_ratio = vdev->hw->pll.max_ratio;

	boot_params->default_trace_level = vdev->fw->trace_level;
	boot_params->tracing_buff_message_format_mask = BIT(VPU_TRACING_FORMAT_STRING);
	boot_params->trace_destination_mask = vdev->fw->trace_destination_mask;
	boot_params->trace_hw_component_mask = vdev->fw->trace_hw_component_mask;
	boot_params->crit_tracing_buff_addr = vdev->fw->mem_log_crit->vpu_addr;
	boot_params->crit_tracing_buff_size = vdev->fw->mem_log_crit->base.size;
	boot_params->verbose_tracing_buff_addr = vdev->fw->mem_log_verb->vpu_addr;
	boot_params->verbose_tracing_buff_size = vdev->fw->mem_log_verb->base.size;

	boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
	boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
	boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);

	wmb(); /* Flush WC buffers after writing bootparams */

	ivpu_fw_boot_params_print(vdev, boot_params);
}