/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright 2020-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 *
 */

#ifndef GAUDI2P_H_
#define GAUDI2P_H_

#include <uapi/drm/habanalabs_accel.h>
#include "../common/habanalabs.h"
#include "../include/common/hl_boot_if.h"
#include "../include/gaudi2/gaudi2.h"
#include "../include/gaudi2/gaudi2_packets.h"
#include "../include/gaudi2/gaudi2_fw_if.h"
#include "../include/gaudi2/gaudi2_async_events.h"

#define GAUDI2_LINUX_FW_FILE	"habanalabs/gaudi2/gaudi2-fit.itb"
#define GAUDI2_BOOT_FIT_FILE	"habanalabs/gaudi2/gaudi2-boot-fit.itb"

#define MMU_PAGE_TABLES_INITIAL_SIZE	0x10000000	/* 256MB */

#define GAUDI2_CPU_TIMEOUT_USEC		30000000	/* 30s */

#define NUMBER_OF_PDMA_QUEUES		2
#define NUMBER_OF_EDMA_QUEUES		8
#define NUMBER_OF_MME_QUEUES		4
#define NUMBER_OF_TPC_QUEUES		25
#define NUMBER_OF_NIC_QUEUES		24
#define NUMBER_OF_ROT_QUEUES		2
#define NUMBER_OF_CPU_QUEUES		1

#define NUMBER_OF_HW_QUEUES		((NUMBER_OF_PDMA_QUEUES + \
					NUMBER_OF_EDMA_QUEUES + \
					NUMBER_OF_MME_QUEUES + \
					NUMBER_OF_TPC_QUEUES + \
					NUMBER_OF_NIC_QUEUES + \
					NUMBER_OF_ROT_QUEUES + \
					NUMBER_OF_CPU_QUEUES) * \
					NUM_OF_PQ_PER_QMAN)

#define NUMBER_OF_QUEUES		(NUMBER_OF_CPU_QUEUES + NUMBER_OF_HW_QUEUES)
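
/*
 * Worked example of the arithmetic above: the per-engine QMAN counts sum to
 * 66 (2 + 8 + 4 + 25 + 24 + 2 + 1), so NUMBER_OF_HW_QUEUES is
 * 66 * NUM_OF_PQ_PER_QMAN, and NUMBER_OF_QUEUES adds the single CPU queue on
 * top of that.
 */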

#define DCORE_NUM_OF_SOB		\
	(((mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8191 - \
	mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)

#define DCORE_NUM_OF_MONITORS		\
	(((mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2047 - \
	mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0) + 4) >> 2)
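
/*
 * In both macros above the per-object registers are 4 bytes apart, so
 * (last - first + 4) >> 2 yields the number of SOBs/monitors in a dcore,
 * counting both endpoints.
 */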

#define NUMBER_OF_DEC		((NUM_OF_DEC_PER_DCORE * NUM_OF_DCORES) + NUMBER_OF_PCIE_DEC)

/* Map all ARCs' DCCM blocks + the ARC schedulers' ACP blocks */
#define NUM_OF_USER_ACP_BLOCKS		(NUM_OF_SCHEDULER_ARC + 2)
#define NUM_OF_USER_NIC_UMR_BLOCKS	15
#define NUM_OF_EXPOSED_SM_BLOCKS	((NUM_OF_DCORES - 1) * 2)
#define NUM_USER_MAPPED_BLOCKS \
	(NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS + NUMBER_OF_DEC + \
	NUM_OF_EXPOSED_SM_BLOCKS + \
	(NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS))

/* Within the user-mapped array, decoder entries start after all the
 * ARC-related entries
 */
#define USR_MAPPED_BLK_DEC_START_IDX \
	(NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS + \
	(NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS))

#define USR_MAPPED_BLK_SM_START_IDX \
	(NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS + NUMBER_OF_DEC + \
	(NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS))

#define SM_OBJS_BLOCK_SIZE		(mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_0 - \
					 mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0)

#define GAUDI2_MAX_PENDING_CS		64

#if !IS_MAX_PENDING_CS_VALID(GAUDI2_MAX_PENDING_CS)
#error "GAUDI2_MAX_PENDING_CS must be a power of 2 and greater than 1"
#endif

#define CORESIGHT_TIMEOUT_USEC			100000		/* 100 ms */

#define GAUDI2_PREBOOT_REQ_TIMEOUT_USEC		25000000	/* 25s */

#define GAUDI2_BOOT_FIT_REQ_TIMEOUT_USEC	10000000	/* 10s */

#define GAUDI2_NIC_CLK_FREQ			450000000ull	/* 450 MHz */

#define DC_POWER_DEFAULT			60000		/* 60W */

#define GAUDI2_HBM_NUM				6

#define DMA_MAX_TRANSFER_SIZE			U32_MAX

#define GAUDI2_DEFAULT_CARD_NAME		"HL225"

#define QMAN_STREAMS				4

#define NUM_OF_MME_SBTE_PORTS			5
#define NUM_OF_MME_WB_PORTS			2

#define GAUDI2_ENGINE_ID_DCORE_OFFSET \
	(GAUDI2_DCORE1_ENGINE_ID_EDMA_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0)
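
/*
 * Illustrative sketch (hypothetical helper, not part of the driver's API):
 * derive a per-dcore engine ID from its dcore 0 counterpart using the
 * constant stride above, assuming every dcore repeats the same engine ID
 * layout. E.g. gaudi2_dcore_engine_id(GAUDI2_DCORE0_ENGINE_ID_EDMA_0, 1)
 * yields GAUDI2_DCORE1_ENGINE_ID_EDMA_0.
 */
static inline u32 gaudi2_dcore_engine_id(u32 dcore0_engine_id, u32 dcore_id)
{
	return dcore0_engine_id + dcore_id * GAUDI2_ENGINE_ID_DCORE_OFFSET;
}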

/* DRAM Memory Map */

#define CPU_FW_IMAGE_SIZE			0x10000000	/* 256MB */

/* This define should be used only when working in a debug mode without DRAM.
 * When working with DRAM, the driver size will be calculated dynamically.
 */
#define NIC_DEFAULT_DRV_SIZE			0x20000000	/* 512MB */

#define CPU_FW_IMAGE_ADDR			DRAM_PHYS_BASE

#define NIC_NUMBER_OF_PORTS			NIC_NUMBER_OF_ENGINES

#define NUMBER_OF_PCIE_DEC			2
#define PCIE_DEC_SHIFT				8

#define SRAM_USER_BASE_OFFSET			0

/* cluster binning */
#define MAX_FAULTY_HBMS				1
#define GAUDI2_XBAR_EDGE_FULL_MASK		0xF
#define GAUDI2_EDMA_FULL_MASK			0xFF
#define GAUDI2_DRAM_FULL_MASK			0x3F

/* Host virtual address space. */

#define VA_HOST_SPACE_PAGE_START		0xFFF0000000000000ull
#define VA_HOST_SPACE_PAGE_END			0xFFF0800000000000ull /* 140TB */

#define VA_HOST_SPACE_HPAGE_START		0xFFF0800000000000ull
#define VA_HOST_SPACE_HPAGE_END			0xFFF1000000000000ull /* 140TB */

/* 140TB */
#define VA_HOST_SPACE_PAGE_SIZE		(VA_HOST_SPACE_PAGE_END - VA_HOST_SPACE_PAGE_START)

/* 140TB */
#define VA_HOST_SPACE_HPAGE_SIZE	(VA_HOST_SPACE_HPAGE_END - VA_HOST_SPACE_HPAGE_START)

#define VA_HOST_SPACE_SIZE		(VA_HOST_SPACE_PAGE_SIZE + VA_HOST_SPACE_HPAGE_SIZE)

#define HOST_SPACE_INTERNAL_CB_SZ		SZ_2M

/*
 * HBM virtual address space
 * Gaudi2 has 6 HBM devices, each supporting 16GB, for a total of at most 96GB.
 * No core separation is supported so we can have one chunk of virtual address
 * space just above the physical ones.
 * The virtual address space starts immediately after the end of the physical
 * address space, which is determined at run-time.
 */
#define VA_HBM_SPACE_END		0x1002000000000000ull

#define HW_CAP_PLL			BIT_ULL(0)
#define HW_CAP_DRAM			BIT_ULL(1)
#define HW_CAP_PMMU			BIT_ULL(2)
#define HW_CAP_CPU			BIT_ULL(3)
#define HW_CAP_MSIX			BIT_ULL(4)

#define HW_CAP_CPU_Q			BIT_ULL(5)
#define HW_CAP_CPU_Q_SHIFT		5

#define HW_CAP_CLK_GATE			BIT_ULL(6)
#define HW_CAP_KDMA			BIT_ULL(7)
#define HW_CAP_SRAM_SCRAMBLER		BIT_ULL(8)

#define HW_CAP_DCORE0_DMMU0		BIT_ULL(9)
#define HW_CAP_DCORE0_DMMU1		BIT_ULL(10)
#define HW_CAP_DCORE0_DMMU2		BIT_ULL(11)
#define HW_CAP_DCORE0_DMMU3		BIT_ULL(12)
#define HW_CAP_DCORE1_DMMU0		BIT_ULL(13)
#define HW_CAP_DCORE1_DMMU1		BIT_ULL(14)
#define HW_CAP_DCORE1_DMMU2		BIT_ULL(15)
#define HW_CAP_DCORE1_DMMU3		BIT_ULL(16)
#define HW_CAP_DCORE2_DMMU0		BIT_ULL(17)
#define HW_CAP_DCORE2_DMMU1		BIT_ULL(18)
#define HW_CAP_DCORE2_DMMU2		BIT_ULL(19)
#define HW_CAP_DCORE2_DMMU3		BIT_ULL(20)
#define HW_CAP_DCORE3_DMMU0		BIT_ULL(21)
#define HW_CAP_DCORE3_DMMU1		BIT_ULL(22)
#define HW_CAP_DCORE3_DMMU2		BIT_ULL(23)
#define HW_CAP_DCORE3_DMMU3		BIT_ULL(24)
#define HW_CAP_DMMU_MASK		GENMASK_ULL(24, 9)
#define HW_CAP_DMMU_SHIFT		9
#define HW_CAP_PDMA_MASK		BIT_ULL(26)
#define HW_CAP_EDMA_MASK		GENMASK_ULL(34, 27)
#define HW_CAP_EDMA_SHIFT		27
#define HW_CAP_MME_MASK			GENMASK_ULL(38, 35)
#define HW_CAP_MME_SHIFT		35
#define HW_CAP_ROT_MASK			GENMASK_ULL(40, 39)
#define HW_CAP_ROT_SHIFT		39
#define HW_CAP_HBM_SCRAMBLER_HW_RESET	BIT_ULL(41)
#define HW_CAP_HBM_SCRAMBLER_SW_RESET	BIT_ULL(42)
#define HW_CAP_HBM_SCRAMBLER_MASK	(HW_CAP_HBM_SCRAMBLER_HW_RESET | \
						HW_CAP_HBM_SCRAMBLER_SW_RESET)
#define HW_CAP_HBM_SCRAMBLER_SHIFT	41
#define HW_CAP_RESERVED			BIT_ULL(43)
#define HW_CAP_MMU_MASK			(HW_CAP_PMMU | HW_CAP_DMMU_MASK)
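
/*
 * Minimal sketch (hypothetical helper, not a driver function): build the
 * HW_CAP bit for a single HMMU instance from its dcore and local index,
 * relying on the contiguous layout of the 16 DMMU bits above (4 HMMUs per
 * dcore, starting at HW_CAP_DMMU_SHIFT). For example,
 * gaudi2_hmmu_hw_cap(1, 2) equals HW_CAP_DCORE1_DMMU2.
 */
static inline u64 gaudi2_hmmu_hw_cap(u32 dcore_id, u32 hmmu_id)
{
	return BIT_ULL(HW_CAP_DMMU_SHIFT + (dcore_id * 4) + hmmu_id);
}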

/* Range Registers */
#define RR_TYPE_SHORT			0
#define RR_TYPE_LONG			1
#define RR_TYPE_SHORT_PRIV		2
#define RR_TYPE_LONG_PRIV		3
#define NUM_SHORT_LBW_RR		14
#define NUM_LONG_LBW_RR			4
#define NUM_SHORT_HBW_RR		6
#define NUM_LONG_HBW_RR			4

/* RAZWI initiator coordinates: X - 5 bits, Y - 4 bits */
#define RAZWI_INITIATOR_X_SHIFT		0
#define RAZWI_INITIATOR_X_MASK		0x1F
#define RAZWI_INITIATOR_Y_SHIFT		5
#define RAZWI_INITIATOR_Y_MASK		0xF

#define RTR_ID_X_Y(x, y) \
	((((y) & RAZWI_INITIATOR_Y_MASK) << RAZWI_INITIATOR_Y_SHIFT) | \
		(((x) & RAZWI_INITIATOR_X_MASK) << RAZWI_INITIATOR_X_SHIFT))
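
/*
 * Minimal sketch (hypothetical helpers, not part of the driver): recover the
 * X/Y coordinates from a router ID packed with RTR_ID_X_Y() above.
 */
static inline u32 gaudi2_rtr_id_x(u32 rtr_id)
{
	return (rtr_id >> RAZWI_INITIATOR_X_SHIFT) & RAZWI_INITIATOR_X_MASK;
}

static inline u32 gaudi2_rtr_id_y(u32 rtr_id)
{
	return (rtr_id >> RAZWI_INITIATOR_Y_SHIFT) & RAZWI_INITIATOR_Y_MASK;
}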

/* Decoders have a separate mask */
#define HW_CAP_DEC_SHIFT		0
#define HW_CAP_DEC_MASK			GENMASK_ULL(9, 0)

/* TPCs have a separate mask */
#define HW_CAP_TPC_SHIFT		0
#define HW_CAP_TPC_MASK			GENMASK_ULL(24, 0)

/* NICs have a separate mask */
#define HW_CAP_NIC_SHIFT		0
#define HW_CAP_NIC_MASK			GENMASK_ULL(NIC_NUMBER_OF_ENGINES - 1, 0)

#define GAUDI2_ARC_PCI_MSB_ADDR(addr)	(((addr) & GENMASK_ULL(49, 28)) >> 28)
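
/*
 * GAUDI2_ARC_PCI_MSB_ADDR() keeps bits [49:28] of the address, i.e. the MSBs
 * above a 256MB (2^28) granularity. Illustrative example:
 * GAUDI2_ARC_PCI_MSB_ADDR(0x340000000ull) evaluates to 0x34.
 */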

#define GAUDI2_SOB_INCREMENT_BY_ONE	(FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1) | \
					FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1))
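
/*
 * Minimal sketch, analogous to GAUDI2_SOB_INCREMENT_BY_ONE above and shown
 * only to illustrate the VAL/INC field layout (hypothetical macro, not used
 * by the driver): build a SOB write value that atomically adds 'val'.
 */
#define GAUDI2_SOB_INCREMENT_BY(val) \
	(FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, (val)) | \
	 FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1))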

#define GAUDI2_NUM_TESTED_QS (GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)

#define GAUDI2_NUM_OF_GLBL_ERR_CAUSE		8

enum gaudi2_reserved_sob_id {
	GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
	GAUDI2_RESERVED_SOB_CS_COMPLETION_LAST =
			GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST + GAUDI2_MAX_PENDING_CS - 1,
	GAUDI2_RESERVED_SOB_KDMA_COMPLETION,
	GAUDI2_RESERVED_SOB_DEC_NRM_FIRST,
	GAUDI2_RESERVED_SOB_DEC_NRM_LAST =
			GAUDI2_RESERVED_SOB_DEC_NRM_FIRST + NUMBER_OF_DEC - 1,
	GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST,
	GAUDI2_RESERVED_SOB_DEC_ABNRM_LAST =
			GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST + NUMBER_OF_DEC - 1,
	GAUDI2_RESERVED_SOB_NUMBER
};
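
/*
 * Minimal sketch of how the reserved CS-completion SOB range above could be
 * indexed (an assumption for illustration, not the driver's actual policy):
 * GAUDI2_MAX_PENDING_CS is a power of 2, so a CS sequence number can be
 * folded onto the range with a simple mask.
 */
static inline u32 gaudi2_cs_completion_sob_id(u64 cs_seq)
{
	return GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST +
		(u32)(cs_seq & (GAUDI2_MAX_PENDING_CS - 1));
}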

enum gaudi2_reserved_mon_id {
	GAUDI2_RESERVED_MON_CS_COMPLETION_FIRST,
	GAUDI2_RESERVED_MON_CS_COMPLETION_LAST =
			GAUDI2_RESERVED_MON_CS_COMPLETION_FIRST + GAUDI2_MAX_PENDING_CS - 1,
	GAUDI2_RESERVED_MON_KDMA_COMPLETION,
	GAUDI2_RESERVED_MON_DEC_NRM_FIRST,
	GAUDI2_RESERVED_MON_DEC_NRM_LAST =
			GAUDI2_RESERVED_MON_DEC_NRM_FIRST + 3 * NUMBER_OF_DEC - 1,
	GAUDI2_RESERVED_MON_DEC_ABNRM_FIRST,
	GAUDI2_RESERVED_MON_DEC_ABNRM_LAST =
			GAUDI2_RESERVED_MON_DEC_ABNRM_FIRST + 3 * NUMBER_OF_DEC - 1,
	GAUDI2_RESERVED_MON_NUMBER
};

enum gaudi2_reserved_cq_id {
	GAUDI2_RESERVED_CQ_CS_COMPLETION,
	GAUDI2_RESERVED_CQ_KDMA_COMPLETION,
	GAUDI2_RESERVED_CQ_NUMBER
};

/*
 * Gaudi2 substitute TPCs numbering
 * At most two faulty TPCs are allowed.
 * The first replacement for a faulty TPC is TPC24, the second is TPC23.
 */
enum substitude_tpc {
	FAULTY_TPC_SUBTS_1_TPC_24,
	FAULTY_TPC_SUBTS_2_TPC_23,
	MAX_FAULTY_TPCS
};

enum gaudi2_dma_core_id {
	DMA_CORE_ID_PDMA0, /* Dcore 0 */
	DMA_CORE_ID_PDMA1, /* Dcore 0 */
	DMA_CORE_ID_EDMA0, /* Dcore 0 */
	DMA_CORE_ID_EDMA1, /* Dcore 0 */
	DMA_CORE_ID_EDMA2, /* Dcore 1 */
	DMA_CORE_ID_EDMA3, /* Dcore 1 */
	DMA_CORE_ID_EDMA4, /* Dcore 2 */
	DMA_CORE_ID_EDMA5, /* Dcore 2 */
	DMA_CORE_ID_EDMA6, /* Dcore 3 */
	DMA_CORE_ID_EDMA7, /* Dcore 3 */
	DMA_CORE_ID_KDMA, /* Dcore 0 */
	DMA_CORE_ID_SIZE
};

enum gaudi2_rotator_id {
	ROTATOR_ID_0,
	ROTATOR_ID_1,
	ROTATOR_ID_SIZE,
};

enum gaudi2_mme_id {
	MME_ID_DCORE0,
	MME_ID_DCORE1,
	MME_ID_DCORE2,
	MME_ID_DCORE3,
	MME_ID_SIZE,
};

enum gaudi2_tpc_id {
	TPC_ID_DCORE0_TPC0,
	TPC_ID_DCORE0_TPC1,
	TPC_ID_DCORE0_TPC2,
	TPC_ID_DCORE0_TPC3,
	TPC_ID_DCORE0_TPC4,
	TPC_ID_DCORE0_TPC5,
	TPC_ID_DCORE1_TPC0,
	TPC_ID_DCORE1_TPC1,
	TPC_ID_DCORE1_TPC2,
	TPC_ID_DCORE1_TPC3,
	TPC_ID_DCORE1_TPC4,
	TPC_ID_DCORE1_TPC5,
	TPC_ID_DCORE2_TPC0,
	TPC_ID_DCORE2_TPC1,
	TPC_ID_DCORE2_TPC2,
	TPC_ID_DCORE2_TPC3,
	TPC_ID_DCORE2_TPC4,
	TPC_ID_DCORE2_TPC5,
	TPC_ID_DCORE3_TPC0,
	TPC_ID_DCORE3_TPC1,
	TPC_ID_DCORE3_TPC2,
	TPC_ID_DCORE3_TPC3,
	TPC_ID_DCORE3_TPC4,
	TPC_ID_DCORE3_TPC5,
	/* the PCI TPC is placed last (mapped like HW) */
	TPC_ID_DCORE0_TPC6,
	TPC_ID_SIZE,
};

enum gaudi2_dec_id {
	DEC_ID_DCORE0_DEC0,
	DEC_ID_DCORE0_DEC1,
	DEC_ID_DCORE1_DEC0,
	DEC_ID_DCORE1_DEC1,
	DEC_ID_DCORE2_DEC0,
	DEC_ID_DCORE2_DEC1,
	DEC_ID_DCORE3_DEC0,
	DEC_ID_DCORE3_DEC1,
	DEC_ID_PCIE_VDEC0,
	DEC_ID_PCIE_VDEC1,
	DEC_ID_SIZE,
};

enum gaudi2_hbm_id {
	HBM_ID0,
	HBM_ID1,
	HBM_ID2,
	HBM_ID3,
	HBM_ID4,
	HBM_ID5,
	HBM_ID_SIZE,
};

/* specific EDMA enumeration */
enum gaudi2_edma_id {
	EDMA_ID_DCORE0_INSTANCE0,
	EDMA_ID_DCORE0_INSTANCE1,
	EDMA_ID_DCORE1_INSTANCE0,
	EDMA_ID_DCORE1_INSTANCE1,
	EDMA_ID_DCORE2_INSTANCE0,
	EDMA_ID_DCORE2_INSTANCE1,
	EDMA_ID_DCORE3_INSTANCE0,
	EDMA_ID_DCORE3_INSTANCE1,
	EDMA_ID_SIZE,
};

/* User interrupt count is aligned with the HW CQ count.
 * We have 64 CQs per dcore; CQ0 in dcore 0 is reserved for legacy mode.
 */
#define GAUDI2_NUM_USER_INTERRUPTS 255
#define GAUDI2_NUM_RESERVED_INTERRUPTS 1
#define GAUDI2_TOTAL_USER_INTERRUPTS (GAUDI2_NUM_USER_INTERRUPTS + GAUDI2_NUM_RESERVED_INTERRUPTS)

enum gaudi2_irq_num {
	GAUDI2_IRQ_NUM_EVENT_QUEUE = GAUDI2_EVENT_QUEUE_MSIX_IDX,
	GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM,
	GAUDI2_IRQ_NUM_DCORE0_DEC0_ABNRM,
	GAUDI2_IRQ_NUM_DCORE0_DEC1_NRM,
	GAUDI2_IRQ_NUM_DCORE0_DEC1_ABNRM,
	GAUDI2_IRQ_NUM_DCORE1_DEC0_NRM,
	GAUDI2_IRQ_NUM_DCORE1_DEC0_ABNRM,
	GAUDI2_IRQ_NUM_DCORE1_DEC1_NRM,
	GAUDI2_IRQ_NUM_DCORE1_DEC1_ABNRM,
	GAUDI2_IRQ_NUM_DCORE2_DEC0_NRM,
	GAUDI2_IRQ_NUM_DCORE2_DEC0_ABNRM,
	GAUDI2_IRQ_NUM_DCORE2_DEC1_NRM,
	GAUDI2_IRQ_NUM_DCORE2_DEC1_ABNRM,
	GAUDI2_IRQ_NUM_DCORE3_DEC0_NRM,
	GAUDI2_IRQ_NUM_DCORE3_DEC0_ABNRM,
	GAUDI2_IRQ_NUM_DCORE3_DEC1_NRM,
	GAUDI2_IRQ_NUM_DCORE3_DEC1_ABNRM,
	GAUDI2_IRQ_NUM_SHARED_DEC0_NRM,
	GAUDI2_IRQ_NUM_SHARED_DEC0_ABNRM,
	GAUDI2_IRQ_NUM_SHARED_DEC1_NRM,
	GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM,
	GAUDI2_IRQ_NUM_DEC_LAST = GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM,
	GAUDI2_IRQ_NUM_COMPLETION,
	GAUDI2_IRQ_NUM_NIC_PORT_FIRST,
	GAUDI2_IRQ_NUM_NIC_PORT_LAST = (GAUDI2_IRQ_NUM_NIC_PORT_FIRST + NIC_NUMBER_OF_PORTS - 1),
	GAUDI2_IRQ_NUM_TPC_ASSERT,
	GAUDI2_IRQ_NUM_RESERVED_FIRST,
	GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_TOTAL_USER_INTERRUPTS - 1),
	GAUDI2_IRQ_NUM_UNEXPECTED_ERROR = RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT,
	GAUDI2_IRQ_NUM_USER_FIRST = GAUDI2_IRQ_NUM_UNEXPECTED_ERROR + 1,
	GAUDI2_IRQ_NUM_USER_LAST = (GAUDI2_IRQ_NUM_USER_FIRST + GAUDI2_NUM_USER_INTERRUPTS - 1),
	GAUDI2_IRQ_NUM_LAST = (GAUDI2_MSIX_ENTRIES - 1)
};

static_assert(GAUDI2_IRQ_NUM_USER_FIRST > GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM);
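
/*
 * Minimal sketch (an illustration drawn from the enum above, not a driver
 * helper): translate a zero-based user interrupt index into its MSI-X
 * vector number within the user range.
 */
static inline u32 gaudi2_user_interrupt_to_msix_vec(u32 user_irq_idx)
{
	return GAUDI2_IRQ_NUM_USER_FIRST + user_irq_idx;
}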

/**
 * struct dup_block_ctx - context to initialize unit instances across multiple
 *                        blocks, where a block can be either a dcore or a
 *                        duplicated common module. This code relies on constant
 *                        offsets between blocks and between unit instances in a
 *                        block.
 * @instance_cfg_fn: instance specific configuration function.
 * @data: private configuration data.
 * @base: base address of the first instance in the first block.
 * @block_off: address spacing between subsequent blocks.
 * @instance_off: address spacing between a block's instances.
 * @enabled_mask: mask of enabled instances (1 - enabled, 0 - disabled).
 * @blocks: number of blocks.
 * @instances: unit instances per block.
 */
struct dup_block_ctx {
	void (*instance_cfg_fn)(struct hl_device *hdev, u64 base, void *data);
	void *data;
	u64 base;
	u64 block_off;
	u64 instance_off;
	u64 enabled_mask;
	unsigned int blocks;
	unsigned int instances;
};
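
/*
 * Minimal sketch of how a dup_block_ctx is typically consumed (the real
 * iteration lives in gaudi2_init_blocks(); this hypothetical helper only
 * illustrates the constant-offset scheme documented above).
 */
static inline void gaudi2_cfg_dup_blocks_sketch(struct hl_device *hdev,
						struct dup_block_ctx *ctx)
{
	unsigned int blk, inst, idx = 0;
	u64 block_base, base;

	for (blk = 0 ; blk < ctx->blocks ; blk++) {
		block_base = ctx->base + blk * ctx->block_off;

		for (inst = 0 ; inst < ctx->instances ; inst++, idx++) {
			if (!(ctx->enabled_mask & BIT_ULL(idx)))
				continue;

			base = block_base + inst * ctx->instance_off;
			ctx->instance_cfg_fn(hdev, base, ctx->data);
		}
	}
}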

/**
 * struct gaudi2_queues_test_info - Holds the addresses of the messages used for
 *                                  testing the device queues.
 * @dma_addr: the address used by the HW for accessing the message.
 * @kern_addr: the address used by the driver for accessing the message.
 */
struct gaudi2_queues_test_info {
	dma_addr_t dma_addr;
	void *kern_addr;
};

/**
 * struct gaudi2_device - ASIC specific manage structure.
 * @cpucp_info_get: get information on device from CPU-CP.
 * @mapped_blocks: array that holds the base address and size of all blocks
 *                 the user can map.
 * @lfsr_rand_seeds: array of MME ACC random seeds to set.
 * @hw_queues_lock: protects the H/W queues from concurrent access.
 * @scratchpad_kernel_address: general purpose PAGE_SIZE contiguous memory;
 *                             this memory region should be write-only.
 *                             Currently used for HBW QMAN writes which are
 *                             redundant.
 * @scratchpad_bus_address: scratchpad bus address.
 * @virt_msix_db_cpu_addr: host memory page for the virtual MSI-X doorbell.
 * @virt_msix_db_dma_addr: bus address of the page for the virtual MSI-X doorbell.
 * @dram_bar_cur_addr: current address of DRAM PCI bar.
 * @hw_cap_initialized: This field contains a bit per H/W engine. When that
 *                      engine is initialized, that bit is set by the driver to
 *                      signal we can use this engine in later code paths.
 *                      Each bit is cleared upon reset of its corresponding H/W
 *                      engine.
 * @active_hw_arc: This field contains a bit per ARC of an H/W engine, with the
 *                 exception of the TPC and NIC engines. Once an engine's ARC is
 *                 initialized, its respective bit is set. The driver can
 *                 uniquely identify each initialized ARC and use this
 *                 information in later code paths. Each respective bit is
 *                 cleared upon reset of its corresponding ARC of the H/W engine.
 * @dec_hw_cap_initialized: This field contains a bit per decoder H/W engine.
 *                      When that engine is initialized, that bit is set by
 *                      the driver to signal we can use this engine in later
 *                      code paths.
 *                      Each bit is cleared upon reset of its corresponding H/W
 *                      engine.
 * @tpc_hw_cap_initialized: This field contains a bit per TPC H/W engine.
 *                      When that engine is initialized, that bit is set by
 *                      the driver to signal we can use this engine in later
 *                      code paths.
 *                      Each bit is cleared upon reset of its corresponding H/W
 *                      engine.
 * @active_tpc_arc: This field contains a bit per ARC of the TPC engines.
 *                  Once an engine's ARC is initialized, its respective bit is
 *                  set. Each respective bit is cleared upon reset of its
 *                  corresponding ARC of the TPC engine.
 * @nic_hw_cap_initialized: This field contains a bit per NIC H/W engine.
 * @active_nic_arc: This field contains a bit per ARC of the NIC engines.
 *                  Once an engine's ARC is initialized, its respective bit is
 *                  set. Each respective bit is cleared upon reset of its
 *                  corresponding ARC of the NIC engine.
 * @hw_events: array that holds all H/W events that are defined valid.
 * @events_stat: array that holds histogram of all received events.
 * @events_stat_aggregate: same as events_stat but doesn't get cleared on reset.
 * @num_of_valid_hw_events: used to hold the number of valid H/W events.
 * @queues_test_info: information used by the driver when testing the HW queues.
 */
struct gaudi2_device {
	int (*cpucp_info_get)(struct hl_device *hdev);

	struct user_mapped_block	mapped_blocks[NUM_USER_MAPPED_BLOCKS];
	int				lfsr_rand_seeds[MME_NUM_OF_LFSR_SEEDS];

	spinlock_t			hw_queues_lock;

	void				*scratchpad_kernel_address;
	dma_addr_t			scratchpad_bus_address;

	void				*virt_msix_db_cpu_addr;
	dma_addr_t			virt_msix_db_dma_addr;

	u64				dram_bar_cur_addr;
	u64				hw_cap_initialized;
	u64				active_hw_arc;
	u64				dec_hw_cap_initialized;
	u64				tpc_hw_cap_initialized;
	u64				active_tpc_arc;
	u64				nic_hw_cap_initialized;
	u64				active_nic_arc;
	u32				hw_events[GAUDI2_EVENT_SIZE];
	u32				events_stat[GAUDI2_EVENT_SIZE];
	u32				events_stat_aggregate[GAUDI2_EVENT_SIZE];
	u32				num_of_valid_hw_events;

	/* Queue testing */
	struct gaudi2_queues_test_info	queues_test_info[GAUDI2_NUM_TESTED_QS];
};

/*
 * Types of the Gaudi2 IP blocks, used by the special blocks iterator.
 * Required for scenarios where only particular block types can be
 * addressed (e.g., special PLDM images).
 */
enum gaudi2_block_types {
	GAUDI2_BLOCK_TYPE_PLL,
	GAUDI2_BLOCK_TYPE_RTR,
	GAUDI2_BLOCK_TYPE_CPU,
	GAUDI2_BLOCK_TYPE_HIF,
	GAUDI2_BLOCK_TYPE_HBM,
	GAUDI2_BLOCK_TYPE_NIC,
	GAUDI2_BLOCK_TYPE_PCIE,
	GAUDI2_BLOCK_TYPE_PCIE_PMA,
	GAUDI2_BLOCK_TYPE_PDMA,
	GAUDI2_BLOCK_TYPE_EDMA,
	GAUDI2_BLOCK_TYPE_PMMU,
	GAUDI2_BLOCK_TYPE_PSOC,
	GAUDI2_BLOCK_TYPE_ROT,
	GAUDI2_BLOCK_TYPE_ARC_FARM,
	GAUDI2_BLOCK_TYPE_DEC,
	GAUDI2_BLOCK_TYPE_MME,
	GAUDI2_BLOCK_TYPE_EU_BIST,
	GAUDI2_BLOCK_TYPE_SYNC_MNGR,
	GAUDI2_BLOCK_TYPE_STLB,
	GAUDI2_BLOCK_TYPE_TPC,
	GAUDI2_BLOCK_TYPE_HMMU,
	GAUDI2_BLOCK_TYPE_SRAM,
	GAUDI2_BLOCK_TYPE_XBAR,
	GAUDI2_BLOCK_TYPE_KDMA,
	GAUDI2_BLOCK_TYPE_XDMA,
	GAUDI2_BLOCK_TYPE_XFT,
	GAUDI2_BLOCK_TYPE_MAX
};

extern const u32 gaudi2_dma_core_blocks_bases[DMA_CORE_ID_SIZE];
extern const u32 gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_SIZE];
extern const u32 gaudi2_mme_acc_blocks_bases[MME_ID_SIZE];
extern const u32 gaudi2_mme_ctrl_lo_blocks_bases[MME_ID_SIZE];
extern const u32 edma_stream_base[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES];
extern const u32 gaudi2_rot_blocks_bases[ROTATOR_ID_SIZE];

void gaudi2_iterate_tpcs(struct hl_device *hdev, struct iterate_module_ctx *ctx);
int gaudi2_coresight_init(struct hl_device *hdev);
int gaudi2_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
void gaudi2_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx);
void gaudi2_init_blocks(struct hl_device *hdev, struct dup_block_ctx *cfg_ctx);
bool gaudi2_is_hmmu_enabled(struct hl_device *hdev, int dcore_id, int hmmu_id);
void gaudi2_write_rr_to_all_lbw_rtrs(struct hl_device *hdev, u8 rr_type, u32 rr_index, u64 min_val,
					u64 max_val);
void gaudi2_pb_print_security_errors(struct hl_device *hdev, u32 block_addr, u32 cause,
					u32 offended_addr);
int gaudi2_init_security(struct hl_device *hdev);
void gaudi2_ack_protection_bits_errors(struct hl_device *hdev);
int gaudi2_send_device_activity(struct hl_device *hdev, bool open);

#endif /* GAUDI2P_H_ */