1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/*
3 * Copyright (C) 2020-2023 Intel Corporation
4 */
5
6#ifndef __UAPI_IVPU_DRM_H__
7#define __UAPI_IVPU_DRM_H__
8
9#include "drm.h"
10
11#if defined(__cplusplus)
12extern "C" {
13#endif
14
/* UAPI version implemented by this header */
#define DRM_IVPU_DRIVER_MAJOR 1
#define DRM_IVPU_DRIVER_MINOR 0

/*
 * Per-driver ioctl numbers, relative to DRM_COMMAND_BASE.
 * These values are userspace ABI and must never be renumbered.
 * NOTE(review): 0x04 is skipped — presumably a retired/reserved slot; confirm
 * its history before reusing it.
 */
#define DRM_IVPU_GET_PARAM		  0x00
#define DRM_IVPU_SET_PARAM		  0x01
#define DRM_IVPU_BO_CREATE		  0x02
#define DRM_IVPU_BO_INFO		  0x03
#define DRM_IVPU_SUBMIT			  0x05
#define DRM_IVPU_BO_WAIT		  0x06
24
/*
 * Full ioctl request codes (direction, payload size and command number),
 * built with the standard DRM_IOR/DRM_IOW/DRM_IOWR helpers.
 */
#define DRM_IOCTL_IVPU_GET_PARAM                                               \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_SET_PARAM                                               \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_BO_CREATE                                               \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_CREATE, struct drm_ivpu_bo_create)

#define DRM_IOCTL_IVPU_BO_INFO                                                 \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_INFO, struct drm_ivpu_bo_info)

#define DRM_IOCTL_IVPU_SUBMIT                                                  \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SUBMIT, struct drm_ivpu_submit)

#define DRM_IOCTL_IVPU_BO_WAIT                                                 \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_WAIT, struct drm_ivpu_bo_wait)
42
43/**
44 * DOC: contexts
45 *
46 * VPU contexts have private virtual address space, job queues and priority.
 * Each context is identified by a unique ID. Context is created on open().
48 */
49
/*
 * Parameter ids for DRM_IOCTL_IVPU_GET_PARAM / DRM_IOCTL_IVPU_SET_PARAM;
 * see the struct drm_ivpu_param kernel-doc for per-parameter semantics.
 */
#define DRM_IVPU_PARAM_DEVICE_ID	    0
#define DRM_IVPU_PARAM_DEVICE_REVISION	    1
#define DRM_IVPU_PARAM_PLATFORM_TYPE	    2
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE	    3
#define DRM_IVPU_PARAM_NUM_CONTEXTS	    4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
#define DRM_IVPU_PARAM_CONTEXT_PRIORITY	    6
#define DRM_IVPU_PARAM_CONTEXT_ID	    7
#define DRM_IVPU_PARAM_FW_API_VERSION	    8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT	    9
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID  10
#define DRM_IVPU_PARAM_TILE_CONFIG	    11
#define DRM_IVPU_PARAM_SKU		    12
#define DRM_IVPU_PARAM_CAPABILITIES	    13

/* Value returned by DRM_IVPU_PARAM_PLATFORM_TYPE on real hardware */
#define DRM_IVPU_PLATFORM_TYPE_SILICON	    0

/* Context scheduling priorities for DRM_IVPU_PARAM_CONTEXT_PRIORITY, lowest to highest */
#define DRM_IVPU_CONTEXT_PRIORITY_IDLE	    0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL    1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS	    2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME  3

/* Capability ids for DRM_IVPU_PARAM_CAPABILITIES */
#define DRM_IVPU_CAP_METRIC_STREAMER	    1
#define DRM_IVPU_CAP_DMA_MEMORY_RANGE       2
74
/**
 * struct drm_ivpu_param - Get/Set VPU parameters
 */
struct drm_ivpu_param {
	/**
	 * @param:
	 *
	 * Supported params:
	 *
	 * %DRM_IVPU_PARAM_DEVICE_ID:
	 * PCI Device ID of the VPU device (read-only)
	 *
	 * %DRM_IVPU_PARAM_DEVICE_REVISION:
	 * VPU device revision (read-only)
	 *
	 * %DRM_IVPU_PARAM_PLATFORM_TYPE:
	 * Returns %DRM_IVPU_PLATFORM_TYPE_SILICON on real hardware or device specific
	 * platform type when executing on a simulator or emulator (read-only)
	 *
	 * %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
	 * Current PLL frequency (read-only)
	 *
	 * %DRM_IVPU_PARAM_NUM_CONTEXTS:
	 * Maximum number of simultaneously existing contexts (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
	 * Lowest VPU virtual address available in the current context (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
	 * Value of current context scheduling priority (read-write).
	 * See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_ID:
	 * Current context ID, always greater than 0 (read-only)
	 *
	 * %DRM_IVPU_PARAM_FW_API_VERSION:
	 * Firmware API version array (read-only)
	 *
	 * %DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
	 * Heartbeat value from an engine (read-only).
	 * Engine ID (e.g. DRM_IVPU_ENGINE_COMPUTE) is given via index.
	 *
	 * %DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
	 * Device-unique inference ID (read-only)
	 *
	 * %DRM_IVPU_PARAM_TILE_CONFIG:
	 * VPU tile configuration (read-only)
	 *
	 * %DRM_IVPU_PARAM_SKU:
	 * VPU SKU ID (read-only)
	 *
	 * %DRM_IVPU_PARAM_CAPABILITIES:
	 * Capability support query (read-only).
	 * NOTE(review): presumably the capability to query (one of %DRM_IVPU_CAP_*)
	 * is passed via @index — confirm against the driver implementation.
	 *
	 */
	__u32 param;

	/** @index: Index for params that have multiple instances */
	__u32 index;

	/** @value: Param value */
	__u64 value;
};
135
/* Buffer-object placement flags (low 16 bits of drm_ivpu_bo_create.flags) */
#define DRM_IVPU_BO_SHAVE_MEM  0x00000001
#define DRM_IVPU_BO_HIGH_MEM   DRM_IVPU_BO_SHAVE_MEM
#define DRM_IVPU_BO_MAPPABLE   0x00000002
#define DRM_IVPU_BO_DMA_MEM    0x00000004

/* Host-side caching mode flags (bits 16-17); CACHED (0) is the default */
#define DRM_IVPU_BO_CACHED     0x00000000
#define DRM_IVPU_BO_UNCACHED   0x00010000
#define DRM_IVPU_BO_WC	       0x00020000
#define DRM_IVPU_BO_CACHE_MASK 0x00030000

/* Mask of all flags accepted by DRM_IOCTL_IVPU_BO_CREATE */
#define DRM_IVPU_BO_FLAGS \
	(DRM_IVPU_BO_HIGH_MEM | \
	 DRM_IVPU_BO_MAPPABLE | \
	 DRM_IVPU_BO_DMA_MEM | \
	 DRM_IVPU_BO_CACHE_MASK)
151
/**
 * struct drm_ivpu_bo_create - Create BO backed by SHMEM
 *
 * Create GEM buffer object allocated in SHMEM memory.
 */
struct drm_ivpu_bo_create {
	/** @size: The size in bytes of the allocated memory */
	__u64 size;

	/**
	 * @flags:
	 *
	 * Supported flags:
	 *
	 * %DRM_IVPU_BO_HIGH_MEM:
	 *
	 * Allocate VPU address from >4GB range.
	 * Buffer object with vpu address >4GB can always be accessed by the
	 * VPU DMA engine, but some HW generation may not be able to access
	 * this memory from the firmware running on the VPU management processor.
	 * Suitable for input, output and some scratch buffers.
	 *
	 * %DRM_IVPU_BO_MAPPABLE:
	 *
	 * Buffer object can be mapped using mmap().
	 *
	 * %DRM_IVPU_BO_CACHED:
	 *
	 * Allocated BO will be cached on host side (WB) and snooped on the VPU side.
	 * This is the default caching mode.
	 *
	 * %DRM_IVPU_BO_UNCACHED:
	 *
	 * Allocated BO will not be cached on host side nor snooped on the VPU side.
	 *
	 * %DRM_IVPU_BO_WC:
	 *
	 * Allocated BO will use write combining buffer for writes but reads will be
	 * uncached.
	 */
	__u32 flags;

	/** @handle: Returned GEM object handle */
	__u32 handle;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;
};
200
/**
 * struct drm_ivpu_bo_info - Query buffer object info
 *
 * Argument of DRM_IOCTL_IVPU_BO_INFO: @handle is the only input,
 * all remaining fields are filled in by the driver.
 */
struct drm_ivpu_bo_info {
	/** @handle: Handle of the queried BO */
	__u32 handle;

	/** @flags: Returned flags used to create the BO */
	__u32 flags;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;

	/**
	 * @mmap_offset:
	 *
	 * Returned offset to be used in mmap(). 0 in case the BO is not mappable.
	 */
	__u64 mmap_offset;

	/** @size: Returned GEM object size, aligned to PAGE_SIZE */
	__u64 size;
};
224
/* drm_ivpu_submit engines — values accepted in drm_ivpu_submit.engine */
#define DRM_IVPU_ENGINE_COMPUTE 0
#define DRM_IVPU_ENGINE_COPY    1

/**
 * struct drm_ivpu_submit - Submit commands to the VPU
 *
 * Execute a single command buffer on a given VPU engine.
 * Handles to all referenced buffer objects have to be provided in @buffers_ptr.
 *
 * User space may wait on job completion using %DRM_IVPU_BO_WAIT ioctl.
 */
struct drm_ivpu_submit {
	/**
	 * @buffers_ptr:
	 *
	 * A pointer to an u32 array of GEM handles of the BOs required for this job.
	 * The number of elements in the array must be equal to the value given by @buffer_count.
	 *
	 * The first BO is the command buffer. The rest of array has to contain all
	 * BOs referenced from the command buffer.
	 */
	__u64 buffers_ptr;

	/** @buffer_count: Number of elements in the @buffers_ptr array */
	__u32 buffer_count;

	/**
	 * @engine: Select the engine this job should be executed on
	 *
	 * %DRM_IVPU_ENGINE_COMPUTE:
	 *
	 * Performs Deep Learning Neural Compute Inference Operations
	 *
	 * %DRM_IVPU_ENGINE_COPY:
	 *
	 * Performs memory copy operations to/from system memory allocated for VPU
	 */
	__u32 engine;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/**
	 * @commands_offset:
	 *
	 * Offset inside the first buffer in @buffers_ptr containing commands
	 * to be executed. The offset has to be 8-byte aligned.
	 */
	__u32 commands_offset;
};
276
/* drm_ivpu_bo_wait job status codes */
#define DRM_IVPU_JOB_STATUS_SUCCESS 0

/**
 * struct drm_ivpu_bo_wait - Wait for BO to become inactive
 *
 * Blocks until a given buffer object becomes inactive.
 * With @timeout_ns set to 0 returns immediately.
 */
struct drm_ivpu_bo_wait {
	/** @handle: Handle to the buffer object to be waited on */
	__u32 handle;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/** @timeout_ns: Absolute timeout in nanoseconds (may be zero) */
	__s64 timeout_ns;

	/**
	 * @job_status:
	 *
	 * Job status code which is updated after the job is completed.
	 * %DRM_IVPU_JOB_STATUS_SUCCESS or device specific error otherwise.
	 * Valid only if @handle points to a command buffer.
	 */
	__u32 job_status;

	/** @pad: Padding - must be zero */
	__u32 pad;
};
308
309#if defined(__cplusplus)
310}
311#endif
312
313#endif /* __UAPI_IVPU_DRM_H__ */
314