/* SPDX-License-Identifier: GPL-2.0-only OR MIT */

#ifndef __DRM_GPUVA_MGR_H__
#define __DRM_GPUVA_MGR_H__

/*
 * Copyright (c) 2022 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

#include <drm/drm_gem.h>

struct drm_gpuva_manager;
struct drm_gpuva_fn_ops;
/**
 * enum drm_gpuva_flags - flags for struct drm_gpuva
 */
enum drm_gpuva_flags {
	/**
	 * @DRM_GPUVA_INVALIDATED:
	 *
	 * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
	 */
	DRM_GPUVA_INVALIDATED = (1 << 0),

	/**
	 * @DRM_GPUVA_SPARSE:
	 *
	 * Flag indicating that the &drm_gpuva is a sparse mapping.
	 */
	DRM_GPUVA_SPARSE = (1 << 1),

	/**
	 * @DRM_GPUVA_USERBITS: user defined bits
	 */
	DRM_GPUVA_USERBITS = (1 << 2),
};

/**
 * struct drm_gpuva - structure to track a GPU VA mapping
 *
 * This structure represents a GPU VA mapping and is associated with a
 * &drm_gpuva_manager.
 *
 * Typically, this structure is embedded in bigger driver structures.
 */
struct drm_gpuva {
	/**
	 * @mgr: the &drm_gpuva_manager this object is associated with
	 */
	struct drm_gpuva_manager *mgr;

	/**
	 * @flags: the &drm_gpuva_flags for this mapping
	 */
	enum drm_gpuva_flags flags;

	/**
	 * @va: structure containing the address and range of the &drm_gpuva
	 */
	struct {
		/**
		 * @addr: the start address
		 */
		u64 addr;

		/**
		 * @range: the range
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @obj: the mapped &drm_gem_object
		 */
		struct drm_gem_object *obj;

		/**
		 * @entry: the &list_head to attach this object to a &drm_gem_object
		 */
		struct list_head entry;
	} gem;

	/**
	 * @rb: structure containing data to store &drm_gpuvas in an rb-tree
	 */
	struct {
		/**
		 * @node: the rb-tree node
		 */
		struct rb_node node;

		/**
		 * @entry: The &list_head to additionally connect &drm_gpuvas
		 * in the same order they appear in the interval tree. This is
		 * useful to keep iterating &drm_gpuvas from a start node found
		 * through the rb-tree while doing modifications on the rb-tree
		 * itself.
		 */
		struct list_head entry;

		/**
		 * @__subtree_last: needed by the interval tree, caching the
		 * largest end address within this node's subtree
		 */
		u64 __subtree_last;
	} rb;
};
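
/*
 * Typically, &struct drm_gpuva is embedded in a bigger, driver specific
 * structure; a minimal sketch using hypothetical driver names (struct
 * driver_gpuva is not part of this API):
 *
 *	struct driver_gpuva {
 *		struct drm_gpuva base;
 *
 *		// driver specific per-mapping state, e.g. page table handles
 *	};
 */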

int drm_gpuva_insert(struct drm_gpuva_manager *mgr, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);

void drm_gpuva_link(struct drm_gpuva *va);
void drm_gpuva_unlink(struct drm_gpuva *va);

struct drm_gpuva *drm_gpuva_find(struct drm_gpuva_manager *mgr,
				 u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
				       u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start);
struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end);

bool drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range);

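/**
 * drm_gpuva_init() - initialize a pre-allocated &drm_gpuva
 * @va: the &drm_gpuva to initialize
 * @addr: the start address of the new mapping
 * @range: the range of the new mapping
 * @obj: the &drm_gem_object to map
 * @offset: the offset within the &drm_gem_object
 */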
static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
				  struct drm_gem_object *obj, u64 offset)
{
	va->va.addr = addr;
	va->va.range = range;
	va->gem.obj = obj;
	va->gem.offset = offset;
}
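
/*
 * A minimal usage sketch (hypothetical names, locking and error handling
 * elided): initialize the embedded &drm_gpuva, insert it into the manager
 * and link it to its backing GEM object:
 *
 *	struct driver_gpuva *dva = kzalloc(sizeof(*dva), GFP_KERNEL);
 *
 *	drm_gpuva_init(&dva->base, addr, range, obj, offset);
 *	if (drm_gpuva_insert(mgr, &dva->base))
 *		goto err_free;
 *	drm_gpuva_link(&dva->base);
 */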

/**
 * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
 * invalidated
 * @va: the &drm_gpuva to set the invalidate flag for
 * @invalidate: indicates whether the &drm_gpuva is invalidated
 */
static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
{
	if (invalidate)
		va->flags |= DRM_GPUVA_INVALIDATED;
	else
		va->flags &= ~DRM_GPUVA_INVALIDATED;
}

/**
 * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
 * is invalidated
 * @va: the &drm_gpuva to check
 */
static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
{
	return va->flags & DRM_GPUVA_INVALIDATED;
}
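
/*
 * Sketch of typical usage (revalidate_mapping() is hypothetical;
 * drm_gem_for_each_gpuva() is assumed to be provided by &drm_gem_object):
 * mark all mappings of an evicted GEM object invalidated, and test the flag
 * again before command submission:
 *
 *	// hypothetical eviction path, holding the required locks
 *	drm_gem_for_each_gpuva(va, obj)
 *		drm_gpuva_invalidate(va, true);
 *
 *	// hypothetical validation path
 *	if (drm_gpuva_invalidated(va))
 *		revalidate_mapping(va);
 */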

/**
 * struct drm_gpuva_manager - DRM GPU VA Manager
 *
 * The DRM GPU VA Manager keeps track of a GPU's virtual address space by using
 * an rb-tree based interval tree of &drm_gpuva structures. Typically, this
 * structure is embedded in bigger driver structures.
 *
 * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
 * pages.
 *
 * There should be one manager instance per GPU virtual address space.
 */
struct drm_gpuva_manager {
	/**
	 * @name: the name of the DRM GPU VA space
	 */
	const char *name;

	/**
	 * @mm_start: start of the VA space
	 */
	u64 mm_start;

	/**
	 * @mm_range: length of the VA space
	 */
	u64 mm_range;

	/**
	 * @rb: structures to track &drm_gpuva entries
	 */
	struct {
		/**
		 * @tree: the rb-tree to track GPU VA mappings
		 */
		struct rb_root_cached tree;

		/**
		 * @list: the &list_head to track GPU VA mappings
		 */
		struct list_head list;
	} rb;

	/**
	 * @kernel_alloc_node:
	 *
	 * &drm_gpuva representing the address space cutout reserved for
	 * the kernel
	 */
	struct drm_gpuva kernel_alloc_node;

	/**
	 * @ops: &drm_gpuva_fn_ops providing the split/merge steps to drivers
	 */
	const struct drm_gpuva_fn_ops *ops;
};

void drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
			    const char *name,
			    u64 start_offset, u64 range,
			    u64 reserve_offset, u64 reserve_range,
			    const struct drm_gpuva_fn_ops *ops);
void drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr);
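
/*
 * A minimal initialization sketch (hypothetical driver names; the address
 * space layout is illustrative only):
 *
 *	struct driver_vm {
 *		struct drm_gpuva_manager mgr;
 *
 *		// driver specific VM state
 *	};
 *
 *	// manage a 48 bit VA space, reserving the first 4 KiB for the kernel
 *	drm_gpuva_manager_init(&vm->mgr, "example-vm",
 *			       0, 1ULL << 48,
 *			       0, 0x1000,
 *			       &driver_gpuva_ops);
 */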

/* Internal helper to advance to the next &drm_gpuva in the manager's list. */
static inline struct drm_gpuva *
__drm_gpuva_next(struct drm_gpuva *va)
{
	if (va && !list_is_last(&va->rb.entry, &va->mgr->rb.list))
		return list_next_entry(va, rb.entry);

	return NULL;
}

/**
 * drm_gpuva_for_each_va_range() - iterate over a range of &drm_gpuvas
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @mgr__: &drm_gpuva_manager to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
 * between @start__ and @end__. It is implemented similarly to list_for_each(),
 * but uses the &drm_gpuva_manager's internal interval tree to accelerate
 * the search for the starting &drm_gpuva, and hence isn't safe against removal
 * of elements. It assumes that @end__ is within (or is the upper limit of) the
 * &drm_gpuva_manager's address space. This iterator does not skip over the
 * &drm_gpuva_manager's @kernel_alloc_node.
 */
#define drm_gpuva_for_each_va_range(va__, mgr__, start__, end__) \
	for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = __drm_gpuva_next(va__))

/**
 * drm_gpuva_for_each_va_range_safe() - safely iterate over a range of
 * &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @mgr__: &drm_gpuva_manager to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
 * between @start__ and @end__. It is implemented similarly to
 * list_for_each_safe(), but uses the &drm_gpuva_manager's internal interval
 * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
 * against removal of elements. It assumes that @end__ is within (or is the
 * upper limit of) the &drm_gpuva_manager's address space. This iterator does
 * not skip over the &drm_gpuva_manager's @kernel_alloc_node.
 */
#define drm_gpuva_for_each_va_range_safe(va__, next__, mgr__, start__, end__) \
	for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)), \
	     next__ = __drm_gpuva_next(va__); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = next__, next__ = __drm_gpuva_next(va__))
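
/*
 * Example of walking all mappings overlapping a given range; a sketch with
 * a hypothetical helper (dump_mapping() is not part of this API):
 *
 *	struct drm_gpuva *va;
 *
 *	drm_gpuva_for_each_va_range(va, mgr, 0x100000, 0x200000)
 *		dump_mapping(va);
 *
 * Use the _safe variant instead if mappings may be removed while iterating.
 */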

/**
 * drm_gpuva_for_each_va() - iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @mgr__: &drm_gpuva_manager to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuva_manager.
 */
#define drm_gpuva_for_each_va(va__, mgr__) \
	list_for_each_entry(va__, &(mgr__)->rb.list, rb.entry)

/**
 * drm_gpuva_for_each_va_safe() - safely iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @mgr__: &drm_gpuva_manager to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuva_manager. It is implemented with list_for_each_entry_safe(), and
 * is hence safe against removal of elements.
 */
#define drm_gpuva_for_each_va_safe(va__, next__, mgr__) \
	list_for_each_entry_safe(va__, next__, &(mgr__)->rb.list, rb.entry)

/**
 * enum drm_gpuva_op_type - GPU VA operation type
 *
 * Operations to alter the GPU VA mappings tracked by the &drm_gpuva_manager.
 */
enum drm_gpuva_op_type {
	/**
	 * @DRM_GPUVA_OP_MAP: the map op type
	 */
	DRM_GPUVA_OP_MAP,

	/**
	 * @DRM_GPUVA_OP_REMAP: the remap op type
	 */
	DRM_GPUVA_OP_REMAP,

	/**
	 * @DRM_GPUVA_OP_UNMAP: the unmap op type
	 */
	DRM_GPUVA_OP_UNMAP,

	/**
	 * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
	 */
	DRM_GPUVA_OP_PREFETCH,
};

/**
 * struct drm_gpuva_op_map - GPU VA map operation
 *
 * This structure represents a single map operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_map {
	/**
	 * @va: structure containing address and range of a map
	 * operation
	 */
	struct {
		/**
		 * @addr: the base address of the new mapping
		 */
		u64 addr;

		/**
		 * @range: the range of the new mapping
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @obj: the &drm_gem_object to map
		 */
		struct drm_gem_object *obj;
	} gem;
};

/**
 * struct drm_gpuva_op_unmap - GPU VA unmap operation
 *
 * This structure represents a single unmap operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_unmap {
	/**
	 * @va: the &drm_gpuva to unmap
	 */
	struct drm_gpuva *va;

	/**
	 * @keep:
	 *
	 * Indicates whether this &drm_gpuva is physically contiguous with the
	 * original mapping request.
	 *
	 * Optionally, if &keep is set, drivers may keep the actual page table
	 * mappings for this &drm_gpuva, add only the missing page table
	 * entries and update the &drm_gpuva_manager accordingly.
	 */
	bool keep;
};

/**
 * struct drm_gpuva_op_remap - GPU VA remap operation
 *
 * This represents a single remap operation generated by the DRM GPU VA manager.
 *
 * A remap operation is generated when an existing GPU VA mapping is split up
 * by inserting a new GPU VA mapping or by partially unmapping existent
 * mapping(s); hence it consists of a maximum of two map and one unmap
 * operation.
 *
 * The @unmap operation takes care of removing the original existing mapping.
 * @prev is used to remap the preceding part, @next the subsequent part.
 *
 * If either the new mapping's start address matches the start address of the
 * old mapping or the new mapping's end address matches the end address of the
 * old mapping, the corresponding @prev or @next pointer is NULL.
 *
 * Note, the reason for a dedicated remap operation, rather than arbitrary
 * unmap and map operations, is to give drivers the chance to extract driver
 * specific data for creating the new mappings from the unmap operation's
 * &drm_gpuva structure which typically is embedded in larger driver specific
 * structures. A worked example follows the structure definition below.
 */
struct drm_gpuva_op_remap {
	/**
	 * @prev: the preceding part of a split mapping
	 */
	struct drm_gpuva_op_map *prev;

	/**
	 * @next: the subsequent part of a split mapping
	 */
	struct drm_gpuva_op_map *next;

	/**
	 * @unmap: the unmap operation for the original existing mapping
	 */
	struct drm_gpuva_op_unmap *unmap;
};
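
/*
 * Worked example (illustrative addresses): partially unmapping
 * [0x4000, 0x5000) out of an existing mapping spanning [0x0, 0x8000)
 * generates one remap operation with
 *
 *	@prev:  va.addr = 0x0,    va.range = 0x4000 (gem.offset unchanged)
 *	@next:  va.addr = 0x5000, va.range = 0x3000 (gem.offset advanced
 *	        by 0x5000)
 *	@unmap: the original mapping spanning [0x0, 0x8000)
 */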

/**
 * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
 *
 * This structure represents a single prefetch operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_prefetch {
	/**
	 * @va: the &drm_gpuva to prefetch
	 */
	struct drm_gpuva *va;
};

/**
 * struct drm_gpuva_op - GPU VA operation
 *
 * This structure represents a single generic operation.
 *
 * The particular type of the operation is defined by @op.
 */
struct drm_gpuva_op {
	/**
	 * @entry:
	 *
	 * The &list_head used to distribute instances of this struct within
	 * &drm_gpuva_ops.
	 */
	struct list_head entry;

	/**
	 * @op: the type of the operation
	 */
	enum drm_gpuva_op_type op;

	union {
		/**
		 * @map: the map operation
		 */
		struct drm_gpuva_op_map map;

		/**
		 * @remap: the remap operation
		 */
		struct drm_gpuva_op_remap remap;

		/**
		 * @unmap: the unmap operation
		 */
		struct drm_gpuva_op_unmap unmap;

		/**
		 * @prefetch: the prefetch operation
		 */
		struct drm_gpuva_op_prefetch prefetch;
	};
};

/**
 * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
 */
struct drm_gpuva_ops {
	/**
	 * @list: the &list_head
	 */
	struct list_head list;
};

/**
 * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations.
 */
#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @next: another &drm_gpuva_op to store the next step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations. It is
 * implemented with list_for_each_safe(), and hence is safe against removal of
 * elements.
 */
#define drm_gpuva_for_each_op_safe(op, next, ops) \
	list_for_each_entry_safe(op, next, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations beginning
 * from the given operation in reverse order.
 */
#define drm_gpuva_for_each_op_from_reverse(op, ops) \
	list_for_each_entry_from_reverse(op, &(ops)->list, entry)

/**
 * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
 */
#define drm_gpuva_first_op(ops) \
	list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
 */
#define drm_gpuva_last_op(ops) \
	list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)

/**
 * drm_gpuva_next_op() - next &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_next_op(op) list_next_entry(op, entry)

struct drm_gpuva_ops *
drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
			    u64 addr, u64 range,
			    struct drm_gem_object *obj, u64 offset);
struct drm_gpuva_ops *
drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
			       struct drm_gem_object *obj);

void drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
			struct drm_gpuva_ops *ops);

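/*
 * Typical flow for drivers consuming a pre-computed list of operations; a
 * sketch with a hypothetical helper (driver_handle_op() is not part of this
 * API):
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *	int ret = 0;
 *
 *	ops = drm_gpuva_sm_map_ops_create(mgr, addr, range, obj, offset);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops)
 *		ret = driver_handle_op(op);
 *
 *	drm_gpuva_ops_free(mgr, ops);
 *	return ret;
 */
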
/**
 * drm_gpuva_init_from_op() - initialize a pre-allocated &drm_gpuva from a
 * &drm_gpuva_op_map
 * @va: the &drm_gpuva to initialize
 * @op: the &drm_gpuva_op_map providing the address, range, backing
 * &drm_gem_object and offset
 */
static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
					  struct drm_gpuva_op_map *op)
{
	drm_gpuva_init(va, op->va.addr, op->va.range,
		       op->gem.obj, op->gem.offset);
}

/**
 * struct drm_gpuva_fn_ops - callbacks for split/merge steps
 *
 * This structure defines the callbacks used by &drm_gpuva_sm_map and
 * &drm_gpuva_sm_unmap to provide the split/merge steps for map and unmap
 * operations to drivers.
 */
struct drm_gpuva_fn_ops {
	/**
	 * @op_alloc: called when the &drm_gpuva_manager allocates
	 * a struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuva_op *(*op_alloc)(void);

	/**
	 * @op_free: called when the &drm_gpuva_manager frees a
	 * struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*op_free)(struct drm_gpuva_op *op);

	/**
	 * @sm_step_map: called from &drm_gpuva_sm_map to finally insert the
	 * mapping once all previous steps were completed
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
	 *
	 * Can be NULL if &drm_gpuva_sm_map is not used.
	 */
	int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_remap: called from &drm_gpuva_sm_map and
	 * &drm_gpuva_sm_unmap to split up an existent mapping
	 *
	 * This callback is called when an existent mapping needs to be split
	 * up. This is the case when either a newly requested mapping overlaps
	 * or is enclosed by an existent mapping, or a partial unmap of an
	 * existent mapping is requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
	 * used.
	 */
	int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_unmap: called from &drm_gpuva_sm_map and
	 * &drm_gpuva_sm_unmap to unmap an existent mapping
	 *
	 * This callback is called when an existent mapping needs to be
	 * unmapped. This is the case when either a newly requested mapping
	 * encloses an existent mapping or an unmap of an existent mapping is
	 * requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
	 * used.
	 */
	int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
};
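
/*
 * A minimal sketch of a driver's &drm_gpuva_fn_ops (callback bodies are
 * hypothetical; they would program the driver's page tables and update the
 * &drm_gpuva_manager via drm_gpuva_map(), drm_gpuva_remap() and
 * drm_gpuva_unmap(), declared below):
 *
 *	static int driver_sm_step_map(struct drm_gpuva_op *op, void *priv)
 *	{
 *		// program page tables for op->map.va.addr / op->map.va.range,
 *		// then insert a pre-allocated &drm_gpuva via drm_gpuva_map()
 *		return 0;
 *	}
 *
 *	static const struct drm_gpuva_fn_ops driver_gpuva_ops = {
 *		.sm_step_map = driver_sm_step_map,
 *		.sm_step_remap = driver_sm_step_remap,
 *		.sm_step_unmap = driver_sm_step_unmap,
 *	};
 */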

int drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
		     u64 addr, u64 range,
		     struct drm_gem_object *obj, u64 offset);

int drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
		       u64 addr, u64 range);

void drm_gpuva_map(struct drm_gpuva_manager *mgr,
		   struct drm_gpuva *va,
		   struct drm_gpuva_op_map *op);

void drm_gpuva_remap(struct drm_gpuva *prev,
		     struct drm_gpuva *next,
		     struct drm_gpuva_op_remap *op);

void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
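
/*
 * With &drm_gpuva_fn_ops wired into the manager, a map request is processed
 * in a single call; a sketch (struct driver_map_ctx is hypothetical):
 *
 *	struct driver_map_ctx ctx;
 *	int ret;
 *
 *	ret = drm_gpuva_sm_map(mgr, &ctx, addr, range, obj, offset);
 *	if (ret)
 *		return ret;
 */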

#endif /* __DRM_GPUVA_MGR_H__ */