xref: /third_party/libdrm/amdgpu/amdgpu_bo.c (revision d722e3fb)
/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

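/* Internal helper: wrap an existing kernel GEM handle in a struct amdgpu_bo,
 * register it in the per-device handle table and return the new reference
 * (refcount starts at 1). Callers must already hold dev->bo_table_mutex; on
 * failure the GEM handle itself is left untouched and it is up to the caller
 * to close it.
 */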
static int amdgpu_bo_create(amdgpu_device_handle dev,
			    uint64_t size,
			    uint32_t handle,
			    amdgpu_bo_handle *buf_handle)
{
	struct amdgpu_bo *bo;
	int r;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	r = handle_table_insert(&dev->bo_handles, handle, bo);
	if (r) {
		free(bo);
		return r;
	}

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = size;
	bo->handle = handle;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	*buf_handle = bo;
	return 0;
}

drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
			       struct amdgpu_bo_alloc_request *alloc_buffer,
			       amdgpu_bo_handle *buf_handle)
{
	union drm_amdgpu_gem_create args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = alloc_buffer->alloc_size;
	args.in.alignment = alloc_buffer->phys_alignment;

	/* Set the placement. */
	args.in.domains = alloc_buffer->preferred_heap;
	args.in.domain_flags = alloc_buffer->flags;

	/* Allocate the buffer with the preferred heap. */
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
				&args, sizeof(args));
	if (r)
		goto out;

	pthread_mutex_lock(&dev->bo_table_mutex);
	r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
			     buf_handle);
	pthread_mutex_unlock(&dev->bo_table_mutex);
	if (r) {
		drmCloseBufferHandle(dev->fd, args.out.handle);
	}

out:
	return r;
}
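
/* Example (a minimal sketch, not part of the library): allocating a 4 KiB
 * GTT buffer through this entry point and releasing it again. "dev" is
 * assumed to come from a prior amdgpu_device_initialize() call; error
 * handling is abbreviated.
 *
 *	struct amdgpu_bo_alloc_request req = {
 *		.alloc_size = 4096,
 *		.phys_alignment = 4096,
 *		.preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
 *	};
 *	amdgpu_bo_handle bo;
 *
 *	if (amdgpu_bo_alloc(dev, &req, &bo) == 0) {
 *		... use the buffer ...
 *		amdgpu_bo_free(bo);
 *	}
 */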

drm_public int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
				      struct amdgpu_bo_metadata *info)
{
	struct drm_amdgpu_gem_metadata args = {};

	args.handle = bo->handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.flags = info->flags;
	args.data.tiling_info = info->tiling_info;

	if (info->size_metadata > sizeof(args.data.data))
		return -EINVAL;

	if (info->size_metadata) {
		args.data.data_size_bytes = info->size_metadata;
		memcpy(args.data.data, info->umd_metadata, info->size_metadata);
	}

	return drmCommandWriteRead(bo->dev->fd,
				   DRM_AMDGPU_GEM_METADATA,
				   &args, sizeof(args));
}
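
/* Example (sketch only): attaching a small userspace-defined metadata blob to
 * a BO before sharing it. "tiling" and "blob" are hypothetical values owned
 * by the caller; the blob must fit into info.umd_metadata.
 *
 *	struct amdgpu_bo_metadata info = {};
 *
 *	info.tiling_info = tiling;
 *	info.size_metadata = sizeof(blob);
 *	memcpy(info.umd_metadata, &blob, sizeof(blob));
 *	amdgpu_bo_set_metadata(bo, &info);
 */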

drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
				    struct amdgpu_bo_info *info)
{
	struct drm_amdgpu_gem_metadata metadata = {};
	struct drm_amdgpu_gem_create_in bo_info = {};
	struct drm_amdgpu_gem_op gem_op = {};
	int r;

	/* Validate the BO passed in */
	if (!bo->handle)
		return -EINVAL;

	/* Query metadata. */
	metadata.handle = bo->handle;
	metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
				&metadata, sizeof(metadata));
	if (r)
		return r;

	if (metadata.data.data_size_bytes >
	    sizeof(info->metadata.umd_metadata))
		return -EINVAL;

	/* Query buffer info. */
	gem_op.handle = bo->handle;
	gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
	gem_op.value = (uintptr_t)&bo_info;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
				&gem_op, sizeof(gem_op));
	if (r)
		return r;

	memset(info, 0, sizeof(*info));
	info->alloc_size = bo_info.bo_size;
	info->phys_alignment = bo_info.alignment;
	info->preferred_heap = bo_info.domains;
	info->alloc_flags = bo_info.domain_flags;
	info->metadata.flags = metadata.data.flags;
	info->metadata.tiling_info = metadata.data.tiling_info;

	info->metadata.size_metadata = metadata.data.data_size_bytes;
	if (metadata.data.data_size_bytes > 0)
		memcpy(info->metadata.umd_metadata, metadata.data.data,
		       metadata.data.data_size_bytes);

	return 0;
}

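/* Internal helper for amdgpu_bo_export(): publish the BO under a GEM flink
 * name. When dev->flink_fd differs from dev->fd (e.g. the device was opened
 * through a render node), the handle is first re-imported on flink_fd via a
 * temporary dma-buf fd so the FLINK ioctl can be issued there. The resulting
 * name is cached in bo->flink_name and registered in dev->bo_flink_names.
 */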
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
	struct drm_gem_flink flink;
	int fd, dma_fd;
	uint32_t handle;
	int r;

	fd = bo->dev->fd;
	handle = bo->handle;
	if (bo->flink_name)
		return 0;

	if (bo->dev->flink_fd != bo->dev->fd) {
		r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				       &dma_fd);
		if (!r) {
			r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
			close(dma_fd);
		}
		if (r)
			return r;
		fd = bo->dev->flink_fd;
	}
	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;

	r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (r)
		return r;

	bo->flink_name = flink.name;

	if (bo->dev->flink_fd != bo->dev->fd)
		drmCloseBufferHandle(bo->dev->flink_fd, handle);

	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	return r;
}

drm_public int amdgpu_bo_export(amdgpu_bo_handle bo,
				enum amdgpu_bo_handle_type type,
				uint32_t *shared_handle)
{
	int r;

	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		r = amdgpu_bo_export_flink(bo);
		if (r)
			return r;

		*shared_handle = bo->flink_name;
		return 0;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		*shared_handle = bo->handle;
		return 0;

	case amdgpu_bo_handle_type_dma_buf_fd:
		return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
					  DRM_CLOEXEC | DRM_RDWR,
					  (int*)shared_handle);
	}
	return -EINVAL;
}
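
/* Example (sketch only): exporting a BO as a dma-buf file descriptor so it
 * can be handed to another process or API. The fd is returned through the
 * uint32_t out parameter and should be closed by the caller once it is no
 * longer needed.
 *
 *	uint32_t shared;
 *	int dmabuf_fd;
 *
 *	if (amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd,
 *			     &shared) == 0) {
 *		dmabuf_fd = (int)shared;
 *		... send dmabuf_fd over a socket, import it elsewhere ...
 *		close(dmabuf_fd);
 *	}
 */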

drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
				enum amdgpu_bo_handle_type type,
				uint32_t shared_handle,
				struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct amdgpu_bo *bo = NULL;
	uint32_t handle = 0, flink_name = 0;
	uint64_t alloc_size = 0;
	int r = 0;
	int dma_fd;
	uint64_t dma_buf_size = 0;

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r)
			goto unlock;

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			r = -errno;
			goto free_bo_handle;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = handle_table_lookup(&dev->bo_handles, shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		/* Importing a KMS handle is not allowed. */
		r = -EPERM;
		goto unlock;

	default:
		r = -EINVAL;
		goto unlock;
	}

	if (bo) {
		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);
		pthread_mutex_unlock(&dev->bo_table_mutex);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		return 0;
	}

	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r)
			goto unlock;

		flink_name = shared_handle;
		handle = open_arg.handle;
		alloc_size = open_arg.size;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, handle,
					       DRM_CLOEXEC, &dma_fd);
			if (r)
				goto free_bo_handle;
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
			close(dma_fd);
			if (r)
				goto free_bo_handle;
			r = drmCloseBufferHandle(dev->flink_fd,
						 open_arg.handle);
			if (r)
				goto free_bo_handle;
		}
		open_arg.handle = 0;
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		handle = shared_handle;
		alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		assert(0); /* unreachable */
	}

	/* Initialize it. */
	r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
	if (r)
		goto free_bo_handle;

	if (flink_name) {
		bo->flink_name = flink_name;
		r = handle_table_insert(&dev->bo_flink_names, flink_name,
					bo);
		if (r)
			goto free_bo_handle;
	}

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	pthread_mutex_unlock(&dev->bo_table_mutex);
	return 0;

free_bo_handle:
	if (flink_name && open_arg.handle)
		drmCloseBufferHandle(dev->flink_fd, open_arg.handle);

	if (bo)
		amdgpu_bo_free(bo);
	else
		drmCloseBufferHandle(dev->fd, handle);
unlock:
	pthread_mutex_unlock(&dev->bo_table_mutex);
	return r;
}
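
/* Example (sketch only): importing a dma-buf fd received from another process.
 * "dmabuf_fd" is a hypothetical descriptor obtained elsewhere; error handling
 * is abbreviated.
 *
 *	struct amdgpu_bo_import_result res;
 *
 *	if (amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
 *			     (uint32_t)dmabuf_fd, &res) == 0) {
 *		... res.buf_handle and res.alloc_size are now valid ...
 *		amdgpu_bo_free(res.buf_handle);
 *	}
 */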

drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
	struct amdgpu_device *dev;
	struct amdgpu_bo *bo = buf_handle;

	assert(bo != NULL);
	dev = bo->dev;
	pthread_mutex_lock(&dev->bo_table_mutex);

	if (update_references(&bo->refcount, NULL)) {
		/* Remove the buffer from the hash tables. */
		handle_table_remove(&dev->bo_handles, bo->handle);

		if (bo->flink_name)
			handle_table_remove(&dev->bo_flink_names,
					    bo->flink_name);

		/* Release CPU access. */
		if (bo->cpu_map_count > 0) {
			bo->cpu_map_count = 1;
			amdgpu_bo_cpu_unmap(bo);
		}

		drmCloseBufferHandle(dev->fd, bo->handle);
		pthread_mutex_destroy(&bo->cpu_access_mutex);
		free(bo);
	}

	pthread_mutex_unlock(&dev->bo_table_mutex);

	return 0;
}

drm_public void amdgpu_bo_inc_ref(amdgpu_bo_handle bo)
{
	atomic_inc(&bo->refcount);
}

drm_public int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
	union drm_amdgpu_gem_mmap args;
	void *ptr;
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);

	if (bo->cpu_ptr) {
		/* already mapped */
		assert(bo->cpu_map_count > 0);
		bo->cpu_map_count++;
		*cpu = bo->cpu_ptr;
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	assert(bo->cpu_map_count == 0);

	memset(&args, 0, sizeof(args));

	/* Query the mmap offset of the buffer (returned in args.out.addr_ptr).
	 * The kernel driver ignores the offset and size parameters. */
	args.in.handle = bo->handle;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
				sizeof(args));
	if (r) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return r;
	}

	/* Map the buffer. */
	ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       bo->dev->fd, args.out.addr_ptr);
	if (ptr == MAP_FAILED) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -errno;
	}

	bo->cpu_ptr = ptr;
	bo->cpu_map_count = 1;
	pthread_mutex_unlock(&bo->cpu_access_mutex);

	*cpu = ptr;
	return 0;
}

drm_public int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);
	assert(bo->cpu_map_count >= 0);

	if (bo->cpu_map_count == 0) {
		/* not mapped */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -EINVAL;
	}

	bo->cpu_map_count--;
	if (bo->cpu_map_count > 0) {
		/* mapped multiple times */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
	bo->cpu_ptr = NULL;
	pthread_mutex_unlock(&bo->cpu_access_mutex);
	return r;
}
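
/* Example (sketch only): CPU mapping is reference counted, so nested
 * map/unmap pairs are fine as long as they stay balanced. "bo" is assumed to
 * be a mappable buffer allocated with amdgpu_bo_alloc(); error handling is
 * abbreviated.
 *
 *	void *cpu;
 *
 *	if (amdgpu_bo_cpu_map(bo, &cpu) == 0) {
 *		... any CPU access within the BO's alloc_size through "cpu" ...
 *		amdgpu_bo_cpu_unmap(bo);
 *	}
 */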

drm_public int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				struct amdgpu_buffer_size_alignments *info)
{
	info->size_local = dev->dev_info.pte_fragment_size;
	info->size_remote = dev->dev_info.gart_page_size;
	return 0;
}

drm_public int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
				       uint64_t timeout_ns,
				       bool *busy)
{
	union drm_amdgpu_gem_wait_idle args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = bo->handle;
	args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
				&args, sizeof(args));

	if (r == 0) {
		*busy = args.out.status;
		return 0;
	} else {
		fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
		return r;
	}
}
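
/* Example (sketch only): waiting up to one second for GPU work touching a BO
 * to finish before reusing its storage. The timeout is in nanoseconds; "busy"
 * reports whether the buffer was still busy when the wait returned.
 *
 *	bool busy;
 *
 *	if (amdgpu_bo_wait_for_idle(bo, 1000000000ull, &busy) == 0 && !busy) {
 *		... the buffer is idle, safe to recycle ...
 *	}
 */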

drm_public int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
					     void *cpu,
					     uint64_t size,
					     amdgpu_bo_handle *buf_handle,
					     uint64_t *offset_in_bo)
{
	struct amdgpu_bo *bo;
	uint32_t i;
	int r = 0;

	if (cpu == NULL || size == 0)
		return -EINVAL;

	/*
	 * Workaround for a buggy application which tries to import previously
	 * exposed CPU pointers. If we find a real world use case we should
	 * improve that by asking the kernel for the right handle.
	 */
	pthread_mutex_lock(&dev->bo_table_mutex);
	for (i = 0; i < dev->bo_handles.max_key; i++) {
		bo = handle_table_lookup(&dev->bo_handles, i);
		if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
			continue;
		if (cpu >= bo->cpu_ptr &&
		    cpu < (void*)((uintptr_t)bo->cpu_ptr + bo->alloc_size))
			break;
	}

	if (i < dev->bo_handles.max_key) {
		atomic_inc(&bo->refcount);
		*buf_handle = bo;
		*offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
	} else {
		*buf_handle = NULL;
		*offset_in_bo = 0;
		r = -ENXIO;
	}
	pthread_mutex_unlock(&dev->bo_table_mutex);

	return r;
}

drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
					      void *cpu,
					      uint64_t size,
					      amdgpu_bo_handle *buf_handle)
{
	int r;
	struct drm_amdgpu_gem_userptr args;

	args.addr = (uintptr_t)cpu;
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
		AMDGPU_GEM_USERPTR_VALIDATE;
	args.size = size;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
				&args, sizeof(args));
	if (r)
		goto out;

	pthread_mutex_lock(&dev->bo_table_mutex);
	r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
	pthread_mutex_unlock(&dev->bo_table_mutex);
	if (r) {
		drmCloseBufferHandle(dev->fd, args.handle);
	}

out:
	return r;
}
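
/* Example (sketch only): wrapping an existing page-aligned anonymous
 * allocation as a GPU-accessible userptr BO. The memory must stay valid for
 * the lifetime of the BO; the address and size are assumed to be multiples
 * of the page size.
 *
 *	uint64_t sz = 2 * 4096;
 *	void *mem = NULL;
 *	amdgpu_bo_handle ubo;
 *
 *	if (posix_memalign(&mem, 4096, sz) == 0 &&
 *	    amdgpu_create_bo_from_user_mem(dev, mem, sz, &ubo) == 0) {
 *		... map ubo into the GPU VA space and use it ...
 *		amdgpu_bo_free(ubo);
 *	}
 *	free(mem);
 */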

drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
					 uint32_t number_of_buffers,
					 struct drm_amdgpu_bo_list_entry *buffers,
					 uint32_t *result)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_buffers;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	if (!r)
		*result = args.out.list_handle;
	return r;
}

drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
					  uint32_t bo_list)
{
	union drm_amdgpu_bo_list args;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = bo_list;

	return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				   &args, sizeof(args));
}

drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
				     uint32_t number_of_resources,
				     amdgpu_bo_handle *resources,
				     uint8_t *resource_prios,
				     amdgpu_bo_list_handle *result)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	*result = malloc(sizeof(struct amdgpu_bo_list));
	if (!*result) {
		free(list);
		return -ENOMEM;
	}

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	if (r) {
		free(*result);
		return r;
	}

	(*result)->dev = dev;
	(*result)->handle = args.out.list_handle;
	return 0;
}
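
/* Example (sketch only): building a BO list that a subsequent command
 * submission can reference. "bo" is assumed to be the single buffer the
 * submission uses; priorities default to 0 when NULL is passed.
 *
 *	amdgpu_bo_handle handles[] = { bo };
 *	amdgpu_bo_list_handle list;
 *
 *	if (amdgpu_bo_list_create(dev, 1, handles, NULL, &list) == 0) {
 *		... reference the list from an amdgpu_cs_request and submit ...
 *		amdgpu_bo_list_destroy(list);
 *	}
 */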

drm_public int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = list->handle;

	r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));

	if (!r)
		free(list);

	return r;
}

drm_public int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
				     uint32_t number_of_resources,
				     amdgpu_bo_handle *resources,
				     uint8_t *resource_prios)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
	args.in.list_handle = handle->handle;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	return r;
}

drm_public int amdgpu_bo_va_op(amdgpu_bo_handle bo,
			       uint64_t offset,
			       uint64_t size,
			       uint64_t addr,
			       uint64_t flags,
			       uint32_t ops)
{
	amdgpu_device_handle dev = bo->dev;

	size = ALIGN(size, getpagesize());

	return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
				   AMDGPU_VM_PAGE_READABLE |
				   AMDGPU_VM_PAGE_WRITEABLE |
				   AMDGPU_VM_PAGE_EXECUTABLE, ops);
}
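
/* Example (sketch only): giving a BO a GPU virtual address. A VA range is
 * reserved first with amdgpu_va_range_alloc() and the BO is then mapped into
 * it. Note that amdgpu_bo_va_op() ignores its "flags" argument and always
 * maps the range readable, writeable and executable; use amdgpu_bo_va_op_raw()
 * for explicit flags. "alloc_size" is assumed to match the BO's allocation
 * size; error handling is abbreviated.
 *
 *	uint64_t va;
 *	amdgpu_va_handle va_handle;
 *
 *	if (amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
 *				  alloc_size, 4096, 0, &va,
 *				  &va_handle, 0) == 0) {
 *		amdgpu_bo_va_op(bo, 0, alloc_size, va, 0, AMDGPU_VA_OP_MAP);
 *		... submit work that uses address "va" ...
 *		amdgpu_bo_va_op(bo, 0, alloc_size, va, 0, AMDGPU_VA_OP_UNMAP);
 *		amdgpu_va_range_free(va_handle);
 *	}
 */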

drm_public int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
				   amdgpu_bo_handle bo,
				   uint64_t offset,
				   uint64_t size,
				   uint64_t addr,
				   uint64_t flags,
				   uint32_t ops)
{
	struct drm_amdgpu_gem_va va;
	int r;

	if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
	    ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
		return -EINVAL;

	memset(&va, 0, sizeof(va));
	va.handle = bo ? bo->handle : 0;
	va.operation = ops;
	va.flags = flags;
	va.va_address = addr;
	va.offset_in_bo = offset;
	va.map_size = size;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

	return r;
}