/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dma_buf.h"
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate the worst-case amount of memory to reserve for page tables.
 */
uint64_t amdgpu_amdkfd_total_mem_size;

static bool kfd_initialized;

int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

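	/*
	 * si.totalram and si.totalhigh are page counts scaled by si.mem_unit,
	 * so the difference times mem_unit below is the amount of low system
	 * memory in bytes; the VRAM of each GPU is added on top of this in
	 * amdgpu_amdkfd_device_probe().
	 */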
	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

	ret = kgd2kfd_init();
	amdgpu_amdkfd_gpuvm_init_mem_limits();
	kfd_initialized = !ret;

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kfd_initialized) {
		kgd2kfd_exit();
		kfd_initialized = false;
	}
}

void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	bool vf = amdgpu_sriov_vf(adev);

	if (!kfd_initialized)
		return;

	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
				      adev->pdev, adev->asic_type, vf);

	if (adev->kfd.dev)
		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                set up amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes the doorbells required for its own rings and reports the setup to
 * amdkfd. The amdgpu-reserved doorbells are at the start of the doorbell
 * aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd.dev) {
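		/*
		 * VMIDs from first_kfd_vmid up to AMDGPU_NUM_VMID - 1 are
		 * handed to the KFD. For example (typical values, not
		 * guaranteed on every ASIC): with AMDGPU_NUM_VMID == 16 and
		 * first_kfd_vmid == 8, the expression below evaluates to
		 * 0xffff - 0x00ff = 0xff00, i.e. VMIDs 8-15.
		 */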
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap =
				((1 << AMDGPU_NUM_VMID) - 1) -
				((1 << adev->vm_manager.first_kfd_vmid) - 1),
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev_to_drm(adev)->render->index,
			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.cp_queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not a compile-time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.cp_queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		/* Since SOC15, the BIF statically uses the
		 * lower 12 bits of doorbell addresses for routing
		 * based on settings in registers like
		 * SDMA0_DOORBELL_RANGE etc.
		 * In order to route a doorbell to the CP engine, the lower
		 * 12 bits of its address have to be outside the ranges
		 * set for the SDMA, VCN, and IH blocks.
		 */
		if (adev->asic_type >= CHIP_VEGA10) {
			gpu_resources.non_cp_doorbells_start =
					adev->doorbell_index.first_non_cp;
			gpu_resources.non_cp_doorbells_end =
					adev->doorbell_index.last_non_cp;
		}

		kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev), &gpu_resources);
	}
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd_device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
		const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
	if (adev->kfd.dev)
		kgd2kfd_suspend(adev->kfd.dev, run_pm);
}

int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume_iommu(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume(adev->kfd.dev, run_pm);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_pre_reset(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_post_reset(adev->kfd.dev);

	return r;
}

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}

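/*
 * Illustrative usage sketch for the GTT helpers below (not taken from this
 * file; the buffer names are hypothetical). A KFD-side caller would pair the
 * allocation and free helpers roughly like this:
 *
 *	void *obj, *cpu_ptr;
 *	uint64_t gpu_addr;
 *
 *	if (!amdgpu_amdkfd_alloc_gtt_mem(kgd, PAGE_SIZE, &obj, &gpu_addr,
 *					 &cpu_ptr, false)) {
 *		... write to cpu_ptr, hand gpu_addr to the GPU ...
 *		amdgpu_amdkfd_free_gtt_mem(kgd, obj);
 *	}
 */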
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool cp_mqd_gfx9)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (cp_mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
}

int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
				void **mem_obj)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = AMDGPU_GEM_DOMAIN_GWS;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate gws BO for amdkfd (%d)\n", r);
		return r;
	}

	*mem_obj = bo;
	return 0;
}

void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
}

uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
				      enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	switch (type) {
	case KGD_ENGINE_PFP:
		return adev->gfx.pfp_fw_version;

	case KGD_ENGINE_ME:
		return adev->gfx.me_fw_version;

	case KGD_ENGINE_CE:
		return adev->gfx.ce_fw_version;

	case KGD_ENGINE_MEC1:
		return adev->gfx.mec_fw_version;

	case KGD_ENGINE_MEC2:
		return adev->gfx.mec2_fw_version;

	case KGD_ENGINE_RLC:
		return adev->gfx.rlc_fw_version;

	case KGD_ENGINE_SDMA1:
		return adev->sdma.instance[0].fw_version;

	case KGD_ENGINE_SDMA2:
		return adev->sdma.instance[1].fw_version;

	default:
		return 0;
	}

	return 0;
}

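/*
 * Reports local (VRAM) memory sizes to the KFD. VRAM that the CPU can reach
 * through the aperture (and whose addresses fit inside the device's DMA mask)
 * is counted as "public"; the remainder is "private". mem_clk_max follows the
 * same 10 kHz-quanta convention as the sclk helper further below, so dividing
 * by 100 yields MHz.
 */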
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->pm.dpm_enabled) {
		if (amdgpu_emu_mode == 1)
			mem_info->mem_clk_max = 0;
		else
			mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	} else
		mem_info->mem_clk_max = 100;
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quanta of 10 kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->pm.dpm_enabled)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

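/*
 * Translates a dma-buf fd into KFD-usable information. Only buffers that were
 * exported by amdgpu itself and that live in VRAM or GTT are accepted; for
 * those, the owning device, BO size, metadata and allocation flags are
 * returned through the optional output pointers.
 */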
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dma_buf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev_to_drm(adev)->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = drm_to_adev(obj->dev);
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dma_buf_kgd)
		*dma_buf_kgd = (struct kgd_dev *)adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
				KFD_IOC_ALLOC_MEM_FLAGS_VRAM
				: KFD_IOC_ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
	}

out_put:
	dma_buf_put(dma_buf);
	return r;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

	return amdgpu_vram_mgr_usage(vram_man);
}

uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.xgmi.hive_id;
}

uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->unique_id;
}

uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
{
	struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
	struct amdgpu_device *adev = (struct amdgpu_device *)dst;
	int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

	if (ret < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
			adev->gmc.xgmi.physical_node_id,
			peer_adev->gmc.xgmi.physical_node_id, ret);
		ret = 0;
	}
	return (uint8_t)ret;
}

uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->rmmio_remap.bus_addr;
}

uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gds.gws_size;
}

uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->rev_id;
}

int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.noretry;
}

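/*
 * Submits a raw IB on behalf of the KFD: a single-IB job is built, scheduled
 * directly on the selected kernel ring with the caller-supplied VMID, and the
 * call blocks until the resulting fence signals. Only meaningful without HWS,
 * where the caller owns the VMID (see the comment on job->vmid below).
 */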
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);

	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}

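/*
 * Toggles the COMPUTE power profile: the profile is enabled while compute
 * work is active (idle == false) and dropped again once the KFD reports the
 * device idle.
 */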
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_COMPUTE,
					!idle);
}

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev)
		return vmid >= adev->vm_manager.first_kfd_vmid;

	return false;
}

int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->family == AMDGPU_FAMILY_AI) {
		int i;

		for (i = 0; i < adev->num_vmhubs; i++)
			amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
	} else {
		amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
	}

	return 0;
}

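/*
 * Flushes GPU TLBs by PASID rather than VMID. On FAMILY_AI (Vega) and
 * FAMILY_RV (Raven) parts the flush is requested on all VM hubs; on other
 * parts only the GFX hub is flushed.
 */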
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	const uint32_t flush_type = 0;
	bool all_hub = false;

	if (adev->family == AMDGPU_FAMILY_AI ||
	    adev->family == AMDGPU_FAMILY_RV)
		all_hub = true;

	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
}

bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->have_atomics_support;
}