/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdkfd/ |
H A D | kfd_device.c | 37 * kfd_locked is used to lock the kfd driver during suspend or reset 38 * once locked, kfd driver will stop any further GPU execution. 527 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, 529 static void kfd_gtt_sa_fini(struct kfd_dev *kfd); 531 static int kfd_resume(struct kfd_dev *kfd); 536 struct kfd_dev *kfd; in kgd2kfd_probe() local 550 dev_err(kfd_device, "%s %s not supported in kfd\n", in kgd2kfd_probe() 555 kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); in kgd2kfd_probe() 556 if (!kfd) in kgd2kfd_probe() 591 kfd_cwsr_init(struct kfd_dev *kfd) kfd_cwsr_init() argument 620 kfd_gws_init(struct kfd_dev *kfd) kfd_gws_init() argument 646 kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources) kgd2kfd_device_init() argument 796 kgd2kfd_device_exit(struct kfd_dev *kfd) kgd2kfd_device_exit() argument 814 kgd2kfd_pre_reset(struct kfd_dev *kfd) kgd2kfd_pre_reset() argument 835 kgd2kfd_post_reset(struct kfd_dev *kfd) kgd2kfd_post_reset() argument 859 kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) kgd2kfd_suspend() argument 875 kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) kgd2kfd_resume() argument 897 kgd2kfd_resume_iommu(struct kfd_dev *kfd) kgd2kfd_resume_iommu() argument 909 kfd_resume(struct kfd_dev *kfd) kfd_resume() argument 944 kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) kgd2kfd_interrupt() argument 1059 kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, unsigned int chunk_size) kfd_gtt_sa_init() argument 1091 kfd_gtt_sa_fini(struct kfd_dev *kfd) kfd_gtt_sa_fini() argument 1111 kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, struct kfd_mem_obj **mem_obj) kfd_gtt_sa_allocate() argument 1216 kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) kfd_gtt_sa_free() argument 1241 kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) kgd2kfd_set_sram_ecc_flag() argument 1247 kfd_inc_compute_active(struct kfd_dev *kfd) kfd_inc_compute_active() argument 1253 kfd_dec_compute_active(struct kfd_dev *kfd) kfd_dec_compute_active() argument 1262 kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask) kgd2kfd_smi_event_throttle() argument [all...] |
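Note: the kfd_gtt_sa_* hits above form a small sub-allocator over one GTT buffer: kfd_gtt_sa_init() splits the buffer into fixed-size chunks, kfd_gtt_sa_allocate() hands out a struct kfd_mem_obj spanning enough contiguous chunks, and kfd_gtt_sa_free() returns them. A minimal usage sketch follows, assuming the kfd-internal headers (kfd_priv.h and friends) are included; the gpu_addr/cpu_ptr field names mentioned in the comment are assumptions based on how other amdkfd files consume the object, not taken from this excerpt.

    /* Sketch only: allocating a small device-visible scratch buffer through
     * the KFD GTT sub-allocator listed above. Field names on struct
     * kfd_mem_obj (gpu_addr, cpu_ptr) are assumptions for illustration. */
    static int example_alloc_scratch(struct kfd_dev *kfd)
    {
        struct kfd_mem_obj *mem_obj;
        int r;

        r = kfd_gtt_sa_allocate(kfd, 512, &mem_obj);    /* size in bytes */
        if (r)
            return r;

        /* ... program hardware with mem_obj->gpu_addr, fill the buffer
         * through mem_obj->cpu_ptr ... */

        kfd_gtt_sa_free(kfd, mem_obj);
        return 0;
    }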
H A D | kfd_doorbell.c | 43 * the /dev/kfd with the particular device encoded in the mmap offset. 44 * There will be other uses for mmap of /dev/kfd, so only a range of 49 size_t kfd_doorbell_process_slice(struct kfd_dev *kfd) in kfd_doorbell_process_slice() argument 51 return roundup(kfd->device_info->doorbell_size * in kfd_doorbell_process_slice() 57 int kfd_doorbell_init(struct kfd_dev *kfd) in kfd_doorbell_init() argument 70 roundup(kfd->shared_resources.doorbell_start_offset, in kfd_doorbell_init() 71 kfd_doorbell_process_slice(kfd)); in kfd_doorbell_init() 74 rounddown(kfd->shared_resources.doorbell_aperture_size, in kfd_doorbell_init() 75 kfd_doorbell_process_slice(kfd)); in kfd_doorbell_init() 80 kfd_doorbell_process_slice(kfd); in kfd_doorbell_init() 120 kfd_doorbell_fini(struct kfd_dev *kfd) kfd_doorbell_fini() argument 168 kfd_get_kernel_doorbell(struct kfd_dev *kfd, unsigned int *doorbell_off) kfd_get_kernel_doorbell() argument 199 kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr) kfd_release_kernel_doorbell() argument 229 kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd, struct kfd_process_device *pdd, unsigned int doorbell_id) kfd_get_doorbell_dw_offset_in_bar() argument 245 kfd_get_number_elems(struct kfd_dev *kfd) kfd_get_number_elems() argument 261 kfd_alloc_process_doorbells(struct kfd_dev *kfd, unsigned int *doorbell_index) kfd_alloc_process_doorbells() argument 271 kfd_free_process_doorbells(struct kfd_dev *kfd, unsigned int doorbell_index) kfd_free_process_doorbells() argument [all...] |
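Note: kfd_doorbell_process_slice() above rounds the per-process doorbell footprint (doorbell_size times the maximum queues per process) up to a page, and kfd_doorbell_init() then aligns the aperture start offset up and the aperture size down to that slice, so each process maps a whole number of slices. A self-contained sketch of that arithmetic; the constants stand in for the device_info values and are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE           4096u
    #define MAX_QUEUES_PER_PROC 1024u   /* illustrative stand-in */

    static uint32_t roundup_u32(uint32_t x, uint32_t y)   { return ((x + y - 1) / y) * y; }
    static uint32_t rounddown_u32(uint32_t x, uint32_t y) { return (x / y) * y; }

    int main(void)
    {
        uint32_t doorbell_size = 8;   /* bytes per doorbell, e.g. 64-bit doorbells */
        uint32_t slice = roundup_u32(doorbell_size * MAX_QUEUES_PER_PROC, PAGE_SIZE);

        uint32_t doorbell_start_offset = 0x1234;      /* illustrative BAR offset */
        uint32_t doorbell_aperture_size = 5 * slice + 100;

        uint32_t start = roundup_u32(doorbell_start_offset, slice);    /* aligned up */
        uint32_t size  = rounddown_u32(doorbell_aperture_size, slice); /* whole slices */

        printf("slice=%u start=%u usable=%u processes=%u\n",
               slice, start, size, size / slice);
        return 0;
    }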
H A D | kfd_interrupt.c | 52 int kfd_interrupt_init(struct kfd_dev *kfd) in kfd_interrupt_init() argument 56 r = kfifo_alloc(&kfd->ih_fifo, in kfd_interrupt_init() 57 KFD_IH_NUM_ENTRIES * kfd->device_info->ih_ring_entry_size, in kfd_interrupt_init() 64 kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1); in kfd_interrupt_init() 65 if (unlikely(!kfd->ih_wq)) { in kfd_interrupt_init() 66 kfifo_free(&kfd->ih_fifo); in kfd_interrupt_init() 70 spin_lock_init(&kfd->interrupt_lock); in kfd_interrupt_init() 72 INIT_WORK(&kfd->interrupt_work, interrupt_wq); in kfd_interrupt_init() 74 kfd->interrupts_active = true; in kfd_interrupt_init() 86 void kfd_interrupt_exit(struct kfd_dev *kfd) in kfd_interrupt_exit() argument 112 enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry) enqueue_ih_ring_entry() argument 131 dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry) dequeue_ih_ring_entry() argument [all...] |
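Note: kfd_interrupt_init() above sets up the usual split-handler pattern: the hard-IRQ path copies raw IH ring entries into a kfifo and kicks a single-threaded high-priority workqueue, whose work function drains the fifo in process context. A stripped-down kernel-style sketch of the same pattern; the entry size and names are illustrative, not the KFD ones.

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/kfifo.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    #define ENTRY_SIZE   32      /* illustrative ih_ring_entry_size */
    #define NUM_ENTRIES  1024

    struct ih_ctx {
        struct kfifo fifo;
        struct workqueue_struct *wq;
        struct work_struct work;
        spinlock_t lock;
    };

    static void ih_work(struct work_struct *work)
    {
        struct ih_ctx *ctx = container_of(work, struct ih_ctx, work);
        u8 entry[ENTRY_SIZE];

        /* drain everything the interrupt handler queued */
        while (kfifo_out_spinlocked(&ctx->fifo, entry, ENTRY_SIZE, &ctx->lock)
               == ENTRY_SIZE) {
            /* ... decode and handle one entry ... */
        }
    }

    static int ih_init(struct ih_ctx *ctx)
    {
        int r = kfifo_alloc(&ctx->fifo, NUM_ENTRIES * ENTRY_SIZE, GFP_KERNEL);

        if (r)
            return r;
        ctx->wq = alloc_workqueue("example-ih", WQ_HIGHPRI, 1);
        if (!ctx->wq) {
            kfifo_free(&ctx->fifo);
            return -ENOMEM;
        }
        spin_lock_init(&ctx->lock);
        INIT_WORK(&ctx->work, ih_work);
        return 0;
    }

    /* called from the hard interrupt path */
    static void ih_enqueue(struct ih_ctx *ctx, const void *entry)
    {
        if (kfifo_in_spinlocked(&ctx->fifo, entry, ENTRY_SIZE, &ctx->lock)
            == ENTRY_SIZE)
            queue_work(ctx->wq, &ctx->work);
        /* else: ring overflow, the entry is dropped */
    }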
H A D | kfd_iommu.c | 43 int kfd_iommu_check_device(struct kfd_dev *kfd) in kfd_iommu_check_device() argument 48 if (!kfd->use_iommu_v2) in kfd_iommu_check_device() 52 err = amd_iommu_device_info(kfd->pdev, &iommu_info); in kfd_iommu_check_device() 64 int kfd_iommu_device_init(struct kfd_dev *kfd) in kfd_iommu_device_init() argument 70 if (!kfd->use_iommu_v2) in kfd_iommu_device_init() 74 err = amd_iommu_device_info(kfd->pdev, &iommu_info); in kfd_iommu_device_init() 92 (unsigned int)(1 << kfd->device_info->max_pasid_bits), in kfd_iommu_device_init() 218 static int kfd_bind_processes_to_device(struct kfd_dev *kfd) in kfd_bind_processes_to_device() argument 229 pdd = kfd_get_process_device_data(kfd, p); in kfd_bind_processes_to_device() 236 err = amd_iommu_bind_pasid(kfd in kfd_bind_processes_to_device() 259 kfd_unbind_processes_from_device(struct kfd_dev *kfd) kfd_unbind_processes_from_device() argument 289 kfd_iommu_suspend(struct kfd_dev *kfd) kfd_iommu_suspend() argument 306 kfd_iommu_resume(struct kfd_dev *kfd) kfd_iommu_resume() argument [all...] |
H A D | kfd_iommu.h | 32 int kfd_iommu_check_device(struct kfd_dev *kfd); 33 int kfd_iommu_device_init(struct kfd_dev *kfd); 38 void kfd_iommu_suspend(struct kfd_dev *kfd); 39 int kfd_iommu_resume(struct kfd_dev *kfd); 45 static inline int kfd_iommu_check_device(struct kfd_dev *kfd) in kfd_iommu_check_device() argument 49 static inline int kfd_iommu_device_init(struct kfd_dev *kfd) in kfd_iommu_device_init() argument 67 static inline void kfd_iommu_suspend(struct kfd_dev *kfd) in kfd_iommu_suspend() argument 71 static inline int kfd_iommu_resume(struct kfd_dev *kfd) in kfd_iommu_resume() argument
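Note: kfd_iommu.h above pairs each IOMMUv2 entry point with a static inline no-op stub, so callers elsewhere in amdkfd never need #ifdefs: when IOMMU v2 support is compiled out the stubs report "not supported" or success and the suspend/resume hooks do nothing. A generic sketch of the pattern; the config symbol and type names are illustrative, not the ones the kernel actually uses.

    /* foo_iommu.h -- sketch of the "real declaration or inline stub" pattern */
    #include <linux/errno.h>

    struct foo_dev;

    #if IS_ENABLED(CONFIG_EXAMPLE_IOMMU_V2)    /* illustrative symbol */

    int foo_iommu_check_device(struct foo_dev *dev);
    int foo_iommu_device_init(struct foo_dev *dev);
    void foo_iommu_suspend(struct foo_dev *dev);
    int foo_iommu_resume(struct foo_dev *dev);

    #else

    static inline int foo_iommu_check_device(struct foo_dev *dev)
    {
        return -ENODEV;     /* no IOMMUv2: report "not supported" */
    }
    static inline int foo_iommu_device_init(struct foo_dev *dev)
    {
        return 0;           /* nothing to set up */
    }
    static inline void foo_iommu_suspend(struct foo_dev *dev) { }
    static inline int foo_iommu_resume(struct foo_dev *dev)
    {
        return 0;
    }

    #endif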
H A D | kfd_priv.h | 498 * @process: The kfd process that created this queue. 500 * @device: The kfd device that created this queue. 890 size_t kfd_doorbell_process_slice(struct kfd_dev *kfd); 891 int kfd_doorbell_init(struct kfd_dev *kfd); 892 void kfd_doorbell_fini(struct kfd_dev *kfd); 895 void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, 897 void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr); 901 unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd, 905 int kfd_alloc_process_doorbells(struct kfd_dev *kfd, 907 void kfd_free_process_doorbells(struct kfd_dev *kfd, 1128 kfd_devcgroup_check_permission(struct kfd_dev *kfd) kfd_devcgroup_check_permission() argument [all...] |
H A D | kfd_mqd_manager_v9.c | 84 static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, in allocate_mqd() argument 106 if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { in allocate_mqd() 110 retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd, in allocate_mqd() 122 retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd), in allocate_mqd() 293 struct kfd_dev *kfd = mm->dev; in free_mqd() local 296 amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem); in free_mqd()
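Note: allocate_mqd() above chooses the MQD backing store: when CWSR is enabled and the queue is a compute queue it takes a larger GTT allocation through amdgpu_amdkfd_alloc_gtt_mem() (the control stack sits on the page after the MQD), otherwise a sizeof(struct v9_mqd) chunk from the GTT sub-allocator is enough, and free_mqd() releases whichever path was used. A condensed sketch of that branch, assuming the kfd headers are included; the amdgpu_amdkfd_alloc_gtt_mem() argument list is reproduced from memory of the 5.10 driver and should be treated as an assumption.

    /* Sketch of the backing-store decision in allocate_mqd(); not verbatim. */
    static struct kfd_mem_obj *allocate_mqd_sketch(struct kfd_dev *kfd,
                                                   struct queue_properties *q)
    {
        struct kfd_mem_obj *mqd_mem_obj = NULL;
        int retval;

        if (kfd->cwsr_enabled && q->type == KFD_QUEUE_TYPE_COMPUTE) {
            /* Larger allocation that also holds the control stack /
             * context-save area (argument list hedged, see lead-in). */
            mqd_mem_obj = kzalloc(sizeof(*mqd_mem_obj), GFP_KERNEL);
            if (!mqd_mem_obj)
                return NULL;
            retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
                    ALIGN(q->ctl_stack_size, PAGE_SIZE) +
                    ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
                    &mqd_mem_obj->gtt_mem,
                    &mqd_mem_obj->gpu_addr,
                    (void *)&mqd_mem_obj->cpu_ptr, true);
        } else {
            /* Plain MQD: one sub-allocator chunk is sufficient. */
            retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd),
                                         &mqd_mem_obj);
        }

        if (retval) {
            kfree(mqd_mem_obj);
            return NULL;
        }
        return mqd_mem_obj;
    }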
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdkfd/ |
H A D | kfd_device.c | 42 * kfd_locked is used to lock the kfd driver during suspend or reset 43 * once locked, kfd driver will stop any further GPU execution. 60 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, 62 static void kfd_gtt_sa_fini(struct kfd_dev *kfd); 64 static int kfd_resume(struct kfd_node *kfd); 66 static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) in kfd_device_info_set_sdma_info() argument 68 uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0]; in kfd_device_info_set_sdma_info() 80 kfd->device_info.num_sdma_queues_per_engine = 2; in kfd_device_info_set_sdma_info() 98 kfd->device_info.num_sdma_queues_per_engine = 8; in kfd_device_info_set_sdma_info() 104 kfd in kfd_device_info_set_sdma_info() 126 kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) kfd_device_info_set_event_interrupt_class() argument 174 kfd_device_info_init(struct kfd_dev *kfd, bool vf, uint32_t gfx_target_version) kfd_device_info_init() argument 236 struct kfd_dev *kfd = NULL; kgd2kfd_probe() local 449 kfd_cwsr_init(struct kfd_dev *kfd) kfd_cwsr_init() argument 493 struct kfd_dev *kfd = node->kfd; kfd_gws_init() local 575 kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes) kfd_cleanup_nodes() argument 627 kgd2kfd_device_init(struct kfd_dev *kfd, const struct kgd2kfd_shared_resources *gpu_resources) kgd2kfd_device_init() argument 857 kgd2kfd_device_exit(struct kfd_dev *kfd) kgd2kfd_device_exit() argument 872 kgd2kfd_pre_reset(struct kfd_dev *kfd) kgd2kfd_pre_reset() argument 900 kgd2kfd_post_reset(struct kfd_dev *kfd) kgd2kfd_post_reset() argument 934 kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) kgd2kfd_suspend() argument 960 kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) kgd2kfd_resume() argument 1016 kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) kgd2kfd_interrupt() argument 1138 kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, unsigned int chunk_size) kfd_gtt_sa_init() argument 1164 kfd_gtt_sa_fini(struct kfd_dev *kfd) kfd_gtt_sa_fini() argument 1188 struct kfd_dev *kfd = node->kfd; kfd_gtt_sa_allocate() local 1290 struct kfd_dev *kfd = node->kfd; kfd_gtt_sa_free() local 1311 kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) kgd2kfd_set_sram_ecc_flag() argument 1337 kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) kgd2kfd_smi_event_throttle() argument [all...] |
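Note: compared with the 5.10 entries above, the 6.6 driver splits the device representation: struct kfd_dev describes the whole adapter while per-partition state lives in struct kfd_node, whose kfd member points back to the owning kfd_dev (see the kfd_priv.h hit below). That is why kfd_resume() here takes a struct kfd_node *, helpers such as kfd_gws_init() start with kfd = node->kfd, and tear-down goes through kfd_cleanup_nodes(kfd, num_nodes). A simplified model of the relationship; every field name other than kfd and num_nodes is illustrative.

    /* Simplified model only; the real structs in kfd_priv.h carry many more
     * fields, and the members below besides "kfd" and "num_nodes" are
     * illustrative. */
    struct kfd_dev;

    struct kfd_node {
        struct kfd_dev *kfd;        /* back-pointer to the owning device */
        /* ... per-node queue manager, doorbells, interrupt state ... */
    };

    struct kfd_dev {
        unsigned int num_nodes;     /* partitions exposed by this adapter */
        struct kfd_node *nodes[8];  /* illustrative upper bound */
        /* ... shared firmware info, GTT sub-allocator, doorbell BAR ... */
    };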
H A D | kfd_doorbell.c | 44 * the /dev/kfd with the particular device encoded in the mmap offset. 45 * There will be other uses for mmap of /dev/kfd, so only a range of 50 size_t kfd_doorbell_process_slice(struct kfd_dev *kfd) in kfd_doorbell_process_slice() argument 52 if (!kfd->shared_resources.enable_mes) in kfd_doorbell_process_slice() 53 return roundup(kfd->device_info.doorbell_size * in kfd_doorbell_process_slice() 58 (struct amdgpu_device *)kfd->adev); in kfd_doorbell_process_slice() 62 int kfd_doorbell_init(struct kfd_dev *kfd) in kfd_doorbell_init() argument 75 kfd->doorbell_bitmap = bitmap_zalloc(size / sizeof(u32), GFP_KERNEL); in kfd_doorbell_init() 76 if (!kfd->doorbell_bitmap) { in kfd_doorbell_init() 82 r = amdgpu_bo_create_kernel(kfd in kfd_doorbell_init() 99 kfd_doorbell_fini(struct kfd_dev *kfd) kfd_doorbell_fini() argument 150 kfd_get_kernel_doorbell(struct kfd_dev *kfd, unsigned int *doorbell_off) kfd_get_kernel_doorbell() argument 178 kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr) kfd_release_kernel_doorbell() argument 253 kfd_alloc_process_doorbells(struct kfd_dev *kfd, struct kfd_process_device *pdd) kfd_alloc_process_doorbells() argument 293 kfd_free_process_doorbells(struct kfd_dev *kfd, struct kfd_process_device *pdd) kfd_free_process_doorbells() argument [all...] |
H A D | kfd_interrupt.c | 58 KFD_IH_NUM_ENTRIES * node->kfd->device_info.ih_ring_entry_size, in kfd_interrupt_init() 118 node->kfd->device_info.ih_ring_entry_size); in enqueue_ih_ring_entry() 119 if (count != node->kfd->device_info.ih_ring_entry_size) { in enqueue_ih_ring_entry() 137 node->kfd->device_info.ih_ring_entry_size); in dequeue_ih_ring_entry() 139 WARN_ON(count && count != node->kfd->device_info.ih_ring_entry_size); in dequeue_ih_ring_entry() 141 return count == node->kfd->device_info.ih_ring_entry_size; in dequeue_ih_ring_entry() 151 if (dev->kfd->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) { in interrupt_wq() 157 dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev, in interrupt_wq() 176 wanted |= dev->kfd->device_info.event_interrupt_class->interrupt_isr(dev, in interrupt_is_wanted()
H A D | kfd_packet_manager_v9.c | 37 struct kfd_node *kfd = pm->dqm->dev; in pm_map_process_v9() local 55 if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled && in pm_map_process_v9() 57 packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid; in pm_map_process_v9() 91 struct kfd_dev *kfd = pm->dqm->dev->kfd; in pm_map_process_aldebaran() local 113 for (i = 0; i < kfd->device_info.num_of_watch_points; i++) in pm_map_process_aldebaran() 146 struct kfd_node *kfd = pm->dqm->dev; in pm_runlist_v9() local 151 * of processes in the runlist and kfd module parameter in pm_runlist_v9() 158 kfd->max_proc_per_quantum); in pm_runlist_v9() 248 !pm_use_ext_eng(q->device->kfd)) in pm_map_queues_v9() [all...] |
H A D | kfd_debug.h | 113 && dev->kfd->mec2_fw_version < 0x81b6) || in kfd_dbg_has_gws_support() 116 && dev->kfd->mec2_fw_version < 0x1b6) || in kfd_dbg_has_gws_support() 118 && dev->kfd->mec2_fw_version < 0x1b6) || in kfd_dbg_has_gws_support() 120 && dev->kfd->mec2_fw_version < 0x30) || in kfd_dbg_has_gws_support()
H A D | kfd_debug.c | 366 spin_lock(&pdd->dev->kfd->watch_points_lock); in kfd_dbg_get_dev_watch_id() 370 if ((pdd->dev->kfd->alloc_watch_ids >> i) & 0x1) in kfd_dbg_get_dev_watch_id() 374 pdd->dev->kfd->alloc_watch_ids |= 0x1 << i; in kfd_dbg_get_dev_watch_id() 376 spin_unlock(&pdd->dev->kfd->watch_points_lock); in kfd_dbg_get_dev_watch_id() 380 spin_unlock(&pdd->dev->kfd->watch_points_lock); in kfd_dbg_get_dev_watch_id() 387 spin_lock(&pdd->dev->kfd->watch_points_lock); in kfd_dbg_clear_dev_watch_id() 392 pdd->dev->kfd->alloc_watch_ids &= ~(0x1 << watch_id); in kfd_dbg_clear_dev_watch_id() 395 spin_unlock(&pdd->dev->kfd->watch_points_lock); in kfd_dbg_clear_dev_watch_id() 402 spin_lock(&pdd->dev->kfd->watch_points_lock); in kfd_dbg_owns_dev_watch_id() 406 spin_unlock(&pdd->dev->kfd in kfd_dbg_owns_dev_watch_id() [all...] |
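Note: kfd_dbg_get_dev_watch_id() / kfd_dbg_clear_dev_watch_id() above implement a tiny ID allocator: under watch_points_lock they scan the alloc_watch_ids bitmask for a clear bit, set it to claim a watch point, and clear it again on release. A self-contained user-space model of the same scheme; the limit and names are illustrative.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_WATCH_POINTS 4                 /* illustrative limit */

    static pthread_mutex_t watch_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t alloc_watch_ids;           /* bit i set => watch point i in use */

    static int get_watch_id(void)
    {
        int i, id = -1;

        pthread_mutex_lock(&watch_lock);
        for (i = 0; i < NUM_WATCH_POINTS; i++) {
            if (!((alloc_watch_ids >> i) & 0x1)) {
                alloc_watch_ids |= 0x1u << i;   /* claim it */
                id = i;
                break;
            }
        }
        pthread_mutex_unlock(&watch_lock);
        return id;                              /* -1 if all are busy */
    }

    static void clear_watch_id(int id)
    {
        pthread_mutex_lock(&watch_lock);
        alloc_watch_ids &= ~(0x1u << id);
        pthread_mutex_unlock(&watch_lock);
    }

    int main(void)
    {
        int a = get_watch_id(), b = get_watch_id();

        printf("got %d and %d\n", a, b);        /* 0 and 1 */
        clear_watch_id(a);
        printf("reused %d\n", get_watch_id());  /* 0 again */
        return 0;
    }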
H A D | kfd_device_queue_manager.c | 81 int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec in is_pipe_enabled() 82 + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe; in is_pipe_enabled() 85 for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i) in is_pipe_enabled() 87 dqm->dev->kfd->shared_resources.cp_queue_bitmap)) in is_pipe_enabled() 94 return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap, in get_cp_queues_num() 100 return dqm->dev->kfd->shared_resources.num_queue_per_pipe; in get_queues_per_pipe() 105 return dqm->dev->kfd->shared_resources.num_pipe_per_mec; in get_pipes_per_mec() 117 dqm->dev->kfd->device_info.num_sdma_queues_per_engine; in get_num_sdma_queues() 123 dqm->dev->kfd->device_info.num_sdma_queues_per_engine; in get_num_xgmi_sdma_queues() 136 dqm->dev->kfd in init_sdma_bitmaps() [all...] |
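Note: is_pipe_enabled() above flattens (mec, pipe, queue) coordinates into a single bit index, pipe_offset = (mec * num_pipe_per_mec + pipe) * num_queue_per_pipe, and a pipe counts as enabled if any of its num_queue_per_pipe bits is set in cp_queue_bitmap. A small self-contained version of that indexing with an illustrative topology and mask:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_PIPE_PER_MEC   4    /* illustrative topology */
    #define NUM_QUEUE_PER_PIPE 8

    /* One bit per (mec, pipe, queue) compute queue slot. */
    static uint64_t cp_queue_bitmap = 0x0000000000ff00f0ull;   /* example mask */

    static bool is_pipe_enabled(int mec, int pipe)
    {
        int pipe_offset = (mec * NUM_PIPE_PER_MEC + pipe) * NUM_QUEUE_PER_PIPE;
        int i;

        for (i = 0; i < NUM_QUEUE_PER_PIPE; i++)
            if ((cp_queue_bitmap >> (pipe_offset + i)) & 1)
                return true;    /* at least one queue of this pipe is usable */
        return false;
    }

    int main(void)
    {
        printf("mec0/pipe0: %d\n", is_pipe_enabled(0, 0));  /* bits 4..7 set -> 1 */
        printf("mec0/pipe1: %d\n", is_pipe_enabled(0, 1));  /* bits 8..15 clear -> 0 */
        printf("mec0/pipe2: %d\n", is_pipe_enabled(0, 2));  /* bits 16..23 set -> 1 */
        return 0;
    }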
H A D | kfd_priv.h | 311 struct kfd_dev *kfd; member 568 * @process: The kfd process that created this queue. 570 * @device: The kfd device that created this queue. 690 /* doorbells for kfd process */ 939 /* If the process is a kfd debugger, we need to know so we can clean 1074 size_t kfd_doorbell_process_slice(struct kfd_dev *kfd); 1075 int kfd_doorbell_init(struct kfd_dev *kfd); 1076 void kfd_doorbell_fini(struct kfd_dev *kfd); 1079 void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, 1081 void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u3 [all...] |
H A D | kfd_device_queue_manager_v9.c | 63 if (dqm->dev->kfd->noretry) in update_qpd_v9() 66 if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3)) in update_qpd_v9()
H A D | kfd_mqd_manager_v11.c | 112 if (node->kfd->shared_resources.enable_mes) in allocate_mqd() 135 if (mm->dev->kfd->shared_resources.enable_mes) in init_mqd() 167 * DISPATCH_PTR. This is required for the kfd debugger in init_mqd() 183 if (mm->dev->kfd->cwsr_enabled) { in init_mqd() 272 if (mm->dev->kfd->cwsr_enabled) in update_mqd() 405 if (mm->dev->kfd->shared_resources.enable_mes) in init_mqd_sdma() 556 if (dev->kfd->shared_resources.enable_mes) { in mqd_manager_init_v11()
H A D | kfd_kernel_queue.c | 78 prop.doorbell_ptr = kfd_get_kernel_doorbell(dev->kfd, &prop.doorbell_off); in kq_initialize() 115 retval = kfd_gtt_sa_allocate(dev, dev->kfd->device_info.doorbell_size, in kq_initialize() 192 kfd_release_kernel_doorbell(dev->kfd, prop.doorbell_ptr); in kq_initialize() 223 kfd_release_kernel_doorbell(kq->dev->kfd, in kq_uninitialize() 301 if (kq->dev->kfd->device_info.doorbell_size == 8) { in kq_submit_packet() 314 if (kq->dev->kfd->device_info.doorbell_size == 8) { in kq_rollback_packet()
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/ |
H A D | amdgpu_amdkfd.h | 304 bool kgd2kfd_device_init(struct kfd_dev *kfd, 307 void kgd2kfd_device_exit(struct kfd_dev *kfd); 308 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm); 309 int kgd2kfd_resume_iommu(struct kfd_dev *kfd); 310 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm); 311 int kgd2kfd_pre_reset(struct kfd_dev *kfd); 312 int kgd2kfd_post_reset(struct kfd_dev *kfd); 313 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); 314 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd); 315 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_ 334 kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources) kgd2kfd_device_init() argument 340 kgd2kfd_device_exit(struct kfd_dev *kfd) kgd2kfd_device_exit() argument 344 kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) kgd2kfd_suspend() argument 348 kgd2kfd_resume_iommu(struct kfd_dev *kfd) kgd2kfd_resume_iommu() argument 353 kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) kgd2kfd_resume() argument 358 kgd2kfd_pre_reset(struct kfd_dev *kfd) kgd2kfd_pre_reset() argument 363 kgd2kfd_post_reset(struct kfd_dev *kfd) kgd2kfd_post_reset() argument 369 kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) kgd2kfd_interrupt() argument 374 kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) kgd2kfd_set_sram_ecc_flag() argument 379 kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask) kgd2kfd_smi_event_throttle() argument [all...] |
H A D | amdgpu_amdkfd.c | 72 adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, in amdgpu_amdkfd_device_probe() 75 if (adev->kfd.dev) in amdgpu_amdkfd_device_probe() 117 if (adev->kfd.dev) { in amdgpu_amdkfd_device_init() 168 kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev), &gpu_resources); in amdgpu_amdkfd_device_init() 174 if (adev->kfd.dev) { in amdgpu_amdkfd_device_fini() 175 kgd2kfd_device_exit(adev->kfd.dev); in amdgpu_amdkfd_device_fini() 176 adev->kfd.dev = NULL; in amdgpu_amdkfd_device_fini() 183 if (adev->kfd.dev) in amdgpu_amdkfd_interrupt() 184 kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry); in amdgpu_amdkfd_interrupt() 189 if (adev->kfd in amdgpu_amdkfd_suspend() [all...] |
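Note: the amdgpu side drives the kgd2kfd_* interface declared in the header above in a fixed order: probe stores the struct kfd_dev * in adev->kfd.dev, device init hands over the shared resources, interrupts are forwarded while the device is up, and suspend/resume plus pre/post reset bracket power-management and recovery events. A condensed sketch of that call order as seen from amdgpu; error handling and the real gpu_resources setup are omitted, and the kgd2kfd_probe() argument list is reproduced from memory of the 5.10 header, so treat it as an assumption.

    /* Sketch of the amdgpu -> amdkfd 5.10 lifecycle; illustrative only. */
    static void example_kfd_lifecycle(struct amdgpu_device *adev,
                                      const void *ih_ring_entry)
    {
        struct kgd2kfd_shared_resources gpu_resources = {};

        adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
                                      adev->pdev, adev->asic_type,
                                      false /* vf */);
        if (!adev->kfd.dev)
            return;                                 /* KFD not supported here */

        kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev), &gpu_resources);

        /* steady state: forward IH ring entries */
        kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);

        /* power management and GPU reset hooks */
        kgd2kfd_suspend(adev->kfd.dev, true);
        kgd2kfd_resume_iommu(adev->kfd.dev);
        kgd2kfd_resume(adev->kfd.dev, true);
        kgd2kfd_pre_reset(adev->kfd.dev);
        kgd2kfd_post_reset(adev->kfd.dev);

        kgd2kfd_device_exit(adev->kfd.dev);
        adev->kfd.dev = NULL;
    }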
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/ |
H A D | amdgpu_amdkfd.h | 397 bool kgd2kfd_device_init(struct kfd_dev *kfd, 399 void kgd2kfd_device_exit(struct kfd_dev *kfd); 400 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm); 401 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm); 402 int kgd2kfd_pre_reset(struct kfd_dev *kfd); 403 int kgd2kfd_post_reset(struct kfd_dev *kfd); 404 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); 405 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd); 406 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask); 426 bool kgd2kfd_device_init(struct kfd_dev *kfd, in kgd2kfd_device_init() argument 432 kgd2kfd_device_exit(struct kfd_dev *kfd) kgd2kfd_device_exit() argument 436 kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) kgd2kfd_suspend() argument 440 kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) kgd2kfd_resume() argument 445 kgd2kfd_pre_reset(struct kfd_dev *kfd) kgd2kfd_pre_reset() argument 450 kgd2kfd_post_reset(struct kfd_dev *kfd) kgd2kfd_post_reset() argument 456 kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) kgd2kfd_interrupt() argument 461 kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) kgd2kfd_set_sram_ecc_flag() argument 466 kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) kgd2kfd_smi_event_throttle() argument [all...] |
H A D | amdgpu_amdkfd.c | 76 adev->kfd.dev = kgd2kfd_probe(adev, vf); in amdgpu_amdkfd_device_probe() 127 kfd.reset_work); in amdgpu_amdkfd_reset_work() 147 if (adev->kfd.dev) { in amdgpu_amdkfd_device_init() 198 adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev, in amdgpu_amdkfd_device_init() 203 INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work); in amdgpu_amdkfd_device_init() 209 if (adev->kfd.dev) { in amdgpu_amdkfd_device_fini_sw() 210 kgd2kfd_device_exit(adev->kfd.dev); in amdgpu_amdkfd_device_fini_sw() 211 adev->kfd.dev = NULL; in amdgpu_amdkfd_device_fini_sw() 219 if (adev->kfd in amdgpu_amdkfd_interrupt() [all...] |
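Note: the 6.6 hits above also show the deferred-reset hook: amdgpu_amdkfd_device_init() does INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work), and the handler recovers its amdgpu_device with container_of() on the embedded kfd.reset_work member. A generic sketch of that embedded-work_struct pattern with illustrative type names:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct example_kfd_state {
        struct work_struct reset_work;
        /* ... */
    };

    struct example_adev {
        struct example_kfd_state kfd;
        /* ... */
    };

    static void example_reset_work(struct work_struct *work)
    {
        /* Walk back from the embedded work_struct to the containing device. */
        struct example_adev *adev =
            container_of(work, struct example_adev, kfd.reset_work);

        /* ... lock the KFD side and schedule a GPU reset for adev ... */
        (void)adev;
    }

    static void example_init(struct example_adev *adev)
    {
        INIT_WORK(&adev->kfd.reset_work, example_reset_work);
    }

    static void example_trigger(struct example_adev *adev)
    {
        schedule_work(&adev->kfd.reset_work);   /* runs example_reset_work later */
    }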
/kernel/linux/linux-5.10/samples/bpf/ |
H A D | task_fd_query_user.c | 223 int err, res, kfd, efd; in test_debug_fs_uprobe() local 228 kfd = open(buf, O_WRONLY | O_APPEND, 0); in test_debug_fs_uprobe() 229 CHECK_PERROR_RET(kfd < 0); in test_debug_fs_uprobe() 238 CHECK_PERROR_RET(write(kfd, buf, strlen(buf)) < 0); in test_debug_fs_uprobe() 240 close(kfd); in test_debug_fs_uprobe() 241 kfd = -1; in test_debug_fs_uprobe() 257 kfd = sys_perf_event_open(&attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); in test_debug_fs_uprobe() 258 CHECK_PERROR_RET(kfd < 0); in test_debug_fs_uprobe() 259 CHECK_PERROR_RET(ioctl(kfd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) < 0); in test_debug_fs_uprobe() 260 CHECK_PERROR_RET(ioctl(kfd, PERF_EVENT_IOC_ENABL in test_debug_fs_uprobe() [all...] |
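Note: test_debug_fs_uprobe() above follows the legacy attach flow: write a probe definition into the tracefs *_events file, read the new event's id, open a perf event for it, then attach a BPF program with PERF_EVENT_IOC_SET_BPF and enable it. A trimmed user-space sketch of the same flow using the sibling kprobe_events file; the tracefs path and probed symbol are illustrative, and bpf_prog_fd is assumed to be an already-loaded BPF program fd.

    /* Legacy kprobe + BPF attach sketch; path and symbol are illustrative. */
    #include <fcntl.h>
    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int attach_kprobe_bpf(int bpf_prog_fd)
    {
        /* On newer systems the mount point may be /sys/kernel/tracing instead. */
        const char *def = "p:kprobes/example_probe do_sys_openat2"; /* symbol illustrative */
        struct perf_event_attr attr = {};
        char buf[256];
        ssize_t n;
        int kfd, efd, pfd;

        /* 1. define the probe through tracefs */
        kfd = open("/sys/kernel/debug/tracing/kprobe_events", O_WRONLY | O_APPEND);
        if (kfd < 0 || write(kfd, def, strlen(def)) < 0) { perror("kprobe_events"); return -1; }
        close(kfd);

        /* 2. read back the tracepoint id of the new event */
        efd = open("/sys/kernel/debug/tracing/events/kprobes/example_probe/id", O_RDONLY);
        if (efd < 0 || (n = read(efd, buf, sizeof(buf) - 1)) <= 0) { perror("id"); return -1; }
        close(efd);
        buf[n] = '\0';

        /* 3. open a perf event for it and attach the BPF program */
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.config = atoi(buf);
        attr.sample_period = 1;
        attr.wakeup_events = 1;
        pfd = perf_event_open(&attr, -1 /* any pid */, 0 /* cpu 0 */, -1,
                              PERF_FLAG_FD_CLOEXEC);
        if (pfd < 0) { perror("perf_event_open"); return -1; }
        if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, bpf_prog_fd) ||
            ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0)) { perror("ioctl"); return -1; }
        return pfd;
    }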
/kernel/linux/linux-6.6/samples/bpf/ |
H A D | task_fd_query_user.c | 234 int err = -1, res, kfd, efd; in test_debug_fs_uprobe() local 240 kfd = open(buf, O_WRONLY | O_TRUNC, 0); in test_debug_fs_uprobe() 241 CHECK_PERROR_RET(kfd < 0); in test_debug_fs_uprobe() 250 CHECK_PERROR_RET(write(kfd, buf, strlen(buf)) < 0); in test_debug_fs_uprobe() 252 close(kfd); in test_debug_fs_uprobe() 253 kfd = -1; in test_debug_fs_uprobe() 270 kfd = sys_perf_event_open(&attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); in test_debug_fs_uprobe() 271 link = bpf_program__attach_perf_event(progs[0], kfd); in test_debug_fs_uprobe() 275 close(kfd); in test_debug_fs_uprobe() 280 err = bpf_task_fd_query(getpid(), kfd, in test_debug_fs_uprobe() [all...] |
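Note: the 6.6 version of the sample swaps the raw PERF_EVENT_IOC_SET_BPF ioctl for libbpf's bpf_program__attach_perf_event() and then exercises bpf_task_fd_query() against the perf fd to recover what the fd is attached to. A minimal sketch of the query step, assuming libbpf is available and pfd is a perf-event fd already attached as above:

    /* Sketch: querying what a perf-event fd is attached to via libbpf. */
    #include <bpf/bpf.h>
    #include <stdio.h>
    #include <unistd.h>

    int query_perf_fd(int pfd)
    {
        char buf[256];
        __u32 len = sizeof(buf), prog_id = 0, fd_type = 0;
        __u64 probe_offset = 0, probe_addr = 0;
        int err;

        err = bpf_task_fd_query(getpid(), pfd, 0 /* flags */, buf, &len,
                                &prog_id, &fd_type, &probe_offset, &probe_addr);
        if (err) {
            perror("bpf_task_fd_query");
            return err;
        }

        /* fd_type is one of BPF_FD_TYPE_KPROBE, _UPROBE, _TRACEPOINT, ... */
        printf("prog_id=%u fd_type=%u name=%s offset=0x%llx addr=0x%llx\n",
               prog_id, fd_type, buf,
               (unsigned long long)probe_offset,
               (unsigned long long)probe_addr);
        return 0;
    }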
/kernel/linux/linux-5.10/tools/perf/ |
H A D | builtin-probe.c | 422 int ret, ret2, ufd = -1, kfd = -1; in perf_del_probe_events() local 436 ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW); in perf_del_probe_events() 447 ret = probe_file__get_events(kfd, filter, klist); in perf_del_probe_events() 452 ret = probe_file__del_strlist(kfd, klist); in perf_del_probe_events() 475 if (kfd >= 0) in perf_del_probe_events() 476 close(kfd); in perf_del_probe_events()