
Searched refs:dqm (Results 1 - 25 of 44) sorted by relevance

/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdkfd/
kfd_device_queue_manager.c
42 static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
45 static int execute_queues_cpsch(struct device_queue_manager *dqm,
48 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
52 static int map_queues_cpsch(struct device_queue_manager *dqm);
54 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
57 static inline void deallocate_hqd(struct device_queue_manager *dqm,
59 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
60 static int allocate_sdma_queue(struct device_queue_manager *dqm,
72 static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe) in is_pipe_enabled() argument
75 int pipe_offset = mec * dqm in is_pipe_enabled()
86 get_cp_queues_num(struct device_queue_manager *dqm) get_cp_queues_num() argument
92 get_queues_per_pipe(struct device_queue_manager *dqm) get_queues_per_pipe() argument
97 get_pipes_per_mec(struct device_queue_manager *dqm) get_pipes_per_mec() argument
102 get_num_sdma_engines(struct device_queue_manager *dqm) get_num_sdma_engines() argument
107 get_num_xgmi_sdma_engines(struct device_queue_manager *dqm) get_num_xgmi_sdma_engines() argument
112 get_num_all_sdma_engines(struct device_queue_manager *dqm) get_num_all_sdma_engines() argument
117 get_num_sdma_queues(struct device_queue_manager *dqm) get_num_sdma_queues() argument
123 get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) get_num_xgmi_sdma_queues() argument
129 program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) program_sh_mem_settings() argument
140 increment_queue_count(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) increment_queue_count() argument
155 decrement_queue_count(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) decrement_queue_count() argument
228 allocate_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) allocate_vmid() argument
292 deallocate_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) deallocate_vmid() argument
311 create_queue_nocpsch(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd) create_queue_nocpsch() argument
423 allocate_hqd(struct device_queue_manager *dqm, struct queue *q) allocate_hqd() argument
457 deallocate_hqd(struct device_queue_manager *dqm, struct queue *q) deallocate_hqd() argument
466 destroy_queue_nocpsch_locked(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) destroy_queue_nocpsch_locked() argument
525 destroy_queue_nocpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) destroy_queue_nocpsch() argument
556 update_queue(struct device_queue_manager *dqm, struct queue *q) update_queue() argument
648 evict_process_queues_nocpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) evict_process_queues_nocpsch() argument
696 evict_process_queues_cpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) evict_process_queues_cpsch() argument
733 restore_process_queues_nocpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) restore_process_queues_nocpsch() argument
814 restore_process_queues_cpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) restore_process_queues_cpsch() argument
861 register_process(struct device_queue_manager *dqm, struct qcm_process_device *qpd) register_process() argument
900 unregister_process(struct device_queue_manager *dqm, struct qcm_process_device *qpd) unregister_process() argument
935 set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, unsigned int vmid) set_pasid_vmid_mapping() argument
942 init_interrupts(struct device_queue_manager *dqm) init_interrupts() argument
951 initialize_nocpsch(struct device_queue_manager *dqm) initialize_nocpsch() argument
985 uninitialize(struct device_queue_manager *dqm) uninitialize() argument
997 start_nocpsch(struct device_queue_manager *dqm) start_nocpsch() argument
1009 stop_nocpsch(struct device_queue_manager *dqm) stop_nocpsch() argument
1018 pre_reset(struct device_queue_manager *dqm) pre_reset() argument
1025 allocate_sdma_queue(struct device_queue_manager *dqm, struct queue *q) allocate_sdma_queue() argument
1069 deallocate_sdma_queue(struct device_queue_manager *dqm, struct queue *q) deallocate_sdma_queue() argument
1087 set_sched_resources(struct device_queue_manager *dqm) set_sched_resources() argument
1130 initialize_cpsch(struct device_queue_manager *dqm) initialize_cpsch() argument
1161 start_cpsch(struct device_queue_manager *dqm) start_cpsch() argument
1205 stop_cpsch(struct device_queue_manager *dqm) stop_cpsch() argument
1229 create_kernel_queue_cpsch(struct device_queue_manager *dqm, struct kernel_queue *kq, struct qcm_process_device *qpd) create_kernel_queue_cpsch() argument
1258 destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, struct kernel_queue *kq, struct qcm_process_device *qpd) destroy_kernel_queue_cpsch() argument
1277 create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd) create_queue_cpsch() argument
1387 map_queues_cpsch(struct device_queue_manager *dqm) map_queues_cpsch() argument
1410 unmap_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, uint32_t filter_param) unmap_queues_cpsch() argument
1453 execute_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, uint32_t filter_param) execute_queues_cpsch() argument
1468 destroy_queue_cpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) destroy_queue_cpsch() argument
1552 set_cache_memory_policy(struct device_queue_manager *dqm, struct qcm_process_device *qpd, enum cache_policy default_policy, enum cache_policy alternate_policy, void __user *alternate_aperture_base, uint64_t alternate_aperture_size) set_cache_memory_policy() argument
1614 set_trap_handler(struct device_queue_manager *dqm, struct qcm_process_device *qpd, uint64_t tba_addr, uint64_t tma_addr) set_trap_handler() argument
1634 process_termination_nocpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) process_termination_nocpsch() argument
1682 get_wave_state(struct device_queue_manager *dqm, struct queue *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size) get_wave_state() argument
1712 process_termination_cpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) process_termination_cpsch() argument
1792 init_mqd_managers(struct device_queue_manager *dqm) init_mqd_managers() argument
1818 allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) allocate_hiq_sdma_mqd() argument
1837 struct device_queue_manager *dqm; device_queue_manager_init() local
1977 device_queue_manager_uninit(struct device_queue_manager *dqm) device_queue_manager_uninit() argument
1984 kfd_process_vm_fault(struct device_queue_manager *dqm, u32 pasid) kfd_process_vm_fault() argument
2003 struct device_queue_manager *dqm = container_of(work, kfd_process_hw_exception() local
2033 struct device_queue_manager *dqm = data; dqm_debugfs_hqds() local
2098 dqm_debugfs_execute_queues(struct device_queue_manager *dqm) dqm_debugfs_execute_queues() argument
[all...]
kfd_device_queue_manager.h
87 int (*create_queue)(struct device_queue_manager *dqm,
91 int (*destroy_queue)(struct device_queue_manager *dqm,
95 int (*update_queue)(struct device_queue_manager *dqm,
98 int (*register_process)(struct device_queue_manager *dqm,
101 int (*unregister_process)(struct device_queue_manager *dqm,
104 int (*initialize)(struct device_queue_manager *dqm);
105 int (*start)(struct device_queue_manager *dqm);
106 int (*stop)(struct device_queue_manager *dqm);
107 void (*pre_reset)(struct device_queue_manager *dqm);
108 void (*uninitialize)(struct device_queue_manager *dqm);
243 dqm_lock(struct device_queue_manager *dqm) dqm_lock() argument
248 dqm_unlock(struct device_queue_manager *dqm) dqm_unlock() argument
[all...]
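
The kfd_device_queue_manager.h hits above outline a function-pointer ops table (create_queue, destroy_queue, start, stop, ...) plus dqm_lock()/dqm_unlock() helpers, and the kfd_process_queue_manager.c hits that follow show callers dispatching through it (dev->dqm->ops.create_queue(dev->dqm, ...)). Below is a minimal standalone sketch of that dispatch-plus-lock pattern only; it is not the kernel code, and every name in it (toy_dqm, toy_dqm_ops, the pthread mutex, the *_nocpsch backends) is a simplified stand-in chosen for illustration.

/*
 * Sketch of the ops-table pattern suggested by the search hits above.
 * All types and names here are hypothetical stand-ins, not KFD's.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_dqm;                                /* stand-in for device_queue_manager */

struct toy_dqm_ops {                           /* mirrors the shape of the ops table, not its members */
	int (*create_queue)(struct toy_dqm *dqm, int queue_id);
	int (*destroy_queue)(struct toy_dqm *dqm, int queue_id);
	int (*start)(struct toy_dqm *dqm);
	int (*stop)(struct toy_dqm *dqm);
};

struct toy_dqm {
	struct toy_dqm_ops ops;
	pthread_mutex_t lock;
	int active_queue_count;
};

static void toy_dqm_lock(struct toy_dqm *dqm)   { pthread_mutex_lock(&dqm->lock); }
static void toy_dqm_unlock(struct toy_dqm *dqm) { pthread_mutex_unlock(&dqm->lock); }

/* One possible backend wired into the table ("nocpsch"-style in spirit). */
static int toy_create_queue_nocpsch(struct toy_dqm *dqm, int queue_id)
{
	toy_dqm_lock(dqm);
	dqm->active_queue_count++;
	printf("created queue %d (active=%d)\n", queue_id, dqm->active_queue_count);
	toy_dqm_unlock(dqm);
	return 0;
}

static int toy_destroy_queue_nocpsch(struct toy_dqm *dqm, int queue_id)
{
	toy_dqm_lock(dqm);
	dqm->active_queue_count--;
	printf("destroyed queue %d (active=%d)\n", queue_id, dqm->active_queue_count);
	toy_dqm_unlock(dqm);
	return 0;
}

static int toy_start(struct toy_dqm *dqm) { (void)dqm; return 0; }
static int toy_stop(struct toy_dqm *dqm)  { (void)dqm; return 0; }

int main(void)
{
	struct toy_dqm dqm = {
		.ops = {
			.create_queue  = toy_create_queue_nocpsch,
			.destroy_queue = toy_destroy_queue_nocpsch,
			.start         = toy_start,
			.stop          = toy_stop,
		},
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	/* Callers go through the ops table, as the pqm_* hits below do. */
	dqm.ops.start(&dqm);
	dqm.ops.create_queue(&dqm, 0);
	dqm.ops.destroy_queue(&dqm, 0);
	dqm.ops.stop(&dqm);
	return 0;
}
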
kfd_process_queue_manager.c
74 dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd); in kfd_process_dequeue_from_device()
123 return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, in pqm_set_gws()
234 dev->dqm->ops.register_process(dev->dqm, &pdd->qpd); in pqm_create_queue()
256 retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd); in pqm_create_queue()
262 if ((dev->dqm->sched_policy == in pqm_create_queue()
264 ((dev->dqm in pqm_create_queue()
344 struct device_queue_manager *dqm; pqm_destroy_queue() local
[all...]
kfd_device_queue_manager_vi.c
29 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
35 static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
41 static int update_qpd_vi(struct device_queue_manager *dqm,
43 static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
45 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
47 static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
97 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, in set_cache_memory_policy_vi() argument
126 static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm, in set_cache_memory_policy_vi_tonga() argument
153 static int update_qpd_vi(struct device_queue_manager *dqm, in update_qpd_vi() argument
194 static int update_qpd_vi_tonga(struct device_queue_manager *dqm, in update_qpd_vi_tonga() argument
228 init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd) init_sdma_vm() argument
244 init_sdma_vm_tonga(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd) init_sdma_vm_tonga() argument
[all...]
kfd_device_queue_manager_cik.c
29 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
35 static int update_qpd_cik(struct device_queue_manager *dqm,
37 static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
39 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
41 static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
90 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, in set_cache_memory_policy_cik() argument
116 static int update_qpd_cik(struct device_queue_manager *dqm, in update_qpd_cik() argument
150 static int update_qpd_cik_hawaii(struct device_queue_manager *dqm, in update_qpd_cik_hawaii() argument
180 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm() argument
196 static void init_sdma_vm_hawaii(struct device_queue_manager *dqm, in init_sdma_vm_hawaii() argument
[all...]
kfd_device_queue_manager_v9.c
30 static int update_qpd_v9(struct device_queue_manager *dqm,
32 static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
52 static int update_qpd_v9(struct device_queue_manager *dqm, in update_qpd_v9() argument
64 if (dqm->dev->noretry && in update_qpd_v9()
65 !dqm->dev->use_iommu_v2) in update_qpd_v9()
80 static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm_v9() argument
kfd_mqd_manager.c
56 mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem; in allocate_hiq_mqd()
57 mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr; in allocate_hiq_mqd()
58 mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr; in allocate_hiq_mqd()
76 dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; in allocate_sdma_mqd()
78 offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; in allocate_sdma_mqd()
80 mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem in allocate_sdma_mqd()
82 mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset; in allocate_sdma_mqd()
84 dev->dqm->hiq_sdma_mqd.cpu_ptr + offset); in allocate_sdma_mqd()
kfd_packet_manager.c
47 struct kfd_dev *dev = pm->dqm->dev; in pm_calc_rlib_size()
49 process_count = pm->dqm->processes_count; in pm_calc_rlib_size()
50 queue_count = pm->dqm->active_queue_count; in pm_calc_rlib_size()
51 compute_queue_count = pm->dqm->active_cp_queue_count; in pm_calc_rlib_size()
52 gws_queue_count = pm->dqm->gws_queue_count; in pm_calc_rlib_size()
65 compute_queue_count > get_cp_queues_num(pm->dqm) || in pm_calc_rlib_size()
101 retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size, in pm_allocate_runlist_ib()
145 pm->dqm->processes_count, pm->dqm->active_queue_count); in pm_create_runlist_ib()
151 if (proccesses_mapped >= pm->dqm in pm_create_runlist_ib()
224 pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) pm_init() argument
[all...]
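
The kfd_packet_manager.c hits above show pm_calc_rlib_size() pulling process and queue counts out of the dqm before allocating the runlist indirect buffer. The sketch below illustrates the kind of sizing arithmetic those hits suggest: per-process and per-queue packet sizes multiplied by the counts, with extra room when the queues over-subscribe the hardware. It is a hedged illustration only; the packet sizes, names, and over-subscription test here are hypothetical stand-ins, not the kernel's values or logic.

/*
 * Hedged sketch of runlist-IB sizing in the spirit of pm_calc_rlib_size().
 * Not the kernel implementation; all constants below are made up.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_counts {
	unsigned int processes;      /* cf. pm->dqm->processes_count */
	unsigned int active_queues;  /* cf. pm->dqm->active_queue_count */
};

static unsigned int toy_calc_rlib_size(const struct toy_counts *c,
				       unsigned int hw_queue_slots,
				       bool *over_subscription)
{
	/* Hypothetical per-packet sizes, in bytes. */
	const unsigned int map_process_size = 64;
	const unsigned int map_queues_size  = 56;
	const unsigned int runlist_size     = 20;
	unsigned int size;

	/* More active queues than hardware slots -> chained runlist needed. */
	*over_subscription = c->active_queues > hw_queue_slots;

	size = c->processes * map_process_size +
	       c->active_queues * map_queues_size;

	if (*over_subscription)
		size += runlist_size; /* room for the chaining packet */

	return size;
}

int main(void)
{
	struct toy_counts counts = { .processes = 3, .active_queues = 40 };
	bool oversub;
	unsigned int bytes = toy_calc_rlib_size(&counts, 32, &oversub);

	printf("runlist IB: %u bytes, over-subscribed: %s\n",
	       bytes, oversub ? "yes" : "no");
	return 0;
}
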
kfd_device_queue_manager_v10.c
29 static int update_qpd_v10(struct device_queue_manager *dqm,
31 static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
51 static int update_qpd_v10(struct device_queue_manager *dqm, in update_qpd_v10() argument
84 static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm_v10() argument
kfd_device.c
624 if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) in kfd_gws_init()
723 kfd->dqm = device_queue_manager_init(kfd); in kgd2kfd_device_init()
724 if (!kfd->dqm) { in kgd2kfd_device_init()
769 kfd->dqm->sched_policy); in kgd2kfd_device_init()
777 device_queue_manager_uninit(kfd->dqm); in kgd2kfd_device_init()
800 device_queue_manager_uninit(kfd->dqm); in kgd2kfd_device_exit()
821 kfd->dqm->ops.pre_reset(kfd->dqm); in kgd2kfd_pre_reset()
871 kfd->dqm->ops.stop(kfd->dqm); in kgd2kfd_suspend()
[all...]
kfd_process.c
104 struct device_queue_manager *dqm; in kfd_sdma_activity_worker() local
117 dqm = pdd->dev->dqm; in kfd_sdma_activity_worker()
119 if (!dqm || !qpd) in kfd_sdma_activity_worker()
148 dqm_lock(dqm); in kfd_sdma_activity_worker()
157 dqm_unlock(dqm); in kfd_sdma_activity_worker()
174 dqm_unlock(dqm); in kfd_sdma_activity_worker()
178 dqm_unlock(dqm); in kfd_sdma_activity_worker()
208 dqm_lock(dqm); in kfd_sdma_activity_worker()
230 dqm_unlock(dqm); in kfd_sdma_activity_worker()
[all...]
kfd_int_process_v9.c
51 if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { in event_interrupt_isr_v9()
58 pasid = dev->dqm->vmid_pasid[vmid]; in event_interrupt_isr_v9()
122 kfd_process_vm_fault(dev->dqm, pasid); in event_interrupt_wq_v9()
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdkfd/
kfd_device_queue_manager.c
45 static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
48 static int execute_queues_cpsch(struct device_queue_manager *dqm,
52 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
58 static int map_queues_cpsch(struct device_queue_manager *dqm);
60 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
63 static inline void deallocate_hqd(struct device_queue_manager *dqm,
65 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
66 static int allocate_sdma_queue(struct device_queue_manager *dqm,
78 static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe) in is_pipe_enabled() argument
81 int pipe_offset = (mec * dqm in is_pipe_enabled()
92 get_cp_queues_num(struct device_queue_manager *dqm) get_cp_queues_num() argument
98 get_queues_per_pipe(struct device_queue_manager *dqm) get_queues_per_pipe() argument
103 get_pipes_per_mec(struct device_queue_manager *dqm) get_pipes_per_mec() argument
108 get_num_all_sdma_engines(struct device_queue_manager *dqm) get_num_all_sdma_engines() argument
114 get_num_sdma_queues(struct device_queue_manager *dqm) get_num_sdma_queues() argument
120 get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) get_num_xgmi_sdma_queues() argument
126 init_sdma_bitmaps(struct device_queue_manager *dqm) init_sdma_bitmaps() argument
140 program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) program_sh_mem_settings() argument
153 kfd_hws_hang(struct device_queue_manager *dqm) kfd_hws_hang() argument
188 add_queue_mes(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd) add_queue_mes() argument
256 remove_queue_mes(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd) remove_queue_mes() argument
284 remove_all_queues_mes(struct device_queue_manager *dqm) remove_all_queues_mes() argument
310 increment_queue_count(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) increment_queue_count() argument
325 decrement_queue_count(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) decrement_queue_count() argument
430 program_trap_handler_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) program_trap_handler_settings() argument
443 allocate_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) allocate_vmid() argument
510 deallocate_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) deallocate_vmid() argument
529 create_queue_nocpsch(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd, const struct kfd_criu_queue_priv_data *qd, const void *restore_mqd, const void *restore_ctl_stack) create_queue_nocpsch() argument
650 allocate_hqd(struct device_queue_manager *dqm, struct queue *q) allocate_hqd() argument
684 deallocate_hqd(struct device_queue_manager *dqm, struct queue *q) deallocate_hqd() argument
760 destroy_queue_nocpsch_locked(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) destroy_queue_nocpsch_locked() argument
819 destroy_queue_nocpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) destroy_queue_nocpsch() argument
850 update_queue(struct device_queue_manager *dqm, struct queue *q, struct mqd_update_info *minfo) update_queue() argument
960 suspend_single_queue(struct device_queue_manager *dqm, struct kfd_process_device *pdd, struct queue *q) suspend_single_queue() argument
1006 resume_single_queue(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) resume_single_queue() argument
1038 evict_process_queues_nocpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) evict_process_queues_nocpsch() argument
1088 evict_process_queues_cpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) evict_process_queues_cpsch() argument
1144 restore_process_queues_nocpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) restore_process_queues_nocpsch() argument
1225 restore_process_queues_cpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) restore_process_queues_cpsch() argument
1287 register_process(struct device_queue_manager *dqm, struct qcm_process_device *qpd) register_process() argument
1326 unregister_process(struct device_queue_manager *dqm, struct qcm_process_device *qpd) unregister_process() argument
1361 set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, unsigned int vmid) set_pasid_vmid_mapping() argument
1377 init_interrupts(struct device_queue_manager *dqm) init_interrupts() argument
1392 initialize_nocpsch(struct device_queue_manager *dqm) initialize_nocpsch() argument
1425 uninitialize(struct device_queue_manager *dqm) uninitialize() argument
1437 start_nocpsch(struct device_queue_manager *dqm) start_nocpsch() argument
1452 stop_nocpsch(struct device_queue_manager *dqm) stop_nocpsch() argument
1468 pre_reset(struct device_queue_manager *dqm) pre_reset() argument
1475 allocate_sdma_queue(struct device_queue_manager *dqm, struct queue *q, const uint32_t *restore_sdma_id) allocate_sdma_queue() argument
1544 deallocate_sdma_queue(struct device_queue_manager *dqm, struct queue *q) deallocate_sdma_queue() argument
1562 set_sched_resources(struct device_queue_manager *dqm) set_sched_resources() argument
1605 initialize_cpsch(struct device_queue_manager *dqm) initialize_cpsch() argument
1627 start_cpsch(struct device_queue_manager *dqm) start_cpsch() argument
1696 stop_cpsch(struct device_queue_manager *dqm) stop_cpsch() argument
1727 create_kernel_queue_cpsch(struct device_queue_manager *dqm, struct kernel_queue *kq, struct qcm_process_device *qpd) create_kernel_queue_cpsch() argument
1757 destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, struct kernel_queue *kq, struct qcm_process_device *qpd) destroy_kernel_queue_cpsch() argument
1777 create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd, const struct kfd_criu_queue_priv_data *qd, const void *restore_mqd, const void *restore_ctl_stack) create_queue_cpsch() argument
1909 map_queues_cpsch(struct device_queue_manager *dqm) map_queues_cpsch() argument
1932 unmap_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, uint32_t filter_param, uint32_t grace_period, bool reset) unmap_queues_cpsch() argument
2000 reset_queues_cpsch(struct device_queue_manager *dqm, uint16_t pasid) reset_queues_cpsch() argument
2015 execute_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, uint32_t filter_param, uint32_t grace_period) execute_queues_cpsch() argument
2031 wait_on_destroy_queue(struct device_queue_manager *dqm, struct queue *q) wait_on_destroy_queue() argument
2056 destroy_queue_cpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) destroy_queue_cpsch() argument
2157 set_cache_memory_policy(struct device_queue_manager *dqm, struct qcm_process_device *qpd, enum cache_policy default_policy, enum cache_policy alternate_policy, void __user *alternate_aperture_base, uint64_t alternate_aperture_size) set_cache_memory_policy() argument
2219 process_termination_nocpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) process_termination_nocpsch() argument
2267 get_wave_state(struct device_queue_manager *dqm, struct queue *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size) get_wave_state() argument
2297 get_queue_checkpoint_info(struct device_queue_manager *dqm, const struct queue *q, u32 *mqd_size, u32 *ctl_stack_size) get_queue_checkpoint_info() argument
2317 checkpoint_mqd(struct device_queue_manager *dqm, const struct queue *q, void *mqd, void *ctl_stack) checkpoint_mqd() argument
2347 process_termination_cpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) process_termination_cpsch() argument
2437 init_mqd_managers(struct device_queue_manager *dqm) init_mqd_managers() argument
2463 allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) allocate_hiq_sdma_mqd() argument
2483 struct device_queue_manager *dqm; device_queue_manager_init() local
2615 device_queue_manager_uninit(struct device_queue_manager *dqm) device_queue_manager_uninit() argument
2624 kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid) kfd_dqm_evict_pasid() argument
2643 struct device_queue_manager *dqm = container_of(work, kfd_process_hw_exception() local
2648 reserve_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd) reserve_debug_trap_vmid() argument
2695 release_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd) release_debug_trap_vmid() argument
2792 struct device_queue_manager *dqm = pdd->dev->dqm; copy_context_work_handler() local
2846 struct device_queue_manager *dqm = pdd->dev->dqm; resume_queues() local
2948 struct device_queue_manager *dqm = pdd->dev->dqm; suspend_queues() local
3082 debug_lock_and_unmap(struct device_queue_manager *dqm) debug_lock_and_unmap() argument
3103 debug_map_and_unlock(struct device_queue_manager *dqm) debug_map_and_unlock() argument
3122 debug_refresh_runlist(struct device_queue_manager *dqm) debug_refresh_runlist() argument
3157 struct device_queue_manager *dqm = data; dqm_debugfs_hqds() local
3235 dqm_debugfs_hang_hws(struct device_queue_manager *dqm) dqm_debugfs_hang_hws() argument
[all...]
kfd_device_queue_manager.h
132 int (*create_queue)(struct device_queue_manager *dqm,
139 int (*destroy_queue)(struct device_queue_manager *dqm,
143 int (*update_queue)(struct device_queue_manager *dqm,
146 int (*register_process)(struct device_queue_manager *dqm,
149 int (*unregister_process)(struct device_queue_manager *dqm,
152 int (*initialize)(struct device_queue_manager *dqm);
153 int (*start)(struct device_queue_manager *dqm);
154 int (*stop)(struct device_queue_manager *dqm);
155 void (*pre_reset)(struct device_queue_manager *dqm);
156 void (*uninitialize)(struct device_queue_manager *dqm);
321 dqm_lock(struct device_queue_manager *dqm) dqm_lock() argument
326 dqm_unlock(struct device_queue_manager *dqm) dqm_unlock() argument
[all...]
kfd_device_queue_manager_v9.c
29 static int update_qpd_v9(struct device_queue_manager *dqm,
31 static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
51 static int update_qpd_v9(struct device_queue_manager *dqm, in update_qpd_v9() argument
63 if (dqm->dev->kfd->noretry) in update_qpd_v9()
66 if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3)) in update_qpd_v9()
74 if (KFD_SUPPORT_XNACK_PER_PROCESS(dqm->dev)) { in update_qpd_v9()
89 static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm_v9() argument
kfd_packet_manager.c
48 struct kfd_node *dev = pm->dqm->dev; in pm_calc_rlib_size()
50 process_count = pm->dqm->processes_count; in pm_calc_rlib_size()
51 queue_count = pm->dqm->active_queue_count; in pm_calc_rlib_size()
52 compute_queue_count = pm->dqm->active_cp_queue_count; in pm_calc_rlib_size()
53 gws_queue_count = pm->dqm->gws_queue_count; in pm_calc_rlib_size()
66 compute_queue_count > get_cp_queues_num(pm->dqm) || in pm_calc_rlib_size()
102 retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size, in pm_allocate_runlist_ib()
146 pm->dqm->processes_count, pm->dqm->active_queue_count); in pm_create_runlist_ib()
152 if (processes_mapped >= pm->dqm in pm_create_runlist_ib()
225 pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) pm_init() argument
[all...]
kfd_process_queue_manager.c
89 dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd); in kfd_process_dequeue_from_device()
150 return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, in pqm_set_gws()
326 dev->dqm->ops.register_process(dev->dqm, &pdd->qpd); in pqm_create_queue()
348 retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data, in pqm_create_queue()
355 if ((dev->dqm->sched_policy == in pqm_create_queue()
357 ((dev->dqm in pqm_create_queue()
446 struct device_queue_manager *dqm; pqm_destroy_queue() local
[all...]
kfd_device_queue_manager_vi.c
30 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
36 static int update_qpd_vi(struct device_queue_manager *dqm,
38 static void init_sdma_vm(struct device_queue_manager *dqm,
79 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, in set_cache_memory_policy_vi() argument
106 static int update_qpd_vi(struct device_queue_manager *dqm, in update_qpd_vi() argument
140 static void init_sdma_vm(struct device_queue_manager *dqm, in init_sdma_vm() argument
kfd_device_queue_manager_cik.c
30 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
36 static int update_qpd_cik(struct device_queue_manager *dqm,
38 static void init_sdma_vm(struct device_queue_manager *dqm,
78 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, in set_cache_memory_policy_cik() argument
104 static int update_qpd_cik(struct device_queue_manager *dqm, in update_qpd_cik() argument
134 static void init_sdma_vm(struct device_queue_manager *dqm, in init_sdma_vm() argument
kfd_mqd_manager.c
57 mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem; in allocate_hiq_mqd()
58 mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr; in allocate_hiq_mqd()
59 mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr; in allocate_hiq_mqd()
77 dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; in allocate_sdma_mqd()
79 offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * in allocate_sdma_mqd()
82 mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem in allocate_sdma_mqd()
84 mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset; in allocate_sdma_mqd()
86 dev->dqm->hiq_sdma_mqd.cpu_ptr + offset); in allocate_sdma_mqd()
271 return dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; in kfd_hiq_mqd_stride()
282 dev->dqm in kfd_get_hiq_xcc_mqd()
[all...]
kfd_device_queue_manager_v10.c
30 static int update_qpd_v10(struct device_queue_manager *dqm,
32 static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
52 static int update_qpd_v10(struct device_queue_manager *dqm, in update_qpd_v10() argument
76 static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm_v10() argument
kfd_device_queue_manager_v11.c
29 static int update_qpd_v11(struct device_queue_manager *dqm,
31 static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
51 static int update_qpd_v11(struct device_queue_manager *dqm, in update_qpd_v11() argument
76 static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm_v11() argument
kfd_packet_manager_v9.c
37 struct kfd_node *kfd = pm->dqm->dev; in pm_map_process_v9()
55 if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled && in pm_map_process_v9()
57 packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid; in pm_map_process_v9()
91 struct kfd_dev *kfd = pm->dqm->dev->kfd; in pm_map_process_aldebaran()
146 struct kfd_node *kfd = pm->dqm->dev; in pm_runlist_v9()
157 concurrent_proc_cnt = min(pm->dqm->processes_count, in pm_runlist_v9()
297 pm->dqm->dev->kfd2kgd->build_grace_period_packet_info( in pm_set_grace_period_v9()
298 pm->dqm->dev->adev, in pm_set_grace_period_v9()
299 pm->dqm->wait_times, in pm_set_grace_period_v9()
305 reg_data = pm->dqm in pm_set_grace_period_v9()
[all...]
kfd_debug.c
239 kfd_dqm_evict_pasid(dev->dqm, p->pasid); in kfd_set_dbg_ev_from_interrupt()
277 kfd_dqm_evict_pasid(pdd->dev->dqm, p->pasid); in kfd_dbg_send_exception_to_runtime()
314 err = q->device->dqm->ops.update_queue(q->device->dqm, q, &minfo); in kfd_dbg_set_queue_workaround()
420 r = debug_lock_and_unmap(pdd->dev->dqm); in kfd_dbg_trap_clear_dev_address_watch()
432 r = debug_map_and_unlock(pdd->dev->dqm); in kfd_dbg_trap_clear_dev_address_watch()
454 r = debug_lock_and_unmap(pdd->dev->dqm); in kfd_dbg_trap_set_dev_address_watch()
474 r = debug_map_and_unlock(pdd->dev->dqm); in kfd_dbg_trap_set_dev_address_watch()
516 r = debug_refresh_runlist(pdd->dev->dqm); in kfd_dbg_trap_set_flags()
539 debug_refresh_runlist(pdd->dev->dqm); in kfd_dbg_trap_set_flags()
[all...]
kfd_device.c
496 if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) in kfd_gws_init()
536 node->dqm = device_queue_manager_init(node); in kfd_init_node()
537 if (!node->dqm) { in kfd_init_node()
563 device_queue_manager_uninit(node->dqm); in kfd_init_node()
582 device_queue_manager_uninit(knode->dqm); in kfd_cleanup_nodes()
837 node->dqm->sched_policy); in kgd2kfd_device_init()
883 node->dqm->ops.pre_reset(node->dqm); in kgd2kfd_pre_reset()
956 node->dqm->ops.stop(node->dqm); in kgd2kfd_suspend()
[all...]
