/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/csf/

mali_kbase_csf.h
   98: int kbase_csf_event_wait_add(struct kbase_context *kctx, kbase_csf_event_callback *callback, void *param);
  111: void kbase_csf_event_wait_remove(struct kbase_context *kctx, kbase_csf_event_callback *callback, void *param);
  121: void kbase_csf_event_wait_remove_all(struct kbase_context *kctx);
  134: bool kbase_csf_read_error(struct kbase_context *kctx, struct base_csf_notification *event_data);
  143: bool kbase_csf_error_pending(struct kbase_context *kctx);
  157: void kbase_csf_event_signal(struct kbase_context *kctx, bool notify_gpu);
  159: static inline void kbase_csf_event_signal_notify_gpu(struct kbase_context *kctx) in kbase_csf_event_signal_notify_gpu()
  164: static inline void kbase_csf_event_signal_cpu_only(struct kbase_context *kctx) in kbase_csf_event_signal_cpu_only()
  177: int kbase_csf_ctx_init(struct kbase_context *kctx);
  189: void kbase_csf_ctx_handle_fault(struct kbase_context *kct [all...]
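The kbase_csf_event_wait_add()/kbase_csf_event_wait_remove() pair above registers and unregisters a per-context callback that fires when kbase_csf_event_signal() is invoked. A minimal sketch of that usage pattern follows; the callback signature and the KBASE_CSF_EVENT_CALLBACK_KEEP return value are assumptions inferred from the declarations rather than taken from the indexed header, and my_event_cb/my_driver_data/my_wait_for_csf_event are hypothetical names:

#include "mali_kbase.h"      /* struct kbase_context */
#include "mali_kbase_csf.h"  /* event wait add/remove/signal declarations above */

/* Hypothetical private state handed back through the 'param' pointer. */
struct my_driver_data {
    int pending;
};

/* Assumed callback shape: invoked with the opaque 'param' registered below.
 * The exact kbase_csf_event_callback type in this driver version may differ. */
static enum kbase_csf_event_callback_action my_event_cb(void *param)
{
    struct my_driver_data *data = param;

    data->pending = 0;
    return KBASE_CSF_EVENT_CALLBACK_KEEP; /* keep the callback registered */
}

static int my_wait_for_csf_event(struct kbase_context *kctx,
                                 struct my_driver_data *data)
{
    int err;

    /* Register: the callback runs when an event is signalled on kctx. */
    err = kbase_csf_event_wait_add(kctx, my_event_cb, data);
    if (err)
        return err;

    /* ... elsewhere, a producer calls kbase_csf_event_signal(kctx, true)
     * (or the _notify_gpu/_cpu_only inline wrappers) to fire callbacks ... */

    /* Unregister with the same (callback, param) pair used at add time. */
    kbase_csf_event_wait_remove(kctx, my_event_cb, data);
    return 0;
}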
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/context/

mali_kbase_context_internal.h
   24: typedef int kbase_context_init_method(struct kbase_context *kctx);
   25: typedef void kbase_context_term_method(struct kbase_context *kctx);
   39: int kbase_context_common_init(struct kbase_context *kctx);
   40: void kbase_context_common_term(struct kbase_context *kctx);
   42: int kbase_context_mem_pool_group_init(struct kbase_context *kctx);
   43: void kbase_context_mem_pool_group_term(struct kbase_context *kctx);
   45: int kbase_context_mmu_init(struct kbase_context *kctx);
   46: void kbase_context_mmu_term(struct kbase_context *kctx);
   48: int kbase_context_mem_alloc_page(struct kbase_context *kctx);
   49: void kbase_context_mem_pool_free(struct kbase_context *kct [all...]

mali_kbase_context.h
   36: void kbase_context_debugfs_init(struct kbase_context *const kctx);
   47: void kbase_context_debugfs_term(struct kbase_context *const kctx);
   68: struct kbase_context *
   80: void kbase_destroy_context(struct kbase_context *kctx);
   89: static inline bool kbase_ctx_flag(struct kbase_context *kctx, in kbase_ctx_flag()
  106: static inline void kbase_ctx_flag_clear(struct kbase_context *kctx, in kbase_ctx_flag_clear()
  123: static inline void kbase_ctx_flag_set(struct kbase_context *kctx, in kbase_ctx_flag_set()
/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/

mali_kbase_mem.h
   68: struct kbase_context *kctx;
  149: struct kbase_context *kctx;
  221: struct kbase_context *kctx; /* Backlink to base context */
  399: static inline int kbase_reg_prepare_native(struct kbase_va_region *reg, struct kbase_context *kctx) in kbase_reg_prepare_native()
  614: int kbase_region_tracker_init(struct kbase_context *kctx);
  615: int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages);
  616: void kbase_region_tracker_term(struct kbase_context *kctx);
  618: struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr);
  625: struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr);
  627: struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kct [all...]

mali_kbase_mem_linux.h
   36: struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent,
   38: int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 *const pages);
   39: int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type, void __user *phandle, u32 padding,
   41: u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, u64 nents, struct base_mem_aliasing_info *ai,
   43: int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask);
   54: int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
   65: int kbase_mem_evictable_init(struct kbase_context *kctx);
   72: void kbase_mem_evictable_deinit(struct kbase_context *kctx);
   88: int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages, u64 old_pages);
  170: void *kbase_vmap_prot(struct kbase_context *kct [all...]
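kbase_mem_query() and kbase_mem_commit() above form a complete read/resize pair for a GPU virtual allocation: query how many pages are currently committed, then grow or shrink the physical backing. A minimal sketch of that pairing, assuming KBASE_MEM_QUERY_COMMIT_SIZE is the query id for committed size (my_grow_commit is a hypothetical helper):

#include "mali_kbase.h"            /* struct kbase_context */
#include "mali_kbase_mem_linux.h"  /* kbase_mem_query(), kbase_mem_commit() */

/* Hypothetical helper: grow the committed backing of the allocation at
 * gpu_addr by 'extra_pages' physical pages. */
static int my_grow_commit(struct kbase_context *kctx, u64 gpu_addr,
                          u64 extra_pages)
{
    u64 committed = 0;
    int err;

    /* Ask how many pages are currently backed; the query id name is an
     * assumption, not confirmed by the matches shown above. */
    err = kbase_mem_query(kctx, gpu_addr, KBASE_MEM_QUERY_COMMIT_SIZE,
                          &committed);
    if (err)
        return err;

    /* Resize the physical backing; the GPU VA range itself is unchanged. */
    return kbase_mem_commit(kctx, gpu_addr, committed + extra_pages);
}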
mali_kbase_js.h
   68: * It is a Programming Error to call this whilst there are still kbase_context
   85: * It is a Programming Error to call this whilst there are still kbase_context
   91: * @brief Initialize the Scheduling Component of a struct kbase_context on the Job Scheduler.
   93: * This effectively registers a struct kbase_context with a Job Scheduler.
   95: * It does not register any jobs owned by the struct kbase_context with the scheduler.
   98: * The struct kbase_context must be zero intitialized before passing to the
  101: int kbasep_js_kctx_init(struct kbase_context *const kctx);
  104: * @brief Terminate the Scheduling Component of a struct kbase_context on the Job Scheduler
  106: * This effectively de-registers a struct kbase_context from its Job Scheduler
  108: * It is safe to call this on a struct kbase_context tha [all...]
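The comments above spell out the contract for kbasep_js_kctx_init(): the context must be zero-initialized first, the call registers the context (not its jobs) with the Job Scheduler, and a matching terminate call undoes the registration. A minimal lifecycle sketch, assuming kbasep_js_kctx_term() is the terminate counterpart documented in the truncated block; in the real driver the context is created by kbase_create_context() with many more init steps, so this only isolates the JS pairing (my_js_ctx_create/destroy are hypothetical wrappers):

#include <linux/slab.h>     /* kzalloc()/kfree() */
#include "mali_kbase.h"     /* struct kbase_context */
#include "mali_kbase_js.h"  /* kbasep_js_kctx_init()/kbasep_js_kctx_term() */

static struct kbase_context *my_js_ctx_create(void)
{
    struct kbase_context *kctx;

    /* Zero-allocation satisfies the "must be zero initialized" rule
     * before the context is handed to the Job Scheduler. */
    kctx = kzalloc(sizeof(*kctx), GFP_KERNEL);
    if (!kctx)
        return NULL;

    /* Register this context with the Job Scheduler (no jobs yet). */
    if (kbasep_js_kctx_init(kctx)) {
        kfree(kctx);
        return NULL;
    }
    return kctx;
}

static void my_js_ctx_destroy(struct kbase_context *kctx)
{
    /* De-register from the Job Scheduler; safe even if no jobs ran. */
    kbasep_js_kctx_term(kctx);
    kfree(kctx);
}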
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/

mali_kbase_mem.h
   70: struct kbase_context *kctx;
  151: struct kbase_context *kctx;
  221: struct kbase_context *kctx; /* Backlink to base context */
  396: struct kbase_context *kctx) in kbase_reg_prepare_native()
  618: int kbase_region_tracker_init(struct kbase_context *kctx);
  619: int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages);
  620: void kbase_region_tracker_term(struct kbase_context *kctx);
  622: struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr);
  629: struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr);
  631: struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kct [all...]

mali_kbase_mem_linux.h
   35: struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
   38: int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 *const pages);
   39: int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
   42: u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, u64 nents, struct base_mem_aliasing_info *ai, u64 *num_pages);
   43: int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask);
   54: int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
   65: int kbase_mem_evictable_init(struct kbase_context *kctx);
   72: void kbase_mem_evictable_deinit(struct kbase_context *kctx);
   88: int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
  173: void *kbase_vmap_prot(struct kbase_context *kct [all...]

mali_kbase_js.h
   72: * It is a Programming Error to call this whilst there are still kbase_context
   89: * It is a Programming Error to call this whilst there are still kbase_context
   95: * @brief Initialize the Scheduling Component of a struct kbase_context on the Job Scheduler.
   97: * This effectively registers a struct kbase_context with a Job Scheduler.
   99: * It does not register any jobs owned by the struct kbase_context with the scheduler.
  102: * The struct kbase_context must be zero intitialized before passing to the
  105: int kbasep_js_kctx_init(struct kbase_context * const kctx);
  108: * @brief Terminate the Scheduling Component of a struct kbase_context on the Job Scheduler
  110: * This effectively de-registers a struct kbase_context from its Job Scheduler
  112: * It is safe to call this on a struct kbase_context tha [all...]
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/

mali_kbase_mem_linux.h
   53: struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
   70: int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, u64 query,
   87: int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
  104: u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
  118: int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr,
  130: int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
  141: int kbase_mem_shrink(struct kbase_context *kctx,
  153: int kbase_context_mmap(struct kbase_context *kctx, struct vm_area_struct *vma);
  162: int kbase_mem_evictable_init(struct kbase_context *kctx);
  169: void kbase_mem_evictable_deinit(struct kbase_context *kct [all...]

mali_kbase_ctx_sched.h
   30: * counting to kbase_context. The interface has been designed to minimise
   64: * kbase_ctx_sched_retain_ctx - Retain a reference to the @ref kbase_context
   80: int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx);
   95: void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx);
   98: * kbase_ctx_sched_release_ctx - Release a reference to the @ref kbase_context
  108: void kbase_ctx_sched_release_ctx(struct kbase_context *kctx);
  121: void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx);
  151: * Return: a valid struct kbase_context on success, which has been refcounted
  155: struct kbase_context *kbase_ctx_sched_as_to_ctx_refcount(struct kbase_device *kbdev, size_t as_nr);
  163: * Return: a valid struct kbase_context o [all...]
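mali_kbase_ctx_sched.h adds address-space reference counting to kbase_context: kbase_ctx_sched_retain_ctx() pins the context to a GPU address space, kbase_ctx_sched_release_ctx() drops that reference, and kbase_ctx_sched_as_to_ctx_refcount() looks up (and refcounts) the context currently bound to an address space. A minimal sketch of the retain/release pairing, assuming retain returns the assigned address-space number or a negative value on failure and that the caller already holds whatever kbdev locks the real driver requires (my_run_on_gpu_as is a hypothetical helper):

#include "mali_kbase.h"            /* struct kbase_context */
#include "mali_kbase_ctx_sched.h"  /* retain/release declarations above */

static int my_run_on_gpu_as(struct kbase_context *kctx)
{
    int as_nr;

    /* Assumed behaviour: returns the assigned address-space number,
     * or a negative value if none could be assigned. */
    as_nr = kbase_ctx_sched_retain_ctx(kctx);
    if (as_nr < 0)
        return as_nr;

    /* ... program work against address space 'as_nr' ... */

    /* Drop the reference taken above; the scheduler may now reassign
     * the address space to another context. */
    kbase_ctx_sched_release_ctx(kctx);
    return 0;
}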
mali_kbase_mem.h
   43: static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages);
   68: struct kbase_context *kctx;
  143: struct kbase_context *kctx;
  156: struct kbase_context *kctx;
  488: static inline struct kbase_va_region *kbase_va_region_alloc_get(struct kbase_context *kctx, in kbase_va_region_alloc_get()
  502: static inline struct kbase_va_region *kbase_va_region_alloc_put(struct kbase_context *kctx, in kbase_va_region_alloc_put()
  558: static inline struct kbase_mem_phy_alloc *kbase_alloc_create(struct kbase_context *kctx, size_t nr_pages, in kbase_alloc_create()
  615: static inline int kbase_reg_prepare_native(struct kbase_va_region *reg, struct kbase_context *kctx, int group_id) in kbase_reg_prepare_native()
  974: int kbase_region_tracker_init(struct kbase_context *kctx);
  992: int kbase_region_tracker_init_jit(struct kbase_context *kct [all...]

mali_kbase.h
  164: unsigned long kbase_context_get_unmapped_area(struct kbase_context *kctx, const unsigned long addr,
  222: int kbase_jd_init(struct kbase_context *kctx);
  223: void kbase_jd_exit(struct kbase_context *kctx);
  236: int kbase_jd_submit(struct kbase_context *kctx, void __user *user_addr, u32 nr_atoms, u32 stride, bool uk6_atom);
  260: void kbase_jd_zap_context(struct kbase_context *kctx);
  289: void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx, struct kbase_jd_atom *katom);
  304: int kbase_job_slot_softstop_start_rp(struct kbase_context *kctx, struct kbase_va_region *reg);
  309: void kbase_job_slot_hardstop(struct kbase_context *kctx, int js, struct kbase_jd_atom *target_katom);
  316: void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom);
  318: int kbase_event_dequeue(struct kbase_context *ct [all...]
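The mali_kbase.h matches above outline the per-context job-dispatch lifecycle: kbase_jd_init() sets up the dispatcher, kbase_jd_submit() copies an array of atoms from user space, kbase_jd_zap_context() cancels anything still in flight, and kbase_jd_exit() tears the dispatcher down. A minimal sketch of that ordering, with the atom layout left to the caller (my_submit_atoms is a hypothetical wrapper and 'stride' is the user-side size of one atom record):

#include "mali_kbase.h"  /* kbase_jd_init()/kbase_jd_submit()/kbase_jd_exit() */

/* Hypothetical wrapper: push 'nr_atoms' atoms laid out back-to-back in user
 * memory ('stride' bytes apart) into the job dispatcher of kctx. */
static int my_submit_atoms(struct kbase_context *kctx,
                           void __user *user_atoms, u32 nr_atoms, u32 stride)
{
    /* false: assume the atoms use the current base_jd_atom layout rather
     * than the legacy one selected by the uk6_atom flag in the prototype. */
    return kbase_jd_submit(kctx, user_atoms, nr_atoms, stride, false);
}

/* Typical ordering around the calls above (error handling elided):
 *
 *   kbase_jd_init(kctx);            set up the per-context dispatcher
 *   my_submit_atoms(kctx, ...);     one or more submissions
 *   kbase_jd_zap_context(kctx);     cancel/flush outstanding atoms on close
 *   kbase_jd_exit(kctx);            free dispatcher resources
 */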
mali_kbase_hwaccess_jm.h
   65: int kbase_backend_find_and_release_free_address_space(struct kbase_device *kbdev, struct kbase_context *kctx);
   78: bool kbase_backend_use_ctx(struct kbase_device *kbdev, struct kbase_context *kctx, int as_nr);
   97: bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev, struct kbase_context *kctx, int js);
  107: void kbase_backend_release_ctx_irq(struct kbase_device *kbdev, struct kbase_context *kctx);
  120: void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev, struct kbase_context *kctx);
  247: void kbase_backend_jm_kill_running_jobs_from_kctx(struct kbase_context *kctx);
  257: void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx);
  278: void kbase_job_slot_hardstop(struct kbase_context *kctx, int js, struct kbase_jd_atom *target_katom);
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/

mali_kbase_mem_linux.h
   53: kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages,
   70: int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, u64 query,
   87: int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
  104: u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, u64 nents, struct base_mem_aliasing_info *ai, u64 *num_pages);
  116: int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask);
  127: int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
  138: int kbase_mem_shrink(struct kbase_context *kctx,
  150: int kbase_context_mmap(struct kbase_context *kctx, struct vm_area_struct *vma);
  159: int kbase_mem_evictable_init(struct kbase_context *kctx);
  166: void kbase_mem_evictable_deinit(struct kbase_context *kct [all...]

mali_kbase_ctx_sched.h
   29: * counting to kbase_context. The interface has been designed to minimise
   63: * kbase_ctx_sched_retain_ctx - Retain a reference to the @ref kbase_context
   79: int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx);
   94: void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx);
   97: * kbase_ctx_sched_release_ctx - Release a reference to the @ref kbase_context
  107: void kbase_ctx_sched_release_ctx(struct kbase_context *kctx);
  120: void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx);
  150: * Return: a valid struct kbase_context on success, which has been refcounted
  154: struct kbase_context *kbase_ctx_sched_as_to_ctx_refcount(
  163: * Return: a valid struct kbase_context o [all...]

mali_kbase_mem.h
   41: static inline void kbase_process_page_usage_inc(struct kbase_context *kctx,
   69: struct kbase_context *kctx;
  150: struct kbase_context *kctx;
  163: struct kbase_context *kctx;
  566: struct kbase_context *kctx, struct kbase_va_region *region) in kbase_va_region_alloc_get()
  581: struct kbase_context *kctx, struct kbase_va_region *region) in kbase_va_region_alloc_put()
  638: struct kbase_context *kctx, size_t nr_pages, in kbase_alloc_create()
  697: struct kbase_context *kctx, int group_id) in kbase_reg_prepare_native()
 1069: int kbase_region_tracker_init(struct kbase_context *kctx);
 1087: int kbase_region_tracker_init_jit(struct kbase_context *kct [all...]

mali_kbase_hwaccess_jm.h
   66: struct kbase_device *kbdev, struct kbase_context *kctx);
   80: struct kbase_context *kctx,
  101: struct kbase_context *kctx, int js);
  112: struct kbase_context *kctx);
  126: struct kbase_context *kctx);
  259: void kbase_backend_jm_kill_running_jobs_from_kctx(struct kbase_context *kctx);
  269: void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx);
  290: void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
  317: void kbase_backend_slot_kctx_purge_locked(struct kbase_device *kbdev, struct kbase_context *kctx);
mali_kbase.h
  163: unsigned long kbase_context_get_unmapped_area(struct kbase_context *kctx,
  218: int kbase_jd_init(struct kbase_context *kctx);
  219: void kbase_jd_exit(struct kbase_context *kctx);
  232: int kbase_jd_submit(struct kbase_context *kctx,
  258: void kbase_jd_zap_context(struct kbase_context *kctx);
  288: void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
  304: int kbase_job_slot_softstop_start_rp(struct kbase_context *kctx,
  311: void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
  320: void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event);
  322: int kbase_event_dequeue(struct kbase_context *ct [all...]
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/csf/

mali_kbase_csf_event.h
   28: struct kbase_context;
   59: int kbase_csf_event_wait_add(struct kbase_context *kctx,
   73: void kbase_csf_event_wait_remove(struct kbase_context *kctx,
   84: void kbase_csf_event_term(struct kbase_context *kctx);
   98: void kbase_csf_event_signal(struct kbase_context *kctx, bool notify_gpu);
  100: static inline void kbase_csf_event_signal_notify_gpu(struct kbase_context *kctx) in kbase_csf_event_signal_notify_gpu()
  105: static inline void kbase_csf_event_signal_cpu_only(struct kbase_context *kctx) in kbase_csf_event_signal_cpu_only()
  117: void kbase_csf_event_init(struct kbase_context *const kctx);
  132: bool kbase_csf_event_read_error(struct kbase_context *kctx,
  150: void kbase_csf_event_add_error(struct kbase_context *cons [all...]

mali_kbase_csf.h
   57: int kbase_csf_ctx_init(struct kbase_context *kctx);
   69: void kbase_csf_ctx_handle_fault(struct kbase_context *kctx,
   80: void kbase_csf_ctx_term(struct kbase_context *kctx);
   93: int kbase_csf_queue_register(struct kbase_context *kctx,
  109: int kbase_csf_queue_register_ex(struct kbase_context *kctx,
  120: void kbase_csf_queue_terminate(struct kbase_context *kctx,
  137: int kbase_csf_alloc_command_stream_user_pages(struct kbase_context *kctx,
  149: int kbase_csf_queue_bind(struct kbase_context *kctx,
  181: int kbase_csf_queue_kick(struct kbase_context *kctx,
  196: int kbase_csf_queue_group_handle_is_valid(struct kbase_context *kct [all...]
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/jm/

mali_kbase_jm_js.h
   54: * It is a programming error to call this whilst there are still kbase_context
   72: * It is a programming error to call this whilst there are still kbase_context
   79: * struct kbase_context on the Job Scheduler.
   80: * @kctx: The kbase_context to operate on
   82: * This effectively registers a struct kbase_context with a Job Scheduler.
   84: * It does not register any jobs owned by the struct kbase_context with
   87: * The struct kbase_context must be zero initialized before passing to the
   90: int kbasep_js_kctx_init(struct kbase_context *const kctx);
   94: * struct kbase_context on the Job Scheduler
   95: * @kctx: The kbase_context t [all...]
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/context/

mali_kbase_context_internal.h
   36: typedef int kbase_context_init_method(struct kbase_context *kctx);
   37: typedef void kbase_context_term_method(struct kbase_context *kctx);
   51: int kbase_context_common_init(struct kbase_context *kctx);
   52: void kbase_context_common_term(struct kbase_context *kctx);
   54: int kbase_context_mem_pool_group_init(struct kbase_context *kctx);
   55: void kbase_context_mem_pool_group_term(struct kbase_context *kctx);
   57: int kbase_context_mmu_init(struct kbase_context *kctx);
   58: void kbase_context_mmu_term(struct kbase_context *kctx);
   60: int kbase_context_mem_alloc_page(struct kbase_context *kctx);
   61: void kbase_context_mem_pool_free(struct kbase_context *kct [all...]
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/jm/

mali_kbase_jm_js.h
   53: * It is a programming error to call this whilst there are still kbase_context
   70: * It is a programming error to call this whilst there are still kbase_context
   77: * struct kbase_context on the Job Scheduler.
   79: * This effectively registers a struct kbase_context with a Job Scheduler.
   81: * It does not register any jobs owned by the struct kbase_context with
   84: * The struct kbase_context must be zero initialized before passing to the
   87: int kbasep_js_kctx_init(struct kbase_context *const kctx);
   91: * struct kbase_context on the Job Scheduler
   93: * This effectively de-registers a struct kbase_context from its Job Scheduler
   95: * It is safe to call this on a struct kbase_context tha [all...]
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/context/backend/

mali_kbase_context_jm.c
   41: void kbase_context_debugfs_init(struct kbase_context *const kctx) in kbase_context_debugfs_init()
   50: void kbase_context_debugfs_term(struct kbase_context *const kctx) in kbase_context_debugfs_term()
   56: void kbase_context_debugfs_init(struct kbase_context *const kctx) in kbase_context_debugfs_init()
   62: void kbase_context_debugfs_term(struct kbase_context *const kctx) in kbase_context_debugfs_term()
   69: static int kbase_context_kbase_kinstr_jm_init(struct kbase_context *kctx) in kbase_context_kbase_kinstr_jm_init()
   74: static void kbase_context_kbase_kinstr_jm_term(struct kbase_context *kctx) in kbase_context_kbase_kinstr_jm_term()
   79: static int kbase_context_kbase_timer_setup(struct kbase_context *kctx) in kbase_context_kbase_timer_setup()
   87: static int kbase_context_submit_check(struct kbase_context *kctx) in kbase_context_submit_check()
  107: static void kbase_context_flush_jobs(struct kbase_context *kctx) in kbase_context_flush_jobs()
  113: static void kbase_context_free(struct kbase_context *kct [all...]
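mali_kbase_context_jm.c holds small per-subsystem init/term helpers such as kbase_context_kbase_kinstr_jm_init()/_term(); together with the kbase_context_init_method / kbase_context_term_method typedefs from mali_kbase_context_internal.h they suggest a table-driven context setup, where each entry's init runs in order and already-initialized entries are torn down in reverse on failure. The struct name kbase_context_init_entry and the walk helper below are illustrative assumptions, not the driver's actual definitions; only the typedefs and the individual init/term prototypes come from the files indexed above:

#include <linux/kernel.h>                 /* ARRAY_SIZE() */
#include "mali_kbase.h"                   /* struct kbase_context */
#include "mali_kbase_context_internal.h"  /* init/term method typedefs above */

/* Assumed table entry type pairing an init method with its undo step. */
struct kbase_context_init_entry {
    kbase_context_init_method *init;
    kbase_context_term_method *term;
};

/* Illustrative table: the real driver's list is longer and ordered to match
 * subsystem dependencies. */
static const struct kbase_context_init_entry my_context_init[] = {
    { kbase_context_common_init,         kbase_context_common_term },
    { kbase_context_mem_pool_group_init, kbase_context_mem_pool_group_term },
    { kbase_context_mmu_init,            kbase_context_mmu_term },
};

/* Run each init in order; on failure, terminate what already succeeded,
 * in reverse order, and return the error. */
static int my_context_init_all(struct kbase_context *kctx)
{
    int i, err;

    for (i = 0; i < (int)ARRAY_SIZE(my_context_init); i++) {
        err = my_context_init[i].init(kctx);
        if (err) {
            while (--i >= 0)
                my_context_init[i].term(kctx);
            return err;
        }
    }
    return 0;
}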