| /device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/backend/gpu/ |
| mali_kbase_jm_rb.c | 627 kbase_gpu_release_atom() local, 1289 kbase_gpu_complete_hw() local, 1613 kbase_backend_soft_hard_stop_slot() argument [all...] |
| /device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/ |
| mali_kbase_replay.c | 67 dump_job_head() argument, 108 kbasep_replay_reset_sfbd() argument, 190 kbasep_replay_reset_mfbd() argument, 289 kbasep_replay_reset_tiler_job() argument, 372 kbasep_replay_reset_job() argument, 491 kbasep_replay_find_hw_job_id() argument, 544 kbasep_replay_parse_jc() argument, 607 kbasep_allocate_katom() argument, 635 kbasep_release_katom() argument, 651 kbasep_replay_create_atom() argument, 688 kbasep_replay_create_atoms() argument, 717 payload_dump() argument, 754 kbasep_replay_parse_payload() argument, 920 kbase_replay_process_worker() local, 994 kbase_replay_fault_check() local, 1100 kbase_replay_process() local [all...] |
| mali_kbase_js.c | 82 kbasep_js_trace_get_refcnt() argument, 88 kbasep_js_trace_get_refcnt() argument, 139 kbasep_js_runpool_retain_ctx_nolock() argument, 179 jsctx_rb_none_to_pull_prio() argument, 200 jsctx_rb_none_to_pull() argument, 232 jsctx_queue_foreach_prio() argument, 269 jsctx_queue_foreach() argument, 290 jsctx_rb_peek_prio() argument, 318 jsctx_rb_peek() argument, 345 jsctx_rb_pull() argument, 362 jsctx_tree_add() argument, 398 jsctx_rb_unpull() argument, 557 kbasep_js_kctx_init() argument, 599 kbasep_js_kctx_term() argument, 653 kbase_js_ctx_list_add_pullable_nolock() argument, 694 kbase_js_ctx_list_add_pullable_head_nolock() argument, 736 kbase_js_ctx_list_add_pullable_head() argument, 767 kbase_js_ctx_list_add_unpullable_nolock() argument, 808 kbase_js_ctx_list_remove_nolock() argument, 850 kbase_js_ctx_list_pop_head_nolock() local, 878 kbase_js_ctx_list_pop_head() local, 900 kbase_js_ctx_pullable() argument, 934 kbase_js_dep_validate() argument, 1064 kbasep_js_add_job() argument, 1172 kbasep_js_remove_job() argument, 1193 kbasep_js_remove_cancelled_job() argument, 1225 kbasep_js_runpool_retain_ctx() argument, 1293 kbasep_js_run_jobs_after_ctx_and_atom_release() argument, 1350 kbasep_js_runpool_release_ctx_internal() argument, 1506 kbasep_js_runpool_release_ctx_nolock() argument, 1518 kbasep_js_runpool_requeue_or_kill_ctx() argument, 1541 kbasep_js_runpool_release_ctx_and_katom_retained_state() argument, 1576 kbasep_js_runpool_release_ctx() argument, 1589 kbasep_js_runpool_release_ctx_no_schedule() argument, 1631 kbasep_js_schedule_ctx() argument, 1767 kbase_js_use_ctx() argument, 1787 kbasep_js_schedule_privileged_ctx() argument, 1837 kbasep_js_release_privileged_ctx() argument, 1877 kbasep_js_suspend() local, 1902 kbasep_js_suspend() local, 1923 kbasep_js_resume() local, 1991 kbase_js_dep_resolved_submit() argument, 2075 kbase_js_evict_deps() argument, 2105 kbase_js_pull() argument, 2179 js_return_worker() local, 2285 kbase_js_unpull() argument, 2303 kbase_js_complete_atom_wq() argument, 2409 kbase_js_complete_atom() local, 2492 kbase_js_sched() local, 2642 kbase_js_zap_context() argument, 2789 trace_get_refcnt() argument, 2813 kbase_js_foreach_ctx_job() argument [all...] |
| mali_kbase_mem.h | 70 kctx member, 151 kctx member, 221 kctx member (backlink to base context), 395 kbase_reg_prepare_native() argument, 786 kbase_process_page_usage_inc() argument, 801 kbase_process_page_usage_dec() argument [all...] |
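The `kbase_process_page_usage_inc()`/`kbase_process_page_usage_dec()` pair listed above is per-context page accounting. A minimal userspace-style sketch of that pattern, using a C11 atomic counter as a stand-in for the driver's real bookkeeping (the `used_pages` field and helper names here are invented for illustration, not the driver's real members):

```c
/*
 * Illustrative stand-in for the kbase_process_page_usage_inc()/dec() pair:
 * per-context page accounting around an atomic counter. Not the real
 * kernel implementation; C11 atomics replace the kernel's primitives.
 */
#include <stdatomic.h>

struct ctx_page_usage {
    atomic_long used_pages;   /* pages currently charged to this context */
};

static inline void page_usage_inc(struct ctx_page_usage *kctx, int pages)
{
    atomic_fetch_add_explicit(&kctx->used_pages, pages, memory_order_relaxed);
}

static inline void page_usage_dec(struct ctx_page_usage *kctx, int pages)
{
    atomic_fetch_sub_explicit(&kctx->used_pages, pages, memory_order_relaxed);
}
```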
| mali_kbase_mem_linux.c | 84 kbase_mem_alloc() argument, 259 kbase_mem_query() argument, 340 kbase_mem_evictable_reclaim_count_objects() local, 378 kbase_mem_evictable_reclaim_scan_objects() local, 439 kbase_mem_evictable_init() argument, 461 kbase_mem_evictable_deinit() argument, 472 kbase_mem_evictable_mark_reclaim() local, 492 kbase_mem_evictable_unmark_reclaim() local, 511 kbase_mem_evictable_make() local, 536 kbase_mem_evictable_unmake() local, 580 kbase_mem_flags_change() argument, 694 kbase_mem_from_ump() argument, 812 kbase_mem_from_umm() argument, 908 kbase_get_cache_line_alignment() argument, 919 kbase_mem_from_user_buffer() argument, 1103 kbase_mem_alias() argument, 1292 kbase_mem_import() argument, 1439 kbase_mem_grow_gpu_mapping() argument, 1457 kbase_mem_shrink_cpu_mapping() argument, 1472 kbase_mem_shrink_gpu_mapping() argument, 1485 kbase_mem_commit() argument, 1813 kbase_trace_buffer_mmap() argument, 1898 kbase_mmu_dump_mmap() argument, 1956 kbase_os_mem_map_lock() argument, 1963 kbase_os_mem_map_unlock() argument, 1970 kbasep_reg_mmap() argument, 2040 local (kctx = file->private_data), 2181 kbase_vmap_prot() argument, 2303 kbase_vmap() argument, 2315 kbase_vunmap() argument, 2367 kbasep_os_process_page_usage_update() argument, 2386 kbasep_os_process_page_usage_drain() argument, 2414 local, 2424 kbase_tracking_page_setup() argument, 2449 kbase_va_alloc() argument, 2542 kbase_va_free() argument [all...] |
| /third_party/node/deps/ngtcp2/ngtcp2/crypto/openssl/ |
| openssl.c | 331 ngtcp2_crypto_hkdf_extract() local, 386 ngtcp2_crypto_hkdf_expand() local, 440 ngtcp2_crypto_hkdf() local |
| /third_party/node/deps/openssl/openssl/ssl/ |
| tls13_enc.c | 41 tls13_hkdf_expand() local, 169 tls13_generate_secret() local [all...] |
| /third_party/openssl/ssl/ |
| tls13_enc.c | 41 tls13_hkdf_expand() local, 169 tls13_generate_secret() local [all...] |
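The `kctx` hits in openssl.c and tls13_enc.c above are `EVP_KDF_CTX` handles used for HKDF extract/expand. A minimal sketch of that pattern against the OpenSSL 3.x EVP_KDF API follows; the helper name `hkdf_sha256_expand()` and the fixed SHA-256 digest are assumptions for the example, not code taken from those files:

```c
/*
 * Sketch of the OpenSSL 3.x EVP_KDF pattern behind the "EVP_KDF_CTX *kctx"
 * hits above (ngtcp2_crypto_hkdf*, tls13_hkdf_expand). Illustrative only;
 * the real files wire the digest, secret and info from their own callers.
 */
#include <openssl/evp.h>
#include <openssl/kdf.h>
#include <openssl/params.h>
#include <openssl/core_names.h>

static int hkdf_sha256_expand(unsigned char *out, size_t outlen,
                              unsigned char *secret, size_t secretlen,
                              unsigned char *info, size_t infolen)
{
    EVP_KDF *kdf = EVP_KDF_fetch(NULL, "HKDF", NULL);
    EVP_KDF_CTX *kctx = kdf ? EVP_KDF_CTX_new(kdf) : NULL;
    int mode = EVP_KDF_HKDF_MODE_EXPAND_ONLY;
    int rv = 0;

    if (kctx != NULL) {
        OSSL_PARAM params[] = {
            OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST,
                                             (char *)"SHA256", 0),
            OSSL_PARAM_construct_int(OSSL_KDF_PARAM_MODE, &mode),
            OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY,
                                              secret, secretlen),
            OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_INFO,
                                              info, infolen),
            OSSL_PARAM_construct_end()
        };
        /* One-shot derive; params could also go through EVP_KDF_CTX_set_params(). */
        rv = EVP_KDF_derive(kctx, out, outlen, params);
    }
    EVP_KDF_CTX_free(kctx);
    EVP_KDF_free(kdf);
    return rv; /* 1 on success, 0 on error */
}
```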
| /device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/ |
| mali_kbase_js.c | 77 kbasep_js_trace_get_refcnt() argument, 82 kbasep_js_trace_get_refcnt() argument, 137 kbasep_js_runpool_retain_ctx_nolock() argument, 174 jsctx_rb_none_to_pull_prio() argument, 194 jsctx_rb_none_to_pull() argument, 226 jsctx_queue_foreach_prio() argument, 259 jsctx_queue_foreach() argument, 279 jsctx_rb_peek_prio() argument, 307 jsctx_rb_peek() argument, 334 jsctx_rb_pull() argument, 350 jsctx_tree_add() argument, 385 jsctx_rb_unpull() argument, 526 kbasep_js_kctx_init() argument, 568 kbasep_js_kctx_term() argument, 623 kbase_js_ctx_list_add_pullable_nolock() argument, 662 kbase_js_ctx_list_add_pullable_head_nolock() argument, 703 kbase_js_ctx_list_add_pullable_head() argument, 732 kbase_js_ctx_list_add_unpullable_nolock() argument, 770 kbase_js_ctx_list_remove_nolock() argument, 808 kbase_js_ctx_list_pop_head_nolock() local, 835 kbase_js_ctx_list_pop_head() local, 857 kbase_js_ctx_pullable() argument, 894 kbase_js_dep_validate() argument, 1008 kbasep_js_add_job() argument, 1114 kbasep_js_remove_job() argument, 1133 kbasep_js_remove_cancelled_job() argument, 1163 kbasep_js_runpool_retain_ctx() argument, 1229 kbasep_js_run_jobs_after_ctx_and_atom_release() argument, 1284 kbasep_js_runpool_release_ctx_internal() argument, 1431 kbasep_js_runpool_release_ctx_nolock() argument, 1441 kbasep_js_runpool_requeue_or_kill_ctx() argument, 1462 kbasep_js_runpool_release_ctx_and_katom_retained_state() argument, 1497 kbasep_js_runpool_release_ctx() argument, 1508 kbasep_js_runpool_release_ctx_no_schedule() argument, 1548 kbasep_js_schedule_ctx() argument, 1681 kbase_js_use_ctx() argument, 1699 kbasep_js_schedule_privileged_ctx() argument, 1748 kbasep_js_release_privileged_ctx() argument, 1787 kbasep_js_suspend() local, 1810 kbasep_js_suspend() local, 1832 kbasep_js_resume() local, 1899 kbase_js_dep_resolved_submit() argument, 1978 kbase_js_evict_deps() argument, 2005 kbase_js_pull() argument, 2082 js_return_worker() local, 2185 kbase_js_unpull() argument, 2203 kbase_js_complete_atom_wq() argument, 2300 kbase_js_complete_atom() local, 2378 kbase_js_sched() local, 2500 kbase_js_zap_context() argument, 2644 trace_get_refcnt() argument, 2667 kbase_js_foreach_ctx_job() argument [all...] |
| mali_kbase_jd.c | 56 get_compat_pointer() argument, 75 jd_run_atom() local, 145 kbase_jd_kds_waiters_add() local, 628 jd_check_force_failure() local, 750 jd_done_nolock() local, 929 jd_submit_atom() argument, 1216 kbase_jd_submit() argument, 1385 kbase_jd_done_worker() local, 1564 jd_cancel_worker() local, 1628 kbase_jd_done() local, 1666 kbase_jd_cancel() local, 1687 kbase_jd_zap_context() argument, 1752 kbase_jd_init() argument, 1815 kbase_jd_exit() argument [all...] |
| mali_kbase_mem_linux.c | 77 kbase_mem_alloc() argument, 251 kbase_mem_query() argument, 339 kbase_mem_evictable_reclaim_count_objects() local, 374 kbase_mem_evictable_reclaim_scan_objects() local, 436 kbase_mem_evictable_init() argument, 458 kbase_mem_evictable_deinit() argument, 469 kbase_mem_evictable_mark_reclaim() local, 485 kbase_mem_evictable_unmark_reclaim() local, 501 kbase_mem_evictable_make() local, 525 kbase_mem_evictable_unmake() local, 567 kbase_mem_flags_change() argument, 692 kbase_mem_from_ump() argument, 816 kbase_mem_from_umm() argument, 922 kbase_get_cache_line_alignment() argument, 930 kbase_mem_from_user_buffer() argument, 1107 kbase_mem_alias() argument, 1306 kbase_mem_import() argument, 1447 kbase_mem_grow_gpu_mapping() argument, 1462 kbase_mem_shrink_cpu_mapping() argument, 1476 kbase_mem_shrink_gpu_mapping() argument, 1487 kbase_mem_commit() argument, 1816 kbase_trace_buffer_mmap() argument, 1901 kbase_mmu_dump_mmap() argument, 1958 kbase_os_mem_map_lock() argument, 1965 kbase_os_mem_map_unlock() argument, 1972 kbasep_reg_mmap() argument, 2038 local (kctx = file->private_data), 2173 kbase_vmap_prot() argument, 2304 kbase_vmap() argument, 2315 kbase_vunmap() argument, 2365 kbasep_os_process_page_usage_update() argument, 2384 kbasep_os_process_page_usage_drain() argument, 2412 local, 2422 kbase_tracking_page_setup() argument, 2447 kbase_va_alloc() argument, 2543 kbase_va_free() argument [all...] |
| mali_kbase_mem.h | 68 kctx member, 149 kctx member, 221 kctx member (backlink to base context), 399 kbase_reg_prepare_native() argument, 777 kbase_process_page_usage_inc() argument, 792 kbase_process_page_usage_dec() argument [all...] |
| mali_kbase_mmu.c | 122 page_fault_worker() local, 362 kbase_mmu_alloc_pgd() argument, 410 mmu_get_next_pgd() argument, 456 mmu_get_bottom_pgd() argument, 478 mmu_insert_pages_recover_get_next_pgd() argument, 508 mmu_insert_pages_recover_get_bottom_pgd() argument, 526 mmu_insert_pages_failure_recovery() argument, 577 kbase_mmu_insert_single_page() argument, 682 kbase_mmu_insert_pages_no_flush() argument, 791 kbase_mmu_insert_pages() argument, 813 kbase_mmu_flush_invalidate_noretain() argument, 856 kbase_mmu_flush_invalidate() argument, 932 kbase_mmu_update() argument, 954 kbase_mmu_disable() argument, 989 kbase_mmu_teardown_pages() argument, 1072 kbase_mmu_update_pages() argument, 1156 mmu_check_unused() argument, 1175 mmu_teardown_level() argument, 1223 kbase_mmu_init() argument, 1240 kbase_mmu_term() argument, 1249 kbase_mmu_free_pgd() argument, 1271 kbasep_mmu_dump_level() argument, 1327 kbase_mmu_dump() argument, 1403 bus_fault_worker() local, 1672 kbase_mmu_report_fault_and_kill() argument, 1768 kbasep_as_do_poke() local, 1826 kbase_as_poking_timer_retain_atom() argument, 1866 kbase_as_poking_timer_release_atom() argument, 1924 kbase_mmu_interrupt_process() argument [all...] |
| mali_kbase_softjobs.c | 47 kbasep_add_waiting_soft_job() local, 57 kbasep_remove_waiting_soft_job() local, 67 kbasep_add_waiting_with_timeout() local, 86 kbasep_read_soft_event_status() argument, 103 kbasep_write_soft_event_status() argument, 133 kbase_dump_cpu_gpu_time() local, 194 kbase_soft_event_wait_callback() local, 209 kbasep_soft_event_complete_job() local, 221 kbasep_complete_triggered_soft_events() argument, 267 kbase_fence_debug_check_atom() local, 297 kbase_fence_debug_wait_timeout() local, 332 kbase_fence_debug_wait_timeout_worker() local, 344 kbase_fence_debug_timeout() local, 361 kbasep_soft_job_timeout_worker() local, 406 kbasep_soft_event_wait() local, 427 kbasep_soft_event_update_locked() local, 449 kbase_soft_event_update() argument, 695 kbase_mem_copy_from_extres_page() argument, 771 kbase_mem_copy_from_extres() argument, 881 kbase_jit_allocate_prepare() local, 958 kbase_jit_allocate_process() local, 1076 kbase_jit_free_prepare() local, 1086 kbase_jit_free_process() local, 1110 kbasep_jit_free_finish_worker() local, 1126 kbase_jit_free_finish() local, 1492 kbase_resume_suspended_soft_jobs() local [all...] |
| mali_kbase_uku.h | 280 union kbase_pointer kctx member (base context created by process) |
| /device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/mmu/ |
| mali_kbase_mmu.c | 340 kbase_gpu_mmu_handle_write_fault() argument, 416 kbase_gpu_mmu_handle_permission_fault() argument, 470 page_fault_try_alloc() argument, 647 release_ctx() argument, 666 kbase_mmu_page_fault_worker() local, 1368 kbase_mmu_insert_single_page() argument, 1730 kbase_mmu_flush_invalidate_noretain() argument, 1863 kbase_mmu_flush_invalidate() argument, 1914 kbase_mmu_disable() argument, 2174 kbase_mmu_update_pages_no_flush() argument, 2275 kbase_mmu_update_pages() argument, 2339 kbase_mmu_init() argument, 2402 kbasep_mmu_dump_level() argument, 2466 kbase_mmu_dump() argument, 2546 kbase_mmu_bus_fault_worker() local [all...] |
| /device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/jm/ |
| mali_kbase_jm_defs.h | 506 kctx member |
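The `kctx` member at line 506 is the backlink from a job atom to its owning context, which is why so many entries above read `struct kbase_context *kctx = katom->kctx;`. A reduced stand-in sketch of that relationship (struct layouts simplified for illustration, not the real kbase definitions):

```c
/*
 * Minimal sketch of the backlink pattern the listings show repeatedly:
 * code handed only a job atom recovers the context that owns it.
 * The struct contents here are stand-ins, not the driver's real fields.
 */
#include <stddef.h>

struct kbase_context {
    int id;                      /* stand-in for per-process driver state */
};

struct kbase_jd_atom {
    struct kbase_context *kctx;  /* backlink to the owning context */
    int status;
};

static struct kbase_context *atom_get_ctx(struct kbase_jd_atom *katom)
{
    return katom ? katom->kctx : NULL;
}
```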
| /device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/ |
| mali_kbase_jd.c | 63 get_compat_pointer() argument, 91 jd_run_atom() local, 578 jd_update_jit_usage() local, 719 jd_done_nolock() local, 892 jd_trace_atom_submit() argument, 906 jd_submit_atom() argument, 1240 kbase_jd_submit() argument, 1419 kbase_jd_done_worker() local, 1607 jd_cancel_worker() local, 1672 kbase_jd_done() local, 1708 kbase_jd_cancel() local, 1730 kbase_jd_zap_context() argument, 1780 kbase_jd_init() argument, 1841 kbase_jd_exit() argument [all...] |
| mali_kbase_softjobs.c | 51 kbasep_add_waiting_soft_job() local, 61 kbasep_remove_waiting_soft_job() local, 71 kbasep_add_waiting_with_timeout() local, 92 kbasep_read_soft_event_status() argument, 110 kbasep_write_soft_event_status() argument, 141 kbase_dump_cpu_gpu_time() local, 211 kbase_soft_event_wait_callback() local, 226 kbasep_soft_event_complete_job() local, 237 kbasep_complete_triggered_soft_events() argument, 284 kbase_fence_debug_check_atom() local, 318 kbase_fence_debug_wait_timeout() local, 358 kbase_fence_debug_wait_timeout_worker() local, 370 kbase_fence_debug_timeout() local, 387 kbasep_soft_job_timeout_worker() local, 434 kbasep_soft_event_wait() local, 455 kbasep_soft_event_update_locked() local, 476 kbase_soft_event_update() argument, 763 kbase_mem_copy_from_extres() argument, 881 kbasep_jit_alloc_validate() argument, 960 kbase_jit_allocate_prepare() local, 1053 kbase_jit_add_to_pending_alloc_list() local, 1072 kbase_jit_allocate_process() local, 1272 kbase_jit_free_prepare() local, 1328 kbase_jit_free_process() local, 1353 kbasep_jit_finish_worker() local, 1365 kbase_jit_retry_pending_alloc() argument, 1391 kbase_jit_free_finish() local, 1537 kbase_process_soft_job() local, 1779 kbase_resume_suspended_soft_jobs() local [all...] |
| /device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/jm/ |
| mali_kbase_jm_defs.h | 483 kctx member |
| /device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/ |
| mali_kbase_jd.c | 65 get_compat_pointer() argument, 92 jd_run_atom() local, 548 jd_update_jit_usage() local, 675 jd_done_nolock() local, 833 jd_trace_atom_submit() argument, 846 jd_submit_atom() argument, 1157 kbase_jd_submit() argument, 1321 kbase_jd_done_worker() local, 1499 jd_cancel_worker() local, 1563 kbase_jd_done() local, 1600 kbase_jd_cancel() local, 1621 kbase_jd_zap_context() argument, 1671 kbase_jd_init() argument, 1723 kbase_jd_exit() argument [all...] |
| mali_kbase_js.c | 79 kbase_ktrace_get_ctx_refcnt() argument, 84 kbase_ktrace_get_ctx_refcnt() argument, 150 jsctx_rb_none_to_pull_prio() argument, 176 jsctx_rb_none_to_pull() argument, 208 jsctx_queue_foreach_prio() argument, 257 jsctx_queue_foreach() argument, 277 jsctx_rb_peek_prio() argument, 307 jsctx_rb_peek() argument, 334 jsctx_rb_pull() argument, 352 jsctx_tree_add() argument, 392 jsctx_rb_unpull() argument, 526 kbasep_js_kctx_init() argument, 569 kbasep_js_kctx_term() argument, 627 kbase_js_ctx_list_add_pullable_nolock() argument, 667 kbase_js_ctx_list_add_pullable_head_nolock() argument, 709 kbase_js_ctx_list_add_pullable_head() argument, 738 kbase_js_ctx_list_add_unpullable_nolock() argument, 778 kbase_js_ctx_list_remove_nolock() argument, 816 kbase_js_ctx_list_pop_head_nolock() local, 847 kbase_js_ctx_list_pop_head() local, 869 kbase_js_ctx_pullable() argument, 918 kbase_js_dep_validate() argument, 1046 kbase_js_set_ctx_priority() argument, 1069 kbase_js_update_ctx_priority() argument, 1099 js_add_start_rp() local, 1148 js_add_end_rp() local, 1195 kbasep_js_add_job() argument, 1331 kbasep_js_remove_job() argument, 1357 kbasep_js_remove_cancelled_job() argument, 1403 kbasep_js_run_jobs_after_ctx_and_atom_release() argument, 1455 kbasep_js_runpool_release_ctx_internal() argument, 1604 kbasep_js_runpool_release_ctx_nolock() argument, 1614 kbasep_js_runpool_requeue_or_kill_ctx() argument, 1632 kbasep_js_runpool_release_ctx_and_katom_retained_state() argument, 1667 kbasep_js_runpool_release_ctx() argument, 1678 kbasep_js_runpool_release_ctx_no_schedule() argument, 1718 kbasep_js_schedule_ctx() argument, 1853 kbase_js_use_ctx() argument, 1874 kbasep_js_schedule_privileged_ctx() argument, 1947 kbasep_js_release_privileged_ctx() argument, 1985 local (kctx = kbdev->as_to_kctx[i]), 2006 local (kctx = kbdev->as_to_kctx[i]), 2029 local, 2131 kbase_js_dep_resolved_submit() argument, 2192 local (kctx = katom->kctx), 2234 kbase_js_evict_deps() argument, 2264 kbase_js_pull() argument, 2370 local (kctx = start_katom->kctx), 2471 local (kctx = end_katom->kctx), 2547 local (kctx = katom->kctx), 2674 kbase_js_unpull() argument, 2708 js_complete_start_rp() argument, 2792 js_complete_end_rp() argument, 2832 kbase_js_complete_atom_wq() argument, 2960 local (kctx = end_katom->kctx), 3005 local (kctx = katom->kctx), 3077 local (kctx = katom->kctx), 3148 local, 3282 kbase_js_zap_context() argument, 3428 trace_get_refcnt() argument, 3451 kbase_js_foreach_ctx_job() argument [all...] |
| mali_kbase_mem.h | 68 kctx member, 143 kctx member, 156 kctx member, 488 kbase_va_region_alloc_get() argument, 502 kbase_va_region_alloc_put() argument, 558 kbase_alloc_create() argument, 615 kbase_reg_prepare_native() argument, 1198 kbase_process_page_usage_inc() argument, 1214 kbase_process_page_usage_dec() argument, 1574 kbase_jit_request_phys_increase_locked() argument, 1612 kbase_jit_request_phys_increase() argument, 1647 kbase_jit_done_phys_increase() argument, 1790 kbase_link_event_mem_page() argument, 1809 kbase_unlink_event_mem_page() argument [all...] |
| mali_kbase_softjobs.c | 54 kbasep_add_waiting_soft_job() local, 64 kbasep_remove_waiting_soft_job() local, 74 kbasep_add_waiting_with_timeout() local, 93 kbasep_read_soft_event_status() argument, 110 kbasep_write_soft_event_status() argument, 140 kbase_dump_cpu_gpu_time() local, 201 kbase_soft_event_wait_callback() local, 216 kbasep_soft_event_complete_job() local, 228 kbasep_complete_triggered_soft_events() argument, 274 kbase_fence_debug_check_atom() local, 304 kbase_fence_debug_wait_timeout() local, 339 kbase_fence_debug_wait_timeout_worker() local, 351 kbase_fence_debug_timeout() local, 368 kbasep_soft_job_timeout_worker() local, 412 kbasep_soft_event_wait() local, 433 kbasep_soft_event_update_locked() local, 455 kbase_soft_event_update() argument, 735 kbase_mem_copy_from_extres() argument, 848 kbasep_jit_alloc_validate() argument, 935 kbase_jit_allocate_prepare() local, 1025 kbase_jit_add_to_pending_alloc_list() local, 1046 kbase_jit_allocate_process() local, 1238 kbase_jit_free_prepare() local, 1295 kbase_jit_free_process() local, 1319 kbasep_jit_finish_worker() local, 1332 kbase_jit_retry_pending_alloc() argument, 1356 kbase_jit_free_finish() local, 1516 kbase_process_soft_job() local, 1746 kbase_resume_suspended_soft_jobs() local [all...] |
| /device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/mmu/ |
| mali_kbase_mmu.c | 206 kbase_gpu_mmu_handle_write_fault() argument, 280 kbase_gpu_mmu_handle_permission_fault() argument, 330 page_fault_try_alloc() argument, 502 release_ctx() argument, 520 kbase_mmu_page_fault_worker() local, 1101 kbase_mmu_insert_single_page() argument, 1408 kbase_mmu_flush_invalidate_noretain() argument, 1486 kbase_mmu_flush_invalidate() argument, 1532 kbase_mmu_disable() argument, 1707 kbase_mmu_update_pages_no_flush() argument, 1786 kbase_mmu_update_pages() argument, 1847 kbase_mmu_init() argument, 1908 kbasep_mmu_dump_level() argument, 1970 kbase_mmu_dump() argument, 2046 kbase_mmu_bus_fault_worker() local [all...] |