/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/
mali_kbase_mem_linux.c
      85  u64 va_pages, u64 commit_pages, u64 extent, u64 *flags,  (in kbase_mem_alloc)
     104  if (0 == va_pages) {  (in kbase_mem_alloc)
     105  dev_warn(dev, "kbase_mem_alloc called with 0 va_pages!");  (in kbase_mem_alloc)
     109  if (va_pages > (U64_MAX / PAGE_SIZE))  (in kbase_mem_alloc)
     138  (va_pages > (1ULL << gpu_pc_bits >> PAGE_SHIFT)))  (in kbase_mem_alloc)
     149  reg = kbase_alloc_free_region(kctx, 0, va_pages, zone);  (in kbase_mem_alloc)
     168  if (kbase_alloc_phy_pages(reg, va_pages, commit_pages) != 0) {  (in kbase_mem_alloc)
     169  dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)",  (in kbase_mem_alloc)
     171  (unsigned long long)va_pages);  (in kbase_mem_alloc)
     180  unsigned long va_size = va_pages << PAGE_SHIFT  (in kbase_mem_alloc)
      84  kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent, u64 *flags, u64 *gpu_va)  (argument of kbase_mem_alloc)
     694  kbase_mem_from_ump(struct kbase_context *kctx, ump_secure_id id, u64 *va_pages, u64 *flags)  (argument of kbase_mem_from_ump)
     812  kbase_mem_from_umm(struct kbase_context *kctx, int fd, u64 *va_pages, u64 *flags, u32 padding)  (argument of kbase_mem_from_umm)
     919  kbase_mem_from_user_buffer(struct kbase_context *kctx, unsigned long address, unsigned long size, u64 *va_pages, u64 *flags)  (argument of kbase_mem_from_user_buffer)
    1292  kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type, void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages, u64 *flags)  (argument of kbase_mem_import)
    [all ...]
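The hits above show the sanity checks kbase_mem_alloc applies to va_pages before any region is created: a zero-size reservation is rejected, the page count must not overflow when converted to bytes, and an executable region must fit in the range the GPU program counter can address. A minimal sketch of that validation, reconstructed from the snippets at lines 104-138; the driver does this inline, so the standalone helper and the error codes are assumptions (U64_MAX, PAGE_SIZE, PAGE_SHIFT and BASE_MEM_PROT_GPU_EX come from the kernel and driver headers):

    /* Sketch only: validation of a va_pages request, following the
     * checks visible in kbase_mem_alloc above. */
    static int check_va_pages(u64 va_pages, u64 flags, int gpu_pc_bits)
    {
            /* 0 va_pages: nothing to reserve. */
            if (va_pages == 0)
                    return -EINVAL;

            /* va_pages * PAGE_SIZE must not overflow a u64. */
            if (va_pages > (U64_MAX / PAGE_SIZE))
                    return -ENOMEM;

            /* Executable mappings must fit in the 2^gpu_pc_bits byte
             * range reachable by the GPU program counter. */
            if ((flags & BASE_MEM_PROT_GPU_EX) &&
                (va_pages > (1ULL << gpu_pc_bits >> PAGE_SHIFT)))
                    return -EINVAL;

            return 0;
    }
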
mali_kbase_ioctl.h
     123  * @va_pages: The number of pages of virtual address space to reserve
     135  __u64 va_pages;  (member)
     264  * @va_pages: Number of VA pages to reserve for JIT
     267  * specified in @va_pages may be ignored.
     270  __u64 va_pages;  (member)
     376  * @va_pages: Size of the new alias
     391  __u64 va_pages;  (member)
     405  * @va_pages: Size of the new alias
     420  __u64 va_pages;  (member)

mali_kbase_uku.h
      74  u64 va_pages;  (member)
     102  u64 va_pages;  (member)
     115  u64 va_pages;  (member)
     462  * @va_pages: Number of virtual pages required for JIT
     469  u64 va_pages;  (member)

mali_kbase_mem_linux.h
      36  u64 va_pages, u64 commit_pages, u64 extent, u64 *flags,
      40  void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages,

mali_kbase_core_linux.c
     338  jit_init->va_pages))  (in kbase_legacy_dispatch)
     357  reg = kbase_mem_alloc(kctx, mem->va_pages,  (in kbase_legacy_dispatch)
     390  &mem_import->va_pages,  (in kbase_legacy_dispatch)
     437  &alias->va_pages);  (in kbase_legacy_dispatch)
    1393  reg = kbase_mem_alloc(kctx, alloc->in.va_pages,  (in kbase_api_mem_alloc)
    1517  return kbase_region_tracker_init_jit(kctx, jit_init->va_pages);  (in kbase_api_mem_jit_init)
    1606  ai, &alias->out.va_pages);  (in kbase_api_mem_alias)
    1637  &import->out.va_pages,  (in kbase_api_mem_import)

/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/
mali_kbase_mem_linux.c
      77  struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent,  (argument of kbase_mem_alloc)
      96  if (va_pages == 0) {  (in kbase_mem_alloc)
      97  dev_warn(dev, "kbase_mem_alloc called with 0 va_pages!");  (in kbase_mem_alloc)
     101  if (va_pages > (U64_MAX / PAGE_SIZE)) {  (in kbase_mem_alloc)
     127  if ((*flags & BASE_MEM_PROT_GPU_EX) && (va_pages > (1ULL << gpu_pc_bits >> PAGE_SHIFT))) {  (in kbase_mem_alloc)
     140  reg = kbase_alloc_free_region(kctx, 0, va_pages, zone);  (in kbase_mem_alloc)
     161  if (kbase_alloc_phy_pages(reg, va_pages, commit_pages) != 0) {  (in kbase_mem_alloc)
     162  dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)", (unsigned long long)commit_pages,  (in kbase_mem_alloc)
     163  (unsigned long long)va_pages);  (in kbase_mem_alloc)
     172  unsigned long va_size = va_pages << PAGE_SHIFT  (in kbase_mem_alloc)
     692  kbase_mem_from_ump(struct kbase_context *kctx, ump_secure_id id, u64 *va_pages, u64 *flags)  (argument of kbase_mem_from_ump)
     816  kbase_mem_from_umm(struct kbase_context *kctx, int fd, u64 *va_pages, u64 *flags, u32 padding)  (argument of kbase_mem_from_umm)
     930  kbase_mem_from_user_buffer(struct kbase_context *kctx, unsigned long address, unsigned long size, u64 *va_pages, u64 *flags)  (argument of kbase_mem_from_user_buffer)
    1306  kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type, void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages, u64 *flags)  (argument of kbase_mem_import)
    [all ...]

mali_kbase_ioctl.h
     114  * @va_pages: The number of pages of virtual address space to reserve
     126  __u64 va_pages;  (member)
     246  * @va_pages: Number of VA pages to reserve for JIT
     249  * specified in @va_pages may be ignored.
     252  __u64 va_pages;  (member)
     351  * @va_pages: Size of the new alias
     366  __u64 va_pages;  (member)
     379  * @va_pages: Size of the new alias
     394  __u64 va_pages;  (member)

mali_kbase_uku.h
      70  u64 va_pages;  (member)
      98  u64 va_pages;  (member)
     111  u64 va_pages;  (member)
     458  * @va_pages: Number of virtual pages required for JIT
     465  u64 va_pages;  (member)

mali_kbase_mem_linux.h
      36  struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent,
      40  u64 *gpu_va, u64 *va_pages, u64 *flags);

mali_kbase_core_linux.c
     341  if (kbase_region_tracker_init_jit(kctx, jit_init->va_pages)) {  (in kbase_legacy_dispatch)
     361  reg = kbase_mem_alloc(kctx, mem->va_pages, mem->commit_pages, mem->extent, &mem->flags, &mem->gpu_va);  (in kbase_legacy_dispatch)
     389  &mem_import->va_pages, &mem_import->flags)) {  (in kbase_legacy_dispatch)
     431  alias->gpu_va = kbase_mem_alias(kctx, &alias->flags, alias->stride, alias->nents, ai, &alias->va_pages);  (in kbase_legacy_dispatch)
    1360  reg = kbase_mem_alloc(kctx, alloc->in.va_pages, alloc->in.commit_pages, alloc->in.extent, &flags, &gpu_va);  (in kbase_api_mem_alloc)
    1470  return kbase_region_tracker_init_jit(kctx, jit_init->va_pages);  (in kbase_api_mem_jit_init)
    1544  alias->out.gpu_va = kbase_mem_alias(kctx, &flags, alias->in.stride, alias->in.nents, ai, &alias->out.va_pages);  (in kbase_api_mem_alias)
    1571  &import->out.va_pages, &flags);  (in kbase_api_mem_import)

/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/ |
mali_kbase_ioctl.h
      89  * @va_pages: The number of pages of virtual address space to reserve
     101  __u64 va_pages;  (member)
     242  * @va_pages: Number of VA pages to reserve for JIT
     245  * specified in @va_pages may be ignored.
     251  __u64 va_pages;  (member)
     260  * @va_pages: Number of VA pages to reserve for JIT
     267  * specified in @va_pages may be ignored.
     273  __u64 va_pages;  (member)
     285  * @va_pages: Number of GPU virtual address pages to reserve for just-in-time
     294  * specified in @va_pages may be ignored.
     297  __u64 va_pages;  (member)
     416  __u64 va_pages;  (member)
     444  __u64 va_pages;  (member)
     602  __u64 va_pages;  (member)
    [all ...]

mali_kbase_mem_linux.c
     308  u64 va_pages, u64 commit_pages,  (in kbase_mem_alloc)
     322  "Allocating %lld va_pages, %lld commit_pages, %lld extent, 0x%llX "  (in kbase_mem_alloc)
     324  va_pages, commit_pages, extent, *flags);  (in kbase_mem_alloc)
     371  if (kbase_check_alloc_sizes(kctx, *flags, va_pages, commit_pages, extent)) {  (in kbase_mem_alloc)
     378  commit_pages = va_pages;  (in kbase_mem_alloc)
     395  reg = kbase_alloc_free_region(rbtree, PFN_DOWN(*gpu_va), va_pages, zone);  (in kbase_mem_alloc)
     427  ((va_pages * ir_threshold) + (IR_THRESHOLD_STEPS / 0x2)) /  (in kbase_mem_alloc)
     445  if (kbase_alloc_phy_pages(reg, va_pages, commit_pages) != 0) {  (in kbase_mem_alloc)
     446  dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)",  (in kbase_mem_alloc)
     448  (unsigned long long)va_pages);  (in kbase_mem_alloc)
     307  kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages, u64 extent, u64 *flags, u64 *gpu_va)  (argument of kbase_mem_alloc)
    1409  kbase_mem_from_umm(struct kbase_context *kctx, int fd, u64 *va_pages, u64 *flags, u32 padding)  (argument of kbase_mem_from_umm)
    1565  kbase_mem_from_user_buffer(struct kbase_context *kctx, unsigned long address, unsigned long size, u64 *va_pages, u64 *flags)  (argument of kbase_mem_from_user_buffer)
    1991  kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type, void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages, u64 *flags)  (argument of kbase_mem_import)
    [all ...]

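The hit at line 427 is the incremental-rendering trigger threshold: a fixed-point fraction of va_pages computed with round-to-nearest integer division. The snippet is cut off at the division, so the divisor below is an assumption, taken to be the same IR_THRESHOLD_STEPS constant that appears in the rounding term:

    /* Sketch: threshold_pages = va_pages * ir_threshold / IR_THRESHOLD_STEPS,
     * rounded to nearest by adding half the divisor before dividing. */
    static u64 ir_threshold_pages(u64 va_pages, u64 ir_threshold)
    {
            return ((va_pages * ir_threshold) + (IR_THRESHOLD_STEPS / 2)) /
                   IR_THRESHOLD_STEPS;
    }

With example values (assumed, not from the source) IR_THRESHOLD_STEPS = 256 and ir_threshold = 128, a 1000-page region gets a threshold of (128000 + 128) / 256 = 500 pages, i.e. half the region.
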
mali_base_kernel.h
     219  u64 va_pages;  (member)
     246  u64 va_pages;  (member)
     262  * @va_pages: The minimum number of virtual pages required.
     301  u64 va_pages;  (member)

mali_kbase_mem_linux.h
      42  * @va_pages: The number of pages of virtual address space to reserve
      54  u64 va_pages, u64 commit_pages,
      81  * @va_pages: Size of the memory region reserved from the GPU address space
      89  u64 *va_pages, u64 *flags);

mali_kbase_mem.c
    2691  int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags, u64 va_pages, u64 commit_pages,  (argument of kbase_check_alloc_sizes)
    2704  if (va_pages == 0) {  (in kbase_check_alloc_sizes)
    2705  dev_warn(dev, KBASE_MSG_PRE "0 va_pages!");  (in kbase_check_alloc_sizes)
    2709  if (va_pages > KBASE_MEM_ALLOC_MAX_SIZE) {  (in kbase_check_alloc_sizes)
    2710  dev_warn(dev, KBASE_MSG_PRE "va_pages==%lld larger than KBASE_MEM_ALLOC_MAX_SIZE!",  (in kbase_check_alloc_sizes)
    2711  (unsigned long long)va_pages);  (in kbase_check_alloc_sizes)
    2715  /* Note: commit_pages is checked against va_pages during  (in kbase_check_alloc_sizes)
    2719  if ((flags & BASE_MEM_PROT_GPU_EX) && (va_pages > gpu_pc_pages_max)) {  (in kbase_check_alloc_sizes)
    2720  dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_PROT_GPU_EX and va_pages==%lld larger than GPU PC range %lld",  (in kbase_check_alloc_sizes)
    2721  (unsigned long long)va_pages, (unsigned  (in kbase_check_alloc_sizes)
    3453  u32 va_pages = 0;  (local in trace_jit_stats)
    [all ...]

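The gpu_pc_pages_max bound used at line 2719 is the same limit the midgard tree computes inline (line 138 of mali_kbase_mem_linux.c above): the GPU program counter spans 2^gpu_pc_bits bytes, so an executable region may cover at most that range, expressed in pages. A worked sketch; the concrete values are example assumptions, not from the source:

    /* gpu_pc_bits comes from the GPU's hardware config; 32 is only an
     * example value, as is the 4 KiB page size (PAGE_SHIFT = 12). */
    const int gpu_pc_bits = 32;
    const u64 gpu_pc_pages_max = 1ULL << gpu_pc_bits >> PAGE_SHIFT;
    /* = 2^32 bytes / 2^12 bytes per page = 1048576 pages (4 GiB) */
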
mali_kbase_core_linux.c
     922  reg = kbase_mem_alloc(kctx, alloc->in.va_pages, alloc->in.commit_pages,  (in kbase_api_mem_alloc)
    1103  /* since no phys_pages parameter, use the maximum: va_pages */  (in kbase_api_mem_jit_init_10_2)
    1105  kctx, jit_init->va_pages, DEFAULT_MAX_JIT_ALLOCATIONS,  (in kbase_api_mem_jit_init_10_2)
    1106  JIT_LEGACY_TRIM_LEVEL, BASE_MEM_GROUP_DEFAULT, jit_init->va_pages);  (in kbase_api_mem_jit_init_10_2)
    1125  /* since no phys_pages parameter, use the maximum: va_pages */  (in kbase_api_mem_jit_init_11_5)
    1127  kctx, jit_init->va_pages, jit_init->max_allocations,  (in kbase_api_mem_jit_init_11_5)
    1128  jit_init->trim_level, jit_init->group_id, jit_init->va_pages);  (in kbase_api_mem_jit_init_11_5)
    1148  kctx, jit_init->va_pages, jit_init->max_allocations,  (in kbase_api_mem_jit_init)
    1155  return kbase_region_tracker_init_exec(kctx, exec_init->va_pages);  (in kbase_api_mem_exec_init)
    1247  &alias->out.va_pages);  (in kbase_api_mem_alias)
    [all ...]

mali_kbase_softjobs.c
     857  if (info->va_pages < info->commit_pages) {  (in kbasep_jit_alloc_validate)
     984  KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO(kbdev, katom, info->va_pages, info->commit_pages, info->extent,  (in kbase_jit_allocate_prepare)
    1001  * Complete validation of va_pages, commit_pages and extent  (in kbase_jit_allocate_prepare)
    1202  info->id, info->commit_pages, info->extent, info->va_pages);  (in kbase_jit_allocate_process)

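Line 857 shows the basic JIT-request invariant: an allocation cannot commit more physical pages than the virtual range it reserves. A minimal sketch of that check, with the info struct reduced to the two fields involved (the driver's real base_jit_alloc_info carries more fields than this):

    struct jit_info_sketch {
            u64 va_pages;     /* virtual pages to reserve */
            u64 commit_pages; /* pages physically backed up front */
    };

    static int jit_alloc_validate_sketch(const struct jit_info_sketch *info)
    {
            /* committing more pages than are mapped makes no sense */
            if (info->va_pages < info->commit_pages)
                    return -EINVAL;
            return 0;
    }
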
/device/soc/rockchip/rk3588/kernel/include/uapi/gpu/arm/bifrost/ |
mali_kbase_ioctl.h
      90  * @in.va_pages: The number of pages of virtual address space to reserve
     100  __u64 va_pages;  (member)
     225  * @va_pages: Number of VA pages to reserve for JIT
     228  * specified in @va_pages may be ignored.
     234  __u64 va_pages;  (member)
     244  * @va_pages: Number of VA pages to reserve for JIT
     251  * specified in @va_pages may be ignored.
     257  __u64 va_pages;  (member)
     270  * @va_pages: Number of GPU virtual address pages to reserve for just-in-time
     279  * specified in @va_pages may be ignored.
     282  __u64 va_pages;  (member)
     407  __u64 va_pages;  (member)
     436  __u64 va_pages;  (member)
     604  __u64 va_pages;  (member)
    [all ...]

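The @in.va_pages notation at line 90 reflects the in/out union layout of the allocation ioctl: userspace fills the in view, the driver answers through the out view. A hedged sketch of how a client would use it, assuming the union kbase_ioctl_mem_alloc name and KBASE_IOCTL_MEM_ALLOC request number from this header, with flag selection and error reporting left aside:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "mali_kbase_ioctl.h"   /* the uapi header listed above */

    /* Reserve and commit n_pages of GPU memory; on success writes the
     * GPU virtual address (sketch, not the authoritative API). */
    static int gpu_alloc_pages(int kbase_fd, uint64_t n_pages,
                               uint64_t flags, uint64_t *gpu_va)
    {
            union kbase_ioctl_mem_alloc req = {0};

            req.in.va_pages = n_pages;     /* VA space to reserve, in pages */
            req.in.commit_pages = n_pages; /* back the whole range */
            req.in.flags = flags;          /* BASE_MEM_* protection bits */

            if (ioctl(kbase_fd, KBASE_IOCTL_MEM_ALLOC, &req) < 0)
                    return -1;

            *gpu_va = req.out.gpu_va;
            return 0;
    }
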
mali_base_kernel.h
     220  __u64 va_pages;  (member)
     247  __u64 va_pages;  (member)
     263  * @va_pages: The minimum number of virtual pages required.
     302  __u64 va_pages;  (member)

/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/ |
mali_kbase_mem_linux.c
     295  kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages,  (argument of kbase_mem_alloc)
     310  "Allocating %lld va_pages, %lld commit_pages, %lld extension, 0x%llX flags\n",  (in kbase_mem_alloc)
     311  va_pages, commit_pages, extension, *flags);  (in kbase_mem_alloc)
     357  if (kbase_check_alloc_sizes(kctx, *flags, va_pages, commit_pages,  (in kbase_mem_alloc)
     364  commit_pages = va_pages;  (in kbase_mem_alloc)
     380  va_pages, zone);  (in kbase_mem_alloc)
     409  reg->threshold_pages = ((va_pages * ir_threshold) +  (in kbase_mem_alloc)
     427  if (kbase_alloc_phy_pages(reg, va_pages, commit_pages) != 0) {  (in kbase_mem_alloc)
     428  dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)",  (in kbase_mem_alloc)
     430  (unsigned long long)va_pages);  (in kbase_mem_alloc)
    1365  kbase_mem_from_umm(struct kbase_context *kctx, int fd, u64 *va_pages, u64 *flags, u32 padding)  (argument of kbase_mem_from_umm)
    1513  kbase_mem_from_user_buffer(struct kbase_context *kctx, unsigned long address, unsigned long size, u64 *va_pages, u64 *flags)  (argument of kbase_mem_from_user_buffer)
    1952  kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type, void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages, u64 *flags)  (argument of kbase_mem_import)
    [all ...]

mali_kbase_mem_linux.h
      40  * @va_pages: The number of pages of virtual address space to reserve
      53  kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages, u64 commit_pages,
      81  * @va_pages: Size of the memory region reserved from the GPU address space
      88  void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages,

mali_kbase_mem.c
    3091  u64 va_pages, u64 commit_pages, u64 large_extension)  (in kbase_check_alloc_sizes)
    3103  if (va_pages == 0) {  (in kbase_check_alloc_sizes)
    3104  dev_warn(dev, KBASE_MSG_PRE "0 va_pages!");  (in kbase_check_alloc_sizes)
    3108  if (va_pages > KBASE_MEM_ALLOC_MAX_SIZE) {  (in kbase_check_alloc_sizes)
    3109  dev_warn(dev, KBASE_MSG_PRE "va_pages==%lld larger than KBASE_MEM_ALLOC_MAX_SIZE!",  (in kbase_check_alloc_sizes)
    3110  (unsigned long long)va_pages);  (in kbase_check_alloc_sizes)
    3114  /* Note: commit_pages is checked against va_pages during  (in kbase_check_alloc_sizes)
    3119  if ((flags & BASE_MEM_PROT_GPU_EX) && (va_pages > gpu_pc_pages_max)) {  (in kbase_check_alloc_sizes)
    3120  dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_PROT_GPU_EX and va_pages==%lld larger than GPU PC range %lld",  (in kbase_check_alloc_sizes)
    3121  (unsigned long long)va_pages,  (in kbase_check_alloc_sizes)
    3090  kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags, u64 va_pages, u64 commit_pages, u64 large_extension)  (argument of kbase_check_alloc_sizes)
    3890  u32 va_pages = 0;  (local in trace_jit_stats)
    [all ...]

mali_kbase_core_linux.c
     864  reg = kbase_mem_alloc(kctx, alloc->in.va_pages, alloc->in.commit_pages,  (in kbase_api_mem_alloc)
     988  /* since no phys_pages parameter, use the maximum: va_pages */  (in kbase_api_mem_jit_init_10_2)
     989  return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,  (in kbase_api_mem_jit_init_10_2)
     992  jit_init->va_pages);  (in kbase_api_mem_jit_init_10_2)
    1010  /* since no phys_pages parameter, use the maximum: va_pages */  (in kbase_api_mem_jit_init_11_5)
    1011  return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,  (in kbase_api_mem_jit_init_11_5)
    1013  jit_init->group_id, jit_init->va_pages);  (in kbase_api_mem_jit_init_11_5)
    1031  return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,  (in kbase_api_mem_jit_init)
    1039  return kbase_region_tracker_init_exec(kctx, exec_init->va_pages);  (in kbase_api_mem_exec_init)
    1133  ai, &alias->out.va_pages);  (in kbase_api_mem_alias)
    [all ...]

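The repeated comment at lines 988 and 1010 captures the backward-compat pattern for JIT init: older ioctl versions carry no phys_pages field, so the driver passes va_pages twice, once as the VA reservation and once as the physical-page cap (the final argument). A sketch of the 10.2 shim, reconstructed from the snippets with the constants they name; the exact kbase_region_tracker_init_jit parameter order is assumed from the argument lists above:

    static int api_mem_jit_init_10_2_sketch(struct kbase_context *kctx,
                                            u64 jit_va_pages)
    {
            /* since no phys_pages parameter, use the maximum: va_pages */
            return kbase_region_tracker_init_jit(kctx, jit_va_pages,
                                                 DEFAULT_MAX_JIT_ALLOCATIONS,
                                                 JIT_LEGACY_TRIM_LEVEL,
                                                 BASE_MEM_GROUP_DEFAULT,
                                                 jit_va_pages);
    }
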
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/tl/ |
mali_kbase_tracepoints.h
     280  u32 va_pages,
     286  u32 va_pages,
    1583  * @va_pages: Number of virtual pages allocated in this bin
    1592  va_pages, \
    1600  ctx_nr, bid, max_allocs, allocs, va_pages, ph_pages); \
    1610  * @va_pages: Number of virtual pages
    1623  va_pages, \
    1636  ctx_nr, heap_id, va_pages, ph_pages, max_chunks, chunk_size, chunk_count, target_in_flight, nr_in_flight); \

mali_kbase_tracepoints.c
     639  "ctx_nr,bid,max_allocs,allocs,va_pages,ph_pages") \
     643  "ctx_nr,heap_id,va_pages,ph_pages,max_chunks,chunk_size,chunk_count,target_in_flight,nr_in_flight") \
    1910  u32 va_pages,  (in __kbase_tlstream_aux_jit_stats)
    1919  + sizeof(va_pages)  (in __kbase_tlstream_aux_jit_stats)
    1939  pos, &va_pages, sizeof(va_pages));  (in __kbase_tlstream_aux_jit_stats)
    1950  u32 va_pages,  (in __kbase_tlstream_aux_tiler_heap_stats)
    1962  + sizeof(va_pages)  (in __kbase_tlstream_aux_tiler_heap_stats)
    1983  pos, &va_pages, sizeof(va_pages));  (in __kbase_tlstream_aux_tiler_heap_stats)
    1904  __kbase_tlstream_aux_jit_stats(struct kbase_tlstream *stream, u32 ctx_nr, u32 bid, u32 max_allocs, u32 allocs, u32 va_pages, u32 ph_pages)  (argument of __kbase_tlstream_aux_jit_stats)
    1946  __kbase_tlstream_aux_tiler_heap_stats(struct kbase_tlstream *stream, u32 ctx_nr, u64 heap_id, u32 va_pages, u32 ph_pages, u32 max_chunks, u32 chunk_size, u32 chunk_count, u32 target_in_flight, u32 nr_in_flight)  (argument of __kbase_tlstream_aux_tiler_heap_stats)
    [all ...]

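The tlstream hits show the serialization pattern: the message size is the sum of sizeof() over the payload fields (line 1919), and each field is then appended at a running write position (line 1939). A generic, self-contained sketch of that pattern; the helper name and standalone form below are illustrative, not the driver's API:

    #include <stddef.h>
    #include <string.h>

    /* Append len bytes at pos and return the new position, mirroring the
     * pos = kbasep_serialize_bytes(...) calls in the listing above. */
    static size_t serialize_bytes(char *buf, size_t pos,
                                  const void *src, size_t len)
    {
            memcpy(buf + pos, src, len);
            return pos + len;
    }

    /* Usage, packing two of the aux_jit_stats fields in field order:
     *   pos = serialize_bytes(buf, pos, &ctx_nr, sizeof(ctx_nr));
     *   pos = serialize_bytes(buf, pos, &va_pages, sizeof(va_pages));
     */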