/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/

mali_linux_trace.h
  225  size_t new_pages),
  226  TP_ARGS(reg, fault, new_pages),
  231  __field(size_t, new_pages)
  238  __entry->new_pages = new_pages;
  241  TP_printk("start=0x%llx fault_addr=0x%llx fault_extra_addr=0x%llx new_pages=%zu raw_fault_status=0x%x decoded_faultstatus=%s exception_type=0x%x,%s%u access_type=0x%x,%s source_id=0x%x",
  243  __entry->fault_extra_addr, __entry->new_pages,
  497  size_t old_pages, size_t available_pages, size_t new_pages),
  498  TP_ARGS(reg, freed_pages, old_pages, available_pages, new_pages),
  504  __field(size_t, new_pages)
  [all...]

mali_kbase_mem_linux.h
  123  * @new_pages: Number of physical pages to back the region with
  127  int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
  134  * @new_pages: Number of physical pages to back the region with
  139  struct kbase_va_region *reg, u64 new_pages);
  172  * @new_pages: The number of pages after the grow
  184  struct kbase_va_region *reg, u64 new_pages,
  338  * @new_pages: The number of pages after the shrink
  346  u64 new_pages, u64 old_pages);
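
Aside: the kernel-doc fragments above pin down the contract of kbase_mem_commit(): re-back the region at gpu_addr with new_pages physical pages, growing or shrinking as needed. Below is a minimal sketch of the dispatch this implies. The grow/shrink helpers are the ones named in these hits, but the lookup helper, the simplified body, and the _sketch name are assumptions, not the driver's actual code:

    /* Hypothetical, simplified shape of kbase_mem_commit(), inferred from
     * the headers above. The real driver also allocates/frees the physical
     * pages, takes kctx locks and handles error paths; all omitted here. */
    int kbase_mem_commit_sketch(struct kbase_context *kctx, u64 gpu_addr,
                                u64 new_pages)
    {
        /* assumed lookup helper; the hits only show the commit entry point */
        struct kbase_va_region *reg =
            kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
        u64 old_pages;

        if (!reg)
            return -EINVAL;

        old_pages = kbase_reg_current_backed_size(reg);
        if (new_pages == old_pages)
            return 0;                   /* already at the requested size */

        if (new_pages > old_pages)      /* grow: extend the GPU mapping */
            return kbase_mem_grow_gpu_mapping(kctx, reg, new_pages, old_pages);

        /* shrink: drop the CPU-side tail first, then the GPU mapping */
        kbase_mem_shrink_cpu_mapping(kctx, reg, new_pages, old_pages);
        return kbase_mem_shrink_gpu_mapping(kctx, reg, new_pages, old_pages);
    }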

mali_kbase_mem_linux.c
  105  u64 new_pages, u64 old_pages);
  2097  struct kbase_va_region *reg, u64 new_pages,
  2102  u64 delta = new_pages - old_pages;
  2120  u64 new_pages, u64 old_pages)
  2124  if (new_pages == old_pages)
  2129  (gpu_va_start + new_pages)<<PAGE_SHIFT,
  2130  (old_pages - new_pages)<<PAGE_SHIFT, 1);
  2137  * @new_pages: The number of pages after the shrink
  2148  u64 const new_pages, u64 const old_pages)
  2150  u64 delta = old_pages - new_pages;
  2096  kbase_mem_grow_gpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages, u64 old_pages, enum kbase_caller_mmu_sync_info mmu_sync_info)  argument
  2118  kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages, u64 old_pages)  argument
  2146  kbase_mem_shrink_gpu_mapping(struct kbase_context *const kctx, struct kbase_va_region *const reg, u64 const new_pages, u64 const old_pages)  argument
  2159  kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)  argument
  2291  kbase_mem_shrink(struct kbase_context *const kctx, struct kbase_va_region *const reg, u64 const new_pages)  argument
  [all...]
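
Aside: hits 2124-2130 show the whole trick behind shrinking the CPU view of a region: if nothing changed, return; otherwise punch the freed tail out of the file mapping. A worked sketch of that offset arithmetic, using the identifiers from the hits (the _sketch wrapper and its mapping parameter are assumptions):

    #include <linux/mm.h>       /* unmap_mapping_range() */

    /* Tail-unmap arithmetic from lines 2129-2130. Worked example:
     * old_pages = 8, new_pages = 5 unmaps 3 pages starting at byte
     * offset (gpu_va_start + 5) << PAGE_SHIFT inside the mapping. */
    static void shrink_cpu_mapping_sketch(struct address_space *mapping,
                                          u64 gpu_va_start, u64 new_pages,
                                          u64 old_pages)
    {
        if (new_pages == old_pages)
            return;         /* line 2124: nothing was freed */

        unmap_mapping_range(mapping,
                            (gpu_va_start + new_pages) << PAGE_SHIFT, /* freed tail start */
                            (old_pages - new_pages) << PAGE_SHIFT,    /* freed tail length */
                            1 /* also evict CoW copies */);
    }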

mali_kbase_mem.c
  2405  struct tagged_addr *new_pages = NULL;  in kbase_alloc_phy_pages_helper_locked()  local
  2440  new_pages = tp;  in kbase_alloc_phy_pages_helper_locked()
  2547  return new_pages;  in kbase_alloc_phy_pages_helper_locked()
  3602  size_t new_pages = old_pages;  in kbase_mem_jit_trim_pages_from_region()  local
  3696  new_pages -= to_free;  in kbase_mem_jit_trim_pages_from_region()
  3698  err = kbase_mem_shrink(kctx, reg, new_pages);  in kbase_mem_jit_trim_pages_from_region()
  3702  available_pages, new_pages);  in kbase_mem_jit_trim_pages_from_region()
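
Aside: the kbase_mem_jit_trim_pages_from_region() hits read as a complete mini-algorithm: start from the current backing size, subtract what can be freed, commit the smaller size through kbase_mem_shrink(). A hedged distillation follows; to_free is computed by the real driver from the region's usage, here it is just a parameter, and the _sketch name is mine:

    /* Trim flow from lines 3602/3696/3698, reduced to its control flow. */
    static int jit_trim_sketch(struct kbase_context *kctx,
                               struct kbase_va_region *reg, size_t to_free)
    {
        size_t old_pages = kbase_reg_current_backed_size(reg);
        size_t new_pages = old_pages;       /* line 3602 */

        if (!to_free || to_free > old_pages)
            return 0;                       /* nothing sensible to trim */

        new_pages -= to_free;               /* line 3696 */
        return kbase_mem_shrink(kctx, reg, new_pages); /* line 3698 */
    }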

/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/

mali_linux_trace.h
  155  mali_mmu_page_fault_grow, TP_PROTO(struct kbase_va_region *reg, struct kbase_fault *fault, size_t new_pages),
  156  TP_ARGS(reg, fault, new_pages),
  158  __field(size_t, new_pages) __field(u32, status)),
  160  __entry->fault_extra_addr = fault->extra_addr; __entry->new_pages = new_pages;
  162  TP_printk("start=0x%llx fault_addr=0x%llx fault_extra_addr=0x%llx new_pages=%zu raw_fault_status=0x%x "
  164  __entry->start_addr, __entry->fault_addr, __entry->fault_extra_addr, __entry->new_pages, __entry->status,
  329  size_t new_pages),
  330  TP_ARGS(reg, freed_pages, old_pages, available_pages, new_pages),
  332  __field(size_t, available_pages) __field(size_t, new_pages)),
  [all...]
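
Aside: the fragments at lines 155-164 are enough to reconstruct the rough shape of the mali_mmu_page_fault_grow tracepoint. The sketch below is assembled from those pieces and abridged; the assignments for start_addr, fault_addr and status are assumptions inferred from the TP_printk format string, not lines shown in the hits:

    #include <linux/tracepoint.h>

    /* Sketch of the tracepoint shape implied by lines 155-164; abridged
     * (the full format string also decodes the fault status fields). */
    TRACE_EVENT(mali_mmu_page_fault_grow,
        TP_PROTO(struct kbase_va_region *reg, struct kbase_fault *fault,
                 size_t new_pages),
        TP_ARGS(reg, fault, new_pages),
        TP_STRUCT__entry(
            __field(u64, start_addr)
            __field(u64, fault_addr)
            __field(u64, fault_extra_addr)
            __field(size_t, new_pages)              /* line 158 */
            __field(u32, status)
        ),
        TP_fast_assign(
            /* assumed: start/fault/status fields inferred from TP_printk */
            __entry->start_addr = (u64)reg->start_pfn << PAGE_SHIFT;
            __entry->fault_addr = fault->addr;
            __entry->fault_extra_addr = fault->extra_addr;  /* line 160 */
            __entry->new_pages = new_pages;                 /* line 160 */
            __entry->status = fault->status;
        ),
        TP_printk("start=0x%llx fault_addr=0x%llx fault_extra_addr=0x%llx new_pages=%zu raw_fault_status=0x%x",
                  __entry->start_addr, __entry->fault_addr,
                  __entry->fault_extra_addr, __entry->new_pages,
                  __entry->status)
    );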

mali_kbase_mem_linux.h
  126  * @new_pages: Number of physical pages to back the region with
  130  int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
  137  * @new_pages: Number of physical pages to back the region with
  142  struct kbase_va_region *const reg, u64 const new_pages);
  175  * @new_pages: The number of pages after the grow
  186  struct kbase_va_region *reg, u64 new_pages,
  340  * @new_pages: The number of pages after the shrink
  347  struct kbase_va_region *reg, u64 new_pages,

mali_kbase_mem_linux.c
  108  u64 const new_pages,
  2134  struct kbase_va_region *reg, u64 new_pages,
  2138  u64 delta = new_pages - old_pages;
  2154  struct kbase_va_region *reg, u64 new_pages,
  2159  if (new_pages == old_pages) {
  2165  (gpu_va_start + new_pages) << PAGE_SHIFT,
  2166  (old_pages - new_pages) << PAGE_SHIFT, 1);
  2173  * @new_pages: The number of pages after the shrink
  2184  u64 const new_pages,
  2187  u64 delta = old_pages - new_pages;
  2133  kbase_mem_grow_gpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages, u64 old_pages)  argument
  2153  kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages, u64 old_pages)  argument
  2182  kbase_mem_shrink_gpu_mapping(struct kbase_context *const kctx, struct kbase_va_region *const reg, u64 const new_pages, u64 const old_pages)  argument
  2197  kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)  argument
  2320  kbase_mem_shrink(struct kbase_context *const kctx, struct kbase_va_region *const reg, u64 const new_pages)  argument
  [all...]

mali_kbase_mem.c
  2061  struct tagged_addr *new_pages = NULL;  in kbase_alloc_phy_pages_helper_locked()  local
  2096  new_pages = tp;  in kbase_alloc_phy_pages_helper_locked()
  2191  return new_pages;  in kbase_alloc_phy_pages_helper_locked()
  3176  size_t new_pages = old_pages;  in kbase_mem_jit_trim_pages_from_region()  local
  3266  new_pages -= to_free;  in kbase_mem_jit_trim_pages_from_region()
  3268  err = kbase_mem_shrink(kctx, reg, new_pages);  in kbase_mem_jit_trim_pages_from_region()
  3271  trace_mali_jit_trim_from_region(reg, to_free, old_pages, available_pages, new_pages);  in kbase_mem_jit_trim_pages_from_region()

/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/mmu/

mali_kbase_mmu.c
  448  * @new_pages: Number of 4 kB pages to allocate
  465  * If 2 MB pages are enabled and new_pages is >= 2 MB then pages_to_grow will be
  471  struct kbase_va_region *region, size_t new_pages,  in page_fault_try_alloc()
  491  if (new_pages >= (SZ_2M / SZ_4K)) {  in page_fault_try_alloc()
  503  new_pages *= 2;  in page_fault_try_alloc()
  505  pages_still_required = new_pages;  in page_fault_try_alloc()
  549  pages_to_alloc_4k = MIN(new_pages, pool_size_4k);  in page_fault_try_alloc()
  580  WARN_ON(!new_pages);  in page_fault_try_alloc()
  581  WARN_ON(pages_to_alloc_4k >= new_pages);  in page_fault_try_alloc()
  582  WARN_ON(pages_to_alloc_4k_per_alloc >= new_pages);  in page_fault_try_alloc()
  470  page_fault_try_alloc(struct kbase_context *kctx, struct kbase_va_region *region, size_t new_pages, int *pages_to_grow, bool *grow_2mb_pool, struct kbase_sub_alloc **prealloc_sas)  in page_fault_try_alloc()  argument
  662  size_t new_pages;  in kbase_mmu_page_fault_worker()  local
  [all...]
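
Aside: lines 491, 503, 505 and 549 outline how page_fault_try_alloc() sizes its draw from the 2 MB and 4 kB memory pools. The sketch below writes that arithmetic out; the interpretation of the doubling at line 503 (CPU and GPU allocations backed separately) and the whole-block consumption of the 2 MB pool are inferences, not facts shown in the hits:

    #include <linux/types.h>    /* bool, size_t */
    #include <linux/sizes.h>    /* SZ_2M, SZ_4K */

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    /* Split a fault-grow request across the two pools (sketch). */
    static void split_fault_grow_sketch(size_t new_pages, bool separate_allocs,
                                        size_t pool_2m_blocks, size_t pool_4k_pages,
                                        size_t *alloc_2m_blocks, size_t *alloc_4k_pages)
    {
        const size_t pages_per_2m = SZ_2M / SZ_4K;  /* 512 small pages */
        size_t pages_still_required;

        if (separate_allocs)
            new_pages *= 2;                 /* line 503: back both allocs */

        pages_still_required = new_pages;   /* line 505 */

        *alloc_2m_blocks = 0;
        if (new_pages >= pages_per_2m) {    /* line 491 */
            *alloc_2m_blocks = min_sz(pages_still_required / pages_per_2m,
                                      pool_2m_blocks);
            pages_still_required -= *alloc_2m_blocks * pages_per_2m;
        }

        /* whatever the 2 MB pool could not cover comes from 4 kB pages */
        *alloc_4k_pages = min_sz(pages_still_required, pool_4k_pages); /* cf. 549 */
    }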

/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/mmu/

mali_kbase_mmu.c
  308  * @new_pages: Number of 4 kB pages to allocate
  325  * If 2 MB pages are enabled and new_pages is >= 2 MB then pages_to_grow will be
  330  static bool page_fault_try_alloc(struct kbase_context *kctx, struct kbase_va_region *region, size_t new_pages,  in page_fault_try_alloc()  argument
  348  if (new_pages >= (SZ_2M / SZ_4K)) {  in page_fault_try_alloc()
  360  new_pages *= 0x2;  in page_fault_try_alloc()
  363  pages_still_required = new_pages;  in page_fault_try_alloc()
  409  pages_to_alloc_4k = MIN(new_pages, pool_size_4k);  in page_fault_try_alloc()
  436  WARN_ON(!new_pages);  in page_fault_try_alloc()
  437  WARN_ON(pages_to_alloc_4k >= new_pages);  in page_fault_try_alloc()
  438  WARN_ON(pages_to_alloc_4k_per_alloc >= new_pages);  in page_fault_try_alloc()
  516  size_t new_pages;  in kbase_mmu_page_fault_worker()  local
  [all...]

/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/

mali_kbase_mem_linux.h
  50  * @new_pages: Number of physical pages to back the region with
  54  int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
  78  * @new_pages: The number of pages after the grow
  88  int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages, u64 old_pages);

mali_kbase_mem_linux.c
  50  * @new_pages: The number of pages after the shrink
  58  static void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages,
  65  * @new_pages: The number of pages after the shrink
  74  static int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages,
  1447  int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages, u64 old_pages)  argument
  1450  u64 delta = new_pages - old_pages;
  1462  static void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages,  argument
  1467  if (new_pages == old_pages) {
  1472  unmap_mapping_range(kctx->filp->f_inode->i_mapping, (gpu_va_start + new_pages) << PAGE_SHIFT,
  1473  (old_pages - new_pages) << PAGE_SHIFT,
  1476  kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages, u64 old_pages)  argument
  1487  kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)  argument
  [all...]

mali_kbase_mmu.c
  118  size_t new_pages;  in page_fault_worker()  local
  251  new_pages = make_multiple(fault_rel_pfn - kbase_reg_current_backed_size(region) + 1, region->extent);  in page_fault_worker()
  253  if (new_pages + kbase_reg_current_backed_size(region) > region->nr_pages) {  in page_fault_worker()
  254  new_pages = region->nr_pages - kbase_reg_current_backed_size(region);  in page_fault_worker()
  257  if (new_pages == 0) {  in page_fault_worker()
  272  if (kbase_alloc_phy_pages_helper(region->gpu_alloc, new_pages) == 0) {  in page_fault_worker()
  274  if (kbase_alloc_phy_pages_helper(region->cpu_alloc, new_pages) == 0) {  in page_fault_worker()
  277  kbase_free_phy_pages_helper(region->gpu_alloc, new_pages);  in page_fault_worker()
  292  pfn_offset = kbase_reg_current_backed_size(region) - new_pages;  in page_fault_worker()
  302  &kbase_get_gpu_phy_pages(region)[pfn_offset], new_pages, region…  in page_fault_worker()
  [all...]
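
Aside: lines 251-257 carry the complete grow-sizing policy for a GPU page fault: cover the faulting page, round up to the region's extent granule, clamp to the region's reserved size. A sketch of that arithmetic; make_multiple() is written out under the assumption that it rounds its first argument up to a multiple of its second, and the _sketch names are mine:

    /* Round value up to a multiple of granule; written out here on the
     * assumption that this is what the driver's make_multiple() does. */
    static size_t make_multiple_sketch(size_t value, size_t granule)
    {
        size_t rem = value % granule;

        return rem ? value + (granule - rem) : value;
    }

    /* Grow-sizing policy from lines 251-257. Worked example: backed = 8,
     * fault_rel_pfn = 9, extent = 4: raw need is 9 - 8 + 1 = 2 pages,
     * rounded up to 4; with nr_pages = 10 the clamp cuts it back to 2,
     * which still covers pfn 9. */
    static size_t fault_grow_pages_sketch(size_t backed, size_t fault_rel_pfn,
                                          size_t extent, size_t nr_pages)
    {
        size_t new_pages =
            make_multiple_sketch(fault_rel_pfn - backed + 1, extent); /* line 251 */

        if (new_pages + backed > nr_pages)      /* line 253 */
            new_pages = nr_pages - backed;      /* line 254 */

        return new_pages;   /* 0: a racing fault already grew the region */
    }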

/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/

mali_kbase_mem_linux.h
  50  * @new_pages: Number of physical pages to back the region with
  54  int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
  78  * @new_pages: The number of pages after the grow
  90  u64 new_pages, u64 old_pages);

mali_kbase_mem_linux.c
  55  * @new_pages: The number of pages after the shrink
  65  u64 new_pages, u64 old_pages);
  71  * @new_pages: The number of pages after the shrink
  82  u64 new_pages, u64 old_pages);
  1441  u64 new_pages, u64 old_pages)
  1444  u64 delta = new_pages - old_pages;
  1459  u64 new_pages, u64 old_pages)
  1463  if (new_pages == old_pages)
  1468  (gpu_va_start + new_pages)<<PAGE_SHIFT,
  1469  (old_pages - new_pages)<<PAGE_SHIFT,
  1439  kbase_mem_grow_gpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages, u64 old_pages)  argument
  1457  kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages, u64 old_pages)  argument
  1472  kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx, struct kbase_va_region *reg, u64 new_pages, u64 old_pages)  argument
  1485  kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)  argument
  [all...]

mali_kbase_mmu.c
  116  size_t new_pages;  in page_fault_worker()  local
  269  new_pages = make_multiple(fault_rel_pfn -  in page_fault_worker()
  274  if (new_pages + kbase_reg_current_backed_size(region) >  in page_fault_worker()
  276  new_pages = region->nr_pages -  in page_fault_worker()
  279  if (0 == new_pages) {  in page_fault_worker()
  297  if (kbase_alloc_phy_pages_helper(region->gpu_alloc, new_pages) == 0) {  in page_fault_worker()
  300  region->cpu_alloc, new_pages) == 0) {  in page_fault_worker()
  304  new_pages);  in page_fault_worker()
  320  pfn_offset = kbase_reg_current_backed_size(region) - new_pages;  in page_fault_worker()
  332  new_pages, region…  in page_fault_worker()
  [all...]