/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/
mali_kbase_mem.c
     143  u64 start_pfn = new_reg->start_pfn;  in kbase_region_tracker_insert() (local)
     159  KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn);  in kbase_region_tracker_insert()
     161  if (old_reg->start_pfn > start_pfn)  in kbase_region_tracker_insert()
     173  find_region_enclosing_range_rbtree(struct rb_root *rbtree, u64 start_pfn, size_t nr_pages)  (argument)
     174  struct rb_root *rbtree, u64 start_pfn, size_t nr_pages)  in find_region_enclosing_range_rbtree()
     178  u64 end_pfn = start_pfn + nr_pages;  in find_region_enclosing_range_rbtree()
     186  tmp_start_pfn = reg->start_pfn;  in find_region_enclosing_range_rbtree()
     187  tmp_end_pfn = reg->start_pfn  in find_region_enclosing_range_rbtree()
     307  u64 start_pfn = reg->start_pfn;  in kbase_region_tracker_find_region_meeting_reqs() (local)
     482  kbase_insert_va_region_nolock(struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)  (argument)
     664  u64 start_pfn;  in kbase_add_va_region_rbtree() (local)
    1345  kbase_alloc_free_region(struct rb_root *rbtree, u64 start_pfn, size_t nr_pages, int zone)  (argument)
    [all...]
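These hits are the heart of the GPU VA tracker: regions live in an rb-tree ordered by start_pfn, and find_region_enclosing_range_rbtree() reports the region, if any, that contains the whole range [start_pfn, start_pfn + nr_pages). Below is a minimal standalone sketch of that containment walk, over a sorted array instead of a kernel rb-tree; the va_region struct, find_enclosing() and all values are illustrative stand-ins, not the driver's code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct va_region {              /* illustrative stand-in, not kbase's struct */
    uint64_t start_pfn;         /* first GPU page frame of the region */
    size_t   nr_pages;          /* region length in pages */
};

/* Walk an array sorted by start_pfn the way the driver walks its rb-tree:
 * descend left when the queried start precedes the node, right otherwise,
 * and stop when a node encloses the whole queried range. */
static struct va_region *find_enclosing(struct va_region *regs, size_t n,
                                        uint64_t start_pfn, size_t nr_pages)
{
    uint64_t end_pfn = start_pfn + nr_pages;
    size_t lo = 0, hi = n;

    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        uint64_t tmp_start_pfn = regs[mid].start_pfn;
        uint64_t tmp_end_pfn = tmp_start_pfn + regs[mid].nr_pages;

        if (tmp_start_pfn <= start_pfn && end_pfn <= tmp_end_pfn)
            return &regs[mid];  /* node encloses [start_pfn, end_pfn) */
        if (start_pfn < tmp_start_pfn)
            hi = mid;           /* queried range lies to the left */
        else
            lo = mid + 1;       /* lies to the right; regions never overlap */
    }
    return NULL;
}

int main(void)
{
    struct va_region regs[] = { { 0x1000, 16 }, { 0x2000, 64 } };
    struct va_region *reg = find_enclosing(regs, 2, 0x2010, 8);

    printf("enclosing region: 0x%llx\n",
           reg ? (unsigned long long)reg->start_pfn : 0ULL);
    return 0;
}

The descent rule is only sound because the tracker never lets regions overlap, which is exactly what the KBASE_DEBUG_ASSERT at line 159 is guarding.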
mali_kbase_debug_mem_view.c
      40  u64 start_pfn;  (member)
     124  seq_printf(m, "%016llx: Unbacked page\n\n", (map->start_pfn +  in debug_mem_show()
     138  seq_printf(m, "%016llx:", i + ((map->start_pfn +  in debug_mem_show()
     191  mapping->start_pfn = reg->start_pfn;  in debug_mem_zone_open()
mali_kbase_mem_linux.c
     121  if ((reg->start_pfn <= gpu_pfn) &&  in kbase_find_event_mem_region()
     122  (gpu_pfn < (reg->start_pfn + reg->nr_pages))) {  in kbase_find_event_mem_region()
     262  mapping_offset = gpu_addr - (reg->start_pfn << PAGE_SHIFT);  in kbase_phy_alloc_mapping_get()
     480  *gpu_va = reg->start_pfn << PAGE_SHIFT;  in kbase_mem_alloc()
    1003  ret = kbase_mmu_update_pages(kctx, reg->start_pfn,  in kbase_mem_flags_change()
    1065  reg->start_pfn);  in kbase_mem_do_sync_imported()
    1094  reg->start_pfn);  in kbase_mem_do_sync_imported()
    1117  reg, reg->start_pfn, ret);  in kbase_mem_do_sync_imported()
    1259  err = kbase_mmu_insert_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,  in kbase_mem_umm_map()
    1277  kctx, reg->start_pfn  in kbase_mem_umm_map()
    [all...]
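Two idioms repeat throughout this file: a region owns the pfn range [start_pfn, start_pfn + nr_pages), and byte addresses convert to and from pfns by shifting with PAGE_SHIFT. A minimal userspace sketch, assuming 4 KiB pages and a pared-down region struct (both assumptions, not the driver's definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12           /* assumed 4 KiB pages */

struct region { uint64_t start_pfn; uint64_t nr_pages; };

/* kbase_find_event_mem_region-style containment test */
static bool pfn_in_region(const struct region *reg, uint64_t gpu_pfn)
{
    return reg->start_pfn <= gpu_pfn &&
           gpu_pfn < reg->start_pfn + reg->nr_pages;
}

int main(void)
{
    struct region reg = { .start_pfn = 0x80000, .nr_pages = 4 };

    /* kbase_mem_alloc-style: the GPU VA returned to the caller is just
     * the first pfn shifted up to a byte address */
    uint64_t gpu_va = reg.start_pfn << PAGE_SHIFT;

    /* kbase_phy_alloc_mapping_get-style: byte offset into the region */
    uint64_t gpu_addr = gpu_va + 0x2040;
    uint64_t mapping_offset = gpu_addr - (reg.start_pfn << PAGE_SHIFT);

    printf("va=0x%llx offset=0x%llx inside=%d\n",
           (unsigned long long)gpu_va, (unsigned long long)mapping_offset,
           (int)pfn_in_region(&reg, gpu_addr >> PAGE_SHIFT));
    return 0;
}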
mali_linux_trace.h
     235  __entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT;
     274  __entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT;
     328  __entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT;
     372  __entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT;
     507  __entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT;
mali_kbase_jd.c
     343  katom->extres[res_no].gpu_address = reg->start_pfn << PAGE_SHIFT; /* save the start_pfn (as an address, not pfn) to use fast lookup later */  in kbase_jd_pre_external_resources()
     629  reg->start_pfn << PAGE_SHIFT,  in jd_update_jit_usage()
     669  if (addr_end >= (reg->start_pfn << PAGE_SHIFT))  in jd_update_jit_usage()
     670  used_pages = PFN_UP(addr_end) - reg->start_pfn;  in jd_update_jit_usage()
     685  reg->start_pfn << PAGE_SHIFT,  in jd_update_jit_usage()
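jd_update_jit_usage() charges a JIT region for every page the heap end pointer has touched: PFN_UP() rounds the end byte address up to the next page frame number, and subtracting start_pfn yields the page count. A standalone sketch with the usual PFN_UP definition, restated here as an assumption along with 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                                     /* assumed */
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)  /* round addr up to a pfn */

int main(void)
{
    uint64_t start_pfn = 0x400;                /* illustrative region start */
    uint64_t addr_end = (start_pfn << PAGE_SHIFT) + 3 * PAGE_SIZE + 17;

    /* as in jd_update_jit_usage(): only count pages if the heap end
     * pointer is at or past the region start */
    if (addr_end >= (start_pfn << PAGE_SHIFT)) {
        uint64_t used_pages = PFN_UP(addr_end) - start_pfn;

        /* 17 bytes spill into a 4th page, so 4 pages are charged */
        printf("used_pages = %llu\n", (unsigned long long)used_pages);
    }
    return 0;
}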
/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/
mali_kbase_mem.c
      96  u64 start_pfn = new_reg->start_pfn;  in kbase_region_tracker_insert() (local)
     112  KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn);  in kbase_region_tracker_insert()
     114  if (old_reg->start_pfn > start_pfn) {  in kbase_region_tracker_insert()
     128  kbase_region_tracker_find_region_enclosing_range_free(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages)  (argument)
     129  u64 start_pfn, size_t nr_pages)  in kbase_region_tracker_find_region_enclosing_range_free()
     135  u64 end_pfn = start_pfn + nr_pages;  in kbase_region_tracker_find_region_enclosing_range_free()
     137  rbtree = kbase_gpu_va_to_rbtree(kctx, start_pfn);  in kbase_region_tracker_find_region_enclosing_range_free()
     145  tmp_start_pfn = reg->start_pfn;  in kbase_region_tracker_find_region_enclosing_range_free()
     251  u64 start_pfn = (reg->start_pfn + align - 1) & ~(align - 1);  in kbase_region_tracker_find_region_meeting_reqs() (local)
     345  kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)  (argument)
     454  u64 start_pfn;  in kbase_add_va_region() (local)
     762  kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone)  (argument)
    [all...]
mali_kbase_debug_mem_view.c
      34  u64 start_pfn;  (member)
     120  seq_printf(m, "%016llx: Unbacked page\n\n", (map->start_pfn + data->offset) << PAGE_SHIFT);  in debug_mem_show()
     135  seq_printf(m, "%016llx:", i + ((map->start_pfn + data->offset) << PAGE_SHIFT));  in debug_mem_show()
     181  mapping->start_pfn = reg->start_pfn;  in debug_mem_zone_open()
mali_kbase_mem_linux.c
     228  *gpu_va = reg->start_pfn << PAGE_SHIFT;  in kbase_mem_alloc()
     660  ret = kbase_mmu_update_pages(kctx, reg->start_pfn, kbase_get_cpu_phy_pages(reg), reg->gpu_alloc->nents,  in kbase_mem_flags_change()
    1276  gpu_va = reg->start_pfn << PAGE_SHIFT;
    1416  *gpu_va = reg->start_pfn << PAGE_SHIFT;
    1423  *gpu_va = reg->start_pfn << PAGE_SHIFT;
    1457  ret = kbase_mmu_insert_pages(kctx, reg->start_pfn + old_pages, phy_pages + old_pages, delta, reg->flags);
    1465  u64 gpu_va_start = reg->start_pfn;
    1482  ret = kbase_mmu_teardown_pages(kctx, reg->start_pfn + new_pages, delta);
    1676  rel_pgoff = vmf->pgoff - map->region->start_pfn;
    1767  u64 start_off = vma->vm_pgoff - reg->start_pfn
    [all...]
mali_kbase_10969_workaround.c
      76  page_index = (katom->jc >> PAGE_SHIFT) - region->start_pfn;  in kbasep_10969_workaround_clamp_coordinates()
mali_kbase_mem.h
     223  u64 start_pfn; /* The PFN in GPU space */  (member)
     627  struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone);
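start_pfn is the field that anchors a kbase_va_region in GPU address space, and kbase_alloc_free_region() manufactures a descriptor for a pfn range before it is inserted into a zone's tree. Below is a toy model of that constructor keeping only the fields visible in this header; the struct and function names are hypothetical, and the real function also initialises rb-tree linkage, flags and allocation bookkeeping, all omitted here.

#include <stdint.h>
#include <stdlib.h>

struct toy_va_region {
    uint64_t start_pfn;     /* the PFN in GPU space, as in the header above */
    size_t   nr_pages;
    int      zone;
};

/* hypothetical stand-in: record the pfn range a new free region covers */
static struct toy_va_region *toy_alloc_free_region(uint64_t start_pfn,
                                                   size_t nr_pages, int zone)
{
    struct toy_va_region *reg = calloc(1, sizeof(*reg));

    if (!reg)
        return NULL;
    reg->start_pfn = start_pfn;
    reg->nr_pages = nr_pages;
    reg->zone = zone;
    return reg;
}

int main(void)
{
    struct toy_va_region *reg = toy_alloc_free_region(0x1000, 64, 0);

    free(reg);
    return 0;
}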
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/
mali_kbase_mem.c
     101  u64 start_pfn = new_reg->start_pfn;  in kbase_region_tracker_insert() (local)
     117  KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn);  in kbase_region_tracker_insert()
     119  if (old_reg->start_pfn > start_pfn)  in kbase_region_tracker_insert()
     132  kbase_region_tracker_find_region_enclosing_range_free(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages)  (argument)
     133  struct kbase_context *kctx, u64 start_pfn, size_t nr_pages)  in kbase_region_tracker_find_region_enclosing_range_free()
     139  u64 end_pfn = start_pfn + nr_pages;  in kbase_region_tracker_find_region_enclosing_range_free()
     141  rbtree = kbase_gpu_va_to_rbtree(kctx, start_pfn);  in kbase_region_tracker_find_region_enclosing_range_free()
     149  tmp_start_pfn = reg->start_pfn;  in kbase_region_tracker_find_region_enclosing_range_free()
     254  u64 start_pfn = (reg->start_pfn + align - 1) & ~(align - 1);  in kbase_region_tracker_find_region_meeting_reqs() (local)
     350  kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)  (argument)
     465  u64 start_pfn;  in kbase_add_va_region() (local)
     790  kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone)  (argument)
    [all...]
mali_kbase_debug_mem_view.c
      36  u64 start_pfn;  (member)
     120  seq_printf(m, "%016llx: Unbacked page\n\n", (map->start_pfn +  in debug_mem_show()
     134  seq_printf(m, "%016llx:", i + ((map->start_pfn +  in debug_mem_show()
     180  mapping->start_pfn = reg->start_pfn;  in debug_mem_zone_open()
mali_kbase_mem_linux.c
     236  *gpu_va = reg->start_pfn << PAGE_SHIFT;  in kbase_mem_alloc()
     664  ret = kbase_mmu_update_pages(kctx, reg->start_pfn, kbase_get_cpu_phy_pages(reg), reg->gpu_alloc->nents, reg->flags);  in kbase_mem_flags_change()
    1262  gpu_va = reg->start_pfn << PAGE_SHIFT;
    1409  *gpu_va = reg->start_pfn << PAGE_SHIFT;
    1415  *gpu_va = reg->start_pfn << PAGE_SHIFT;
    1451  ret = kbase_mmu_insert_pages(kctx, reg->start_pfn + old_pages,
    1461  u64 gpu_va_start = reg->start_pfn;
    1480  reg->start_pfn + new_pages, delta);
    1672  rel_pgoff = vmf->pgoff - map->region->start_pfn;
    1765  u64 start_off = vma->vm_pgoff - reg->start_pfn
    [all...]
mali_kbase_10969_workaround.c
      75  page_index = (katom->jc >> PAGE_SHIFT) - region->start_pfn;  in kbasep_10969_workaround_clamp_coordinates()
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/
mali_kbase_mem.c
     137  u64 start_pfn = new_reg->start_pfn;  in kbase_region_tracker_insert() (local)
     153  KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn);  in kbase_region_tracker_insert()
     155  if (old_reg->start_pfn > start_pfn) {  in kbase_region_tracker_insert()
     168  static struct kbase_va_region *find_region_enclosing_range_rbtree(struct rb_root *rbtree, u64 start_pfn,  in find_region_enclosing_range_rbtree() (argument)
     173  u64 end_pfn = start_pfn + nr_pages;  in find_region_enclosing_range_rbtree()
     181  tmp_start_pfn = reg->start_pfn;  in find_region_enclosing_range_rbtree()
     182  tmp_end_pfn = reg->start_pfn  in find_region_enclosing_range_rbtree()
     296  u64 start_pfn = reg->start_pfn;  in kbase_region_tracker_find_region_meeting_reqs() (local)
     427  kbase_insert_va_region_nolock(struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)  (argument)
     592  u64 start_pfn;  in kbase_add_va_region_rbtree() (local)
    1076  kbase_alloc_free_region(struct rb_root *rbtree, u64 start_pfn, size_t nr_pages, int zone)  (argument)
    [all...]
mali_kbase_debug_mem_view.c
      45  u64 start_pfn;  (member)
     131  seq_printf(m, "%016llx: Unbacked page\n\n", (map->start_pfn + data->offset) << PAGE_SHIFT);  in debug_mem_show()
     146  seq_printf(m, "%016llx:", i + ((map->start_pfn + data->offset) << PAGE_SHIFT));  in debug_mem_show()
     192  mapping->start_pfn = reg->start_pfn;  in debug_mem_zone_open()
mali_kbase_mem_linux.c
     126  if ((reg->start_pfn <= gpu_pfn) &&  in kbase_find_event_mem_region()
     127  (gpu_pfn < (reg->start_pfn + reg->nr_pages))) {  in kbase_find_event_mem_region()
     274  mapping_offset = gpu_addr - (reg->start_pfn << PAGE_SHIFT);  in kbase_phy_alloc_mapping_get()
     505  *gpu_va = reg->start_pfn << PAGE_SHIFT;  in kbase_mem_alloc()
    1048  ret = kbase_mmu_update_pages(kctx, reg->start_pfn,  in kbase_mem_flags_change()
    1115  reg->start_pfn);  in kbase_mem_do_sync_imported()
    1144  reg->start_pfn);  in kbase_mem_do_sync_imported()
    1167  reg->start_pfn, ret);  in kbase_mem_do_sync_imported()
    1309  kctx->kbdev, &kctx->mmu, reg->start_pfn, kbase_get_gpu_phy_pages(reg),  in kbase_mem_umm_map()
    1326  kctx, reg->start_pfn  in kbase_mem_umm_map()
    [all...]
mali_linux_trace.h
     159  TP_fast_assign(__entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT; __entry->fault_addr = fault->addr;
     180  TP_fast_assign(__entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT;
     223  TP_fast_assign(__entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT; __entry->read_val = read_val;
     253  TP_fast_assign(__entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT;
     333  TP_fast_assign(__entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT;
mali_kbase_jd.c
     322  reg->start_pfn << PAGE_SHIFT; /* save the start_pfn (as an address, not pfn) to use fast lookup later */  in kbase_jd_pre_external_resources()
     592  katom->jit_ids[idx], reg->start_pfn << PAGE_SHIFT, reg->heap_info_gpu_addr);  in jd_update_jit_usage()
     630  if (addr_end >= (reg->start_pfn << PAGE_SHIFT)) {  in jd_update_jit_usage()
     631  used_pages = PFN_UP(addr_end) - reg->start_pfn;  in jd_update_jit_usage()
     645  __func__, idx, katom->jit_ids[idx], reg->start_pfn << PAGE_SHIFT, used_pages, reg->nr_pages,  in jd_update_jit_usage()
/device/soc/rockchip/common/sdk_linux/include/linux/
iova.h
      74  unsigned long start_pfn; /* Lower limit for this domain */  (member)
     151  void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn);
     215  static inline void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn)  (argument)
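Unlike the Mali regions above, here start_pfn is the lower bound of a whole IOVA allocation domain: init_iova_domain() records it and the allocator never hands out a pfn below it. A kernel-side sketch of the API at line 151 (it builds only inside a kernel tree; the function name, granule and pfn values are illustrative assumptions):

#include <linux/errno.h>
#include <linux/iova.h>
#include <linux/sizes.h>

static struct iova_domain demo_iovad;

static int demo_iova(void)
{
    struct iova *iova;

    /* the iova allocator's kmem_cache must exist before alloc_iova() */
    if (iova_cache_get())
        return -ENOMEM;

    /* 4 KiB granule; start_pfn = 0x100 is the domain's lower limit,
     * so no IOVA below pfn 0x100 is ever handed out */
    init_iova_domain(&demo_iovad, SZ_4K, 0x100);

    /* ask for 16 pages anywhere up to pfn 0xfffff, size-aligned */
    iova = alloc_iova(&demo_iovad, 16, 0xfffff, true);
    if (!iova) {
        put_iova_domain(&demo_iovad);
        iova_cache_put();
        return -ENOMEM;
    }

    free_iova(&demo_iovad, iova->pfn_lo);
    put_iova_domain(&demo_iovad);
    iova_cache_put();
    return 0;
}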
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/csf/
mali_kbase_csf_trace_buffer.c
     180  (kbdev->csf.firmware_trace_buffers.mcu_rw.va_reg->start_pfn << PAGE_SHIFT) +  in kbase_csf_firmware_trace_buffers_init()
     186  (kbdev->csf.firmware_trace_buffers.mcu_write.va_reg->start_pfn << PAGE_SHIFT) +  in kbase_csf_firmware_trace_buffers_init()
     192  (trace_buffer->data_mapping.va_reg->start_pfn << PAGE_SHIFT);  in kbase_csf_firmware_trace_buffers_init()
     324  (kbdev->csf.firmware_trace_buffers.mcu_rw.va_reg->start_pfn << PAGE_SHIFT) +  in kbase_csf_firmware_reload_trace_buffers_data()
     330  (kbdev->csf.firmware_trace_buffers.mcu_write.va_reg->start_pfn << PAGE_SHIFT) +  in kbase_csf_firmware_reload_trace_buffers_data()
     336  (trace_buffer->data_mapping.va_reg->start_pfn << PAGE_SHIFT);  in kbase_csf_firmware_reload_trace_buffers_data()
mali_kbase_csf.c
     121  reg->start_pfn, num_pages, MCU_AS_NR);  in gpu_munmap_user_io_pages()
     186  ret = kbase_mmu_insert_pages(kbdev, &kbdev->csf.mcu_mmu, reg->start_pfn,  in gpu_mmap_user_io_pages()
     195  reg->start_pfn + 1, &phys[1], 1, mem_flags,  in gpu_mmap_user_io_pages()
     205  reg->start_pfn, 1, MCU_AS_NR);  in gpu_mmap_user_io_pages()
     504  ((queue_addr >> PAGE_SHIFT) - region->start_pfn))) {  in csf_queue_register_internal()
     525  ((reg_ex->ex_buffer_base >> PAGE_SHIFT) - region_ex->start_pfn))) {  in csf_queue_register_internal()
    1131  reg->start_pfn, &s_buf->phy[0], nr_pages,  in create_normal_suspend_buffer()
    1216  err = kbase_mmu_insert_pages(kbdev, &kbdev->csf.mcu_mmu, reg->start_pfn,  in create_protected_suspend_buffer()
    1441  s_buf->reg->start_pfn, nr_pages, MCU_AS_NR));  in term_normal_suspend_buffer()
    1474  s_buf->reg->start_pfn, nr_page  in term_protected_suspend_buffer()
    [all...]
/device/soc/rockchip/common/sdk_linux/kernel/power/
snapshot.c
     373  unsigned long start_pfn; /* Zone start page frame */  (member)
     525  zone->start_pfn = start;  in create_zone_bm_rtree()
     725  if (pfn >= zone->start_pfn && pfn < zone->end_pfn) {  in memory_bm_find_bit()
     734  if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {  in memory_bm_find_bit()
     756  if (zone == bm->cur.zone && ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn) {  in memory_bm_find_bit()
     761  block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;  in memory_bm_find_bit()
     776  bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;  in memory_bm_find_bit()
     780  *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;  in memory_bm_find_bit()
     898  pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;  in memory_bm_next_pfn()
     902  pfn = bm->cur.zone->start_pfn  in memory_bm_next_pfn()
     917  unsigned long start_pfn;  (member)
     954  register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)  in register_nosave_region() (argument)
    [all...]
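snapshot.c locates a pfn's bit in the hibernation bitmap relative to its zone: subtract zone->start_pfn, then split the offset into a block index and a bit offset within the block. A userspace sketch of that addressing; the constants mirror mainline snapshot.c (one page of bits per block, so BM_BLOCK_SHIFT = PAGE_SHIFT + 3) but are restated here as assumptions:

#include <stdio.h>

#define PAGE_SHIFT     12                   /* assumed 4 KiB pages */
#define BM_BLOCK_SHIFT (PAGE_SHIFT + 3)     /* one page holds PAGE_SIZE * 8 bits */
#define BM_BLOCK_MASK  ((1UL << BM_BLOCK_SHIFT) - 1)

int main(void)
{
    unsigned long zone_start_pfn = 0x10000; /* illustrative zone start */
    unsigned long pfn = 0x1a123;

    /* as in memory_bm_find_bit(): position the pfn relative to its zone */
    unsigned long block_nr = (pfn - zone_start_pfn) >> BM_BLOCK_SHIFT;
    unsigned long bit_nr = (pfn - zone_start_pfn) & BM_BLOCK_MASK;

    printf("pfn 0x%lx -> block %lu, bit %lu\n", pfn, block_nr, bit_nr);
    return 0;
}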
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/mmu/
mali_kbase_mmu.c
     252  ((unsigned long long)reg->start_pfn) << PAGE_SHIFT);  in reg_grow_calc_extra_pages()
     271  * has been allocated in such a way that (start_pfn +  in reg_grow_calc_extra_pages()
     304  kbase_gpu_mmu_handle_write_faulting_as(struct kbase_device *kbdev, struct kbase_as *faulting_as, u64 start_pfn, size_t nr, u32 kctx_id)  (argument)
     306  u64 start_pfn, size_t nr,  in kbase_gpu_mmu_handle_write_faulting_as()
     322  .vpfn = start_pfn,  in kbase_gpu_mmu_handle_write_faulting_as()
     404  pfn_offset = fault_pfn - region->start_pfn;  in kbase_gpu_mmu_handle_write_fault()
     845  * validating the fault_address to be within a size_t from the start_pfn  in kbase_mmu_page_fault_worker()
     847  fault_rel_pfn = fault_pfn - region->start_pfn;  in kbase_mmu_page_fault_worker()
     856  fault->addr, region->start_pfn,  in kbase_mmu_page_fault_worker()
     857  region->start_pfn +  in kbase_mmu_page_fault_worker()
     957  region->start_pfn  in kbase_mmu_page_fault_worker()
    [all...]
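The page-fault worker reduces the faulting pfn to an offset from the region's start_pfn and compares that with the pages already backed, to decide whether the fault hits committed memory or the region must grow. A simplified standalone sketch of that decision; the struct, the minimum-growth arithmetic and all values are illustrative (the real driver sizes the growth via reg_grow_calc_extra_pages() and a tunable extension):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                       /* assumed 4 KiB pages */

struct region {                             /* illustrative stand-in */
    uint64_t start_pfn;                     /* first pfn of the region */
    size_t   backed_pages;                  /* pages currently committed */
};

int main(void)
{
    struct region r = { .start_pfn = 0x9000, .backed_pages = 8 };
    uint64_t fault_addr = (0x900aULL << PAGE_SHIFT) | 0x1c4;
    uint64_t fault_pfn = fault_addr >> PAGE_SHIFT;

    /* as at line 847: offset of the fault within the region */
    uint64_t fault_rel_pfn = fault_pfn - r.start_pfn;

    if (fault_rel_pfn < r.backed_pages)
        printf("fault on an already-backed page, nothing to commit\n");
    else
        printf("need at least %llu more page(s)\n",
               (unsigned long long)(fault_rel_pfn - r.backed_pages + 1));
    return 0;
}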
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/mmu/
mali_kbase_mmu.c
     141  ((unsigned long long)reg->start_pfn) << PAGE_SHIFT);  in reg_grow_calc_extra_pages()
     160  * has been allocated in such a way that (start_pfn +  in reg_grow_calc_extra_pages()
     193  kbase_gpu_mmu_handle_write_faulting_as(struct kbase_device *kbdev, struct kbase_as *faulting_as, u64 start_pfn, size_t nr, u32 op)  (argument)
     194  u64 start_pfn, size_t nr, u32 op)  in kbase_gpu_mmu_handle_write_faulting_as()
     199  kbase_mmu_hw_do_operation(kbdev, faulting_as, start_pfn, nr, op, 1);  in kbase_gpu_mmu_handle_write_faulting_as()
     267  pfn_offset = fault_pfn - region->start_pfn;  in kbase_gpu_mmu_handle_write_fault()
     678  * validating the fault_address to be within a size_t from the start_pfn  in kbase_mmu_page_fault_worker()
     680  fault_rel_pfn = fault_pfn - region->start_pfn;  in kbase_mmu_page_fault_worker()
     685  fault->addr, region->start_pfn, region->start_pfn + current_backed_size);  in kbase_mmu_page_fault_worker()
     759  err = kbase_mmu_insert_pages_no_flush(kbdev, &kctx->mmu, region->start_pfn  in kbase_mmu_page_fault_worker()
    [all...]