/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/mmu/
mali_kbase_mmu.c
     131: * @vpfn: The virtual page frame number to start the flush on.
     148: kbase_mmu_flush_invalidate(struct kbase_context *kctx, u64 vpfn, size_t nr,
     155: * @vpfn: The virtual page frame number to start the flush on.
     164: struct kbase_device *kbdev, u64 vpfn, size_t nr, bool sync, int as_nr,
     195: static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
     206: * @vpfn: The virtual page frame number.
     211: phys_addr_t *pgds, u64 vpfn,
     322: .vpfn = start_pfn,  (in kbase_gpu_mmu_handle_write_faulting_as)
     873: .vpfn = 0,  (in kbase_mmu_page_fault_worker)
     908: .vpfn ...  (in kbase_mmu_page_fault_worker; match truncated)
    1185: mmu_get_next_pgd(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, phys_addr_t *pgd, u64 vpfn, int level)
    1237: mmu_get_pgd_at_level(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn, int level, phys_addr_t *out_pgd)
    1265: mmu_get_bottom_pgd(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn, phys_addr_t *out_pgd)
    1279: u64 vpfn = from_vpfn;  (local in mmu_insert_pages_failure_recovery)
    1368: kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn, struct tagged_addr phys, size_t nr, unsigned long flags, int const group_id, enum kbase_caller_mmu_sync_info mmu_sync_info)
    1697: kbase_mmu_insert_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn, struct tagged_addr *phys, size_t nr, unsigned long flags, int as_nr, int const group_id, enum kbase_caller_mmu_sync_info mmu_sync_info)
    1730: kbase_mmu_flush_invalidate_noretain(struct kbase_context *kctx, u64 vpfn, size_t nr)
    1777: kbase_mmu_flush_invalidate_as(struct kbase_device *kbdev, struct kbase_as *as, u64 vpfn, size_t nr, bool sync, u32 kctx_id, enum kbase_caller_mmu_sync_info mmu_sync_info)
    1850: kbase_mmu_flush_invalidate_no_ctx(struct kbase_device *kbdev, u64 vpfn, size_t nr, bool sync, int as_nr, enum kbase_caller_mmu_sync_info mmu_sync_info)
    1863: kbase_mmu_flush_invalidate(struct kbase_context *kctx, u64 vpfn, size_t nr, bool sync, enum kbase_caller_mmu_sync_info mmu_sync_info)
    1948: kbase_mmu_update_and_free_parent_pgds(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, phys_addr_t *pgds, u64 vpfn, int level)
    2001: kbase_mmu_teardown_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn, size_t nr, int as_nr)
    2174: kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn, struct tagged_addr *phys, size_t nr, unsigned long flags, int const group_id)
    2275: kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, struct tagged_addr *phys, size_t nr, unsigned long flags, int const group_id)
    [additional matches not shown]
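Across these call sites, vpfn is the GPU virtual page frame number (the GPU virtual address divided by the page size, as the @vpfn doc comments above describe) and nr is the count of pages starting at that frame. A minimal standalone sketch of that relationship; the 4 KiB page size is an assumption for the common kbase configuration, and the names are illustrative rather than the driver's:

```c
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12                       /* assumed 4 KiB pages */
#define EXAMPLE_PAGE_SIZE  (1ULL << EXAMPLE_PAGE_SHIFT)

int main(void)
{
	uint64_t gpu_va = 0x12345F000ULL;           /* arbitrary GPU virtual address */
	uint64_t bytes  = 64 * 1024;                /* size of the mapping */

	/* First virtual page frame of the mapping. */
	uint64_t vpfn = gpu_va >> EXAMPLE_PAGE_SHIFT;
	/* Number of pages covered, rounded up to whole pages. */
	uint64_t nr   = (bytes + EXAMPLE_PAGE_SIZE - 1) >> EXAMPLE_PAGE_SHIFT;

	printf("vpfn = 0x%llx, nr = %llu\n",
	       (unsigned long long)vpfn, (unsigned long long)nr);
	return 0;
}
```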
mali_kbase_mmu.h
     136: struct kbase_mmu_table *mmut, u64 vpfn,
     140: int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
     146: struct kbase_mmu_table *mmut, u64 vpfn,
     148: int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
mali_kbase_mmu_mode_aarch64.c
     192: static void entry_set_pte(u64 *pgd, u64 vpfn, phys_addr_t phy)
     196: page_table_entry_set(&pgd[vpfn], (phy & PAGE_MASK) | ENTRY_ACCESS_BIT | ...  (in entry_set_pte; match truncated)
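In entry_set_pte() the vpfn argument is effectively an index into a single page-directory page (the callers shown elsewhere in this listing pass the value after masking it to 9 bits), and the entry written is the physical address of the next-level table, masked to a page boundary, with an access bit ORed in; the tail of line 196 is truncated above, so any further type bits are not shown. A standalone sketch of that entry construction follows; the bit positions and names are illustrative assumptions, not the driver's real definitions:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real PAGE_MASK/ENTRY_ACCESS_BIT come from the driver. */
#define SKETCH_PAGE_MASK        (~0xFFFULL)   /* assumes 4 KiB pages */
#define SKETCH_ENTRY_ACCESS_BIT (1ULL << 10)  /* hypothetical access-flag position */

/* Build a table entry pointing at the next-level page table located at 'phy'. */
static uint64_t sketch_make_table_entry(uint64_t phy)
{
	return (phy & SKETCH_PAGE_MASK) | SKETCH_ENTRY_ACCESS_BIT;
}

int main(void)
{
	uint64_t pgd[512] = { 0 };   /* one 4 KiB directory page: 512 eight-byte entries */
	uint64_t index = 0x1A3;      /* the "vpfn" argument acts as this index */

	pgd[index] = sketch_make_table_entry(0x80001000ULL);
	printf("pgd[0x%llx] = 0x%llx\n",
	       (unsigned long long)index, (unsigned long long)pgd[index]);
	return 0;
}
```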
mali_kbase_mmu_hw.h
      79: * @vpfn: MMU Virtual Page Frame Number to start the operation on.
      86: u64 vpfn;  (struct member)
mali_kbase_mmu_hw_direct.c
     238: ret = lock_region(op_param->vpfn, op_param->nr, &lock_addr);  (in kbase_mmu_hw_do_operation)
/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/
mali_kbase_mmu.c
      56: * @vpfn: The virtual page frame number to start the flush on.
      71: static void kbase_mmu_flush_invalidate(struct kbase_context *kctx, u64 vpfn, size_t nr, bool sync);
     410: static int mmu_get_next_pgd(struct kbase_context *kctx, phys_addr_t *pgd, u64 vpfn, int level)
     425: vpfn >>= (MMU_GET_LEVEL_T - level) * MMU_GET_LEVEL_N;  (in mmu_get_next_pgd)
     426: vpfn &= 0x1FF;  (in mmu_get_next_pgd)
     435: target_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);  (in mmu_get_next_pgd)
     444: kctx->kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd);  (in mmu_get_next_pgd)
     456: static int mmu_get_bottom_pgd(struct kbase_context *kctx, u64 vpfn, phys_addr_t *out_pgd)
     465: int err = mmu_get_next_pgd(kctx, &pgd, vpfn, l);  (in mmu_get_bottom_pgd)
     478: static phys_addr_t mmu_insert_pages_recover_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn,
     508: mmu_insert_pages_recover_get_bottom_pgd(struct kbase_context *kctx, u64 vpfn)
     526: mmu_insert_pages_failure_recovery(struct kbase_context *kctx, u64 vpfn, size_t nr)
     577: kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn, phys_addr_t phys, size_t nr, unsigned long flags)
     682: kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags)
     791: kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags)
     813: kbase_mmu_flush_invalidate_noretain(struct kbase_context *kctx, u64 vpfn, size_t nr, bool sync)
     856: kbase_mmu_flush_invalidate(struct kbase_context *kctx, u64 vpfn, size_t nr, bool sync)
     989: kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr)
    1072: kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags)
    [additional matches not shown]
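Lines 425-426 above show how mmu_get_next_pgd() derives the per-level table index from vpfn: the bottom-level index is the low 9 bits, and each higher level shifts right by a further 9 bits (MMU_GET_LEVEL_T and MMU_GET_LEVEL_N appear to correspond to the literal 3 and 9 used by the other copies of this file in this listing). A standalone sketch of that index arithmetic; the helper name is illustrative, and the 4-level / 4 KiB layout is taken from the listing itself:

```c
#include <stdint.h>
#include <stdio.h>

/* Index into the page table at 'level' (0 = top, 3 = bottom) for a given
 * virtual page frame number, mirroring "vpfn >>= (3 - level) * 9; vpfn &= 0x1FF;"
 * from the matches above: 512 entries per table, 9 index bits per level. */
static unsigned int level_index(uint64_t vpfn, int level)
{
	return (unsigned int)((vpfn >> ((3 - level) * 9)) & 0x1FF);
}

int main(void)
{
	uint64_t vpfn = 0x12345F;   /* arbitrary example frame number */

	for (int level = 0; level <= 3; level++)
		printf("level %d -> index 0x%03x\n", level, level_index(vpfn, level));
	return 0;
}
```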
mali_kbase_mem.h
     659: int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr,
     661: int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags);
     662: int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn, phys_addr_t phys, size_t nr,
     665: int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr);
     666: int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags);
mali_kbase_mmu_hw.h
      78: * @param[in] vpfn MMU Virtual Page Frame Number to start the
      87: int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as, struct kbase_context *kctx, u64 vpfn,
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/
mali_kbase_mmu.c
      51: * @vpfn: The virtual page frame number to start the flush on.
      67: u64 vpfn, size_t nr, bool sync);
     445: mmu_get_next_pgd(struct kbase_context *kctx, phys_addr_t *pgd, u64 vpfn, int level)
     446: phys_addr_t *pgd, u64 vpfn, int level)  (in mmu_get_next_pgd)
     461: vpfn >>= (3 - level) * 9;  (in mmu_get_next_pgd)
     462: vpfn &= 0x1FF;  (in mmu_get_next_pgd)
     471: target_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);  (in mmu_get_next_pgd)
     481: kctx->kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd);  (in mmu_get_next_pgd)
     493: mmu_get_bottom_pgd(struct kbase_context *kctx, u64 vpfn, phys_addr_t *out_pgd)
     494: u64 vpfn, phys_addr_t *out_pgd)  (in mmu_get_bottom_pgd)
     503: int err = mmu_get_next_pgd(kctx, &pgd, vpfn, l);  (in mmu_get_bottom_pgd)
     516: static phys_addr_t mmu_insert_pages_recover_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn,
     545: mmu_insert_pages_recover_get_bottom_pgd(struct kbase_context *kctx, u64 vpfn)
     563: mmu_insert_pages_failure_recovery(struct kbase_context *kctx, u64 vpfn, size_t nr)
     613: kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn, phys_addr_t phys, size_t nr, unsigned long flags)
     725: kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags)
     840: kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags)
     864: kbase_mmu_flush_invalidate_noretain(struct kbase_context *kctx, u64 vpfn, size_t nr, bool sync)
     907: kbase_mmu_flush_invalidate(struct kbase_context *kctx, u64 vpfn, size_t nr, bool sync)
    1042: kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr)
    1125: kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags)
    [additional matches not shown]
mali_kbase_mem.h
     663: int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
     666: int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
     669: int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
     673: int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr);
     674: int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags);
mali_kbase_mmu_hw.h
      81: * @param[in] vpfn MMU Virtual Page Frame Number to start the
      91: struct kbase_context *kctx, u64 vpfn, u32 nr, u32 type,
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/mmu/
mali_kbase_mmu.c
      55: * @vpfn: The virtual page frame number to start the flush on.
      70: static void kbase_mmu_flush_invalidate(struct kbase_context *kctx, u64 vpfn, size_t nr, bool sync);
      75: * @vpfn: The virtual page frame number to start the flush on.
      82: static void kbase_mmu_flush_invalidate_no_ctx(struct kbase_device *kbdev, u64 vpfn, size_t nr, bool sync, int as_nr);
     112: static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn, struct tagged_addr *phys, size_t nr,
     953: static int mmu_get_next_pgd(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, phys_addr_t *pgd, u64 vpfn,
     968: vpfn >>= (0x3 - level) * 0x9;  (in mmu_get_next_pgd)
     969: vpfn &= 0x1FF;  (in mmu_get_next_pgd)
     978: target_pgd = kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);  (in mmu_get_next_pgd)
     987: kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd);  (in mmu_get_next_pgd)
    1002: mmu_get_pgd_at_level(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn, int level, phys_addr_t *out_pgd)
    1025: mmu_get_bottom_pgd(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn, phys_addr_t *out_pgd)
    1034: u64 vpfn = from_vpfn;  (local in mmu_insert_pages_failure_recovery)
    1101: kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn, struct tagged_addr phys, size_t nr, unsigned long flags, int const group_id)
    1379: kbase_mmu_insert_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn, struct tagged_addr *phys, size_t nr, unsigned long flags, int as_nr, int const group_id)
    1408: kbase_mmu_flush_invalidate_noretain(struct kbase_context *kctx, u64 vpfn, size_t nr, bool sync)
    1440: kbase_mmu_flush_invalidate_as(struct kbase_device *kbdev, struct kbase_as *as, u64 vpfn, size_t nr, bool sync)
    1478: kbase_mmu_flush_invalidate_no_ctx(struct kbase_device *kbdev, u64 vpfn, size_t nr, bool sync, int as_nr)
    1486: kbase_mmu_flush_invalidate(struct kbase_context *kctx, u64 vpfn, size_t nr, bool sync)
    1568: kbase_mmu_teardown_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn, size_t nr, int as_nr)
    1707: kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn, struct tagged_addr *phys, size_t nr, unsigned long flags, int const group_id)
    1786: kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, struct tagged_addr *phys, size_t nr, unsigned long flags, int const group_id)
    [additional matches not shown]
mali_kbase_mmu.h
     108: int kbase_mmu_insert_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn, struct tagged_addr *phys,
     110: int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn, struct tagged_addr phys, size_t nr,
     113: int kbase_mmu_teardown_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn, size_t nr, int as_nr);
     114: int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, struct tagged_addr *phys, size_t nr,
mali_kbase_mmu_hw.h
      65: * @vpfn: MMU Virtual Page Frame Number to start the operation on.
      76: int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as, u64 vpfn, u32 nr, u32 type,
mali_kbase_mmu_hw_direct.c
     173: int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as, u64 vpfn, u32 nr, u32 op,
     186: ret = lock_region(vpfn, nr, &lock_addr);  (in kbase_mmu_hw_do_operation)
/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/backend/gpu/
mali_kbase_mmu_hw_direct.c
     273: int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as, struct kbase_context *kctx, u64 vpfn,
     284: u64 lock_addr = lock_region(kbdev, vpfn, nr);  (in kbase_mmu_hw_do_operation)
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/backend/gpu/
mali_kbase_mmu_hw_direct.c
     302: kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as, struct kbase_context *kctx, u64 vpfn, u32 nr, u32 op, unsigned int handling_irq)
     303: struct kbase_context *kctx, u64 vpfn, u32 nr, u32 op,  (in kbase_mmu_hw_do_operation)
     314: u64 lock_addr = lock_region(kbdev, vpfn, nr);  (in kbase_mmu_hw_do_operation)
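In the *_hw_direct.c backends listed above, kbase_mmu_hw_do_operation() converts the (vpfn, nr) page range into a single lock address before issuing the LOCK/flush command to the MMU. The sketch below illustrates one plausible encoding: pick a power-of-two region large enough to cover the range once its base is aligned down, then pack the region's size exponent into the low bits of the base address. This is a simplified illustration under assumed encoding rules, not the driver's actual lock_region() implementation:

```c
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12 /* assumed 4 KiB pages */

/* Encode a lock address covering [vpfn, vpfn + nr) pages.  The choice of a
 * power-of-two region and the "log2(size in bytes) - 1 in the low bits"
 * packing are assumptions made for this illustration. */
static uint64_t sketch_lock_region(uint64_t vpfn, uint32_t nr)
{
	uint64_t region_pages = 1;
	unsigned int log2_pages = 0;

	/* Grow the region until, with its base aligned down, it still covers the range. */
	while ((vpfn & ~(region_pages - 1)) + region_pages < vpfn + nr) {
		region_pages <<= 1;
		log2_pages++;
	}

	uint64_t base = (vpfn & ~(region_pages - 1)) << SKETCH_PAGE_SHIFT;
	unsigned int log2_bytes = log2_pages + SKETCH_PAGE_SHIFT;

	return base | (log2_bytes - 1);
}

int main(void)
{
	printf("lock_addr = 0x%llx\n",
	       (unsigned long long)sketch_lock_region(0x12345, 10));
	return 0;
}
```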
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/
mali_kbase_defs.h
     574: void (*entry_set_pte)(u64 *pgd, u64 vpfn, phys_addr_t phy);  (struct member)