
Searched refs:pgd (Results 1 - 25 of 25) sorted by relevance

/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/mmu/
mali_kbase_mmu_mode_aarch64.c
158 static unsigned int get_num_valid_entries(u64 *pgd) in get_num_valid_entries() argument
163 (unsigned int)((pgd[2] & VALID_ENTRY_MASK) >> in get_num_valid_entries()
166 (unsigned int)((pgd[1] & VALID_ENTRY_MASK) >> in get_num_valid_entries()
169 (unsigned int)((pgd[0] & VALID_ENTRY_MASK) >> in get_num_valid_entries()
175 static void set_num_valid_entries(u64 *pgd, unsigned int num_of_valid_entries) in set_num_valid_entries() argument
179 pgd[0] &= ~VALID_ENTRY_MASK; in set_num_valid_entries()
180 pgd[0] |= ((u64)(num_of_valid_entries & 0xF) in set_num_valid_entries()
183 pgd[1] &= ~VALID_ENTRY_MASK; in set_num_valid_entries()
184 pgd[1] |= ((u64)((num_of_valid_entries >> 4) & 0xF) in set_num_valid_entries()
187 pgd[2] &= ~VALID_ENTRY_MASK; in set_num_valid_entries()
192 entry_set_pte(u64 *pgd, u64 vpfn, phys_addr_t phy) entry_set_pte() argument
[all...]
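
The two helpers above show how the AArch64 mode driver tracks how many entries of a pgd page are valid: the count is packed as three 4-bit nibbles into software-reserved bits of pgd[0], pgd[1] and pgd[2] (low, middle and high nibble respectively), giving a 12-bit counter with no side storage. A minimal C sketch of that packing, assuming a hypothetical VALID_ENTRY_SHIFT of 55 in place of the driver's real constants:

typedef unsigned long long u64;

#define VALID_ENTRY_SHIFT 55 /* assumption; the driver defines the real bit position */
#define VALID_ENTRY_MASK  ((u64)0xF << VALID_ENTRY_SHIFT)

/* Read the 12-bit count back out of the three nibbles. */
static unsigned int sketch_get_num_valid_entries(const u64 *pgd)
{
    unsigned int num = (unsigned int)((pgd[0] & VALID_ENTRY_MASK) >> VALID_ENTRY_SHIFT);

    num |= (unsigned int)((pgd[1] & VALID_ENTRY_MASK) >> VALID_ENTRY_SHIFT) << 4;
    num |= (unsigned int)((pgd[2] & VALID_ENTRY_MASK) >> VALID_ENTRY_SHIFT) << 8;
    return num;
}

/* Store a 12-bit count, one nibble per entry, clearing the old bits first. */
static void sketch_set_num_valid_entries(u64 *pgd, unsigned int num)
{
    pgd[0] &= ~VALID_ENTRY_MASK;
    pgd[0] |= ((u64)(num & 0xF)) << VALID_ENTRY_SHIFT;
    pgd[1] &= ~VALID_ENTRY_MASK;
    pgd[1] |= ((u64)((num >> 4) & 0xF)) << VALID_ENTRY_SHIFT;
    pgd[2] &= ~VALID_ENTRY_MASK;
    pgd[2] |= ((u64)((num >> 8) & 0xF)) << VALID_ENTRY_SHIFT;
}
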
mali_kbase_mmu.c
218 * @pgd: Physical address of page directory to be freed.
222 struct kbase_mmu_table *mmut, phys_addr_t pgd,
1185 mmu_get_next_pgd(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, phys_addr_t *pgd, u64 vpfn, int level) mmu_get_next_pgd() argument
1187 phys_addr_t *pgd, u64 vpfn, int level) in mmu_get_next_pgd()
1193 KBASE_DEBUG_ASSERT(*pgd); in mmu_get_next_pgd()
1204 p = pfn_to_page(PFN_DOWN(*pgd)); in mmu_get_next_pgd()
1229 *pgd = target_pgd; in mmu_get_next_pgd()
1243 phys_addr_t pgd; in mmu_get_pgd_at_level() local
1247 pgd = mmut->pgd; in mmu_get_pgd_at_level()
1250 int err = mmu_get_next_pgd(kbdev, mmut, &pgd, vpfn, l); in mmu_get_pgd_at_level()
1278 phys_addr_t pgd; mmu_insert_pages_failure_recovery() local
1373 phys_addr_t pgd; kbase_mmu_insert_single_page() local
1504 kbase_mmu_free_pgd(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, phys_addr_t pgd, bool dirty) kbase_mmu_free_pgd() argument
1548 phys_addr_t pgd; kbase_mmu_insert_pages_no_flush() local
2004 phys_addr_t pgd; kbase_mmu_teardown_pages() local
2178 phys_addr_t pgd; kbase_mmu_update_pages_no_flush() local
2292 mmu_teardown_level(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, phys_addr_t pgd, int level, u64 *pgd_page_buffer) mmu_teardown_level() argument
2402 kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd, int level, char ** const buffer, size_t *size_left) kbasep_mmu_dump_level() argument
[all...]
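
The mmu_get_pgd_at_level()/mmu_get_next_pgd() pair matched above is the core GPU page-table walk: start from the top-level pgd stored in the kbase_mmu_table, and at each level follow the entry selected by the virtual page frame number (vpfn) until the requested level is reached. A condensed sketch of that loop shape, where read_next_pgd() is a hypothetical stand-in for mmu_get_next_pgd():

typedef unsigned long long u64;
typedef unsigned long phys_addr_t;

/* Hypothetical stand-in for mmu_get_next_pgd(): maps the page at *pgd,
 * decodes the entry that vpfn selects at this level, and writes the next
 * level's physical address back through *pgd. Nonzero on failure. */
int read_next_pgd(phys_addr_t *pgd, u64 vpfn, int level);

/* Sketch of mmu_get_pgd_at_level(): descend one level per iteration. */
int sketch_get_pgd_at_level(phys_addr_t top_pgd, u64 vpfn, int target_level,
                            phys_addr_t *out_pgd)
{
    phys_addr_t pgd = top_pgd;
    int l;

    for (l = 0; l < target_level; l++) {
        int err = read_next_pgd(&pgd, vpfn, l);

        if (err)
            return err; /* intermediate table missing or unreadable */
    }

    *out_pgd = pgd;
    return 0;
}
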
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/mmu/
mali_kbase_mmu.c
953 static int mmu_get_next_pgd(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, phys_addr_t *pgd, u64 vpfn, in mmu_get_next_pgd() argument
960 KBASE_DEBUG_ASSERT(*pgd); in mmu_get_next_pgd()
971 p = pfn_to_page(PFN_DOWN(*pgd)); in mmu_get_next_pgd()
994 *pgd = target_pgd; in mmu_get_next_pgd()
1005 phys_addr_t pgd; in mmu_get_pgd_at_level() local
1009 pgd = mmut->pgd; in mmu_get_pgd_at_level()
1012 int err = mmu_get_next_pgd(kbdev, mmut, &pgd, vpfn, l); in mmu_get_pgd_at_level()
1020 *out_pgd = pgd; in mmu_get_pgd_at_level()
1033 phys_addr_t pgd; in mmu_insert_pages_failure_recovery() local
1104 phys_addr_t pgd; kbase_mmu_insert_single_page() local
1251 phys_addr_t pgd; kbase_mmu_insert_pages_no_flush() local
1570 phys_addr_t pgd; kbase_mmu_teardown_pages() local
1710 phys_addr_t pgd; kbase_mmu_update_pages_no_flush() local
1796 mmu_teardown_level(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, phys_addr_t pgd, int level, u64 *pgd_page_buffer) mmu_teardown_level() argument
1908 kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd, int level, char **const buffer, size_t *size_left) kbasep_mmu_dump_level() argument
[all...]
mali_kbase_mmu_mode_lpae.c
80 setup->transtab = ((u64)mmut->pgd & ((0xFFFFFFFFULL << 32) | AS_TRANSTAB_LPAE_ADDR_SPACE_MASK)) | in mmu_get_as_setup()
/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/
mali_kbase_mmu.c
410 static int mmu_get_next_pgd(struct kbase_context *kctx, phys_addr_t *pgd, u64 vpfn, int level) in mmu_get_next_pgd() argument
416 KBASE_DEBUG_ASSERT(*pgd); in mmu_get_next_pgd()
428 p = pfn_to_page(PFN_DOWN(*pgd)); in mmu_get_next_pgd()
451 *pgd = target_pgd; in mmu_get_next_pgd()
458 phys_addr_t pgd; in mmu_get_bottom_pgd() local
463 pgd = kctx->pgd; in mmu_get_bottom_pgd()
465 int err = mmu_get_next_pgd(kctx, &pgd, vpfn, l); in mmu_get_bottom_pgd()
473 *out_pgd = pgd; in mmu_get_bottom_pgd()
478 static phys_addr_t mmu_insert_pages_recover_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level) in mmu_insert_pages_recover_get_next_pgd() argument
510 phys_addr_t pgd; mmu_insert_pages_recover_get_bottom_pgd() local
528 phys_addr_t pgd; mmu_insert_pages_failure_recovery() local
579 phys_addr_t pgd; kbase_mmu_insert_single_page() local
685 phys_addr_t pgd; kbase_mmu_insert_pages_no_flush() local
991 phys_addr_t pgd; kbase_mmu_teardown_pages() local
1074 phys_addr_t pgd; kbase_mmu_update_pages() local
1156 mmu_check_unused(struct kbase_context *kctx, phys_addr_t pgd) mmu_check_unused() argument
1175 mmu_teardown_level(struct kbase_context *kctx, phys_addr_t pgd, int level, int zap, u64 *pgd_page_buffer) mmu_teardown_level() argument
1271 kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd, int level, char **const buffer, size_t *size_left) kbasep_mmu_dump_level() argument
[all...]
mali_kbase_context.c
122 kctx->pgd = kbase_mmu_alloc_pgd(kctx); in kbase_create_context()
124 } while (!kctx->pgd); in kbase_create_context()
mali_kbase_mmu_mode_aarch64.c
78 setup->transtab = (u64)kctx->pgd & AS_TRANSTAB_BASE_MASK; in mmu_get_as_setup()
mali_kbase_mmu_mode_lpae.c
78 setup->transtab = ((u64)kctx->pgd & ((0xFFFFFFFFULL << 32) | AS_TRANSTAB_LPAE_ADDR_SPACE_MASK)) | in mmu_get_as_setup()
mali_kbase_defs.h
1323 phys_addr_t pgd; member
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/
mali_kbase_mmu.c
445 mmu_get_next_pgd(struct kbase_context *kctx, phys_addr_t *pgd, u64 vpfn, int level) mmu_get_next_pgd() argument
446 phys_addr_t *pgd, u64 vpfn, int level) in mmu_get_next_pgd()
452 KBASE_DEBUG_ASSERT(*pgd); in mmu_get_next_pgd()
464 p = pfn_to_page(PFN_DOWN(*pgd)); in mmu_get_next_pgd()
488 *pgd = target_pgd; in mmu_get_next_pgd()
496 phys_addr_t pgd; in mmu_get_bottom_pgd() local
501 pgd = kctx->pgd; in mmu_get_bottom_pgd()
503 int err = mmu_get_next_pgd(kctx, &pgd, vpfn, l); in mmu_get_bottom_pgd()
511 *out_pgd = pgd; in mmu_get_bottom_pgd()
516 static phys_addr_t mmu_insert_pages_recover_get_next_pgd(struct kbase_context *kctx, phys_addr_t pgd, u64 vpfn, int level) in mmu_insert_pages_recover_get_next_pgd() argument
547 phys_addr_t pgd; mmu_insert_pages_recover_get_bottom_pgd() local
566 phys_addr_t pgd; mmu_insert_pages_failure_recovery() local
617 phys_addr_t pgd; kbase_mmu_insert_single_page() local
729 phys_addr_t pgd; kbase_mmu_insert_pages_no_flush() local
1044 phys_addr_t pgd; kbase_mmu_teardown_pages() local
1127 phys_addr_t pgd; kbase_mmu_update_pages() local
1208 mmu_check_unused(struct kbase_context *kctx, phys_addr_t pgd) mmu_check_unused() argument
1226 mmu_teardown_level(struct kbase_context *kctx, phys_addr_t pgd, int level, int zap, u64 *pgd_page_buffer) mmu_teardown_level() argument
1324 kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd, int level, char ** const buffer, size_t *size_left) kbasep_mmu_dump_level() argument
[all...]
mali_kbase_context.c
121 kctx->pgd = kbase_mmu_alloc_pgd(kctx); in kbase_create_context()
123 } while (!kctx->pgd); in kbase_create_context()
mali_kbase_mmu_mode_aarch64.c
89 setup->transtab = (u64)kctx->pgd & AS_TRANSTAB_BASE_MASK; in mmu_get_as_setup()
mali_kbase_mmu_mode_lpae.c
89 setup->transtab = ((u64)kctx->pgd & in mmu_get_as_setup()
mali_kbase_defs.h
1341 phys_addr_t pgd; member
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/
mali_kbase_defs.h
252 * @pgd: Physical address of the page allocated for the top
266 phys_addr_t pgd; member
558 * @get_num_valid_entries: returns the number of valid entries for a specific pgd.
559 * @set_num_valid_entries: sets the number of valid entries for a specific pgd
574 void (*entry_set_pte)(u64 *pgd, u64 vpfn, phys_addr_t phy);
576 unsigned int (*get_num_valid_entries)(u64 *pgd);
577 void (*set_num_valid_entries)(u64 *pgd,
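
entry_set_pte, get_num_valid_entries and set_num_valid_entries are function pointers in the driver's per-format MMU ops table, which is how the same core code drives both the AArch64 and LPAE page-table formats. A trimmed sketch of that vtable pattern, reduced to the callbacks matched here (the real struct kbase_mmu_mode carries many more hooks):

typedef unsigned long long u64;
typedef unsigned long phys_addr_t;

/* One ops table per page-table format; the core MMU code only ever
 * calls through these pointers. */
struct sketch_mmu_mode {
    void (*entry_set_pte)(u64 *pgd, u64 vpfn, phys_addr_t phy);
    unsigned int (*get_num_valid_entries)(u64 *pgd);
    void (*set_num_valid_entries)(u64 *pgd, unsigned int num_of_valid_entries);
};

/* e.g. the core code updates the counter without knowing the format:
 *     mode->set_num_valid_entries(pgd_page, n);
 */
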
/device/soc/hisilicon/hi3516dv300/sdk_linux/drv/osal/linux/mmz/
media_mem.c
651 pgd_t *pgd = NULL; local
673 pgd = pgd_offset(current->mm, virt);
674 if (pgd_none(*pgd)) {
675 osal_trace("osal_trace: not mapped in pgd!\n");
680 p4d = p4d_offset(pgd, virt);
683 pud = pud_offset(pgd, virt);
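
This hit (and the rga hits below) is the standard Linux software page-table walk: pgd_offset() selects the top-level entry for a virtual address, and each further *_offset() call descends one level. On builds where a level is folded away, p4d_offset() just hands back the pgd entry, which is what the "it will do nothing and return pgd" comments in the rga drivers refer to; the calls that pass pgd straight to pud_offset() presumably target older kernels where pud_offset() still took a pgd_t *. A minimal sketch of the full descent on a current kernel (locking and huge-page handling omitted):

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Walk mm's page tables down to the PTE covering addr.
 * Fills *ptep and returns 0, or -EFAULT if a level is missing. */
static int sketch_walk_to_pte(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
    pgd_t *pgd = pgd_offset(mm, addr);
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;

    if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
        return -EFAULT;
    p4d = p4d_offset(pgd, addr);        /* no-op when p4d is folded */
    if (p4d_none(*p4d))
        return -EFAULT;
    pud = pud_offset(p4d, addr);
    if (pud_none(*pud) || unlikely(pud_bad(*pud)))
        return -EFAULT;
    pmd = pmd_offset(pud, addr);
    if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
        return -EFAULT;
    pte = pte_offset_map(pmd, addr);    /* may return NULL on recent kernels */
    if (!pte)
        return -EFAULT;
    *ptep = *pte;
    pte_unmap(pte);
    return 0;
}
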
/device/soc/rockchip/rk3588/kernel/drivers/video/rockchip/rga3/
rga_mm.c
73 pgd_t *pgd; in rga_get_user_pages_from_vma() local
89 pgd = pgd_offset(current_mm, (Memory + i) << PAGE_SHIFT); in rga_get_user_pages_from_vma()
90 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) { in rga_get_user_pages_from_vma()
91 pr_err("failed to get pgd\n"); in rga_get_user_pages_from_vma()
98 * it will do nothing and return pgd. in rga_get_user_pages_from_vma()
100 p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT); in rga_get_user_pages_from_vma()
109 pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT); in rga_get_user_pages_from_vma()
rga_dma_buf.c
232 pgd_t *pgd; in rga_MapUserMemory() local
296 pgd = pgd_offset(mm, (Memory + i) << PAGE_SHIFT); in rga_MapUserMemory()
297 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) { in rga_MapUserMemory()
298 pr_err("failed to get pgd, result = %d, pageCount = %d\n", in rga_MapUserMemory()
306 * it will do nothing and return pgd. in rga_MapUserMemory()
308 p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT); in rga_MapUserMemory()
318 pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT); in rga_MapUserMemory()
rga2_mmu_info.c
226 pgd_t *pgd; in rga2_MapUserMemory() local
290 pgd = pgd_offset(mm, (Memory + i) << PAGE_SHIFT); in rga2_MapUserMemory()
291 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) { in rga2_MapUserMemory()
292 pr_err("failed to get pgd, result = %d, pageCount = %d\n", in rga2_MapUserMemory()
300 * it will do nothing and return pgd. in rga2_MapUserMemory()
302 p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT); in rga2_MapUserMemory()
312 pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT); in rga2_MapUserMemory()
/device/soc/rockchip/common/vendor/drivers/video/rockchip/rga2/
rga2_mmu_info.c
837 pgd_t *pgd; in rga2_MapUserMemory() local
892 pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT); in rga2_MapUserMemory()
893 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) { in rga2_MapUserMemory()
894 pr_err("RGA2 failed to get pgd, result = %d, pageCount = %d\n", result, pageCount); in rga2_MapUserMemory()
899 /* In the four-level page table, it will do nothing and return pgd. */ in rga2_MapUserMemory()
900 p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT); in rga2_MapUserMemory()
909 pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT); in rga2_MapUserMemory()
/device/soc/rockchip/common/sdk_linux/include/linux/
mm.h
2052 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) in __p4d_alloc() argument
2057 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
2168 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) in p4d_alloc() argument
2170 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? NULL : p4d_offset(pgd, address); in p4d_alloc()
3048 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
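
p4d_alloc() at mm.h:2170 is the allocate-on-demand idiom used at every page-table level: if the pgd slot is still empty, allocate and install the next-level table, then return the offset into it. The same one-liner, unfolded for readability (same logic, sketch only):

#include <linux/mm.h>

static inline p4d_t *sketch_p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
                                      unsigned long address)
{
    if (unlikely(pgd_none(*pgd))) {        /* no p4d table installed yet */
        if (__p4d_alloc(mm, pgd, address)) /* allocate + install; nonzero on failure */
            return NULL;
    }
    return p4d_offset(pgd, address);       /* entry for address in that table */
}
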
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/mmu/backend/
mali_kbase_mmu_jm.c
53 setup->transtab = (u64)mmut->pgd & AS_TRANSTAB_BASE_MASK; in kbase_mmu_get_as_setup()
mali_kbase_mmu_csf.c
55 setup->transtab = (u64)mmut->pgd & AS_TRANSTAB_BASE_MASK; in kbase_mmu_get_as_setup()
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/mmu/backend/
mali_kbase_mmu_jm.c
53 setup->transtab = (u64)mmut->pgd & AS_TRANSTAB_BASE_MASK; in kbase_mmu_get_as_setup()
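
All of the transtab hits (the jm/csf backends here and the mode files above) do the same job: program the address space's translation-table base register from the physical address of the top-level pgd, masked to the field layout the format expects (AS_TRANSTAB_BASE_MASK for AArch64, the 32-bit/address-space-mask combination for LPAE). A sketch of the AArch64-style variant, with a hypothetical mask value in place of the driver's constant:

typedef unsigned long long u64;

/* Hypothetical stand-in for AS_TRANSTAB_BASE_MASK; the real mask also
 * keeps low bits free for mode/attribute flags. */
#define SKETCH_TRANSTAB_BASE_MASK 0xFFFFFFFFFFFFF000ULL /* 4 KiB-aligned base */

static u64 sketch_transtab_from_pgd(u64 pgd_phys)
{
    return pgd_phys & SKETCH_TRANSTAB_BASE_MASK;
}
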
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/
mali_kbase_defs.h
248 * @pgd: Physical address of the page allocated for the top
262 phys_addr_t pgd; member

Completed in 37 milliseconds