
Searched refs:pages (Results 1 - 25 of 130) sorted by relevance


/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/
mali_kbase_mem_pool.c
85 pool_dbg(pool, "added %zu pages\n", nr_pages); in kbase_mem_pool_add_list_locked()
353 pool_dbg(pool, "reclaim freed %ld pages\n", freed); in kbase_mem_pool_reclaim_scan_objects()
423 /* Zero pages first without holding the next_pool lock */ in kbase_mem_pool_term()
431 /* Free remaining pages to kernel */ in kbase_mem_pool_term()
445 pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill); in kbase_mem_pool_term()
530 struct tagged_addr *pages, bool partial_allowed) in kbase_mem_pool_alloc_pages()
546 /* Get pages from this pool */ in kbase_mem_pool_alloc_pages()
553 pages[i++] = as_tagged_tag(page_to_phys(p), in kbase_mem_pool_alloc_pages()
556 pages[i++] = as_tagged_tag(page_to_phys(p) + in kbase_mem_pool_alloc_pages()
560 pages[ in kbase_mem_pool_alloc_pages()
529 kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages, struct tagged_addr *pages, bool partial_allowed) kbase_mem_pool_alloc_pages() argument
613 kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool, size_t nr_4k_pages, struct tagged_addr *pages) kbase_mem_pool_alloc_pages_locked() argument
656 kbase_mem_pool_add_array(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool zero, bool sync) kbase_mem_pool_add_array() argument
696 kbase_mem_pool_add_array_locked(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool zero, bool sync) kbase_mem_pool_add_array_locked() argument
738 kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool dirty, bool reclaimed) kbase_mem_pool_free_pages() argument
789 kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool dirty, bool reclaimed) kbase_mem_pool_free_pages_locked() argument
[all...]
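
The kbase_mem_pool_alloc_pages() signatures above show a pool with a next_pool fallback. As a rough illustration (a standalone userspace model, not the actual kbase code; the struct layout and the malloc() fallback are invented for the sketch), the allocation path drains the local pool first, then chains to next_pool, and only then asks the underlying allocator for whatever is still missing:

    #include <stdlib.h>

    struct mem_pool {
        void **stock;                  /* cached pages */
        size_t cur_size;               /* number of cached pages */
        struct mem_pool *next_pool;    /* optional fallback pool */
    };

    static size_t pool_alloc_pages(struct mem_pool *pool, size_t nr, void **pages)
    {
        size_t i = 0;

        /* Get pages from this pool first */
        while (i < nr && pool->cur_size)
            pages[i++] = pool->stock[--pool->cur_size];

        /* Chain to the next pool, mirroring the next_pool fallback */
        if (i < nr && pool->next_pool)
            i += pool_alloc_pages(pool->next_pool, nr - i, pages + i);

        /* Get any remaining pages from the OS (malloc stands in for the kernel) */
        while (i < nr)
            pages[i++] = malloc(4096);

        return i;
    }
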
mali_kbase_trace_gpu_mem.h
47 struct kbase_context *kctx, size_t pages) in kbase_trace_gpu_mem_usage_dec()
52 kctx->kprcs->total_gpu_pages -= pages; in kbase_trace_gpu_mem_usage_dec()
54 kbdev->total_gpu_pages -= pages; in kbase_trace_gpu_mem_usage_dec()
62 struct kbase_context *kctx, size_t pages) in kbase_trace_gpu_mem_usage_inc()
67 kctx->kprcs->total_gpu_pages += pages; in kbase_trace_gpu_mem_usage_inc()
69 kbdev->total_gpu_pages += pages; in kbase_trace_gpu_mem_usage_inc()
46 kbase_trace_gpu_mem_usage_dec(struct kbase_device *kbdev, struct kbase_context *kctx, size_t pages) kbase_trace_gpu_mem_usage_dec() argument
61 kbase_trace_gpu_mem_usage_inc(struct kbase_device *kbdev, struct kbase_context *kctx, size_t pages) kbase_trace_gpu_mem_usage_inc() argument
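
Both variants of mali_kbase_trace_gpu_mem.h keep two totals in step: the matched lines adjust kctx->kprcs->total_gpu_pages and kbdev->total_gpu_pages together. A minimal standalone model of that pairing (struct and function names here are invented, and the real code also takes locks not shown in these excerpts):

    #include <stddef.h>

    struct proc_like { size_t total_gpu_pages; };   /* per-process view */
    struct dev_like  { size_t total_gpu_pages; };   /* per-device view */

    static void usage_inc(struct dev_like *dev, struct proc_like *proc, size_t pages)
    {
        proc->total_gpu_pages += pages;   /* both counters move together... */
        dev->total_gpu_pages  += pages;
    }

    static void usage_dec(struct dev_like *dev, struct proc_like *proc, size_t pages)
    {
        proc->total_gpu_pages -= pages;   /* ...so the two views stay consistent */
        dev->total_gpu_pages  -= pages;
    }
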
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/
mali_kbase_mem_pool.c
81 pool_dbg(pool, "added %zu pages\n", nr_pages); in kbase_mem_pool_add_list_locked()
345 pool_dbg(pool, "reclaim freed %ld pages\n", freed); in kbase_mem_pool_reclaim_scan_objects()
427 /* Zero pages first without holding the next_pool lock */ in kbase_mem_pool_term()
435 /* Free remaining pages to kernel */ in kbase_mem_pool_term()
448 pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill); in kbase_mem_pool_term()
533 int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages, struct tagged_addr *pages, in kbase_mem_pool_alloc_pages() argument
551 /* Get pages from this pool */ in kbase_mem_pool_alloc_pages()
558 pages[i++] = as_tagged_tag(page_to_phys(p), HUGE_HEAD | HUGE_PAGE); in kbase_mem_pool_alloc_pages()
560 pages[i++] = as_tagged_tag(page_to_phys(p) + PAGE_SIZE * j, HUGE_PAGE); in kbase_mem_pool_alloc_pages()
563 pages[ in kbase_mem_pool_alloc_pages()
613 kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool, size_t nr_4k_pages, struct tagged_addr *pages) kbase_mem_pool_alloc_pages_locked() argument
652 kbase_mem_pool_add_array(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool zero, bool sync) kbase_mem_pool_add_array() argument
692 kbase_mem_pool_add_array_locked(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool zero, bool sync) kbase_mem_pool_add_array_locked() argument
734 kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool dirty, bool reclaimed) kbase_mem_pool_free_pages() argument
784 kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool dirty, bool reclaimed) kbase_mem_pool_free_pages_locked() argument
[all...]
mali_kbase_trace_gpu_mem.h
47 static inline void kbase_trace_gpu_mem_usage_dec(struct kbase_device *kbdev, struct kbase_context *kctx, size_t pages) in kbase_trace_gpu_mem_usage_dec() argument
52 kctx->kprcs->total_gpu_pages -= pages; in kbase_trace_gpu_mem_usage_dec()
55 kbdev->total_gpu_pages -= pages; in kbase_trace_gpu_mem_usage_dec()
62 static inline void kbase_trace_gpu_mem_usage_inc(struct kbase_device *kbdev, struct kbase_context *kctx, size_t pages) in kbase_trace_gpu_mem_usage_inc() argument
67 kctx->kprcs->total_gpu_pages += pages; in kbase_trace_gpu_mem_usage_inc()
70 kbdev->total_gpu_pages += pages; in kbase_trace_gpu_mem_usage_inc()
mali_kbase_mem.h
43 static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages);
45 /* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink tmem by 4 pages at a time */
46 #define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2) /* round to 4 pages */
48 /* Part of the workaround for PRLAM-9630 requires us to grow/shrink memory by 8 pages.
49 The MMU reads in 8 page table entries from memory at a time, if we have more than one page fault within the same 8 pages
52 #define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3) /* round to 8 pages */
86 u64 offset; /* in pages */
87 u64 length; /* in pages */
91 * @brief Physical pages trackin
131 struct tagged_addr *pages; global() member
166 struct page **pages; global() member
1198 kbase_process_page_usage_inc(struct kbase_context *kctx, int pages) kbase_process_page_usage_inc() argument
1214 kbase_process_page_usage_dec(struct kbase_context *kctx, int pages) kbase_process_page_usage_dec() argument
[all...]
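
The PRLAM-9630 comment above explains why the blocksize constants are expressed as log2 values: the MMU fetches 8 page-table entries at a time, so tmem is grown and shrunk in multiples of 2^3 = 8 pages. A hedged sketch of how such a log2 constant is typically applied (the rounding helper below is illustrative, not code taken from these files):

    #include <stdint.h>

    #define BLOCKSIZE_PAGES_LOG2 3    /* round to 8 pages, as for PRLAM-9630 */

    /* Round a page count up to a whole number of 2^log2 blocks, so a grow
     * never leaves a partially populated 8-entry page-table group. */
    static uint64_t round_up_pages(uint64_t nr_pages)
    {
        uint64_t mask = ((uint64_t)1 << BLOCKSIZE_PAGES_LOG2) - 1;
        return (nr_pages + mask) & ~mask;
    }
    /* round_up_pages(9) == 16, round_up_pages(8) == 8 */
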
/device/soc/rockchip/common/sdk_linux/drivers/gpu/drm/rockchip/
rockchip_drm_gem.c
121 struct page **pages, **dst_pages; in rockchip_gem_get_pages() local
138 pages = drm_gem_get_pages(&rk_obj->base); in rockchip_gem_get_pages()
139 if (IS_ERR(pages)) { in rockchip_gem_get_pages()
140 return PTR_ERR(pages); in rockchip_gem_get_pages()
143 rk_obj->pages = pages; in rockchip_gem_get_pages()
162 if (page_to_pfn(pages[j]) != page_to_pfn(pages[j - 1]) + 1) { in rockchip_gem_get_pages()
170 dst_pages[end + i] = pages[cur_page + i]; in rockchip_gem_get_pages()
182 info->page = pages[cur_pag in rockchip_gem_get_pages()
[all...]
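
The rockchip_gem_get_pages() excerpt detects physical contiguity by comparing PFNs: pages[j] continues a run only when page_to_pfn(pages[j]) equals page_to_pfn(pages[j - 1]) + 1. A standalone model of that scan over a plain pfn array (illustrative, not the driver code):

    #include <stdio.h>

    /* Split a pfn array into maximal physically contiguous runs: two pages
     * are adjacent in memory exactly when their PFNs differ by 1. */
    static void find_runs(const unsigned long *pfn, int n)
    {
        int start = 0;

        for (int j = 1; j <= n; j++) {
            if (j == n || pfn[j] != pfn[j - 1] + 1) {
                printf("run: pages %d..%d (%d pages)\n",
                       start, j - 1, j - start);
                start = j;
            }
        }
    }
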
/device/soc/rockchip/common/vendor/drivers/gpu/arm/mali400/mali/linux/
mali_memory_defer_bind.c
65 /* allocate pages from OS memory */
74 /* add to free pages list */ in mali_mem_defer_alloc_mem()
77 ("mali_mem_defer_alloc_mem ,,*** pages allocate = 0x%x \n", num_pages)); in mali_mem_defer_alloc_mem()
78 list_splice(&os_mem.pages, &dblock->free_pages); in mali_mem_defer_alloc_mem()
103 /* allocate more pages from OS */ in mali_mem_prepare_mem_for_job()
144 INIT_LIST_HEAD(&mem_bkend->os_mem.pages); in mali_mem_defer_bind_allocation_prepare()
162 struct list_head *pages) in mali_mem_defer_bind_allocation()
171 list_splice(pages, &mem_bkend->os_mem.pages); in mali_mem_defer_bind_allocation()
185 static struct list_head *mali_mem_defer_get_free_page_list(u32 count, struct list_head *pages, in mali_mem_defer_get_free_page_list() argument
161 mali_mem_defer_bind_allocation(struct mali_backend_bind_list *bk_node, struct list_head *pages) mali_mem_defer_bind_allocation() argument
[all...]
mali_memory_cow.c
37 * allocate pages for COW backend and flush cache
47 /* allocate pages from os mem */ in mali_mem_cow_alloc_page()
55 node = MALI_OSK_CONTAINER_OF(os_mem.pages.next, struct mali_page_node, list); in mali_mem_cow_alloc_page()
72 return &target_bk->os_mem.pages; in _mali_memory_cow_get_node_list()
76 return &target_bk->cow_mem.pages; in _mali_memory_cow_get_node_list()
84 return &target_bk->swap_mem.pages; in _mali_memory_cow_get_node_list()
94 * This function allocate new pages for COW backend from os mem for a modified range
112 struct list_head *pages = NULL; in mali_memory_cow_os_memory() local
114 pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size); in mali_memory_cow_os_memory()
115 if (pages in mali_memory_cow_os_memory()
184 struct list_head *pages = NULL; mali_memory_cow_swap_memory() local
[all...]
mali_memory_os_alloc.c
75 LIST_HEAD(pages); in mali_mem_os_free()
85 list_move(&m_page->list, &pages); in mali_mem_os_free()
97 list_cut_position(&pages, os_pages, os_pages->prev); in mali_mem_os_free()
102 /* Put pages on pool. */ in mali_mem_os_free()
104 list_splice(&pages, &mali_mem_os_allocator.pool_pages); in mali_mem_os_free()
145 list_for_each_entry_safe(m_page, m_tmp, &mem_from->pages, list) in mali_mem_os_resize_pages()
148 list_move_tail(&m_page->list, &mem_to->pages); in mali_mem_os_resize_pages()
178 INIT_LIST_HEAD(&os_mem->pages); in mali_mem_os_alloc_pages()
181 /* Grab pages from pool. */ in mali_mem_os_alloc_pages()
195 /* Process pages fro in mali_mem_os_alloc_pages()
654 struct list_head *le, pages; mali_mem_os_shrink() local
[all...]
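
mali_memory_os_alloc.c implements a recycling pool with kernel list primitives: mali_mem_os_free() collects pages with list_move() and splices them onto mali_mem_os_allocator.pool_pages, and the allocation path grabs from that pool before going to the OS. A simplified standalone model of the same put/get discipline (a single-linked list and malloc() stand in for the kernel list API and page allocator):

    #include <stdlib.h>

    struct page_node { struct page_node *next; /* payload omitted */ };

    static struct page_node *pool_head;   /* head of the free-page pool */
    static unsigned long pool_count;

    /* free path: recycle the page into the pool (mali_mem_os_free analogue) */
    static void pool_put(struct page_node *p)
    {
        p->next = pool_head;
        pool_head = p;
        pool_count++;
    }

    /* alloc fast path: reuse a pooled page; slow path: ask the OS */
    static struct page_node *pool_get(void)
    {
        struct page_node *p = pool_head;

        if (p) {
            pool_head = p->next;
            pool_count--;
            return p;
        }
        return malloc(sizeof(*p));
    }
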
/device/soc/rockchip/common/kernel/drivers/gpu/arm/mali400/mali/linux/
mali_memory_defer_bind.c
64 /*allocate pages from OS memory*/
73 /* add to free pages list */ in mali_mem_defer_alloc_mem()
75 MALI_DEBUG_PRINT(4, ("mali_mem_defer_alloc_mem ,,*** pages allocate = 0x%x \n", num_pages)); in mali_mem_defer_alloc_mem()
76 list_splice(&os_mem.pages, &dblock->free_pages); in mali_mem_defer_alloc_mem()
98 /* allocate more pages from OS */ in mali_mem_prepare_mem_for_job()
137 INIT_LIST_HEAD(&mem_bkend->os_mem.pages); in mali_mem_defer_bind_allocation_prepare()
157 struct list_head *pages) in mali_mem_defer_bind_allocation()
164 list_splice(pages, &mem_bkend->os_mem.pages); in mali_mem_defer_bind_allocation()
179 static struct list_head *mali_mem_defer_get_free_page_list(u32 count, struct list_head *pages, mali_defer_mem_bloc argument
156 mali_mem_defer_bind_allocation(struct mali_backend_bind_list *bk_node, struct list_head *pages) mali_mem_defer_bind_allocation() argument
[all...]
mali_memory_cow.c
33 * allocate pages for COW backend and flush cache
43 /* allocate pages from os mem */ in mali_mem_cow_alloc_page()
52 node = _MALI_OSK_CONTAINER_OF(os_mem.pages.next, struct mali_page_node, list); in mali_mem_cow_alloc_page()
72 return &target_bk->os_mem.pages; in _mali_memory_cow_get_node_list()
76 return &target_bk->cow_mem.pages; in _mali_memory_cow_get_node_list()
84 return &target_bk->swap_mem.pages; in _mali_memory_cow_get_node_list()
94 * This function allocate new pages for COW backend from os mem for a modified range
115 struct list_head *pages = NULL; in mali_memory_cow_os_memory() local
117 pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size); in mali_memory_cow_os_memory()
119 if (NULL == pages) { in mali_memory_cow_os_memory()
194 struct list_head *pages = NULL; mali_memory_cow_swap_memory() local
[all...]
mali_memory_os_alloc.c
73 LIST_HEAD(pages); in mali_mem_os_free()
82 list_move(&m_page->list, &pages); in mali_mem_os_free()
94 list_cut_position(&pages, os_pages, os_pages->prev); in mali_mem_os_free()
99 /* Put pages on pool. */ in mali_mem_os_free()
101 list_splice(&pages, &mali_mem_os_allocator.pool_pages); in mali_mem_os_free()
140 list_for_each_entry_safe(m_page, m_tmp, &mem_from->pages, list) { in mali_mem_os_resize_pages()
142 list_move_tail(&m_page->list, &mem_to->pages); in mali_mem_os_resize_pages()
172 INIT_LIST_HEAD(&os_mem->pages); in mali_mem_os_alloc_pages()
175 /* Grab pages from pool. */ in mali_mem_os_alloc_pages()
189 /* Process pages fro in mali_mem_os_alloc_pages()
664 struct list_head *le, pages; mali_mem_os_shrink() local
[all...]
/device/soc/rockchip/rk3588/kernel/drivers/video/rockchip/rga3/
rga2_mmu_info.c
173 static int rga2_user_memory_check(struct page **pages, u32 w, u32 h, u32 format, in rga2_user_memory_check() argument
189 vaddr = kmap(pages[taipage_num - 1]); in rga2_user_memory_check()
192 vaddr = kmap(pages[taipage_num]); in rga2_user_memory_check()
205 kunmap(pages[taipage_num - 1]); in rga2_user_memory_check()
207 kunmap(pages[taipage_num]); in rga2_user_memory_check()
212 static int rga2_MapUserMemory(struct page **pages, uint32_t *pageTable, in rga2_MapUserMemory() argument
245 pages, NULL); in rga2_MapUserMemory()
248 pageCount, writeFlag, 0, pages, NULL); in rga2_MapUserMemory()
252 pageCount, writeFlag, pages, NULL, NULL); in rga2_MapUserMemory()
255 pageCount, writeFlag, pages, NUL in rga2_MapUserMemory()
403 struct page **pages = NULL; rga2_mmu_flush_cache() local
561 struct page **pages = NULL; rga2_mmu_info_BitBlt_mode() local
822 struct page **pages = NULL; rga2_mmu_info_color_palette_mode() local
995 struct page **pages = NULL; rga2_mmu_info_color_fill_mode() local
1128 struct page **pages = NULL; rga2_mmu_info_update_palette_table_mode() local
[all...]
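
rga2_MapUserMemory() (shown more fully in the rga2 results further down) has to pick among several get_user_pages() signatures because that API changed repeatedly across kernel versions. On recent kernels the equivalent operation would look roughly like the sketch below; this is an assumption-laden sketch using pin_user_pages_fast()/unpin_user_pages(), available since v5.6, rather than any call taken from these files:

    #include <linux/mm.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    /* Pin a user buffer's pages for device access; on a partial pin, undo
     * everything rather than hand the hardware an incomplete page table. */
    static int pin_user_range(unsigned long uaddr, unsigned int nr_pages,
                              bool write, struct page **pages)
    {
        int got = pin_user_pages_fast(uaddr & PAGE_MASK, nr_pages,
                                      write ? FOLL_WRITE : 0, pages);
        if (got < 0)
            return got;                    /* fault or bad address range */
        if ((unsigned int)got < nr_pages) {
            unpin_user_pages(pages, got);  /* partial pin: roll it back */
            return -EFAULT;
        }
        return 0;
    }
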
/device/soc/rockchip/common/vendor/drivers/media/platform/rockchip/cif/
common.c
89 struct page *page = NULL, **pages = NULL; in rkcif_alloc_page_dummy_buf() local
98 pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL); in rkcif_alloc_page_dummy_buf()
99 if (!pages) { in rkcif_alloc_page_dummy_buf()
103 pages[i] = page; in rkcif_alloc_page_dummy_buf()
110 ret = sg_alloc_table_from_pages(sg, pages, n_pages, 0, n_pages << PAGE_SHIFT, GFP_KERNEL); in rkcif_alloc_page_dummy_buf()
118 buf->pages = pages; in rkcif_alloc_page_dummy_buf()
125 kvfree(pages); in rkcif_alloc_page_dummy_buf()
142 __free_pages(buf->pages[0], 0); in rkcif_free_page_dummy_buf()
143 kvfree(buf->pages); in rkcif_free_page_dummy_buf()
[all...]
/device/soc/rockchip/rk3588/kernel/drivers/media/platform/rockchip/cif/
common.c
91 struct page *page = NULL, **pages = NULL; in rkcif_alloc_page_dummy_buf() local
99 pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL); in rkcif_alloc_page_dummy_buf()
100 if (!pages) in rkcif_alloc_page_dummy_buf()
103 pages[i] = page; in rkcif_alloc_page_dummy_buf()
108 ret = sg_alloc_table_from_pages(sg, pages, n_pages, 0, in rkcif_alloc_page_dummy_buf()
116 buf->pages = pages; in rkcif_alloc_page_dummy_buf()
124 kvfree(pages); in rkcif_alloc_page_dummy_buf()
140 __free_pages(buf->pages[0], 0); in rkcif_free_page_dummy_buf()
141 kvfree(buf->pages); in rkcif_free_page_dummy_buf()
[all...]
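
Both copies of rkcif_alloc_page_dummy_buf() use a space-saving trick: a single page is allocated and every slot of the pages[] array points at it, so the resulting scatter-gather table spans n_pages of address space while holding only one page of real memory, which is useful as a write-only dummy DMA target. A sketch of the idea under kernel-context assumptions (function name and error handling are illustrative):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    static int alloc_dummy_sgt(struct sg_table *sgt, unsigned int n_pages)
    {
        struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        struct page **pages;
        unsigned int i;
        int ret;

        if (!page)
            return -ENOMEM;
        pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages) {
            __free_page(page);
            return -ENOMEM;
        }
        for (i = 0; i < n_pages; i++)
            pages[i] = page;            /* every entry aliases the one page */

        ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0,
                                        (unsigned long)n_pages << PAGE_SHIFT,
                                        GFP_KERNEL);
        kvfree(pages);                  /* the sg_table keeps what it needs */
        if (ret)
            __free_page(page);
        return ret;
    }
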
/device/soc/hisilicon/hi3516dv300/sdk_linux/drv/osal/linux/mmz/
cma_allocator.c
380 struct page **pages = NULL; local
391 pages = vmalloc(array_size);
392 if (pages == NULL) {
398 *(pages + i) = tmp;
410 vfree(pages);
414 if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area), prot, pages)) {
416 if (map_vm_area(area, prot, pages)) {
418 osal_trace(KERN_ERR "map vm area to mmz pages failed!\n");
420 vfree(pages);
424 vfree(pages);
480 struct page **pages = NULL; global() local
[all...]
/device/soc/rockchip/common/vendor/drivers/media/platform/rockchip/isp/
videobuf2-rdma-sg.c
19 struct page **pages; member
58 buf->pages[i] = page + i; in vb2_dma_sg_alloc_contiguous()
90 buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *), GFP_KERNEL | __GFP_ZERO); in vb2_dma_sg_alloc()
91 if (!buf->pages) { in vb2_dma_sg_alloc()
100 ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages, buf->num_pages, 0, size, GFP_KERNEL); in vb2_dma_sg_alloc()
130 cma_release(dev->cma_area, buf->pages[0], num_pages); in vb2_dma_sg_alloc()
132 kvfree(buf->pages); in vb2_dma_sg_alloc()
150 cma_release(buf->dev->cma_area, buf->pages[0], i); in vb2_dma_sg_put()
151 kvfree(buf->pages); in vb2_dma_sg_put()
201 buf->pages in vb2_dma_sg_get_userptr()
[all...]
/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/
mali_kbase_mem.h
45 /* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink tmem by 4 pages at a time */
46 #define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2) /* round to 4 pages */
48 /* Part of the workaround for PRLAM-9630 requires us to grow/shrink memory by 8 pages.
49 The MMU reads in 8 page table entries from memory at a time, if we have more than one page fault within the same 8 pages
52 #define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3) /* round to 8 pages */
88 u64 offset; /* in pages */
89 u64 length; /* in pages */
93 * @brief Physical pages tracking object properties
98 /* physical pages trackin
110 phys_addr_t *pages; /* N elements, only 0..nents are valid */ global() member
154 struct page **pages; global() member
777 kbase_process_page_usage_inc(struct kbase_context *kctx, int pages) kbase_process_page_usage_inc() argument
792 kbase_process_page_usage_dec(struct kbase_context *kctx, int pages) kbase_process_page_usage_dec() argument
[all...]
mali_kbase_mem_pool.c
84 pool_dbg(pool, "added %zu pages\n", nr_pages); in kbase_mem_pool_add_list_locked()
292 pool_dbg(pool, "reclaim freed %ld pages\n", freed); in kbase_mem_pool_reclaim_scan_objects()
359 /* Zero pages first without holding the next_pool lock */ in kbase_mem_pool_term()
368 /* Free remaining pages to kernel */ in kbase_mem_pool_term()
379 pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill); in kbase_mem_pool_term()
424 int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages) in kbase_mem_pool_alloc_pages() argument
433 /* Get pages from this pool */ in kbase_mem_pool_alloc_pages()
438 pages[i] = page_to_phys(p); in kbase_mem_pool_alloc_pages()
444 err = kbase_mem_pool_alloc_pages(pool->next_pool, nr_pages - i, pages + i); in kbase_mem_pool_alloc_pages()
452 /* Get any remaining pages fro in kbase_mem_pool_alloc_pages()
470 kbase_mem_pool_add_array(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages, bool zero, bool sync) kbase_mem_pool_add_array() argument
509 kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages, bool dirty, bool reclaimed) kbase_mem_pool_free_pages() argument
[all...]
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/
mali_kbase_mem.h
49 /* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink tmem by 4 pages at a time */
50 #define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2) /* round to 4 pages */
52 /* Part of the workaround for PRLAM-9630 requires us to grow/shrink memory by 8 pages.
53 The MMU reads in 8 page table entries from memory at a time, if we have more than one page fault within the same 8 pages and
56 #define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3) /* round to 8 pages */
90 u64 offset; /* in pages */
91 u64 length; /* in pages */
95 * @brief Physical pages tracking object properties
100 /* physical pages trackin
112 phys_addr_t *pages; /* N elements, only 0..nents are valid */ global() member
156 struct page **pages; global() member
786 kbase_process_page_usage_inc(struct kbase_context *kctx, int pages) kbase_process_page_usage_inc() argument
801 kbase_process_page_usage_dec(struct kbase_context *kctx, int pages) kbase_process_page_usage_dec() argument
[all...]
mali_kbase_mem_pool.c
91 pool_dbg(pool, "added %zu pages\n", nr_pages); in kbase_mem_pool_add_list_locked()
307 pool_dbg(pool, "reclaim freed %ld pages\n", freed); in kbase_mem_pool_reclaim_scan_objects()
376 /* Zero pages first without holding the next_pool lock */ in kbase_mem_pool_term()
385 /* Free remaining pages to kernel */ in kbase_mem_pool_term()
396 pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill); in kbase_mem_pool_term()
442 phys_addr_t *pages) in kbase_mem_pool_alloc_pages()
451 /* Get pages from this pool */ in kbase_mem_pool_alloc_pages()
456 pages[i] = page_to_phys(p); in kbase_mem_pool_alloc_pages()
463 nr_pages - i, pages + i); in kbase_mem_pool_alloc_pages()
471 /* Get any remaining pages fro in kbase_mem_pool_alloc_pages()
441 kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages) kbase_mem_pool_alloc_pages() argument
488 kbase_mem_pool_add_array(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages, bool zero, bool sync) kbase_mem_pool_add_array() argument
526 kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages, bool dirty, bool reclaimed) kbase_mem_pool_free_pages() argument
[all...]
/device/soc/rockchip/rk3588/kernel/drivers/media/platform/rockchip/isp/
videobuf2-rdma-sg.c
19 struct page **pages; member
57 buf->pages[i] = page + i; in vb2_dma_sg_alloc_contiguous()
87 buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *), in vb2_dma_sg_alloc()
89 if (!buf->pages) in vb2_dma_sg_alloc()
96 ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages, in vb2_dma_sg_alloc()
126 cma_release(dev->cma_area, buf->pages[0], num_pages); in vb2_dma_sg_alloc()
128 kvfree(buf->pages); in vb2_dma_sg_alloc()
146 cma_release(buf->dev->cma_area, buf->pages[0], i); in vb2_dma_sg_put()
147 kvfree(buf->pages); in vb2_dma_sg_put()
195 buf->pages in vb2_dma_sg_get_userptr()
[all...]
/device/soc/hisilicon/hi3751v350/sdk_linux/source/common/drv/osal/
osal_addr.c
225 struct page **pages = NULL; in osal_blockmem_vmap() local
234 pages = vmalloc(page_count * sizeof(struct page *)); in osal_blockmem_vmap()
235 if (!pages) { in osal_blockmem_vmap()
236 printk("vmap malloc pages failed\n"); in osal_blockmem_vmap()
240 ret = memset_s(pages, page_count * sizeof(struct page *), 0, page_count * sizeof(struct page *)); in osal_blockmem_vmap()
246 pages[i] = phys_to_page(phys_addr + i * PAGE_SIZE); in osal_blockmem_vmap()
249 vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL); in osal_blockmem_vmap()
254 vfree(pages); in osal_blockmem_vmap()
255 pages = NULL; in osal_blockmem_vmap()
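
osal_blockmem_vmap() above follows a standard recipe for giving the kernel a virtual mapping over a physically contiguous reserved block: build a temporary struct page array with phys_to_page(), hand it to vmap(), then free the array, since vmap() does not need it once the mapping is built. A condensed sketch of that recipe (error handling trimmed; phys_to_page() is the arch-specific helper used in the excerpt, spelled pfn_to_page(PHYS_PFN(...)) on some architectures):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *blockmem_vmap(phys_addr_t phys_addr, unsigned long page_count)
    {
        struct page **pages;
        unsigned long i;
        void *vaddr;

        pages = vmalloc(page_count * sizeof(struct page *));
        if (!pages)
            return NULL;

        for (i = 0; i < page_count; i++)
            pages[i] = phys_to_page(phys_addr + i * PAGE_SIZE);

        vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
        vfree(pages);        /* safe: vmap() already built the page tables */
        return vaddr;
    }
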
/device/soc/rockchip/common/vendor/drivers/video/rockchip/rga2/
rga2_mmu_info.c
164 static int rga2_user_memory_check(struct page **pages, u32 w, u32 h, u32 format, int flag) in rga2_user_memory_check() argument
180 vaddr = kmap(pages[taipage_num - 1]); in rga2_user_memory_check()
183 vaddr = kmap(pages[taipage_num]); in rga2_user_memory_check()
196 kunmap(pages[taipage_num - 1]); in rga2_user_memory_check()
198 kunmap(pages[taipage_num]); in rga2_user_memory_check()
825 static int rga2_MapUserMemory(struct page **pages, uint32_t *pageTable, unsigned long Memory, uint32_t pageCount, in rga2_MapUserMemory() argument
855 get_user_pages(current, current->mm, Memory << PAGE_SHIFT, pageCount, writeFlag ? FOLL_WRITE : 0, pages, NULL); in rga2_MapUserMemory()
857 result = get_user_pages(current, current->mm, Memory << PAGE_SHIFT, pageCount, writeFlag, 0, pages, NULL); in rga2_MapUserMemory()
859 result = get_user_pages_remote(current, current->mm, Memory << PAGE_SHIFT, pageCount, writeFlag, pages, NULL, NULL); in rga2_MapUserMemory()
861 result = get_user_pages_remote(current->mm, Memory << PAGE_SHIFT, pageCount, writeFlag, pages, NUL in rga2_MapUserMemory()
989 struct page **pages = NULL; rga2_mmu_flush_cache() local
1060 struct page **pages = NULL; rga2_mmu_info_BitBlt_mode() local
1240 struct page **pages = NULL; rga2_mmu_info_color_palette_mode() local
1381 struct page **pages = NULL; rga2_mmu_info_color_fill_mode() local
1463 struct page **pages = NULL; rga2_mmu_info_update_palette_table_mode() local
[all...]
/device/soc/rockchip/common/sdk_linux/drivers/iommu/
dma-iommu.c
570 static void iommu_dma_free_pages_ext(struct page **pages, int count) in iommu_dma_free_pages_ext() argument
573 __free_page(pages[count]); in iommu_dma_free_pages_ext()
575 kvfree(pages); in iommu_dma_free_pages_ext()
581 struct page **pages; in iommu_dma_alloc_pages_ext() local
589 pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL); in iommu_dma_alloc_pages_ext()
590 if (!pages) { in iommu_dma_alloc_pages_ext()
594 /* IOMMU can map any pages, so himem can also be used here */ in iommu_dma_alloc_pages_ext()
597 /* It makes no sense to muck about with huge pages */ in iommu_dma_alloc_pages_ext()
627 iommu_dma_free_pages_ext(pages, in iommu_dma_alloc_pages_ext()
662 struct page **pages; iommu_dma_alloc_remap() local
740 iommu_dma_mmap_ext(struct page **pages, size_t size, struct vm_area_struct *vma) iommu_dma_mmap_ext() argument
1033 struct page *page = NULL, **pages = NULL; iommu_dma_free_ext() local
1185 struct page **pages = dma_common_find_pages(cpu_addr); iommu_dma_mmap() local
1205 struct page **pages = dma_common_find_pages(cpu_addr); iommu_dma_get_sgtable() local
[all...]
