/device/soc/rockchip/rk3588/kernel/include/trace/hooks/
memory.h
    15  TP_PROTO(unsigned long addr, int nr_pages),
    16  TP_ARGS(addr, nr_pages));
    19  TP_PROTO(unsigned long addr, int nr_pages),
    20  TP_ARGS(addr, nr_pages));
    23  TP_PROTO(unsigned long addr, int nr_pages),
    24  TP_ARGS(addr, nr_pages));
    27  TP_PROTO(unsigned long addr, int nr_pages),
    28  TP_ARGS(addr, nr_pages));
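The four TP_PROTO/TP_ARGS pairs above look like Android-style vendor hook declarations. As a hedged illustration only (the hook name, and the assumption that this tree provides the DECLARE_HOOK macro from include/trace/hooks/vendor_hooks.h, are not taken from the listing), such a declaration typically reads:

    /* Sketch only: android_vh_example_mem_event is a hypothetical name;
     * DECLARE_HOOK is assumed to come from the Android common-kernel
     * vendor-hook infrastructure.
     */
    #include <trace/hooks/vendor_hooks.h>

    DECLARE_HOOK(android_vh_example_mem_event,
        TP_PROTO(unsigned long addr, int nr_pages),
        TP_ARGS(addr, nr_pages));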
/device/soc/rockchip/common/vendor/drivers/dma-buf/heaps/
deferred-free-helper.c
    25  deferred_free(struct deferred_freelist_item *item, void (*free)(struct deferred_freelist_item *, enum df_reason), size_t nr_pages)  (parameter)
    26  size_t nr_pages)                      in deferred_free()
    31  item->nr_pages = nr_pages;            in deferred_free()
    36  list_nr_pages += nr_pages;            in deferred_free()
    45  size_t nr_pages;                      local in free_one_item()
    55  nr_pages = item->nr_pages;            in free_one_item()
    56  list_nr_pages -= nr_pages;            in free_one_item()
    60  return nr_pages;                      in free_one_item()
    65  unsigned long nr_pages;               local in get_freelist_nr_pages()
    (additional matches elided)
deferred-free-helper.h
    33  * @nr_pages: number of pages used by item to be freed
    38  size_t nr_pages;                      (struct member)
    48  * @nr_pages: number of pages to be freed
    51  void (*free)(struct deferred_freelist_item *i, enum df_reason reason), size_t nr_pages);
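Taken together, the prototype at deferred-free-helper.h:51 and the definition at deferred-free-helper.c:25 describe a deferred-free API: the caller embeds a struct deferred_freelist_item in its own object, supplies a callback that receives the item and an enum df_reason, and passes the page count. A minimal kernel-style sketch of a caller, with struct heap_buffer and its release callback invented here purely for illustration:

    /* Hedged sketch of a deferred_free() caller. Only deferred_free(),
     * struct deferred_freelist_item and enum df_reason come from the
     * listing above; heap_buffer and heap_buffer_release() are
     * hypothetical.
     */
    #include <linux/kernel.h>
    #include "deferred-free-helper.h"

    struct heap_buffer {
        struct deferred_freelist_item deferred_free;
        struct page **pages;
        size_t pagecount;
    };

    static void heap_buffer_release(struct deferred_freelist_item *item,
                                    enum df_reason reason)
    {
        struct heap_buffer *buf =
            container_of(item, struct heap_buffer, deferred_free);

        /* Free buf->pages here; 'reason' tells the callback whether it
         * runs from the normal free worker or under shrinker pressure.
         */
        (void)reason;
        (void)buf;
    }

    static void heap_buffer_put(struct heap_buffer *buf)
    {
        /* Queue the buffer for deferred freeing instead of freeing it
         * synchronously.
         */
        deferred_free(&buf->deferred_free, heap_buffer_release, buf->pagecount);
    }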
/device/soc/rockchip/rk3588/kernel/drivers/dma-buf/heaps/
deferred-free-helper.c
    25  deferred_free(struct deferred_freelist_item *item, void (*free)(struct deferred_freelist_item*, enum df_reason), size_t nr_pages)  (parameter)
    28  size_t nr_pages)                      in deferred_free()
    33  item->nr_pages = nr_pages;            in deferred_free()
    38  list_nr_pages += nr_pages;            in deferred_free()
    47  size_t nr_pages;                      local in free_one_item()
    57  nr_pages = item->nr_pages;            in free_one_item()
    58  list_nr_pages -= nr_pages;            in free_one_item()
    62  return nr_pages;                      in free_one_item()
    67  unsigned long nr_pages;               local in get_freelist_nr_pages()
    (additional matches elided)
deferred-free-helper.h
    33  * @nr_pages: number of pages used by item to be freed
    38  size_t nr_pages;                      (struct member)
    49  * @nr_pages: number of pages to be freed
    54  size_t nr_pages);
/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/ |
mali_kbase_mem_pool.c
    77   static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool, struct list_head *page_list, size_t nr_pages)
    82   pool->cur_size += nr_pages;           in kbase_mem_pool_add_list_locked()
    84   pool_dbg(pool, "added %zu pages\n", nr_pages);
    87   static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool, struct list_head *page_list, size_t nr_pages)
    90   kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
    424  int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages)
    431  pool_dbg(pool, "alloc_pages(%zu):\n", nr_pages);
    435  nr_from_pool = min(nr_pages, kbase_mem_pool_size(pool));
    442  if (i != nr_pages && pool->next_pool) {
    444  err = kbase_mem_pool_alloc_pages(pool->next_pool, nr_pages ...   in kbase_mem_pool_alloc_pages()
    470  kbase_mem_pool_add_array(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages, bool zero, bool sync)  (parameter)
    509  kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages, bool dirty, bool reclaimed)  (parameter)
    (additional matches elided)
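The alloc_pages matches at lines 435-444 show the pool's allocation pattern: satisfy as much of the request as possible from the current pool (the min of nr_pages and the pool size) and recurse into pool->next_pool for the remainder. A hedged, standalone sketch of that shape, with simplified stand-in types and none of the locking, page tagging, or error unwinding of the real driver:

    /* Illustrative sketch only: simple_pool, pool_size() and
     * pool_take_one() are stand-ins, not the kbase API.
     */
    #include <stddef.h>

    struct simple_pool {
        struct simple_pool *next_pool;  /* fallback pool, may be NULL */
        size_t cur_size;                /* pages currently cached     */
    };

    static size_t pool_size(struct simple_pool *pool) { return pool->cur_size; }

    static unsigned long pool_take_one(struct simple_pool *pool)
    {
        pool->cur_size--;
        return 0; /* a real pool would hand back a page address */
    }

    static int pool_alloc_pages(struct simple_pool *pool, size_t nr_pages,
                                unsigned long *pages)
    {
        size_t nr_from_pool = nr_pages < pool_size(pool) ? nr_pages : pool_size(pool);
        size_t i;

        /* Take what this pool can give ... */
        for (i = 0; i < nr_from_pool; i++)
            pages[i] = pool_take_one(pool);

        /* ... and ask the next pool in the chain for the rest. */
        if (i != nr_pages && pool->next_pool)
            return pool_alloc_pages(pool->next_pool, nr_pages - i, pages + i);

        return i == nr_pages ? 0 : -1;
    }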
mali_kbase_mem.c
    128  kbase_region_tracker_find_region_enclosing_range_free(struct kbase_context *kctx,
    129      u64 start_pfn, size_t nr_pages)  (parameter)
    135  u64 end_pfn = start_pfn + nr_pages;   in kbase_region_tracker_find_region_enclosing_range_free()
    146  tmp_end_pfn = reg->start_pfn + reg->nr_pages;
    182  tmp_end_pfn = reg->start_pfn + reg->nr_pages;   in kbase_region_tracker_find_region_enclosing_address()
    232  kbase_region_tracker_find_region_meeting_reqs(struct kbase_context *kctx, struct kbase_va_region *reg_reqs,
    234      size_t nr_pages, size_t align)  (parameter)
    249  if ((reg->nr_pages >= nr_pages) && (reg->flags & KBASE_REG_FREE)) {
    253  if ((start_pfn >= reg->start_pfn) && (start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) &&
    254  ((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages ...   in kbase_region_tracker_find_region_meeting_reqs()
    345  kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)  (parameter)
    397  kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)  (parameter)
    762  kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone)  (parameter)
    855  kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)  (parameter)
    (additional matches elided)
mali_kbase_mem.h
    153  unsigned long nr_pages;               (struct member)
    224  size_t nr_pages;                      (struct member)
    349  static inline struct kbase_mem_phy_alloc *kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type)
    352  size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
    357  alloc_size += nr_pages * sizeof(*alloc->imported.user_buf.dma_addrs);
    362  * Prevent nr_pages*per_page_size + sizeof(*alloc) from
    365  if (nr_pages > ((((size_t)-1) - sizeof(*alloc)) / per_page_size)) {
    393  alloc->imported.user_buf.dma_addrs = (void *)(alloc->pages + nr_pages);
    406  reg->cpu_alloc = kbase_alloc_create(reg->nr_pages, KBASE_MEM_TYPE_NATIVE);
    415  reg->gpu_alloc = kbase_alloc_create(reg->nr_pages, KBASE_MEM_TYPE_NATIVE ...   in kbase_reg_prepare_native()
    (additional matches elided)
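The matches at mali_kbase_mem.h:352-365 show the usual guard against multiplication overflow when sizing one allocation that carries a per-page array: reject nr_pages before computing nr_pages * per_page_size + sizeof(*alloc). A minimal standalone sketch of the same check (struct phys_alloc and phys_alloc_create() are illustrative stand-ins, not the kbase definitions):

    /* Hedged sketch of the overflow guard seen above. */
    #include <stddef.h>
    #include <stdlib.h>

    struct phys_alloc {
        size_t nents;
        unsigned long pages[];   /* one entry per page */
    };

    static struct phys_alloc *phys_alloc_create(size_t nr_pages)
    {
        size_t per_page_size = sizeof(unsigned long);

        /* Prevent nr_pages * per_page_size + sizeof(*alloc) from wrapping
         * around SIZE_MAX before it reaches the allocator.
         */
        if (nr_pages > (((size_t)-1) - sizeof(struct phys_alloc)) / per_page_size)
            return NULL;

        return malloc(sizeof(struct phys_alloc) + nr_pages * per_page_size);
    }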
mali_kbase_cache_policy.c
    26   u32 kbase_cache_enabled(u32 flags, u32 nr_pages)
    30   CSTD_UNUSED(nr_pages);               in kbase_cache_enabled()
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/ |
mali_kbase_mem_pool.c
    83   kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
    84       struct list_head *page_list, size_t nr_pages)  (parameter)
    89   pool->cur_size += nr_pages;           in kbase_mem_pool_add_list_locked()
    91   pool_dbg(pool, "added %zu pages\n", nr_pages);
    94   kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
    95       struct list_head *page_list, size_t nr_pages)  (parameter)
    98   kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
    441  int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages, ...
    449  pool_dbg(pool, "alloc_pages(%zu):\n", nr_pages);
    453  nr_from_pool = min(nr_pages, kbase_mem_pool_size(pool));
    460  if (i != nr_pages && pool->next_pool) {
    463  nr_pages ...                          in kbase_mem_pool_alloc_pages()
    488  kbase_mem_pool_add_array(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages, bool zero, bool sync)  (parameter)
    526  kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages, bool dirty, bool reclaimed)  (parameter)
    (additional matches elided)
mali_kbase_mem.c
    132  kbase_region_tracker_find_region_enclosing_range_free(
    133      struct kbase_context *kctx, u64 start_pfn, size_t nr_pages)  (parameter)
    139  u64 end_pfn = start_pfn + nr_pages;
    150  tmp_end_pfn = reg->start_pfn + reg->nr_pages;
    186  tmp_end_pfn = reg->start_pfn + reg->nr_pages;   in kbase_region_tracker_find_region_enclosing_address()
    236  static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(struct kbase_context *kctx, struct kbase_va_region *reg_reqs, size_t nr_pages, size_t align)
    251  if ((reg->nr_pages >= nr_pages) &&
    257  (start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) &&
    258  ((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages ...   in kbase_region_tracker_find_region_meeting_reqs()
    350  kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)  (parameter)
    413  kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)  (parameter)
    790  kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone)  (parameter)
    884  kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)  (parameter)
    (additional matches elided)
mali_kbase_mem.h
    155  unsigned long nr_pages;               (struct member)
    224  size_t nr_pages;                      (struct member)
    347  static inline struct kbase_mem_phy_alloc *kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type)
    350  size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
    355  alloc_size += nr_pages * ...
    361  * Prevent nr_pages*per_page_size + sizeof(*alloc) from
    364  if (nr_pages > ((((size_t) -1) - sizeof(*alloc)) ...
    390  (void *) (alloc->pages + nr_pages);
    403  reg->cpu_alloc = kbase_alloc_create(reg->nr_pages, ...
    413  reg->gpu_alloc = kbase_alloc_create(reg->nr_pages, ...   in kbase_reg_prepare_native()
    (additional matches elided)
mali_kbase_softjobs.c
    497  int nr_pages;                         (struct member)
    508  int nr_pages = buffer->nr_extres_pages;   local in free_user_buffer()
    513  for (i = 0; i < nr_pages; i++) {      in free_user_buffer()
    540  for (p = 0; p < buffers[i].nr_pages; p++) {   in kbase_debug_copy_finish()
    604  int nr_pages = (last_page_addr-page_addr)/PAGE_SIZE+1;   local in kbase_debug_copy_prepare()
    612  buffers[i].nr_pages = nr_pages;
    620  buffers[i].pages = kcalloc(nr_pages, sizeof(struct page *), ...
    628  nr_pages,
    635  if (pinned_pages != nr_pages) {
    667  unsigned long nr_pages = ...          local in kbase_debug_copy_prepare()
    (additional matches elided)
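Line 604 computes how many pages a user buffer spans from its first and last page-aligned addresses. A hedged, userspace-style sketch of that arithmetic (the 4 KiB page size and the helper name are assumptions made for the example, not taken from the driver):

    /* Sketch of the page-span arithmetic at mali_kbase_softjobs.c:604. */
    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_PAGE_SIZE 4096UL

    static unsigned long pages_spanned(uint64_t addr, uint64_t size)
    {
        uint64_t page_addr      = addr & ~(EXAMPLE_PAGE_SIZE - 1);              /* first page */
        uint64_t last_page_addr = (addr + size - 1) & ~(EXAMPLE_PAGE_SIZE - 1); /* last page  */

        return (last_page_addr - page_addr) / EXAMPLE_PAGE_SIZE + 1;
    }

    int main(void)
    {
        /* A 5000-byte buffer starting 100 bytes into a page touches 2 pages. */
        printf("%lu\n", pages_spanned(4096 + 100, 5000));
        return 0;
    }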
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/ |
mali_kbase_mem_pool.c
    77   kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
    78       struct list_head *page_list, size_t nr_pages)  (parameter)
    83   pool->cur_size += nr_pages;           in kbase_mem_pool_add_list_locked()
    85   pool_dbg(pool, "added %zu pages\n", nr_pages);
    88   kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
    89       struct list_head *page_list, size_t nr_pages)  (parameter)
    92   kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
    656  kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
    657      size_t nr_pages, struct tagged_addr *pages, ...  (parameter)
    665  if (!nr_pages)
    669  nr_pages, zero, sync);
    672  for (i = 0; i < nr_pages; i++) {
    693  nr_pages, nr_to_pool ...              in kbase_mem_pool_add_array()
    696  kbase_mem_pool_add_array_locked(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool zero, bool sync)  (parameter)
    738  kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool dirty, bool reclaimed)  (parameter)
    789  kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool dirty, bool reclaimed)  (parameter)
    (additional matches elided)
mali_kbase_mem.c
    173  find_region_enclosing_range_rbtree(
    174      struct rb_root *rbtree, u64 start_pfn, size_t nr_pages)  (parameter)
    178  u64 end_pfn = start_pfn + nr_pages;
    187  tmp_end_pfn = reg->start_pfn + reg->nr_pages;
    216  tmp_end_pfn = reg->start_pfn + reg->nr_pages;   in kbase_find_region_enclosing_address()
    288  kbase_region_tracker_find_region_meeting_reqs(struct kbase_va_region *reg_reqs,
    290      size_t nr_pages, size_t align_offset, size_t align_mask, u64 *out_start_pfn)  (parameter)
    304  if ((reg->nr_pages >= nr_pages) &&
    321  if (0 == ((start_pfn + nr_pages) & BASE_MEM_PFN_MASK_4GB))
    328  if (!((start_pfn + nr_pages) & BASE_MEM_PFN_MASK_4GB) ||
    333  u64 end_pfn = start_pfn + nr_pages ...   in kbase_region_tracker_find_region_meeting_reqs()
    482  kbase_insert_va_region_nolock(struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)  (parameter)
    555  kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)  (parameter)
    617  kbase_add_va_region_rbtree(struct kbase_device *kbdev, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)  (parameter)
    1345 kbase_alloc_free_region(struct rb_root *rbtree, u64 start_pfn, size_t nr_pages, int zone)  (parameter)
    1502 kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align, enum kbase_caller_mmu_sync_info mmu_sync_info)  (parameter)
    4741 kbase_mem_copy_to_pinned_user_pages(struct page **dest_pages, void *src_page, size_t *to_copy, unsigned int nr_pages, unsigned int *target_page_nr, size_t offset)  (parameter)
    (additional matches elided)
mali_kbase_mem.h
    172  unsigned long nr_pages;               (struct member)
    292  * @nr_pages: The size of the region in pages.
    330  size_t nr_pages;                      (struct member)
    637  kbase_alloc_create(
    638      struct kbase_context *kctx, size_t nr_pages, enum kbase_memory_type type, int group_id)  (parameter)
    642  size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
    647  alloc_size += nr_pages * ...
    653  * Prevent nr_pages*per_page_size + sizeof(*alloc) from
    656  if (nr_pages > ((((size_t) -1) - sizeof(*alloc)) ...
    691  (void *) (alloc->pages + nr_pages);
    704  reg->cpu_alloc = kbase_alloc_create(kctx, reg->nr_pages, ...   in kbase_reg_prepare_native()
    (additional matches elided)
mali_kbase_mem_linux.c
    122  (gpu_pfn < (reg->start_pfn + reg->nr_pages))) {   in kbase_find_event_mem_region()
    555  *out = reg->nr_pages;                 in kbase_mem_query()
    1193 for (j = 0; (j < pages) && (count < reg->nr_pages); j++, count++)   in kbase_mem_umm_map_attachment()
    1202 WARN_ONCE(count < reg->nr_pages, ...
    1268 !WARN_ON(reg->nr_pages < alloc->nents)) {
    1278 kctx->aliasing_sink_page, reg->nr_pages - alloc->nents, ...   in kbase_mem_umm_map()
    1323 reg->nr_pages,                        in kbase_mem_umm_unmap()
    1610 user_buf->nr_pages = *va_pages;       in kbase_mem_from_user_buffer()
    2201 if (new_pages > reg->nr_pages)
    2225 new_pages = reg->nr_pages;
    2373 get_aliased_alloc(struct vm_area_struct *vma, struct kbase_va_region *reg, pgoff_t *start_off, size_t nr_pages)  (parameter)
    2484 kbase_cpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, unsigned long aligned_offset, int free_on_close)  (parameter)
    2601 u32 nr_pages;                         (local)
    2673 kbasep_reg_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **regm, size_t *nr_pages, size_t *aligned_offset)  (parameter)
    2757 size_t nr_pages = vma_pages(vma);     (local)
    3283 size_t nr_pages = PFN_DOWN(vma->vm_end - vma->vm_start);   (local)
    3367 size_t nr_pages = vma_pages(vma);     (local)
    3456 size_t nr_pages = PFN_DOWN(vma->vm_end - vma->vm_start);   (local)
    3496 size_t nr_pages = PFN_DOWN(vma->vm_end - vma->vm_start);   (local)
    (additional matches elided)
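The locals at lines 2757-3496 compute a mapping's size in pages from a VMA, either via vma_pages() or directly as PFN_DOWN(vma->vm_end - vma->vm_start); the two forms are equivalent because vm_start and vm_end are page aligned. A hedged standalone sketch of the same arithmetic (the constants and the simplified vma struct below mimic, but are not, the kernel's PAGE_SHIFT, PFN_DOWN and vm_area_struct):

    /* Standalone illustration of vma_pages()-style arithmetic;
     * a 4 KiB page size is assumed for the example.
     */
    #include <assert.h>

    #define EX_PAGE_SHIFT 12
    #define EX_PFN_DOWN(x) ((x) >> EX_PAGE_SHIFT)

    struct ex_vma {
        unsigned long vm_start;  /* page-aligned start of the mapping */
        unsigned long vm_end;    /* page-aligned end (exclusive)      */
    };

    static unsigned long ex_vma_pages(const struct ex_vma *vma)
    {
        return EX_PFN_DOWN(vma->vm_end - vma->vm_start);
    }

    int main(void)
    {
        struct ex_vma vma = { .vm_start = 0x10000, .vm_end = 0x14000 };
        assert(ex_vma_pages(&vma) == 4);  /* 0x4000 bytes == 4 pages */
        return 0;
    }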
mali_kbase_softjobs.c
    521  for (p = 0; p < buffers[i].nr_pages; p++) {   in kbase_debug_copy_finish()
    589  int nr_pages = (last_page_addr-page_addr)/PAGE_SIZE+1;   local in kbase_debug_copy_prepare()
    602  buffers[i].nr_pages = nr_pages;
    610  if (nr_pages > (KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD / ...
    613  buffers[i].pages = vzalloc(nr_pages * ...
    617  buffers[i].pages = kcalloc(nr_pages, ...
    627  nr_pages,
    634  buffers[i].nr_pages = 0;
    639  if (pinned_pages != nr_pages) {
    676  unsigned long nr_pages = ...          local in kbase_debug_copy_prepare()
    (additional matches elided)
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/ |
mali_kbase_mem_pool.c
    74   static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool, struct list_head *page_list, size_t nr_pages)
    79   pool->cur_size += nr_pages;           in kbase_mem_pool_add_list_locked()
    81   pool_dbg(pool, "added %zu pages\n", nr_pages);
    84   static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool, struct list_head *page_list, size_t nr_pages)
    87   kbase_mem_pool_add_list_locked(pool, page_list, nr_pages);
    652  static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool zero, ...
    660  if (!nr_pages) {
    664  pool_dbg(pool, "add_array(%zu, zero=%d, sync=%d):\n", nr_pages, zero, sync);
    667  for (i = 0; i < nr_pages; i++) {
    689  pool_dbg(pool, "add_array(%zu) added %zu pages\n", nr_pages, nr_to_pool ...
    692  kbase_mem_pool_add_array_locked(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool zero, bool sync)  (parameter)
    734  kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool dirty, bool reclaimed)  (parameter)
    784  kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool dirty, bool reclaimed)  (parameter)
    (additional matches elided)
mali_kbase_mem.c
    168  find_region_enclosing_range_rbtree(struct rb_root *rbtree, u64 start_pfn,
    169      size_t nr_pages)  (parameter)
    173  u64 end_pfn = start_pfn + nr_pages;
    182  tmp_end_pfn = reg->start_pfn + reg->nr_pages;
    210  tmp_end_pfn = reg->start_pfn + reg->nr_pages;   in kbase_find_region_enclosing_address()
    280  kbase_region_tracker_find_region_meeting_reqs(struct kbase_va_region *reg_reqs,
    281      size_t nr_pages, size_t align_offset, ...  (parameter)
    294  if ((reg->nr_pages >= nr_pages) && (reg->flags & KBASE_REG_FREE)) {
    309  if ((((start_pfn + nr_pages) & BASE_MEM_PFN_MASK_4GB) == 0)) {
    318  if (!((start_pfn + nr_pages) & BASE_MEM_PFN_MASK_4GB) || !(start_pfn & BASE_MEM_PFN_MASK_4GB)) {
    322  u64 end_pfn = start_pfn + nr_pages ...   in kbase_region_tracker_find_region_meeting_reqs()
    427  kbase_insert_va_region_nolock(struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)  (parameter)
    488  kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)  (parameter)
    546  kbase_add_va_region_rbtree(struct kbase_device *kbdev, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)  (parameter)
    1076 kbase_alloc_free_region(struct rb_root *rbtree, u64 start_pfn, size_t nr_pages, int zone)  (parameter)
    1227 kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)  (parameter)
    4249 kbase_mem_copy_to_pinned_user_pages(struct page **dest_pages, void *src_page, size_t *to_copy, unsigned int nr_pages, unsigned int *target_page_nr, size_t offset)  (parameter)
    (additional matches elided)
mali_kbase_mem.h
    165  unsigned long nr_pages;               (struct member)
    258  * @nr_pages: The size of the region in pages.
    277  size_t nr_pages;                      (struct member)
    558  static inline struct kbase_mem_phy_alloc *kbase_alloc_create(struct kbase_context *kctx, size_t nr_pages, ...
    562  size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
    567  alloc_size += nr_pages * sizeof(*alloc->imported.user_buf.dma_addrs);
    572  * Prevent nr_pages*per_page_size + sizeof(*alloc) from
    575  if (nr_pages > ((((size_t)-1) - sizeof(*alloc)) / per_page_size)) {
    609  alloc->imported.user_buf.dma_addrs = (void *)(alloc->pages + nr_pages);
    622  reg->cpu_alloc = kbase_alloc_create(kctx, reg->nr_pages, KBASE_MEM_TYPE_NATIVE ...   in kbase_reg_prepare_native()
    (additional matches elided)
mali_kbase_mem_linux.c
    127  (gpu_pfn < (reg->start_pfn + reg->nr_pages))) {   in kbase_find_event_mem_region()
    590  *out = reg->nr_pages;                 in kbase_mem_query()
    1246 for (j = 0; (j < pages) && (count < reg->nr_pages); j++, count++) {   in kbase_mem_umm_map_attachment()
    1255 WARN_ONCE(count < reg->nr_pages, ...
    1317 !WARN_ON(reg->nr_pages < alloc->nents)) {
    1327 reg->nr_pages - alloc->nents, ...     in kbase_mem_umm_map()
    1369 reg->nr_pages, kctx->as_nr);          in kbase_mem_umm_unmap()
    1667 user_buf->nr_pages = *va_pages;       in kbase_mem_from_user_buffer()
    2239 if (new_pages > reg->nr_pages) {
    2255 new_pages = reg->nr_pages;
    2405 get_aliased_alloc(struct vm_area_struct *vma, struct kbase_va_region *reg, pgoff_t *start_off, size_t nr_pages)  (parameter)
    2516 kbase_cpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, unsigned long aligned_offset, int free_on_close)  (parameter)
    2637 u32 nr_pages;                         (local)
    2707 kbasep_reg_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **regm, size_t *nr_pages, size_t *aligned_offset)  (parameter)
    2780 size_t nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;   (local)
    3309 size_t nr_pages = PFN_DOWN(vma->vm_end - vma->vm_start);   (local)
    3394 size_t nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;   (local)
    3483 size_t nr_pages = PFN_DOWN(vma->vm_end - vma->vm_start);   (local)
    3504 size_t nr_pages = PFN_DOWN(vma->vm_end - vma->vm_start);   (local)
    (additional matches elided)
/device/soc/rockchip/common/sdk_linux/kernel/power/
snapshot.c
    1558 * @nr_pages: Number of page frames to allocate.
    1563 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
    1567 while (nr_pages > 0) {                in preallocate_image_pages()
    1580 nr_pages--;                           in preallocate_image_pages()
    1587 static unsigned long preallocate_image_memory(unsigned long nr_pages, unsigned long avail_normal)
    1596 if (nr_pages < alloc) {               in preallocate_image_memory()
    1597 alloc = nr_pages;                     in preallocate_image_memory()
    1604 static unsigned long preallocate_image_highmem(unsigned long nr_pages)
    1606 return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
    1617 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages, unsigned ...
    1624 preallocate_image_highmem(unsigned long nr_pages)  (parameter)
    1629 preallocate_highmem_fraction(unsigned long nr_pages, unsigned long highmem, unsigned long total)  (parameter)
    1926 enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)  (parameter)
    1997 swsusp_alloc(struct memory_bitmap *copy_bm_ex, unsigned int nr_pages, unsigned int nr_highmem)  (parameter)
    2030 unsigned int nr_pages, nr_highmem;    local in swsusp_save()
    2538 unsigned int nr_pages, nr_highmem;    local in prepare_image()
    (additional matches elided)
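preallocate_image_pages() (snapshot.c:1563-1580) is a count-down loop: it tries to obtain one page frame at a time with the given gfp mask, stops early when an allocation fails, and reports how many pages it actually got. A hedged sketch of that control flow, using plain malloc in place of the kernel page allocator and omitting the bookkeeping the real function does:

    /* Control-flow sketch of a preallocate-N-pages loop; malloc stands in
     * for the kernel page allocator, and the page size is assumed to be
     * 4 KiB for the example.
     */
    #include <stdlib.h>

    #define EX_PAGE_SIZE 4096

    static unsigned long preallocate_pages(unsigned long nr_pages)
    {
        unsigned long nr_alloc = 0;

        while (nr_pages > 0) {
            void *page = malloc(EX_PAGE_SIZE);

            if (!page)
                break;      /* out of memory: return what we got so far */

            /* A real implementation would record the page for later use;
             * the sketch deliberately leaves that part out.
             */
            nr_pages--;
            nr_alloc++;
        }

        return nr_alloc;
    }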
/device/soc/rockchip/common/vendor/include/
deferred-free-helper.h
    33  * @nr_pages: number of pages used by item to be freed
    38  size_t nr_pages;                      (struct member)
    48  * @nr_pages: number of pages to be freed
    51  void (*free)(struct deferred_freelist_item *i, enum df_reason reason), size_t nr_pages);
/device/soc/rockchip/rk3588/kernel/include/linux/
deferred-free-helper.h
    33  * @nr_pages: number of pages used by item to be freed
    38  size_t nr_pages;                      (struct member)
    49  * @nr_pages: number of pages to be freed
    54  size_t nr_pages);