
Searched refs:next_pool (Results 1 - 14 of 14) sorted by relevance

/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/
mali_kbase_mem_pool.c
26 dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, (pool->next_pool) ? "kctx" : "kbdev", \
137 static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool, struct page *p) in kbase_mem_pool_spill() argument
140 kbase_mem_pool_zero_page(next_pool, p); in kbase_mem_pool_spill()
142 kbase_mem_pool_add(next_pool, p); in kbase_mem_pool_spill()
309 struct kbase_mem_pool *next_pool) in kbase_mem_pool_init()
314 pool->next_pool = next_pool; in kbase_mem_pool_init()
341 struct kbase_mem_pool *next_pool = pool->next_pool; in kbase_mem_pool_term() local
354 if (next_pool in kbase_mem_pool_term()
308 kbase_mem_pool_init(struct kbase_mem_pool *pool, size_t max_size, struct kbase_device *kbdev, struct kbase_mem_pool *next_pool) kbase_mem_pool_init() argument
404 struct kbase_mem_pool *next_pool = pool->next_pool; kbase_mem_pool_free() local
512 struct kbase_mem_pool *next_pool = pool->next_pool; kbase_mem_pool_free_pages() local
[all...]
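
The midgard hits above show the spill path: kbase_mem_pool_spill() zeroes a page and then parks it in the parent pool via kbase_mem_pool_add(), so a page evicted from a per-context pool carries no stale data into the device-wide pool. Below is a minimal userspace sketch of that pattern, assuming a singly linked free list; struct simple_pool, struct simple_page and simple_pool_spill are hypothetical stand-ins for the driver's kbase_mem_pool and struct page, with locking and the real page allocator omitted.

    #include <stddef.h>
    #include <string.h>

    /* Hypothetical two-level pool model; the real driver uses
     * struct kbase_mem_pool and struct page with spinlocks that
     * this sketch leaves out. */
    struct simple_page {
        struct simple_page *next;
        unsigned char data[4096];
    };

    struct simple_pool {
        struct simple_page *free_list;
        size_t cur_size;
        size_t max_size;
        struct simple_pool *next_pool;  /* NULL for the terminal (kbdev) pool */
    };

    /* Mirrors the shape of kbase_mem_pool_spill(): zero first, then add,
     * so the next context to allocate this page never sees old contents. */
    static void simple_pool_spill(struct simple_pool *next_pool,
                                  struct simple_page *p)
    {
        memset(p->data, 0, sizeof(p->data)); /* kbase_mem_pool_zero_page() */
        p->next = next_pool->free_list;      /* kbase_mem_pool_add() */
        next_pool->free_list = p;
        next_pool->cur_size++;
    }

The "kctx"/"kbdev" tag in the dev_dbg format string above comes from exactly this chain: a pool with a non-NULL next_pool is a per-context pool, while the terminal pool belongs to the device.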
mali_kbase_mem.h
460 * @next_pool: Pointer to the next pool or NULL.
468 * If @next_pool is not NULL, we will allocate from @next_pool before going to
469 * the kernel allocator. Similarly pages can spill over to @next_pool when
479 struct kbase_mem_pool *next_pool);
485 * Pages in the pool will spill over to @next_pool (if available) or freed to
496 * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page
497 * from @next_pool.
512 * 2. Otherwise, if @next_pool is not NULL and not full, add @page to
513 * @next_pool
[all...]
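
The mem.h comments spell out the ordering on both paths: allocate from this pool first, then from @next_pool, then from the kernel allocator; on free, return to this pool first, then spill to @next_pool, then give the page back to the system. A compact model of that policy follows; model_pool, model_pool_alloc and model_pool_free are hypothetical names, and plain calloc/free stand in for the kernel allocator.

    #include <stdlib.h>
    #include <string.h>

    struct model_pool {
        void **pages;           /* caller provides max_size slots */
        size_t cur_size, max_size;
        struct model_pool *next_pool;
    };

    static void *model_pool_alloc(struct model_pool *pool)
    {
        /* 1. If @pool is not empty, allocate a page from @pool. */
        if (pool->cur_size > 0)
            return pool->pages[--pool->cur_size];
        /* 2. Otherwise, if @next_pool is not NULL and has free pages,
         *    allocate a page from @next_pool. */
        if (pool->next_pool && pool->next_pool->cur_size > 0)
            return pool->next_pool->pages[--pool->next_pool->cur_size];
        /* 3. Fall back to the underlying allocator. */
        return calloc(1, 4096);
    }

    static void model_pool_free(struct model_pool *pool, void *page)
    {
        /* 1. If @pool is not full, add @page back to @pool. */
        if (pool->cur_size < pool->max_size) {
            pool->pages[pool->cur_size++] = page;
            return;
        }
        /* 2. Otherwise, if @next_pool is not NULL and not full, spill
         *    @page over to @next_pool (zeroed, as in the spill sketch). */
        if (pool->next_pool &&
            pool->next_pool->cur_size < pool->next_pool->max_size) {
            memset(page, 0, 4096);
            pool->next_pool->pages[pool->next_pool->cur_size++] = page;
            return;
        }
        /* 3. Otherwise free the page back to the system. */
        free(page);
    }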
mali_kbase_defs.h
905 * @next_pool: Pointer to next pool where pages can be allocated when this pool
917 struct kbase_mem_pool *next_pool; member
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/
mali_kbase_mem_pool.c
29 (pool->next_pool) ? "kctx" : "kbdev", \
147 static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool, in kbase_mem_pool_spill() argument
151 kbase_mem_pool_zero_page(next_pool, p); in kbase_mem_pool_spill()
153 kbase_mem_pool_add(next_pool, p); in kbase_mem_pool_spill()
326 struct kbase_mem_pool *next_pool) in kbase_mem_pool_init()
331 pool->next_pool = next_pool; in kbase_mem_pool_init()
358 struct kbase_mem_pool *next_pool = pool->next_pool; in kbase_mem_pool_term() local
371 if (next_pool in kbase_mem_pool_term()
323 kbase_mem_pool_init(struct kbase_mem_pool *pool, size_t max_size, struct kbase_device *kbdev, struct kbase_mem_pool *next_pool) kbase_mem_pool_init() argument
422 struct kbase_mem_pool *next_pool = pool->next_pool; kbase_mem_pool_free() local
529 struct kbase_mem_pool *next_pool = pool->next_pool; kbase_mem_pool_free_pages() local
[all...]
mali_kbase_mem.h
459 * @next_pool: Pointer to the next pool or NULL.
467 * If @next_pool is not NULL, we will allocate from @next_pool before going to
468 * the kernel allocator. Similarly pages can spill over to @next_pool when
480 struct kbase_mem_pool *next_pool);
486 * Pages in the pool will spill over to @next_pool (if available) or freed to
497 * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page
498 * from @next_pool.
513 * 2. Otherwise, if @next_pool is not NULL and not full, add @page to
514 * @next_pool
[all...]
mali_kbase_defs.h
915 * @next_pool: Pointer to next pool where pages can be allocated when this pool
927 struct kbase_mem_pool *next_pool; member
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/
mali_kbase_mem_pool.c
33 (pool->next_pool) ? "kctx" : "kbdev", \
144 static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool, in kbase_mem_pool_spill() argument
148 kbase_mem_pool_zero_page(next_pool, p); in kbase_mem_pool_spill()
150 kbase_mem_pool_add(next_pool, p); in kbase_mem_pool_spill()
363 struct kbase_mem_pool *next_pool) in kbase_mem_pool_init()
375 pool->next_pool = next_pool; in kbase_mem_pool_init()
404 struct kbase_mem_pool *next_pool = pool->next_pool; in kbase_mem_pool_term() local
418 if (next_pool in kbase_mem_pool_term()
358 kbase_mem_pool_init(struct kbase_mem_pool *pool, const struct kbase_mem_pool_config *config, unsigned int order, int group_id, struct kbase_device *kbdev, struct kbase_mem_pool *next_pool) kbase_mem_pool_init() argument
491 struct kbase_mem_pool *next_pool = pool->next_pool; kbase_mem_pool_free() local
741 struct kbase_mem_pool *next_pool = pool->next_pool; kbase_mem_pool_free_pages() local
[all...]
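
Note the bifrost kbase_mem_pool_init() signature at line 358 above: unlike the midgard variant (pool, max_size, kbdev, next_pool), it takes a kbase_mem_pool_config, a page order and a memory group id, but the chaining itself is the same single assignment. A hypothetical sketch of how the chain is wired at init time; chain_pool, chain_pool_init and the size/order/group values are invented for illustration.

    #include <stddef.h>

    struct chain_pool {
        size_t max_size;
        unsigned int order;     /* 0 = small pages; larger orders = huge pages */
        int group_id;           /* memory group manager group */
        struct chain_pool *next_pool;
    };

    static void chain_pool_init(struct chain_pool *pool, size_t max_size,
                                unsigned int order, int group_id,
                                struct chain_pool *next_pool)
    {
        pool->max_size = max_size;
        pool->order = order;
        pool->group_id = group_id;
        /* The one line every variant of kbase_mem_pool_init() shares:
         * pool->next_pool = next_pool; */
        pool->next_pool = next_pool;
    }

    int main(void)
    {
        struct chain_pool kbdev_pool, kctx_pool;

        /* The device-wide pool terminates the chain... */
        chain_pool_init(&kbdev_pool, 4096, 0, 0, NULL);
        /* ...and a per-context pool allocates from and spills into it. */
        chain_pool_init(&kctx_pool, 256, 0, 0, &kbdev_pool);
        return 0;
    }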
mali_kbase_defs.h
454 * @next_pool: Pointer to next pool where pages can be allocated when this
472 struct kbase_mem_pool *next_pool; member
mali_kbase_mem.h
793 * @next_pool: Pointer to the next pool or NULL.
801 * If @next_pool is not NULL, we will allocate from @next_pool before going to
802 * the memory group manager. Similarly pages can spill over to @next_pool when
816 struct kbase_mem_pool *next_pool);
822 * Pages in the pool will spill over to @next_pool (if available) or freed to
833 * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page
834 * from @next_pool.
849 * @pool. This function does not use @next_pool.
865 * 2. Otherwise, if @next_pool is not NULL and not full, add @page to @next_pool
[all...]
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/
mali_kbase_mem_pool.c
33 dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, (pool->next_pool) ? "kctx" : "kbdev", \
138 static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool, struct page *p) in kbase_mem_pool_spill() argument
141 kbase_mem_pool_zero_page(next_pool, p); in kbase_mem_pool_spill()
143 kbase_mem_pool_add(next_pool, p); in kbase_mem_pool_spill()
362 int group_id, struct kbase_device *kbdev, struct kbase_mem_pool *next_pool) in kbase_mem_pool_init()
373 pool->next_pool = next_pool; in kbase_mem_pool_init()
408 struct kbase_mem_pool *next_pool = pool->next_pool; in kbase_mem_pool_term() local
422 if (next_pool in kbase_mem_pool_term()
361 kbase_mem_pool_init(struct kbase_mem_pool *pool, const struct kbase_mem_pool_config *config, unsigned int order, int group_id, struct kbase_device *kbdev, struct kbase_mem_pool *next_pool) kbase_mem_pool_init() argument
494 struct kbase_mem_pool *next_pool = pool->next_pool; kbase_mem_pool_free() local
737 struct kbase_mem_pool *next_pool = pool->next_pool; kbase_mem_pool_free_pages() local
[all...]
mali_kbase_defs.h
435 * @next_pool: Pointer to next pool where pages can be allocated when this
453 struct kbase_mem_pool *next_pool; member
mali_kbase_mem.h
707 * @next_pool: Pointer to the next pool or NULL.
715 * If @next_pool is not NULL, we will allocate from @next_pool before going to
716 * the memory group manager. Similarly pages can spill over to @next_pool when
726 int group_id, struct kbase_device *kbdev, struct kbase_mem_pool *next_pool);
732 * Pages in the pool will spill over to @next_pool (if available) or freed to
743 * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page
744 * from @next_pool.
759 * @pool. This function does not use @next_pool.
775 * 2. Otherwise, if @next_pool is not NULL and not full, add @page to @next_pool
[all...]
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/mmu/
mali_kbase_mmu.c
510 for (pool = root_pool; pool != NULL; pool = pool->next_pool) { in page_fault_try_alloc()
591 pool = pool->next_pool; in page_fault_try_alloc()
624 pool = pool->next_pool; in page_fault_try_alloc()
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/mmu/
mali_kbase_mmu.c
368 for (pool = root_pool; pool != NULL; pool = pool->next_pool) { in page_fault_try_alloc()
448 pool = pool->next_pool; in page_fault_try_alloc()
478 pool = pool->next_pool; in page_fault_try_alloc()
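
Both bifrost mmu trees walk the whole chain in page_fault_try_alloc() before deciding how to satisfy a fault, using the same for-loop shape shown in the hits above. A sketch of that traversal; walk_pool and chain_available are hypothetical names, and the real function additionally takes the pool locks and falls back to growing the pools.

    #include <stddef.h>

    struct walk_pool {
        size_t cur_size;
        struct walk_pool *next_pool;
    };

    /* Same traversal as the page_fault_try_alloc() hits above: start at
     * the root (kctx) pool and follow next_pool to the end of the chain,
     * here simply summing how many cached pages the chain can supply. */
    static size_t chain_available(const struct walk_pool *root_pool)
    {
        size_t total = 0;
        const struct walk_pool *pool;

        for (pool = root_pool; pool != NULL; pool = pool->next_pool)
            total += pool->cur_size;

        return total;
    }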
