Searched refs:pool (Results 1 - 25 of 59) sorted by relevance

/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/
mali_kbase_mem_pool.c
31 #define pool_dbg(pool, format, ...) \
32 dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, \
33 (pool->next_pool) ? "kctx" : "kbdev", \
34 kbase_mem_pool_size(pool), \
35 kbase_mem_pool_max_size(pool), \
41 static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool) in kbase_mem_pool_capacity() argument
43 ssize_t max_size = kbase_mem_pool_max_size(pool); in kbase_mem_pool_capacity()
44 ssize_t cur_size = kbase_mem_pool_size(pool); in kbase_mem_pool_capacity()
49 static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool) in kbase_mem_pool_is_full() argument
54 kbase_mem_pool_is_empty(struct kbase_mem_pool *pool) kbase_mem_pool_is_empty() argument
59 kbase_mem_pool_add_locked(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_add_locked() argument
70 kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_add() argument
77 kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool, struct list_head *page_list, size_t nr_pages) kbase_mem_pool_add_list_locked() argument
88 kbase_mem_pool_add_list(struct kbase_mem_pool *pool, struct list_head *page_list, size_t nr_pages) kbase_mem_pool_add_list() argument
96 kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool) kbase_mem_pool_remove_locked() argument
114 kbase_mem_pool_remove(struct kbase_mem_pool *pool) kbase_mem_pool_remove() argument
125 kbase_mem_pool_sync_page(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_sync_page() argument
133 kbase_mem_pool_zero_page(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_zero_page() argument
153 kbase_mem_alloc_page(struct kbase_mem_pool *pool) kbase_mem_alloc_page() argument
187 kbase_mem_pool_free_page(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_free_page() argument
206 kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool, size_t nr_to_shrink) kbase_mem_pool_shrink_locked() argument
222 kbase_mem_pool_shrink(struct kbase_mem_pool *pool, size_t nr_to_shrink) kbase_mem_pool_shrink() argument
234 kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow) kbase_mem_pool_grow() argument
271 kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size) kbase_mem_pool_trim() argument
295 kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size) kbase_mem_pool_set_max_size() argument
317 struct kbase_mem_pool *pool; kbase_mem_pool_reclaim_count_objects() local
336 struct kbase_mem_pool *pool; kbase_mem_pool_reclaim_scan_objects() local
358 kbase_mem_pool_init(struct kbase_mem_pool *pool, const struct kbase_mem_pool_config *config, unsigned int order, int group_id, struct kbase_device *kbdev, struct kbase_mem_pool *next_pool) kbase_mem_pool_init() argument
395 kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool) kbase_mem_pool_mark_dying() argument
402 kbase_mem_pool_term(struct kbase_mem_pool *pool) kbase_mem_pool_term() argument
456 kbase_mem_pool_alloc(struct kbase_mem_pool *pool) kbase_mem_pool_alloc() argument
473 kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool) kbase_mem_pool_alloc_locked() argument
488 kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p, bool dirty) kbase_mem_pool_free() argument
510 kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p, bool dirty) kbase_mem_pool_free_locked() argument
529 kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages, struct tagged_addr *pages, bool partial_allowed) kbase_mem_pool_alloc_pages() argument
613 kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool, size_t nr_4k_pages, struct tagged_addr *pages) kbase_mem_pool_alloc_pages_locked() argument
656 kbase_mem_pool_add_array(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool zero, bool sync) kbase_mem_pool_add_array() argument
696 kbase_mem_pool_add_array_locked(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool zero, bool sync) kbase_mem_pool_add_array_locked() argument
738 kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool dirty, bool reclaimed) kbase_mem_pool_free_pages() argument
789 kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool dirty, bool reclaimed) kbase_mem_pool_free_pages_locked() argument
[all...]
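
The hits above outline the kbase_mem_pool life cycle: a pool is initialised against a kbase_device (optionally chained to a next_pool), single pages move through kbase_mem_pool_alloc()/kbase_mem_pool_free(), and kbase_mem_pool_term() drains whatever is still pooled. A minimal, illustrative call pattern follows; only the argument lists come from the hits, while the return types, the on-stack pool, and the order/group_id values are assumptions.

/* Illustrative sketch of the kbase_mem_pool round trip; order 0 (small pages)
 * and group_id 0 are assumptions for illustration, not driver defaults. */
static int example_pool_roundtrip(struct kbase_device *kbdev,
                                  const struct kbase_mem_pool_config *config)
{
	struct kbase_mem_pool pool;
	struct page *p;
	int err;

	/* order 0 pages, no kctx pool chained behind this one */
	err = kbase_mem_pool_init(&pool, config, 0, 0, kbdev, NULL);
	if (err)
		return err;

	p = kbase_mem_pool_alloc(&pool);              /* take one page from the pool */
	if (p)
		kbase_mem_pool_free(&pool, p, false); /* hand it back, not dirty */

	kbase_mem_pool_term(&pool);                   /* frees anything still pooled */
	return 0;
}
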
mali_kbase_mem.h
302 * @jit_node: Links to neighboring regions in the just-in-time memory pool.
736 * Max size for kbdev memory pool (in pages)
741 * Max size for kctx memory pool (in pages)
757 * initial configuration of a memory pool
759 * @config: Initial configuration for a physical memory pool
760 * @max_size: Maximum number of free pages that a pool created from
771 * initial configuration of a memory pool
773 * @config: Initial configuration for a physical memory pool
775 * Return: Maximum number of free pages that a pool created from @config
785 * kbase_mem_pool_init - Create a memory pool for a kbase device
993 kbase_mem_pool_size(struct kbase_mem_pool *pool) kbase_mem_pool_size() argument
1004 kbase_mem_pool_max_size(struct kbase_mem_pool *pool) kbase_mem_pool_max_size() argument
1905 kbase_mem_pool_lock(struct kbase_mem_pool *pool) kbase_mem_pool_lock() argument
1914 kbase_mem_pool_unlock(struct kbase_mem_pool *pool) kbase_mem_pool_unlock() argument
[all...]
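
Lines 757-775 of this header document a pair of helpers for a pool's initial configuration. A small sketch of how they pair up, assuming the accessor names kbase_mem_pool_config_set_max_size()/kbase_mem_pool_config_get_max_size() implied by those comments:

/* Sketch only: the accessor names are inferred from the doc comments above
 * and should be treated as assumptions. */
static void example_configure_pool(void)
{
	struct kbase_mem_pool_config cfg;

	/* Cap the pool at 256 free small pages (about 1 MiB held back at most). */
	kbase_mem_pool_config_set_max_size(&cfg, 256);
	WARN_ON(kbase_mem_pool_config_get_max_size(&cfg) != 256);
}
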
/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/
mali_kbase_mem_pool.c
25 #define pool_dbg(pool, format, ...) \
26 dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, (pool->next_pool) ? "kctx" : "kbdev", \
27 kbase_mem_pool_size(pool), kbase_mem_pool_max_size(pool), ##__VA_ARGS__)
32 static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool) in kbase_mem_pool_lock() argument
34 spin_lock(&pool->pool_lock); in kbase_mem_pool_lock()
37 static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool) in kbase_mem_pool_unlock() argument
39 spin_unlock(&pool->pool_lock); in kbase_mem_pool_unlock()
42 kbase_mem_pool_capacity(struct kbase_mem_pool *pool) kbase_mem_pool_capacity() argument
50 kbase_mem_pool_is_full(struct kbase_mem_pool *pool) kbase_mem_pool_is_full() argument
55 kbase_mem_pool_is_empty(struct kbase_mem_pool *pool) kbase_mem_pool_is_empty() argument
60 kbase_mem_pool_add_locked(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_add_locked() argument
70 kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_add() argument
77 kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool, struct list_head *page_list, size_t nr_pages) kbase_mem_pool_add_list_locked() argument
87 kbase_mem_pool_add_list(struct kbase_mem_pool *pool, struct list_head *page_list, size_t nr_pages) kbase_mem_pool_add_list() argument
94 kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool) kbase_mem_pool_remove_locked() argument
113 kbase_mem_pool_remove(struct kbase_mem_pool *pool) kbase_mem_pool_remove() argument
124 kbase_mem_pool_sync_page(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_sync_page() argument
131 kbase_mem_pool_zero_page(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_zero_page() argument
183 kbase_mem_pool_free_page(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_free_page() argument
195 kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool, size_t nr_to_shrink) kbase_mem_pool_shrink_locked() argument
210 kbase_mem_pool_shrink(struct kbase_mem_pool *pool, size_t nr_to_shrink) kbase_mem_pool_shrink() argument
221 kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow) kbase_mem_pool_grow() argument
237 kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size) kbase_mem_pool_trim() argument
254 kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size) kbase_mem_pool_set_max_size() argument
274 struct kbase_mem_pool *pool; kbase_mem_pool_reclaim_count_objects() local
283 struct kbase_mem_pool *pool; kbase_mem_pool_reclaim_scan_objects() local
308 kbase_mem_pool_init(struct kbase_mem_pool *pool, size_t max_size, struct kbase_device *kbdev, struct kbase_mem_pool *next_pool) kbase_mem_pool_init() argument
339 kbase_mem_pool_term(struct kbase_mem_pool *pool) kbase_mem_pool_term() argument
385 kbase_mem_pool_alloc(struct kbase_mem_pool *pool) kbase_mem_pool_alloc() argument
402 kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p, bool dirty) kbase_mem_pool_free() argument
424 kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages) kbase_mem_pool_alloc_pages() argument
470 kbase_mem_pool_add_array(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages, bool zero, bool sync) kbase_mem_pool_add_array() argument
509 kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages, bool dirty, bool reclaimed) kbase_mem_pool_free_pages() argument
[all...]
mali_kbase_mem_pool_debugfs.c
25 struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; in kbase_mem_pool_debugfs_size_get() local
27 *val = kbase_mem_pool_size(pool); in kbase_mem_pool_debugfs_size_get()
34 struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; in kbase_mem_pool_debugfs_size_set() local
36 kbase_mem_pool_trim(pool, val); in kbase_mem_pool_debugfs_size_set()
46 struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; in kbase_mem_pool_debugfs_max_size_get() local
48 *val = kbase_mem_pool_max_size(pool); in kbase_mem_pool_debugfs_max_size_get()
55 struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; in kbase_mem_pool_debugfs_max_size_set() local
57 kbase_mem_pool_set_max_size(pool, val); in kbase_mem_pool_debugfs_max_size_set()
65 void kbase_mem_pool_debugfs_init(struct dentry *parent, struct kbase_mem_pool *pool) in kbase_mem_pool_debugfs_init() argument
67 debugfs_create_file("mem_pool_size", S_IRUGO | S_IWUSR, parent, pool, in kbase_mem_pool_debugfs_init()
[all...]
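
The debugfs hits show two attribute files per pool, mem_pool_size and mem_pool_max_size, whose callbacks simply forward to kbase_mem_pool_size()/kbase_mem_pool_trim() and kbase_mem_pool_max_size()/kbase_mem_pool_set_max_size(). A sketch of the usual DEFINE_SIMPLE_ATTRIBUTE wiring this implies; the *_fops name and the "%llu\n" format string are assumptions:

/* Hedged reconstruction of the debugfs wiring suggested by the hits above. */
static int kbase_mem_pool_debugfs_size_get(void *data, u64 *val)
{
	struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;

	*val = kbase_mem_pool_size(pool);   /* current number of free pages */
	return 0;
}

static int kbase_mem_pool_debugfs_size_set(void *data, u64 val)
{
	struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data;

	kbase_mem_pool_trim(pool, val);     /* grow or shrink toward val */
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(kbase_mem_pool_debugfs_size_fops,
			kbase_mem_pool_debugfs_size_get,
			kbase_mem_pool_debugfs_size_set, "%llu\n");
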
mali_kbase_mem.h
307 /* List head used to store the region in the JIT allocation pool */
446 * Max size for kbdev memory pool (in pages)
451 * Max size for kctx memory pool (in pages)
456 * kbase_mem_pool_init - Create a memory pool for a kbase device
457 * @pool: Memory pool to initialize
458 * @max_size: Maximum number of free pages the pool can hold
460 * @next_pool: Pointer to the next pool or NULL.
462 * Allocations from @pool are in whole pages. Each @pool ha
554 kbase_mem_pool_size(struct kbase_mem_pool *pool) kbase_mem_pool_size() argument
565 kbase_mem_pool_max_size(struct kbase_mem_pool *pool) kbase_mem_pool_max_size() argument
[all...]
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/
mali_kbase_mem_pool.c
27 #define pool_dbg(pool, format, ...) \
28 dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, \
29 (pool->next_pool) ? "kctx" : "kbdev", \
30 kbase_mem_pool_size(pool), \
31 kbase_mem_pool_max_size(pool), \
37 static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool) in kbase_mem_pool_lock() argument
39 spin_lock(&pool->pool_lock); in kbase_mem_pool_lock()
42 static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool) in kbase_mem_pool_unlock() argument
44 spin_unlock(&pool->pool_lock); in kbase_mem_pool_unlock()
47 kbase_mem_pool_capacity(struct kbase_mem_pool *pool) kbase_mem_pool_capacity() argument
55 kbase_mem_pool_is_full(struct kbase_mem_pool *pool) kbase_mem_pool_is_full() argument
60 kbase_mem_pool_is_empty(struct kbase_mem_pool *pool) kbase_mem_pool_is_empty() argument
65 kbase_mem_pool_add_locked(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_add_locked() argument
76 kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_add() argument
83 kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool, struct list_head *page_list, size_t nr_pages) kbase_mem_pool_add_list_locked() argument
94 kbase_mem_pool_add_list(struct kbase_mem_pool *pool, struct list_head *page_list, size_t nr_pages) kbase_mem_pool_add_list() argument
102 kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool) kbase_mem_pool_remove_locked() argument
120 kbase_mem_pool_remove(struct kbase_mem_pool *pool) kbase_mem_pool_remove() argument
131 kbase_mem_pool_sync_page(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_sync_page() argument
140 kbase_mem_pool_zero_page(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_zero_page() argument
194 kbase_mem_pool_free_page(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_free_page() argument
207 kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool, size_t nr_to_shrink) kbase_mem_pool_shrink_locked() argument
223 kbase_mem_pool_shrink(struct kbase_mem_pool *pool, size_t nr_to_shrink) kbase_mem_pool_shrink() argument
235 kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow) kbase_mem_pool_grow() argument
251 kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size) kbase_mem_pool_trim() argument
266 kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size) kbase_mem_pool_set_max_size() argument
288 struct kbase_mem_pool *pool; kbase_mem_pool_reclaim_count_objects() local
298 struct kbase_mem_pool *pool; kbase_mem_pool_reclaim_scan_objects() local
323 kbase_mem_pool_init(struct kbase_mem_pool *pool, size_t max_size, struct kbase_device *kbdev, struct kbase_mem_pool *next_pool) kbase_mem_pool_init() argument
356 kbase_mem_pool_term(struct kbase_mem_pool *pool) kbase_mem_pool_term() argument
402 kbase_mem_pool_alloc(struct kbase_mem_pool *pool) kbase_mem_pool_alloc() argument
419 kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p, bool dirty) kbase_mem_pool_free() argument
441 kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages) kbase_mem_pool_alloc_pages() argument
488 kbase_mem_pool_add_array(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages, bool zero, bool sync) kbase_mem_pool_add_array() argument
526 kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, phys_addr_t *pages, bool dirty, bool reclaimed) kbase_mem_pool_free_pages() argument
[all...]
mali_kbase_mem_pool_debugfs.c
27 struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; in kbase_mem_pool_debugfs_size_get() local
29 *val = kbase_mem_pool_size(pool); in kbase_mem_pool_debugfs_size_get()
36 struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; in kbase_mem_pool_debugfs_size_set() local
38 kbase_mem_pool_trim(pool, val); in kbase_mem_pool_debugfs_size_set()
50 struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; in kbase_mem_pool_debugfs_max_size_get() local
52 *val = kbase_mem_pool_max_size(pool); in kbase_mem_pool_debugfs_max_size_get()
59 struct kbase_mem_pool *pool = (struct kbase_mem_pool *)data; in kbase_mem_pool_debugfs_max_size_set() local
61 kbase_mem_pool_set_max_size(pool, val); in kbase_mem_pool_debugfs_max_size_set()
72 struct kbase_mem_pool *pool) in kbase_mem_pool_debugfs_init()
75 pool, in kbase_mem_pool_debugfs_init()
71 kbase_mem_pool_debugfs_init(struct dentry *parent, struct kbase_mem_pool *pool) kbase_mem_pool_debugfs_init() argument
[all...]
mali_kbase_mem.h
306 /* List head used to store the region in the JIT allocation pool */
445 * Max size for kbdev memory pool (in pages)
450 * Max size for kctx memory pool (in pages)
455 * kbase_mem_pool_init - Create a memory pool for a kbase device
456 * @pool: Memory pool to initialize
457 * @max_size: Maximum number of free pages the pool can hold
459 * @next_pool: Pointer to the next pool or NULL.
461 * Allocations from @pool are in whole pages. Each @pool ha
557 kbase_mem_pool_size(struct kbase_mem_pool *pool) kbase_mem_pool_size() argument
568 kbase_mem_pool_max_size(struct kbase_mem_pool *pool) kbase_mem_pool_max_size() argument
[all...]
/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/
mali_kbase_mem_pool.c
32 #define pool_dbg(pool, format, ...) \
33 dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, (pool->next_pool) ? "kctx" : "kbdev", \
34 kbase_mem_pool_size(pool), kbase_mem_pool_max_size(pool), ##__VA_ARGS__)
39 static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool) in kbase_mem_pool_capacity() argument
41 ssize_t max_size = kbase_mem_pool_max_size(pool); in kbase_mem_pool_capacity()
42 ssize_t cur_size = kbase_mem_pool_size(pool); in kbase_mem_pool_capacity()
47 static bool kbase_mem_pool_is_full(struct kbase_mem_pool *pool) in kbase_mem_pool_is_full() argument
52 kbase_mem_pool_is_empty(struct kbase_mem_pool *pool) kbase_mem_pool_is_empty() argument
57 kbase_mem_pool_add_locked(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_add_locked() argument
67 kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_add() argument
74 kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool, struct list_head *page_list, size_t nr_pages) kbase_mem_pool_add_list_locked() argument
84 kbase_mem_pool_add_list(struct kbase_mem_pool *pool, struct list_head *page_list, size_t nr_pages) kbase_mem_pool_add_list() argument
91 kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool) kbase_mem_pool_remove_locked() argument
110 kbase_mem_pool_remove(struct kbase_mem_pool *pool) kbase_mem_pool_remove() argument
121 kbase_mem_pool_sync_page(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_sync_page() argument
127 kbase_mem_pool_zero_page(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_zero_page() argument
146 kbase_mem_alloc_page(struct kbase_mem_pool *pool) kbase_mem_alloc_page() argument
185 kbase_mem_pool_free_page(struct kbase_mem_pool *pool, struct page *p) kbase_mem_pool_free_page() argument
202 kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool, size_t nr_to_shrink) kbase_mem_pool_shrink_locked() argument
217 kbase_mem_pool_shrink(struct kbase_mem_pool *pool, size_t nr_to_shrink) kbase_mem_pool_shrink() argument
228 kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow) kbase_mem_pool_grow() argument
264 kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size) kbase_mem_pool_trim() argument
290 kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size) kbase_mem_pool_set_max_size() argument
310 struct kbase_mem_pool *pool; kbase_mem_pool_reclaim_count_objects() local
328 struct kbase_mem_pool *pool; kbase_mem_pool_reclaim_scan_objects() local
361 kbase_mem_pool_init(struct kbase_mem_pool *pool, const struct kbase_mem_pool_config *config, unsigned int order, int group_id, struct kbase_device *kbdev, struct kbase_mem_pool *next_pool) kbase_mem_pool_init() argument
399 kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool) kbase_mem_pool_mark_dying() argument
406 kbase_mem_pool_term(struct kbase_mem_pool *pool) kbase_mem_pool_term() argument
460 kbase_mem_pool_alloc(struct kbase_mem_pool *pool) kbase_mem_pool_alloc() argument
477 kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool) kbase_mem_pool_alloc_locked() argument
492 kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p, bool dirty) kbase_mem_pool_free() argument
514 kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p, bool dirty) kbase_mem_pool_free_locked() argument
533 kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages, struct tagged_addr *pages, bool partial_allowed) kbase_mem_pool_alloc_pages() argument
613 kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool, size_t nr_4k_pages, struct tagged_addr *pages) kbase_mem_pool_alloc_pages_locked() argument
652 kbase_mem_pool_add_array(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool zero, bool sync) kbase_mem_pool_add_array() argument
692 kbase_mem_pool_add_array_locked(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool zero, bool sync) kbase_mem_pool_add_array_locked() argument
734 kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool dirty, bool reclaimed) kbase_mem_pool_free_pages() argument
784 kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool, size_t nr_pages, struct tagged_addr *pages, bool dirty, bool reclaimed) kbase_mem_pool_free_pages_locked() argument
[all...]
mali_kbase_mem.h
267 * @jit_node: Links to neighboring regions in the just-in-time memory pool.
652 * Max size for kbdev memory pool (in pages)
657 * Max size for kctx memory pool (in pages)
673 * initial configuration of a memory pool
675 * @config: Initial configuration for a physical memory pool
676 * @max_size: Maximum number of free pages that a pool created from
686 * initial configuration of a memory pool
688 * @config: Initial configuration for a physical memory pool
690 * Return: Maximum number of free pages that a pool created from @config
699 * kbase_mem_pool_init - Create a memory pool for a kbase device
899 kbase_mem_pool_size(struct kbase_mem_pool *pool) kbase_mem_pool_size() argument
910 kbase_mem_pool_max_size(struct kbase_mem_pool *pool) kbase_mem_pool_max_size() argument
1760 kbase_mem_pool_lock(struct kbase_mem_pool *pool) kbase_mem_pool_lock() argument
1769 kbase_mem_pool_unlock(struct kbase_mem_pool *pool) kbase_mem_pool_unlock() argument
[all...]
/device/soc/rockchip/common/vendor/drivers/dma-buf/heaps/
page_pool.c
3 * DMA BUF page pool system
7 * Based on the ION page pool code
21 static inline struct page *dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool *pool) in dmabuf_page_pool_alloc_pages() argument
26 return alloc_pages(pool->gfp_mask, pool->order); in dmabuf_page_pool_alloc_pages()
29 static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool, struct page *page) in dmabuf_page_pool_free_pages() argument
31 __free_pages(page, pool->order); in dmabuf_page_pool_free_pages()
34 static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page) in dmabuf_page_pool_add() argument
44 mutex_lock(&pool->mutex); in dmabuf_page_pool_add()
45 list_add_tail(&page->lru, &pool in dmabuf_page_pool_add()
51 dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index) dmabuf_page_pool_remove() argument
67 dmabuf_page_pool_fetch(struct dmabuf_page_pool *pool) dmabuf_page_pool_fetch() argument
79 dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool) dmabuf_page_pool_alloc() argument
95 dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page) dmabuf_page_pool_free() argument
105 dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high) dmabuf_page_pool_total() argument
118 struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL); dmabuf_page_pool_create() local
141 dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool) dmabuf_page_pool_destroy() argument
162 dmabuf_page_pool_do_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask, int nr_to_scan) dmabuf_page_pool_do_shrink() argument
199 struct dmabuf_page_pool *pool; dmabuf_page_pool_shrink() local
[all...]
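
Taken together, the page_pool.c hits describe a create/alloc/free/destroy cycle around dmabuf_page_pool. A hedged usage sketch; the dmabuf_page_pool_create(gfp_mask, order) signature is inferred from pool->gfp_mask and pool->order in dmabuf_page_pool_alloc_pages() and is an assumption:

/* Sketch of the dmabuf page pool round trip implied by the hits above. */
static int example_page_pool_usage(void)
{
	struct dmabuf_page_pool *pool;
	struct page *page;

	/* Assumed create signature: gfp mask plus allocation order (0 = 4 KiB). */
	pool = dmabuf_page_pool_create(GFP_KERNEL | __GFP_ZERO, 0);
	if (!pool)
		return -ENOMEM;

	page = dmabuf_page_pool_alloc(pool);       /* reuses a pooled page if one exists */
	if (page)
		dmabuf_page_pool_free(pool, page); /* returns the page to the pool */

	dmabuf_page_pool_destroy(pool);            /* frees every page still held */
	return 0;
}
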
/device/soc/rockchip/rk3588/kernel/drivers/dma-buf/heaps/
page_pool.c
3 * DMA BUF page pool system
7 * Based on the ION page pool code
22 struct page *dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool *pool) in dmabuf_page_pool_alloc_pages() argument
26 return alloc_pages(pool->gfp_mask, pool->order); in dmabuf_page_pool_alloc_pages()
29 static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool, in dmabuf_page_pool_free_pages() argument
32 __free_pages(page, pool->order); in dmabuf_page_pool_free_pages()
35 static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page) in dmabuf_page_pool_add() argument
44 mutex_lock(&pool->mutex); in dmabuf_page_pool_add()
45 list_add_tail(&page->lru, &pool in dmabuf_page_pool_add()
52 dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index) dmabuf_page_pool_remove() argument
69 dmabuf_page_pool_fetch(struct dmabuf_page_pool *pool) dmabuf_page_pool_fetch() argument
80 dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool) dmabuf_page_pool_alloc() argument
95 dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page) dmabuf_page_pool_free() argument
104 dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high) dmabuf_page_pool_total() argument
116 struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL); dmabuf_page_pool_create() local
138 dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool) dmabuf_page_pool_destroy() argument
158 dmabuf_page_pool_do_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask, int nr_to_scan) dmabuf_page_pool_do_shrink() argument
192 struct dmabuf_page_pool *pool; dmabuf_page_pool_shrink() local
[all...]
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/tests/kutf/
kutf_mem.c
41 int kutf_mempool_init(struct kutf_mempool *pool) in kutf_mempool_init() argument
43 if (!pool) { in kutf_mempool_init()
48 INIT_LIST_HEAD(&pool->head); in kutf_mempool_init()
49 mutex_init(&pool->lock); in kutf_mempool_init()
55 void kutf_mempool_destroy(struct kutf_mempool *pool) in kutf_mempool_destroy() argument
60 if (!pool) { in kutf_mempool_destroy()
65 mutex_lock(&pool->lock); in kutf_mempool_destroy()
66 list_for_each_safe(remove, tmp, &pool->head) { in kutf_mempool_destroy()
73 mutex_unlock(&pool->lock); in kutf_mempool_destroy()
78 void *kutf_mempool_alloc(struct kutf_mempool *pool, size_t size) in kutf_mempool_alloc() argument
[all...]
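
The KUTF pool keeps a list of everything allocated from it, so tests only pair kutf_mempool_init() with a final kutf_mempool_destroy(); individual allocations are never freed on their own. A minimal sketch, assuming init returns 0 on success:

/* Minimal sketch of the kutf_mempool lifecycle shown in the hits above. */
static int example_kutf_pool(void)
{
	struct kutf_mempool pool;
	char *buf;

	if (kutf_mempool_init(&pool))
		return -ENOMEM;

	/* Allocations are tracked by the pool; there is no per-allocation free. */
	buf = kutf_mempool_alloc(&pool, 128);
	if (buf)
		strscpy(buf, "kutf", 128);

	/* One call releases every allocation made from the pool. */
	kutf_mempool_destroy(&pool);
	return 0;
}
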
/device/soc/hisilicon/hi3861v100/sdk_liteos/platform/os/Huawei_LiteOS/kernel/include/
los_memory.h
67 * @brief Get the pointer to the little memory pool.
71 * <li>This API is used to get the pointer to the little memory pool.</li>
80 * @retval #VOID* return the pointer to the little memory pool.
92 * <li>This API is used to get the size of memory totally used in memory pool.</li>
96 * <li>The input pool parameter must be initialized via func LOS_MemInit.</li>
99 * @param pool [IN] A pointer pointed to the memory pool.
101 * @retval #LOS_NOK The incoming parameter pool is NULL.
102 * @retval #UINT32 The size of the memory pool used.
107 extern UINT32 LOS_MemTotalUsedGet(VOID *pool);
[all...]
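
LOS_MemTotalUsedGet() reports how many bytes are currently used in a pool that was set up with LOS_MemInit(). An illustrative sketch, not taken from the SDK, using the standard LiteOS pool calls:

/* Illustrative only: initialize a private pool, make an allocation, and query
 * how much of the pool is in use. */
#define EXAMPLE_POOL_SIZE 0x1000

static UINT8 g_examplePool[EXAMPLE_POOL_SIZE];

VOID ExampleMemUsage(VOID)
{
    VOID *blk = NULL;

    if (LOS_MemInit(g_examplePool, EXAMPLE_POOL_SIZE) != LOS_OK) {
        return;
    }
    blk = LOS_MemAlloc(g_examplePool, 64);
    /* Returns the number of bytes used in the pool (LOS_NOK if pool is NULL). */
    (VOID)LOS_MemTotalUsedGet(g_examplePool);
    (VOID)LOS_MemFree(g_examplePool, blk);
}
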
/device/soc/rockchip/rk3588/hardware/mpp/include/
mpp_mem_pool.h
31 #define mpp_mem_pool_deinit(pool) mpp_mem_pool_deinit_f(__FUNCTION__, pool);
33 #define mpp_mem_pool_get(pool) mpp_mem_pool_get_f(__FUNCTION__, pool)
34 #define mpp_mem_pool_put(pool, p) mpp_mem_pool_put_f(__FUNCTION__, pool, p)
37 void mpp_mem_pool_deinit_f(const char *caller, MppMemPool pool);
39 void *mpp_mem_pool_get_f(const char *caller, MppMemPool pool);
40 void mpp_mem_pool_put_f(const char *caller, MppMemPool pool, void *p);
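
The rk3588 header wraps each pool call in a macro that passes __FUNCTION__ through to the *_f variant for caller tracking. A usage sketch; mpp_mem_pool_init(size) and the element type are assumptions, since only the deinit/get/put wrappers appear in the hits:

/* Sketch only: mpp_mem_pool_init(size) is assumed as the counterpart of
 * mpp_mem_pool_deinit(); ExampleTask is a hypothetical element type. */
typedef struct { int slot; } ExampleTask;

static void example_mpp_pool(void)
{
    MppMemPool pool = mpp_mem_pool_init(sizeof(ExampleTask));      /* assumed */
    ExampleTask *task = (ExampleTask *)mpp_mem_pool_get(pool);     /* macro adds __FUNCTION__ */

    if (task)
        task->slot = 0;

    mpp_mem_pool_put(pool, task);
    mpp_mem_pool_deinit(pool);
}
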
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/tests/kutf/
kutf_mem.c
37 int kutf_mempool_init(struct kutf_mempool *pool) in kutf_mempool_init() argument
39 if (!pool) { in kutf_mempool_init()
44 INIT_LIST_HEAD(&pool->head); in kutf_mempool_init()
50 void kutf_mempool_destroy(struct kutf_mempool *pool) in kutf_mempool_destroy() argument
55 if (!pool) { in kutf_mempool_destroy()
60 list_for_each_safe(remove, tmp, &pool->head) { in kutf_mempool_destroy()
70 void *kutf_mempool_alloc(struct kutf_mempool *pool, size_t size) in kutf_mempool_alloc() argument
74 if (!pool) { in kutf_mempool_alloc()
86 list_add(&ret->node, &pool->head); in kutf_mempool_alloc()
/device/soc/hisilicon/hi3861v100/sdk_liteos/third_party/lwip_sack/include/lwip/priv/
tcpip_priv.h
58 #define API_VAR_ALLOC(type, pool, name, errorval) do { \
59 name = (type *)memp_malloc(pool); \
64 #define API_VAR_ALLOC_POOL(type, pool, name, errorval) do { \
65 name = (type *)LWIP_MEMPOOL_ALLOC(pool); \
70 #define API_VAR_FREE(pool, name) memp_free(pool, name)
71 #define API_VAR_FREE_POOL(pool, name) LWIP_MEMPOOL_FREE(pool, name)
84 #define API_VAR_ALLOC(type, pool, name, errorval)
85 #define API_VAR_ALLOC_POOL(type, pool, name, errorval)
[all...]
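
These macros allocate the per-call message from a memp pool (or a custom LWIP_MEMPOOL) and compile away when the variable is not needed. An illustrative pairing of ALLOC and FREE; the message type and pool name (struct tcpip_msg, MEMP_TCPIP_MSG_API) are standard lwIP names but are assumptions as far as this excerpt goes:

/* Illustrative only: shows the alloc/free pairing of the macros above. */
static err_t example_send_msg(void)
{
    API_VAR_DECLARE(struct tcpip_msg, msg);

    /* Returns ERR_MEM from this function if the pool is exhausted. */
    API_VAR_ALLOC(struct tcpip_msg, MEMP_TCPIP_MSG_API, msg, ERR_MEM);

    /* ... fill in and post the message ... */

    API_VAR_FREE(MEMP_TCPIP_MSG_API, msg);
    return ERR_OK;
}
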
/device/soc/hisilicon/hi3861v100/sdk_liteos/platform/os/Huawei_LiteOS/kernel/base/include/
los_memory_pri.h
97 #define OS_MEM_HEAD(pool, size) OS_DLNK_HEAD(OS_MEM_HEAD_ADDR(pool), size)
98 #define OS_MEM_HEAD_ADDR(pool) ((VOID *)((UINT32)(UINTPTR)(pool) + sizeof(LosMemPoolInfo)))
100 #define OS_MEM_FIRST_NODE(pool) ((LosMemDynNode *)((UINT8 *)OS_MEM_HEAD_ADDR(pool) + OS_DLNK_HEAD_SIZE))
101 #define OS_MEM_END_NODE(pool, size) ((LosMemDynNode *)(((UINT8 *)(pool) + (size)) - OS_MEM_NODE_HEAD_SIZE))
111 * Memory pool information structure
114 VOID *poolAddr; /**< Starting address of a memory pool */
[all...]
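
The private LiteOS macros above lay a pool out as a LosMemPoolInfo header followed by dynamic nodes and a sentinel end node. A small sketch that uses only the macros shown; the poolSize argument and the LosMemDynNode type come from this header and are assumed here:

/* Sketch only: locate the first and sentinel nodes of an initialized pool. */
VOID ExamplePoolBounds(VOID *pool, UINT32 poolSize)
{
    LosMemDynNode *first = OS_MEM_FIRST_NODE(pool);
    LosMemDynNode *end = OS_MEM_END_NODE(pool, poolSize);

    /* All allocatable nodes live in [first, end); end is the sentinel node. */
    (VOID)first;
    (VOID)end;
}
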
/device/soc/hisilicon/hi3516dv300/sdk_linux/drv/mpp/component/hdmi/src/mkp/
drv_hdmi_event.c
35 static hi_s32 event_type_counter(hdmi_event_pool *pool, hdmi_event event, hi_bool write) in event_type_counter() argument
39 write ? pool->run_cnt.hpd_wr_cnt++ : pool->run_cnt.hpd_rd_cnt++; in event_type_counter()
42 write ? pool->run_cnt.unhpd_wr_cnt++ : pool->run_cnt.unhpd_rd_cnt++; in event_type_counter()
45 write ? pool->run_cnt.edid_fail_wr_cnt++ : pool->run_cnt.edid_fail_rd_cnt++; in event_type_counter()
48 write ? pool->run_cnt.rsen_con_wr_cnt++ : pool->run_cnt.rsen_con_rd_cnt++; in event_type_counter()
51 write ? pool in event_type_counter()
[all...]
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/tests/include/kutf/
kutf_mem.h
24 * This module implements a memory pool allocator, allowing a test
26 * single free which releases all of the resources held by the entire pool.
28 * Note that it is not possible to free single resources within the pool once
35 * struct kutf_mempool - the memory pool context management structure
44 * kutf_mempool_init() - Initialize a memory pool.
45 * @pool: Memory pool structure to initialize, provided by the user
49 int kutf_mempool_init(struct kutf_mempool *pool);
52 * kutf_mempool_alloc() - Allocate memory from a pool
53 * @pool
[all...]
/device/soc/rockchip/rk3399/hardware/mpp/include/
mpp_mem_pool.h
30 void mpp_mem_pool_deinit(MppMemPool pool);
32 void *mpp_mem_pool_get(MppMemPool pool);
33 void mpp_mem_pool_put(MppMemPool pool, void *p);
/device/soc/rockchip/rk3568/hardware/mpp/include/
mpp_mem_pool.h
30 void mpp_mem_pool_deinit(MppMemPool pool);
32 void *mpp_mem_pool_get(MppMemPool pool);
33 void mpp_mem_pool_put(MppMemPool pool, void *p);
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/tests/include/kutf/
kutf_mem.h
28 * This module implements a memory pool allocator, allowing a test
30 * single free which releases all of the resources held by the entire pool.
32 * Note that it is not possible to free single resources within the pool once
40 * struct kutf_mempool - the memory pool context management structure
51 * kutf_mempool_init() - Initialize a memory pool.
52 * @pool: Memory pool structure to initialize, provided by the user
56 int kutf_mempool_init(struct kutf_mempool *pool);
59 * kutf_mempool_alloc() - Allocate memory from a pool
60 * @pool
[all...]
/device/soc/rockchip/common/hardware/mpp/include/
mpp_mem_pool.h
30 void mpp_mem_pool_deinit(MppMemPool pool);
32 void *mpp_mem_pool_get(MppMemPool pool);
33 void mpp_mem_pool_put(MppMemPool pool, void *p);
/device/soc/rockchip/common/kernel/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd_wifi6/include/
bcm_mpool_pub.h
15 * pool manager: A singleton object that acts as a factory for
16 * pool allocators. It also is used for global
18 * in use across all data structures. The pool manager
22 * memory pool: An object for allocating homogenous memory blocks.
25 * bcm_mpm_* Memory pool manager
26 * bcm_mp_* Memory pool
34 * heap: The memory pool allocator uses the heap (malloc/free) for memory.
35 * In this case, the pool allocator is just providing statistics
85 * Opaque type definition for the pool manager handle. This object is used for global
86 * memory pool operation
[all...]
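
The comments describe a two-level design: a singleton pool manager (bcm_mpm_* prefix) that creates and tracks homogeneous-block pools (bcm_mp_* prefix), backed either by the heap or by preallocated memory. A purely hypothetical sketch of that flow; these exact calls and signatures are illustrative and not taken from the header:

/* Hypothetical sketch of the two-level pool manager / pool API described above. */
struct example_obj { int id; };            /* hypothetical fixed-size object */

static int example_mpool(osl_t *osh)
{
    bcm_mpm_mgr_h mgr;                     /* the singleton pool manager */
    bcm_mp_pool_h pool;                    /* one pool of homogeneous blocks */
    struct example_obj *blk;

    bcm_mpm_init(osh, 8, &mgr);                                    /* hypothetical */
    bcm_mpm_create_heap_pool(mgr, sizeof(*blk), "exobj", &pool);   /* hypothetical */

    blk = (struct example_obj *)bcm_mp_alloc(pool);                /* hypothetical */
    if (blk)
        bcm_mp_free(pool, blk);                                    /* hypothetical */

    bcm_mpm_delete_heap_pool(mgr, &pool);                          /* hypothetical */
    bcm_mpm_deinit(&mgr);                                          /* hypothetical */
    return 0;
}
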
