Lines Matching refs:pool
63 MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
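page_pool_size is a ttm module parameter, so (assuming the usual module_param() plumbing that normally accompanies a MODULE_PARM_DESC) it could be pinned at boot with something like ttm.page_pool_size=262144 on the kernel command line, or with modprobe ttm page_pool_size=262144 when ttm is built as a module; 262144 pages is only an illustrative figure.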
79 static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
95 if (!pool->use_dma_alloc) {
96 p = alloc_pages_node(pool->nid, gfp_flags, order);
109 vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
132 static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
147 if (!pool || !pool->use_dma_alloc) {
157 dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
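The fragments above already show the split inside ttm_pool_alloc_page()/ttm_pool_free_page(): a pool without use_dma_alloc goes to the plain page allocator on its preferred NUMA node, while a DMA-enabled pool gets one coherent allocation covering 2^order pages. A minimal sketch of the allocation side, assuming the caller passes in a slot for the DMA address (the real code keeps richer per-allocation bookkeeping):

static struct page *pool_alloc_page_sketch(struct ttm_pool *pool,
                                           gfp_t gfp_flags, unsigned int order,
                                           dma_addr_t *dma_addr)
{
        void *vaddr;

        if (!pool->use_dma_alloc)
                /* system-memory path: straight to the buddy allocator,
                 * honouring the pool's preferred NUMA node */
                return alloc_pages_node(pool->nid, gfp_flags, order);

        /* coherent-DMA path: one allocation covering 2^order pages */
        vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
                                dma_addr, gfp_flags, 0);
        if (!vaddr)
                return NULL;

        /* coherent memory may be vmalloc-backed, so pick the matching
         * address-to-page conversion */
        return is_vmalloc_addr(vaddr) ? vmalloc_to_page(vaddr) :
                                        virt_to_page(vaddr);
}

The free side mirrors it, as the dma_free_attrs() call at 157 shows: coherent allocations go back through the DMA API, everything else is simply handed back to the page allocator.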
185 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
191 if (pool->use_dma_alloc) {
198 addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
199 if (dma_mapping_error(pool->dev, addr))
212 static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
216 if (pool->use_dma_alloc)
219 dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
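For pools that are not DMA-coherent, the lines above show pages being streaming-mapped with dma_map_page()/dma_unmap_page(), always DMA_BIDIRECTIONAL. One detail worth noting: even when a higher-order page is mapped in a single call, TTM records one dma_addr_t per 4 KiB page. A rough sketch of that fan-out (how the coherent address reaches this function is simplified here, and the coherent_addr parameter is hypothetical):

static int pool_map_sketch(struct ttm_pool *pool, unsigned int order,
                           struct page *p, dma_addr_t coherent_addr,
                           dma_addr_t **dma_addr)
{
        dma_addr_t addr;
        unsigned int i;

        if (pool->use_dma_alloc) {
                /* coherent path: dma_alloc_attrs() already produced the
                 * bus address, handed in here for the sketch */
                addr = coherent_addr;
        } else {
                addr = dma_map_page(pool->dev, p, 0,
                                    (size_t)PAGE_SIZE << order,
                                    DMA_BIDIRECTIONAL);
                if (dma_mapping_error(pool->dev, addr))
                        return -EFAULT;
        }

        /* one entry per 4 KiB page, even for a higher-order allocation */
        for (i = 1 << order; i; --i) {
                *(*dma_addr)++ = addr;
                addr += PAGE_SIZE;
        }

        return 0;
}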
257 /* Initialize and add a pool type to the global shrinker list */
258 static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
261 pt->pool = pool;
282 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
286 static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
290 if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
291 return &pool->caching[caching].orders[order];
296 if (pool->use_dma32)
301 if (pool->use_dma32)
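The indexing above is the core of ttm_pool_select_type(): a pool that does its own DMA allocations, or that is pinned to a NUMA node, keeps private page lists indexed by caching attribute and allocation order; everything else falls back to shared global pools, with use_dma32 selecting the 32-bit-addressable variants. A compressed sketch of that decision (the global_* names are placeholders, not the driver's actual symbols):

static struct ttm_pool_type *select_type_sketch(struct ttm_pool *pool,
                                                enum ttm_caching caching,
                                                unsigned int order)
{
        /* device-private pools: DMA-coherent or NUMA-pinned allocations
         * keep their own lists, indexed by caching mode and page order */
        if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
                return &pool->caching[caching].orders[order];

        /* shared case: pick a global pool matching the caching mode, with
         * use_dma32 choosing the 32-bit-addressable flavour */
        if (pool->use_dma32)
                return &global_dma32_pools[caching].orders[order];

        return &global_pools[caching].orders[order];
}

In the real driver the shared pools cover only write-combined and uncached pages, so this lookup can also return NULL, which the callers treat as "no pooling, allocate and free directly".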
327 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
337 static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
339 if (pool->use_dma_alloc) {
348 /* Called when we got a page, either from a pool or newly allocated */
349 static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
358 r = ttm_pool_map(pool, order, p, dma_addr);
372 * @pool: The pool used for allocating.
383 static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
394 order = ttm_pool_page_order(pool, *pages);
397 ttm_pool_unmap(pool, tt->dma_address[i], nr);
399 pt = ttm_pool_select_type(pool, caching, order);
403 ttm_pool_free_page(pool, caching, order, *pages);
410 * @pool: ttm_pool to use
419 int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
434 WARN_ON(dma_addr && !pool->dev);
442 if (pool->use_dma32)
453 pt = ttm_pool_select_type(pool, tt->caching, order);
463 r = ttm_pool_page_allocated(pool, order, p,
480 (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
489 r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
514 ttm_pool_free_page(pool, page_caching, order, p);
519 ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
520 ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
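At the driver level, ttm_pool_alloc() is the entry point that fills a struct ttm_tt with backing pages (and, when the pool was created with use_dma_alloc, with DMA addresses). A minimal caller sketch, assuming a hypothetical mydrv_device that embeds both the ttm_device and the pool:

static int mydrv_ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *tt,
                                 struct ttm_operation_ctx *ctx)
{
        struct mydrv_device *mdev = container_of(bdev, struct mydrv_device, bdev);

        /* the pool picks page orders, applies caching transitions and does
         * the DMA mapping according to how it was initialized */
        return ttm_pool_alloc(&mdev->pool, tt, ctx);
}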
529 * @pool: Pool to give pages back to.
532 * Give the backing pages back to a pool or free them
534 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
536 ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
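And the matching teardown, with the same hypothetical mydrv_device:

static void mydrv_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *tt)
{
        struct mydrv_device *mdev = container_of(bdev, struct mydrv_device, bdev);

        /* unmaps the pages if needed, then returns them to a pool type or
         * frees them outright when no matching pool exists */
        ttm_pool_free(&mdev->pool, tt);
}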
544 * ttm_pool_init - Initialize a pool
546 * @pool: the pool to initialize
552 * Initialize the pool and its pool types.
554 void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
561 pool->dev = dev;
562 pool->nid = nid;
563 pool->use_dma_alloc = use_dma_alloc;
564 pool->use_dma32 = use_dma32;
569 ttm_pool_type_init(&pool->caching[i].orders[j],
570 pool, i, j);
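Seen from a driver's probe path, pool setup is a single call; a minimal sketch with the same hypothetical mydrv_device, where the two bool flags stand in for whatever the hardware actually requires:

/* one pool per device, pinned to the device's NUMA node */
ttm_pool_init(&mdev->pool, mdev->dev, dev_to_node(mdev->dev),
              mdev->need_coherent_pages /* use_dma_alloc */,
              mdev->need_dma32 /* use_dma32 */);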
576 * ttm_pool_fini - Cleanup a pool
578 * @pool: the pool to clean up
580 * Free all pages in the pool and unregister the types from the global shrinker.
583 void ttm_pool_fini(struct ttm_pool *pool)
587 if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
590 ttm_pool_type_fini(&pool->caching[i].orders[j]);
593 /* We removed the pool types from the LRU, but we need to also make sure
594 * that no shrinker is concurrently freeing pages from the pool.
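The counterpart on the remove path, once every ttm_tt backed by this pool has been freed:

ttm_pool_fini(&mdev->pool);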
649 /* Dump information about the different pool types */
690 * ttm_pool_debugfs - Debugfs dump function for a pool
692 * @pool: the pool to dump the information for
695 * Make a debugfs dump with the per pool and global information.
697 int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
701 if (!pool->use_dma_alloc) {
722 ttm_pool_debugfs_orders(pool->caching[i].orders, m);
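One way a driver could expose this dump, sketched with the usual seq_file plumbing (the mydrv_* names and the file placement are hypothetical; only ttm_pool_debugfs() itself comes from the listing):

static int mydrv_pool_show(struct seq_file *m, void *unused)
{
        struct mydrv_device *mdev = m->private;

        return ttm_pool_debugfs(&mdev->pool, m);
}
DEFINE_SHOW_ATTRIBUTE(mydrv_pool);

/* registered e.g. as:
 *   debugfs_create_file("ttm_pool", 0444, mdev->debugfs_root, mdev,
 *                       &mydrv_pool_fops);
 */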