Lines Matching defs:pool (mm/dmapool.c)

14  * The current design of this allocator is fairly simple.  The pool is
42 struct dma_pool { /* the pool */
71 struct dma_pool *pool;
81 list_for_each_entry(pool, &dev->dma_pools, pools) {
85 spin_lock_irq(&pool->lock);
86 list_for_each_entry(page, &pool->page_list, page_list) {
90 spin_unlock_irq(&pool->lock);
92 /* per-pool info, no real statistics yet */
94 pool->name, blocks,
95 pages * (pool->allocation / pool->size),
96 pool->size, pages);
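
The lines above come from show_pools(), which backs the per-device "pools" sysfs attribute. From the format arguments visible at lines 94-96, each row reports: pool name, blocks currently allocated, total block capacity (pages * allocation/size), block size, and page count. An illustrative (not captured) reading might look like this; the device path and pool names are made up:

	$ cat /sys/devices/.../pools
	poolinfo - 0.1
	buffer-2048          1   16 2048  1
	buffer-512           0   32  512  1
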
108 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
109 * @name: name of pool, for diagnostics
111 * @size: size of the blocks in this pool.
127 * Return: a dma allocation pool with the requested characteristics, or
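
The kerneldoc fragment above belongs to dma_pool_create(). A minimal usage sketch, assuming a driver with a struct device *dev in scope (the pool name and sizes here are made up for illustration):

	#include <linux/dmapool.h>

	struct dma_pool *pool;

	/* 64-byte blocks, 32-byte aligned, never crossing a 4 KiB boundary */
	pool = dma_pool_create("example-pool", dev, 64, 32, 4096);
	if (!pool)
		return -ENOMEM;

A boundary argument of 0 asks for no boundary constraint, and dma_pool_create() returns NULL on failure.
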
203 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
206 unsigned int next_boundary = pool->boundary;
209 unsigned int next = offset + pool->size;
210 if (unlikely((next + pool->size) >= next_boundary)) {
212 next_boundary += pool->boundary;
216 } while (offset < pool->allocation);
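
pool_initialise_page() threads a free list through the page itself: the first bytes of each free block store the offset of the next free block, and the check at line 210 advances the link to the next boundary so no block straddles pool->boundary. A standalone userspace sketch of the same chaining idea (constants and names are hypothetical, boundary handling omitted):

	#include <stdio.h>
	#include <string.h>

	#define ALLOCATION 4096	/* bytes managed per page */
	#define BLK_SIZE     64	/* fixed block size */

	/* Store each free block's successor offset in the block itself. */
	static void init_page(unsigned char *vaddr)
	{
		unsigned int offset = 0;

		do {
			unsigned int next = offset + BLK_SIZE;

			memcpy(vaddr + offset, &next, sizeof(next));
			offset = next;
		} while (offset < ALLOCATION);
	}

	int main(void)
	{
		static unsigned char page[ALLOCATION];
		unsigned int offset = 0, next, i;

		init_page(page);
		for (i = 0; i < 4; i++) {	/* walk the first few links */
			memcpy(&next, page + offset, sizeof(next));
			printf("free block at %u -> next at %u\n", offset, next);
			offset = next;
		}
		return 0;
	}

Allocation then becomes "pop the head offset", and freeing pushes a block back by writing the old head into it; the page->offset bookkeeping visible in dma_pool_alloc() and dma_pool_free() below implements exactly this, with a full page detected by page->offset reaching pool->allocation (line 326).
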
219 static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
226 page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
230 memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
232 pool_initialise_page(pool, page);
247 static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
252 memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
254 dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
260 * dma_pool_destroy - destroys a pool of dma memory blocks.
261 * @pool: dma pool that will be destroyed
264 * Caller guarantees that no more memory from the pool is in use,
265 * and that nothing will try to use the pool after this call.
267 void dma_pool_destroy(struct dma_pool *pool)
272 if (unlikely(!pool))
277 list_del(&pool->pools);
278 if (pool->dev && list_empty(&pool->dev->dma_pools))
282 device_remove_file(pool->dev, &dev_attr_pools);
285 list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
287 if (pool->dev)
288 dev_err(pool->dev, "%s %s, %p busy\n", __func__,
289 pool->name, page->vaddr);
292 pool->name, page->vaddr);
297 pool_free_page(pool, page);
300 kfree(pool);
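
dma_pool_destroy() (lines 260-300) walks the page list and complains via dev_err (line 288) about any page that still holds live blocks; only idle pages reach pool_free_page(). That is why the kerneldoc insists that nothing may still be in use. A hedged teardown sketch for a hypothetical remove() path:

	/* Every block must have been dma_pool_free()d before this point. */
	dma_pool_destroy(pool);
	pool = NULL;

The check at line 272 makes dma_pool_destroy() tolerate a NULL pool, so unconditional calls on error paths are safe.
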
306 * @pool: dma pool that will produce the block
314 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
324 spin_lock_irqsave(&pool->lock, flags);
325 list_for_each_entry(page, &pool->page_list, page_list) {
326 if (page->offset < pool->allocation)
330 /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
331 spin_unlock_irqrestore(&pool->lock, flags);
333 page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
337 spin_lock_irqsave(&pool->lock, flags);
339 list_add(&page->page_list, &pool->page_list);
351 for (i = sizeof(page->offset); i < pool->size; i++) {
354 if (pool->dev)
355 dev_err(pool->dev, "%s %s, %p (corrupted)\n",
356 __func__, pool->name, retval);
359 __func__, pool->name, retval);
366 data, pool->size, 1);
371 memset(retval, POOL_POISON_ALLOCATED, pool->size);
373 spin_unlock_irqrestore(&pool->lock, flags);
376 memset(retval, 0, pool->size);
382 static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
386 list_for_each_entry(page, &pool->page_list, page_list) {
389 if ((dma - page->dma) < pool->allocation)
396 * dma_pool_free - put block back into dma pool
397 * @pool: the dma pool holding the block
404 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
410 spin_lock_irqsave(&pool->lock, flags);
411 page = pool_find_page(pool, dma);
413 spin_unlock_irqrestore(&pool->lock, flags);
414 if (pool->dev)
415 dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
416 __func__, pool->name, vaddr, &dma);
419 __func__, pool->name, vaddr, &dma);
425 memset(vaddr, 0, pool->size);
428 spin_unlock_irqrestore(&pool->lock, flags);
429 if (pool->dev)
430 dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
431 __func__, pool->name, vaddr, &dma);
434 __func__, pool->name, vaddr, &dma);
439 while (chain < pool->allocation) {
444 spin_unlock_irqrestore(&pool->lock, flags);
445 if (pool->dev)
446 dev_err(pool->dev, "%s %s, dma %pad already free\n",
447 __func__, pool->name, &dma);
450 __func__, pool->name, &dma);
454 memset(vaddr, POOL_POISON_FREED, pool->size);
462 * if (!is_page_busy(page)) pool_free_page(pool, page);
465 spin_unlock_irqrestore(&pool->lock, flags);
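
dma_pool_alloc() and dma_pool_free() (lines 306-465 above) form the hot path. A usage sketch, assuming the pool from the earlier example and a hypothetical descriptor type no larger than the pool's block size:

	dma_addr_t dma;
	struct mydev_desc *desc;	/* hypothetical hardware descriptor */

	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!desc)
		return -ENOMEM;

	/* ... program the device with the dma handle, wait for completion ... */

	dma_pool_free(pool, desc, dma);

Note that dma_pool_free() takes both the kernel virtual address and the dma handle; the handle is what pool_find_page() (line 382) uses to locate the owning page via the range check at line 389.
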
470 * Managed DMA pool
474 struct dma_pool *pool = *(struct dma_pool **)res;
476 dma_pool_destroy(pool);
486 * @name: name of pool, for diagnostics
488 * @size: size of the blocks in this pool.
492 * Managed dma_pool_create(). DMA pool created with this function is
495 * Return: a managed dma allocation pool with the requested
501 struct dma_pool **ptr, *pool;
507 pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
508 if (pool)
513 return pool;
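
dmam_pool_create() (lines 501-513) is the devres-managed variant: the pool is destroyed automatically on driver detach, so probe() needs no matching cleanup. A sketch with hypothetical driver and pool names:

	static int mydev_probe(struct device *dev)
	{
		struct dma_pool *pool;

		pool = dmam_pool_create("mydev-pool", dev, 64, 32, 0);
		if (!pool)
			return -ENOMEM;

		/* released automatically via dmam_pool_release() on detach */
		return 0;
	}
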
519 * @pool: dma pool that will be destroyed
523 void dmam_pool_destroy(struct dma_pool *pool)
525 struct device *dev = pool->dev;
527 WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));