Lines matching refs: page

/**
 * DOC: page_pool allocator
 *
 * The page_pool allocator is optimized for the XDP mode that
 * uses one frame per page, but it can fall back on the
 * regular page allocator APIs.
 *
 * The API keeps track of in-flight pages, in order to let API users know
 * when it is safe to free a page_pool object. Thus, API users
 * must call page_pool_put_page() to free the page, or attach
 * the page to a page_pool-aware object such as an skb marked with
 * skb_mark_for_recycle().
 *
 * API users must call page_pool_put_page() once on a page, as it
 * will either recycle the page, or in case of refcnt > 1, it will
 * release the DMA mapping and in-flight state accounting.
 */
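For context, a minimal setup sketch (not from this header; the function name and the sizing are illustrative, error handling trimmed):

#include <linux/device.h>
#include <net/page_pool.h>

static struct page_pool *my_create_rx_pool(struct device *dev)
{
        struct page_pool_params pp_params = {
                .flags          = PP_FLAG_DMA_MAP,      /* pool maps/unmaps pages for DMA */
                .order          = 0,                    /* order-0 pages */
                .pool_size      = 1024,                 /* roughly one RX ring worth */
                .nid            = NUMA_NO_NODE,
                .dev            = dev,
                .dma_dir        = DMA_FROM_DEVICE,
        };

        return page_pool_create(&pp_params);            /* ERR_PTR() on failure */
}

Pages then come from page_pool_dev_alloc_pages(), go back through the put functions below, and the pool itself is torn down with page_pool_destroy() once all in-flight pages have been returned.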
/* Drivers that wish to harvest page pool stats and report them to users
 * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
 * struct page_pool_stats and call page_pool_get_stats() to get stats
 * for the specified pool.
 */
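A sketch of harvesting those stats (assumes a kernel with CONFIG_PAGE_POOL_STATS; the reporting function is illustrative, the field names are those of struct page_pool_stats):

#include <net/page_pool.h>

static void my_report_pool_stats(struct page_pool *pool)
{
        struct page_pool_stats stats = { };

        /* page_pool_get_stats() returns false when no stats are available */
        if (!page_pool_get_stats(pool, &stats))
                return;

        pr_info("alloc fast=%llu slow=%llu, recycle cached=%llu ring=%llu\n",
                (unsigned long long)stats.alloc_stats.fast,
                (unsigned long long)stats.alloc_stats.slow,
                (unsigned long long)stats.recycle_stats.cached,
                (unsigned long long)stats.recycle_stats.ring);
}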
/**
 * page_pool_dev_alloc_pages() - allocate a page.
 * @pool: pool from which to allocate
 *
 * Get a page from the page allocator or page_pool caches.
 */
static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
        gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

        return page_pool_alloc_pages(pool, gfp);
}
static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
                                                    unsigned int *offset,
                                                    unsigned int size)
{
        gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

        return page_pool_alloc_frag(pool, offset, size, gfp);
}
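A usage sketch for the frag API (assumes the pool was created with PP_FLAG_PAGE_FRAG; "rx_buf" is a hypothetical driver structure): the pool packs several sub-page buffers into one page and tracks them through pp_frag_count.

        unsigned int offset;
        struct page *page;

        /* Request a 2048-byte chunk; it may share a page with other chunks. */
        page = page_pool_dev_alloc_frag(pool, &offset, 2048);
        if (!page)
                return -ENOMEM;

        rx_buf->page = page;
        rx_buf->offset = offset;        /* the chunk starts at this page offset */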
/**
 * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
 * @pool: pool from which page was allocated
 *
 * Get the stored DMA direction. A driver might decide to store this locally
 * and avoid the extra cache line from page_pool to determine the direction.
 */
static inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
        return pool->p.dma_dir;
}
/* pp_frag_count represents the number of writers who can update the page
 * either by updating skb->data or via DMA mappings for the hardware.
 * We can't rely on the page refcnt for that as we don't know who might be
 * holding page references and we can't reliably destroy or sync DMA mappings
 * of the fragments.
 *
 * When pp_frag_count reaches 0 we can either recycle the page if the page
 * refcnt is 1 or return it back to the memory allocator and destroy any
 * mappings we have.
 */
static inline void page_pool_fragment_page(struct page *page, long nr)
{
        atomic_long_set(&page->pp_frag_count, nr);
}
static inline long page_pool_defrag_page(struct page *page, long nr)
{
        long ret;

        /* If nr == pp_frag_count then we have cleared all remaining
         * references to the page. No need to actually overwrite it, instead
         * we can leave this to be overwritten by the calling function.
         *
         * The main advantage to doing this is that an atomic_read is
         * generally a much cheaper operation than an atomic update,
         * especially when dealing with a page that may be partitioned
         * into only 2 or 3 pieces.
         */
        if (atomic_long_read(&page->pp_frag_count) == nr)
                return 0;

        ret = atomic_long_sub_return(nr, &page->pp_frag_count);
        WARN_ON(ret < 0);
        return ret;
}
static inline bool page_pool_is_last_frag(struct page_pool *pool,
                                          struct page *page)
{
        /* If fragments aren't enabled or count is 0 we were the last user */
        return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
               (page_pool_defrag_page(page, 1) == 0);
}
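A sketch of driver-side fragmenting (again assumes PP_FLAG_PAGE_FRAG; setup_rx_buffer() is a hypothetical helper): take nr pp_frag_count references up front, then release each buffer normally; only the final release recycles the page.

        struct page *page = page_pool_dev_alloc_pages(pool);
        unsigned int i, nr = PAGE_SIZE / 1024;

        if (!page)
                return -ENOMEM;

        page_pool_fragment_page(page, nr);      /* one reference per 1KB slice */

        for (i = 0; i < nr; i++)
                setup_rx_buffer(page, i * 1024);

        /* Each consumer later calls page_pool_put_full_page(pool, page, false);
         * the put that drops pp_frag_count to zero recycles or unmaps the page.
         */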
/**
 * page_pool_put_page() - release a reference to a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 * @dma_sync_size: how much of the page may have been touched by the device
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * The outcome of this depends on the page refcnt. If the driver bumps
 * the refcnt > 1 this will unmap the page. If the page refcnt is 1
 * the allocator owns the page and will try to recycle it in one of the pool
 * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
 * using dma_sync_single_range_for_device().
 */
static inline void page_pool_put_page(struct page_pool *pool,
                                      struct page *page,
                                      unsigned int dma_sync_size,
                                      bool allow_direct)
{
        /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
         * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
         */
#ifdef CONFIG_PAGE_POOL
        if (!page_pool_is_last_frag(pool, page))
                return;

        page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
#endif
}
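The @dma_sync_size argument lets a driver bound the sync-for-device cost to the bytes the hardware may actually have dirtied. A one-line sketch (pkt_len is illustrative; the final argument may only be true in NAPI/softirq context):

        /* RX completion: the NIC wrote at most pkt_len bytes, so only that
         * span needs re-syncing for the device when the page is recycled. */
        page_pool_put_page(pool, page, pkt_len, true);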
/**
 * page_pool_put_full_page() - release a reference on a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * Similar to page_pool_put_page(), but will DMA sync the entire memory area
 * as configured in &page_pool_params.max_len.
 */
static inline void page_pool_put_full_page(struct page_pool *pool,
                                           struct page *page, bool allow_direct)
{
        page_pool_put_page(pool, page, -1, allow_direct);
}
/**
 * page_pool_recycle_direct() - release a reference on a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 *
 * Similar to page_pool_put_full_page() but caller must guarantee safe context
 * (e.g. NAPI), since it will recycle the page directly into the pool fast cache.
 */
static inline void page_pool_recycle_direct(struct page_pool *pool,
                                            struct page *page)
{
        page_pool_put_full_page(pool, page, true);
}
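A sketch of the direct-recycle path for an XDP_DROP verdict (the function name is illustrative; it must only be called from the driver's NAPI poll context):

static void my_xdp_drop(struct page_pool *pool, struct page *page)
{
        /* Equivalent to page_pool_put_full_page(pool, page, true): the page
         * goes straight back into the pool's lockless fast cache. */
        page_pool_recycle_direct(pool, page);
}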
#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
                (sizeof(dma_addr_t) > sizeof(unsigned long))

/**
 * page_pool_get_dma_addr() - Retrieve the stored DMA address.
 * @page: page allocated from a page pool
 *
 * Fetch the DMA address of the page. The page pool to which the page belongs
 * must have been created with PP_FLAG_DMA_MAP.
 */
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
        dma_addr_t ret = page->dma_addr;

        if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
                /* dma_addr_upper holds the high 32 bits; the split shift
                 * avoids an invalid 32-bit shift when dma_addr_t is narrow. */
                ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;

        return ret;
}
static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
        page->dma_addr = addr;
        if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
                page->dma_addr_upper = upper_32_bits(addr);
}
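Finally, a refill sketch using the stored mapping (the descriptor layout and "rxq" are hypothetical; assumes the pool was created with PP_FLAG_DMA_MAP, so no per-page dma_map_page() is needed):

        dma_addr_t dma = page_pool_get_dma_addr(page);

        /* Point the hardware at the buffer, skipping the reserved headroom. */
        rx_desc->addr = cpu_to_le64(dma + rxq->headroom);
        rx_desc->len = cpu_to_le16(rxq->buf_len);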