Lines matching defs:page (each entry below is one matching source line, prefixed by its line number in the searched file)
300 * @frag_size is 0, otherwise data should come from the page allocator
328 * by a page fragment, not kmalloc() or vmalloc()
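
The two comment fragments above (lines 300 and 328) state the contract of __build_skb()/build_skb(): if @frag_size is nonzero, @data must come from a page fragment, not kmalloc() or vmalloc(). A minimal sketch of a caller honoring that contract; rx_build_skb() is a hypothetical name, while netdev_alloc_frag(), build_skb() and skb_free_frag() are the real APIs:

#include <linux/skbuff.h>

/* Hypothetical receive-path helper: pair netdev_alloc_frag() with
 * build_skb() so a nonzero frag_size is always backed by a page
 * fragment, never by kmalloc() or vmalloc() memory. */
static struct sk_buff *rx_build_skb(unsigned int len)
{
        unsigned int size = SKB_DATA_ALIGN(len) +
                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        void *data = netdev_alloc_frag(size);
        struct sk_buff *skb;

        if (!data)
                return NULL;

        skb = build_skb(data, size);
        if (unlikely(!skb))
                skb_free_frag(data);    /* drop the fragment reference */
        return skb;
}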
369 struct page_frag_cache page;
381 return page_frag_alloc(&nc->page, fragsz, gfp_mask);
393 * netdev_alloc_frag - allocate a page fragment
396 * Allocates a frag from a page for receive buffer.
464 nc = this_cpu_ptr(&napi_alloc_cache.page);
533 data = page_frag_alloc(&nc->page, len, gfp_mask);
543 if (nc->page.pfmemalloc)
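
Lines 369-543 are the per-cpu fragment-cache plumbing: a struct page_frag_cache embedded in napi_alloc_cache, carved up by page_frag_alloc(), with the page's pfmemalloc state copied into each skb built on top of it. A sketch of the same sequence, assuming the cache is handed in explicitly (in skbuff.c it is a file-local per-cpu variable); frag_cache_rx_skb() is an invented name:

#include <linux/gfp.h>
#include <linux/skbuff.h>

static struct sk_buff *frag_cache_rx_skb(struct page_frag_cache *cache,
                                         unsigned int len, gfp_t gfp_mask)
{
        struct sk_buff *skb;
        void *data;

        len = SKB_DATA_ALIGN(len) +
              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        data = page_frag_alloc(cache, len, gfp_mask);
        if (!data)
                return NULL;

        skb = __build_skb(data, len);
        if (unlikely(!skb)) {
                skb_free_frag(data);
                return NULL;
        }
        skb->head_frag = 1;
        /* The fragment came from an emergency-reserve page: mark the
         * skb so only PFMEMALLOC-entitled sockets consume it. */
        if (cache->pfmemalloc)
                skb->pfmemalloc = 1;
        return skb;
}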
556 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
559 skb_fill_page_desc(skb, i, page, off, size);
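
Line 556 is skb_add_rx_frag(), the helper drivers use to hang a received page on an skb; line 559 shows it delegating to skb_fill_page_desc() before it adjusts the skb's length and truesize. A hypothetical driver-side use (rx_add_page() and the PAGE_SIZE truesize are illustrative assumptions):

#include <linux/skbuff.h>

/* Attach one received page to @skb as the next fragment.
 * skb_add_rx_frag() updates skb->len, skb->data_len and
 * skb->truesize, so no manual accounting is needed here. */
static void rx_add_page(struct sk_buff *skb, struct page *page,
                        unsigned int len)
{
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                        0, len, PAGE_SIZE);
}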
794 struct page *p;
1353 struct page *page, *head = NULL;
1365 page = alloc_page(gfp_mask);
1366 if (!page) {
1368 struct page *next = (struct page *)page_private(head);
1374 set_page_private(page, (unsigned long)head);
1375 head = page;
1378 page = head;
1383 struct page *p;
1394 page = (struct page *)page_private(page);
1397 memcpy(page_address(page) + d_off,
1413 head = (struct page *)page_private(head);
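
Lines 1353-1413 (from skb_copy_ubufs()) allocate pages and thread them into a singly linked list through page_private, so a mid-loop allocation failure can unwind everything allocated so far. The same pattern as a standalone sketch; alloc_page_chain() is a made-up name:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *alloc_page_chain(int num_pages, gfp_t gfp_mask)
{
        struct page *page, *head = NULL;
        int i;

        for (i = 0; i < num_pages; i++) {
                page = alloc_page(gfp_mask);
                if (!page) {
                        /* Unwind: walk the chain built so far. */
                        while (head) {
                                struct page *next =
                                        (struct page *)page_private(head);
                                put_page(head);
                                head = next;
                        }
                        return NULL;
                }
                /* Link the new page in front of the chain. */
                set_page_private(page, (unsigned long)head);
                head = page;
        }
        return head;
}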
2245 struct page *p;
2303 static struct page *linear_to_page(struct page *page, unsigned int *len,
2314 memcpy(page_address(pfrag->page) + pfrag->offset,
2315 page_address(page) + *offset, *len);
2319 return pfrag->page;
2323 struct page *page,
2327 spd->pages[spd->nr_pages - 1] == page &&
2333 * Fill page/offset/length into spd, if it can hold more pages.
2336 struct pipe_inode_info *pipe, struct page *page,
2345 page = linear_to_page(page, len, &offset, sk);
2346 if (!page)
2349 if (spd_can_coalesce(spd, page, offset)) {
2353 get_page(page);
2354 spd->pages[spd->nr_pages] = page;
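
Lines 2323-2354 fragment two splice helpers. Reassembled, the coalescing test referenced at line 2327 reads as below; this is a reconstruction from the listed fragments against struct splice_pipe_desc from <linux/splice.h>:

#include <linux/splice.h>

/* A new chunk can merge into the previous splice slot only when it
 * lives on the same page and starts exactly where that slot ends. */
static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
                             struct page *page, unsigned int offset)
{
        return spd->nr_pages &&
               spd->pages[spd->nr_pages - 1] == page &&
               (spd->partial[spd->nr_pages - 1].offset +
                spd->partial[spd->nr_pages - 1].len == offset);
}

spd_fill_page() (line 2336) uses this test to grow the previous partial page instead of consuming a fresh pipe slot and page reference.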
2362 static bool __splice_segment(struct page *page, unsigned int poff,
2386 if (spd_fill_page(spd, pipe, page, &flen, poff,
2411 * we can avoid a copy since we own the head portion of this page.
2458 struct page *pages[MAX_SKB_FRAGS];
2606 struct page *p;
2685 struct page *p;
2785 struct page *p;
2975 struct page *page;
2992 page = virt_to_head_page(from->head);
2993 offset = from->data - (unsigned char *)page_address(page);
2994 __skb_fill_page_desc(to, 0, page, offset, plen);
2995 get_page(page);
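
Lines 2975-2995 (from skb_zerocopy()) show the zero-copy move for a page-backed linear area: find the page behind from->head, compute the data offset inside it, and install it as fragment 0 of the target with its own page reference. A sketch, assuming from->head_frag is set and that the caller updates len/data_len/truesize as skb_zerocopy() itself does; share_head_as_frag() is an invented name:

#include <linux/mm.h>
#include <linux/skbuff.h>

static void share_head_as_frag(struct sk_buff *to, struct sk_buff *from,
                               int plen)
{
        struct page *page = virt_to_head_page(from->head);
        unsigned int offset = from->data -
                              (unsigned char *)page_address(page);

        /* Requires from->head_frag: the head must be page backed. */
        __skb_fill_page_desc(to, 0, page, offset, plen);
        get_page(page);         /* the frag holds its own reference */
        skb_shinfo(to)->nr_frags = 1;
}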
3632 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3637 if (skb_can_coalesce(skb, i, page, offset)) {
3640 get_page(page);
3641 skb_fill_page_desc(skb, i, page, offset, size);
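
Lines 3632-3641 are skb_append_pagefrags(), which first tries to coalesce into the last fragment before consuming a new frag slot. Reconstructed from the listed fragments (note it intentionally leaves the skb->len and skb->data_len accounting to its callers):

#include <linux/skbuff.h>

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
                         int offset, size_t size)
{
        int i = skb_shinfo(skb)->nr_frags;

        if (skb_can_coalesce(skb, i, page, offset)) {
                /* Contiguous with the previous frag: just grow it. */
                skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
        } else if (i < MAX_SKB_FRAGS) {
                get_page(page);
                skb_fill_page_desc(skb, i, page, offset, size);
        } else {
                return -EMSGSIZE;
        }
        return 0;
}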
3675 struct page *page;
3677 page = virt_to_head_page(frag_skb->head);
3678 __skb_frag_set_page(&head_frag, page);
3680 (unsigned char *)page_address(page));
3840 * skb_frag_t page sharing. Therefore we must fallback to
4207 struct page *page = virt_to_head_page(skb->head);
4215 (unsigned char *)page_address(page) +
4220 __skb_frag_set_page(frag, page);
5233 struct page *page;
5245 page = virt_to_head_page(from->head);
5246 offset = from->data - (unsigned char *)page_address(page);
5249 page, offset, skb_headlen(from));
5889 * alloc_skb_with_frags - allocate skb with page frags
5893 * @max_page_order: max page order desired.
5908 struct page *page;
5930 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
5934 if (page)
5942 page = alloc_page(gfp_mask);
5943 if (!page)
5948 skb_fill_page_desc(skb, i, page, 0, chunk);
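
Lines 5889-5948 are alloc_skb_with_frags(): a linear area of @header_len plus page frags for the payload, attempted with high-order pages first (line 5930, with __GFP_DIRECT_RECLAIM masked off so the attempt cannot stall) and order-0 pages as the fallback (line 5942). A hypothetical caller; make_payload_skb() and the PAGE_ALLOC_COSTLY_ORDER choice are illustrative, while sock_alloc_send_pskb() is the in-tree user:

#include <linux/err.h>
#include <linux/skbuff.h>

static struct sk_buff *make_payload_skb(unsigned long header_len,
                                        unsigned long data_len)
{
        int errcode;
        struct sk_buff *skb;

        skb = alloc_skb_with_frags(header_len, data_len,
                                   PAGE_ALLOC_COSTLY_ORDER, &errcode,
                                   GFP_KERNEL);
        if (!skb)
                return ERR_PTR(errcode);

        /* Leave the linear area for protocol headers; the payload
         * lives in the page frags that were just attached. */
        skb_reserve(skb, header_len);
        return skb;
}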
6218 /* Nice, we can free page frag(s) right now */