Lines Matching defs:page

16  *  page-based RX descriptor engine with separate completion rings
20 * RX is handled by page sized buffers that are attached as fragments to
29 * -- on page reclamation, the driver swaps the page with a spare page.
30 * if that page is still in use, it frees its reference to that page,
31 * and allocates a new page for use. otherwise, it just recycles
32 * the page.
155 * max mtu == 2 * page size - ethernet header - 64 - swivel =
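The reclamation policy in that header comment is compact enough to sketch directly. The structures below are pared-down stand-ins for the driver's cas_page_t and struct cas (field and list names are assumptions, not the real layouts) and are reused by the later sketches in this listing:

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/pci.h>

/* pared-down stand-ins for cas_page_t and struct cas; only the fields
 * the sketches below need. names and layout are assumptions. */
typedef struct {
	struct list_head list;
	struct page *buffer;	/* backing (possibly high-order) page */
	dma_addr_t dma_addr;	/* bus address the NIC DMAs into */
	int used;		/* bytes of the page handed to the stack */
} cas_page_t;

struct cas_sketch {
	struct pci_dev *pdev;
	int page_order;			/* RX buffer = PAGE_SIZE << order */
	int page_size;
	struct list_head rx_inuse_list;	/* pages the stack still holds */
	struct list_head rx_spare_list;	/* reclaimed pages ready to repost */
};

/* the reclamation decision from the comment above: recycle the page if
 * we are its only owner, otherwise park it until the stack drops its
 * extra references. */
static void reclaim_or_park(struct cas_sketch *cp, cas_page_t *page)
{
	if (page_count(page->buffer) > 1)
		list_add(&page->list, &cp->rx_inuse_list);
	else
		list_add(&page->list, &cp->rx_spare_list);
}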
444 static int cas_page_free(struct cas *cp, cas_page_t *page)
446 dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
448 __free_pages(page->buffer, cp->page_order);
449 kfree(page);
461 /* local page allocation routines for the receive buffers. jumbo pages
466 cas_page_t *page;
468 page = kmalloc(sizeof(cas_page_t), flags);
469 if (!page)
472 INIT_LIST_HEAD(&page->list);
473 RX_USED_SET(page, 0);
474 page->buffer = alloc_pages(flags, cp->page_order);
475 if (!page->buffer)
477 page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
479 return page;
482 kfree(page);
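Filling in the lines elided around cas_page_alloc() and cas_page_free(), the pair is roughly the following, reusing the types from the sketch above and assuming cp->page_size == PAGE_SIZE << cp->page_order. The dma_mapping_error() check and the exact unwind labels are additions for the sketch, not necessarily what the driver does:

#include <linux/gfp.h>
#include <linux/slab.h>

static cas_page_t *cas_page_alloc_sketch(struct cas_sketch *cp, gfp_t flags)
{
	cas_page_t *page = kmalloc(sizeof(*page), flags);

	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	page->used = 0;			/* stands in for RX_USED_SET(page, 0) */
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;

	page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
				      cp->page_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cp->pdev->dev, page->dma_addr))
		goto map_err;
	return page;

map_err:
	__free_pages(page->buffer, cp->page_order);
page_err:
	kfree(page);
	return NULL;
}

static void cas_page_free_sketch(struct cas_sketch *cp, cas_page_t *page)
{
	dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
		       DMA_FROM_DEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
}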
549 cas_page_t *page = list_entry(elem, cas_page_t, list);
553 * slightly less accurate: we might find that a page has an
559 * Importantly, if we find that the page has a refcount of 1
563 if (page_count(page->buffer) > 1)
574 cas_page_free(cp, page);
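These lines look like they come from the sweep that recovers parked pages: a refcount that has fallen back to 1 means no skb still points into the page, so it can be released; anything higher is left for a later pass. A sketch reusing the helpers above:

static void sweep_inuse_list(struct cas_sketch *cp)
{
	cas_page_t *page, *tmp;

	list_for_each_entry_safe(page, tmp, &cp->rx_inuse_list, list) {
		/* an extra reference means some skb still points into the
		 * page; check it again on the next sweep */
		if (page_count(page->buffer) > 1)
			continue;

		/* back to a single reference: safe to unmap and free */
		list_del(&page->list);
		cas_page_free_sketch(cp, page);
	}
}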
608 /* pull a page from the list. */
1297 /* receive page sizes. default == 2K (0x800) */
1306 /* round mtu + offset. constrain to page size. */
1345 * hand back the same page index while it's being processed.
1349 cas_page_t *page = cp->rx_pages[1][index];
1352 if (page_count(page->buffer) == 1)
1353 return page;
1358 list_add(&page->list, &cp->rx_inuse_list);
1400 cas_page_t *page = cas_page_swap(cp, 0, i);
1401 rxd[i].buffer = cpu_to_le64(page->dma_addr);
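The swap check and the descriptor fill shown above fit together as "re-arm one descriptor slot": if the currently posted page is still referenced, park it and switch to a fresh page, then write the chosen page's bus address into the descriptor. A sketch, with the descriptor layout assumed to be a simple index/buffer pair:

#include <asm/byteorder.h>
#include <linux/types.h>

/* assumed minimal RX descriptor: an index word and a buffer pointer */
struct cas_rx_desc_sketch {
	__le64 index;
	__le64 buffer;
};

static void arm_rx_slot(struct cas_sketch *cp, cas_page_t **slot,
			struct cas_rx_desc_sketch *rxd, int i)
{
	cas_page_t *page = *slot;

	if (page_count(page->buffer) > 1) {
		/* the stack still holds this page: retire it only once a
		 * replacement has actually been allocated */
		cas_page_t *new = cas_page_alloc_sketch(cp, GFP_ATOMIC);

		if (!new)
			return;		/* try again on a later posting pass */
		list_add(&page->list, &cp->rx_inuse_list);
		*slot = page = new;
	}
	page->used = 0;
	rxd[i].buffer = cpu_to_le64(page->dma_addr);
}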
1927 struct cas_page *page;
1953 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1960 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1962 addr = cas_page_map(page->buffer);
1965 page->dma_addr + off, i,
1968 RX_USED_ADD(page, 0x100);
1979 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1985 "rx page overflow: %d\n", hlen);
1992 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1998 addr = cas_page_map(page->buffer);
2001 page->dma_addr + off, i,
2006 RX_USED_ADD(page, cp->mtu_stride);
2008 RX_USED_ADD(page, hlen);
2017 __skb_frag_set_page(frag, page->buffer);
2028 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2030 page->dma_addr,
2034 page->dma_addr,
2043 __skb_frag_set_page(frag, page->buffer);
2047 RX_USED_ADD(page, hlen + cp->crc_size);
2051 addr = cas_page_map(page->buffer);
2061 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2066 "rx page overflow: %d\n", hlen);
2073 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
2075 addr = cas_page_map(page->buffer);
2078 page->dma_addr + off, i,
2082 RX_USED_ADD(page, cp->mtu_stride);
2084 RX_USED_ADD(page, i);
2090 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2092 page->dma_addr,
2095 addr = cas_page_map(page->buffer);
2098 page->dma_addr,
2102 RX_USED_ADD(page, dlen + cp->crc_size);
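The receive path shown above follows one pattern for every completion: sync the DMA'd region for the CPU, copy the protocol headers (or a small frame) out of the page into the skb's linear area, attach any remaining payload as a page fragment, and bump the page's usage counter. A compressed sketch, with the driver's cas_page_map() approximated by page_address() (which ignores highmem) and the completion-word decoding (the CAS_VAL(RX_INDEX_*) lookups) left out:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *rx_build_skb_sketch(struct cas_sketch *cp,
					   struct net_device *dev,
					   cas_page_t *page,
					   int off, int hlen, int dlen)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, hlen + 2);
	void *addr;

	if (!skb)
		return NULL;
	skb_reserve(skb, 2);		/* align the IP header */

	/* make the NIC's writes visible before the CPU reads them */
	dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
				hlen + dlen, DMA_FROM_DEVICE);

	/* headers go into the linear part of the skb */
	addr = page_address(page->buffer);
	memcpy(skb_put(skb, hlen), addr + off, hlen);

	if (dlen) {
		/* payload stays in the page and rides as a fragment; the
		 * extra page reference is what keeps reclamation from
		 * recycling the page underneath the stack */
		get_page(page->buffer);
		skb_fill_page_desc(skb, 0, page->buffer, off + hlen, dlen);
		skb->len      += dlen;
		skb->data_len += dlen;
		skb->truesize += dlen;
	}

	/* account how much of the page this frame consumed, as the
	 * RX_USED_ADD() calls in the listing do */
	page->used += hlen + dlen;
	return skb;
}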
2197 cas_page_t **page = cp->rx_pages[ring];
2210 if (page_count(page[entry]->buffer) > 1) {
2225 list_add(&page[entry]->list, &cp->rx_inuse_list);
2229 page[entry] = new;
2259 * bytes but in a single page.
2261 * NOTE: RX page posting is done in this routine as well. while there's
2263 * really worthwhile due to the fact that the page posting will
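The posting routine those lines come from walks the descriptors between the previous and the new post point and re-arms each one, swapping out entries whose page is still held. A loop-level sketch reusing arm_rx_slot() from above; the ring size and the final hardware kick are placeholders for the driver's real bookkeeping:

#define RX_RING_SIZE_SKETCH	128	/* assumed power-of-two ring size */

static int post_rxds_sketch(struct cas_sketch *cp, cas_page_t **pages,
			    struct cas_rx_desc_sketch *rxd,
			    int last, int new_last)
{
	int entry = last;

	while (entry != new_last) {
		arm_rx_slot(cp, &pages[entry], rxd, entry);
		entry = (entry + 1) & (RX_RING_SIZE_SKETCH - 1);
	}

	/* the real routine would now write the new tail to the RX kick
	 * register so the hardware sees the freshly posted buffers */
	return entry;
}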
3384 /* get page size for rx buffers. */
3389 struct page *page = alloc_pages(GFP_ATOMIC,
3392 if (page) {
3393 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
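That probe decides the RX page size at init time by attempting one high-order allocation: if it succeeds, the driver can use jumbo-sized pages, otherwise it falls back to single pages. A sketch, with CAS_JUMBO_PAGE_SHIFT_SKETCH standing in for the driver's constant:

#define CAS_JUMBO_PAGE_SHIFT_SKETCH	(PAGE_SHIFT + 2)	/* assumed: 16K on 4K pages */

static void probe_rx_page_order(struct cas_sketch *cp)
{
	int order = CAS_JUMBO_PAGE_SHIFT_SKETCH - PAGE_SHIFT;
	struct page *page = alloc_pages(GFP_ATOMIC, order);

	if (page) {
		/* high-order allocations work: use them for RX buffers */
		__free_pages(page, order);
		cp->page_order = order;
	} else {
		cp->page_order = 0;
	}
	cp->page_size = PAGE_SIZE << cp->page_order;
}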
3915 cas_page_t **page = cp->rx_pages[ring];
3920 if (page[i]) {
3921 cas_page_free(cp, page[i]);
3922 page[i] = NULL;
3955 cas_page_t **page = cp->rx_pages[ring];
3960 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
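The last two groups are the per-ring teardown and bring-up of the page arrays: free whatever is populated, and on allocation either fill every slot or report failure so the caller can unwind. A sketch reusing the helpers and ring-size placeholder above:

#include <linux/errno.h>

static void free_rx_page_array(struct cas_sketch *cp, cas_page_t **pages)
{
	int i;

	for (i = 0; i < RX_RING_SIZE_SKETCH; i++) {
		if (pages[i]) {
			cas_page_free_sketch(cp, pages[i]);
			pages[i] = NULL;
		}
	}
}

static int alloc_rx_page_array(struct cas_sketch *cp, cas_page_t **pages)
{
	int i;

	for (i = 0; i < RX_RING_SIZE_SKETCH; i++) {
		pages[i] = cas_page_alloc_sketch(cp, GFP_KERNEL);
		if (!pages[i])
			return -ENOMEM;	/* caller frees what was allocated */
	}
	return 0;
}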