Lines Matching defs:page
16 * page-based RX descriptor engine with separate completion rings
20 * RX is handled by page sized buffers that are attached as fragments to
29 * -- on page reclamation, the driver swaps the page with a spare page.
30 * if the reclaimed page is still referenced (e.g. by an skb fragment),
31 * the driver gives up its own reference and allocates a new page for
32 * use; otherwise, it simply recycles the page.
154 * max mtu == 2 * page size - ethernet header - 64 - swivel =
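The reclamation rule described above comes down to a single refcount test on the backing page. A minimal sketch of that decision, assuming the helper name and the GFP flags (cas_page_t, rx_inuse_list and cas_page_alloc() follow the driver's own naming):

static cas_page_t *reclaim_or_replace(struct cas *cp, cas_page_t *page)
{
	/* a refcount of 1 means no skb fragment still points at the
	 * page, so it can be reposted to the hardware as-is */
	if (page_count(page->buffer) == 1)
		return page;

	/* still in use by the stack: park it and post a new page instead */
	list_add(&page->list, &cp->rx_inuse_list);
	return cas_page_alloc(cp, GFP_ATOMIC);
}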
443 static int cas_page_free(struct cas *cp, cas_page_t *page)
445 dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
447 __free_pages(page->buffer, cp->page_order);
448 kfree(page);
460 /* local page allocation routines for the receive buffers. jumbo pages
465 cas_page_t *page;
467 page = kmalloc(sizeof(cas_page_t), flags);
468 if (!page)
471 INIT_LIST_HEAD(&page->list);
472 RX_USED_SET(page, 0);
473 page->buffer = alloc_pages(flags, cp->page_order);
474 if (!page->buffer)
476 page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
478 return page;
481 kfree(page);
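Read together, the fragments above form the allocate-and-map helper. An illustrative reconstruction with the error path filled in (the label name, the helper name, and the dma_map_page() length/direction arguments are assumptions, not copied from the file):

static cas_page_t *cas_page_alloc_sketch(struct cas *cp, const gfp_t flags)
{
	cas_page_t *page = kmalloc(sizeof(cas_page_t), flags);

	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	RX_USED_SET(page, 0);

	/* the actual receive buffer: one (possibly multi-order) page */
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;

	page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
				      cp->page_size, DMA_FROM_DEVICE);
	return page;

page_err:
	kfree(page);
	return NULL;
}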
548 cas_page_t *page = list_entry(elem, cas_page_t, list);
552 * slightly less accurate: we might find that a page has an
558 * Importantly, if we find that the page has a refcount of 1
562 if (page_count(page->buffer) > 1)
573 cas_page_free(cp, page);
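The refcount test above drives a sweep of the in-use list: pages parked while an skb still referenced them are freed once that reference is gone. A sketch of such a sweep, with the helper name assumed (rx_inuse_list, cas_page_free() and the list linkage follow the driver's naming):

static void sweep_inuse_pages(struct cas *cp)
{
	cas_page_t *page, *tmp;

	list_for_each_entry_safe(page, tmp, &cp->rx_inuse_list, list) {
		/* an skb fragment still holds a reference: try again later */
		if (page_count(page->buffer) > 1)
			continue;

		list_del(&page->list);
		cas_page_free(cp, page);
	}
}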
607 /* pull a page from the list. */
1283 /* receive page sizes. default == 2K (0x800) */
1292 /* round mtu + offset. constrain to page size. */
1331 * hand back the same page index while it's being processed.
1335 cas_page_t *page = cp->rx_pages[1][index];
1338 if (page_count(page->buffer) == 1)
1339 return page;
1344 list_add(&page->list, &cp->rx_inuse_list);
1386 cas_page_t *page = cas_page_swap(cp, 0, i);
1387 rxd[i].buffer = cpu_to_le64(page->dma_addr);
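Each descriptor slot is seeded with the bus address of its receive page; completion entries later refer back into these pages by ring and index. A minimal sketch of that seeding loop, assuming the struct cas_rx_desc layout implied by the .buffer assignment above (the helper name and size parameter are illustrative):

static void seed_rx_ring(struct cas *cp, struct cas_rx_desc *rxd, int size)
{
	int i;

	for (i = 0; i < size; i++) {
		cas_page_t *page = cas_page_swap(cp, 0, i);

		/* hardware fetches the receive buffer's DMA address
		 * from the descriptor */
		rxd[i].buffer = cpu_to_le64(page->dma_addr);
	}
}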
1914 struct cas_page *page;
1940 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1947 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1949 memcpy(p, page_address(page->buffer) + off, i);
1951 page->dma_addr + off, i,
1953 RX_USED_ADD(page, 0x100);
1964 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1970 "rx page overflow: %d\n", hlen);
1977 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1983 memcpy(p, page_address(page->buffer) + off,
1986 page->dma_addr + off, i,
1990 RX_USED_ADD(page, cp->mtu_stride);
1992 RX_USED_ADD(page, hlen);
2001 skb_frag_fill_page_desc(frag, page->buffer, off, hlen - swivel);
2010 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2012 page->dma_addr,
2016 page->dma_addr,
2025 skb_frag_fill_page_desc(frag, page->buffer, 0, hlen);
2027 RX_USED_ADD(page, hlen + cp->crc_size);
2031 crcaddr = page_address(page->buffer) + off + hlen;
2039 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2044 "rx page overflow: %d\n", hlen);
2051 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
2053 memcpy(p, page_address(page->buffer) + off, i);
2055 page->dma_addr + off, i,
2058 RX_USED_ADD(page, cp->mtu_stride);
2060 RX_USED_ADD(page, i);
2066 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2068 page->dma_addr,
2071 memcpy(p, page_address(page->buffer), dlen + cp->crc_size);
2073 page->dma_addr,
2076 RX_USED_ADD(page, dlen + cp->crc_size);
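The receive path above follows one pattern throughout: sync the relevant slice of the page for the CPU, copy only the header into the skb's linear area, attach the payload as a page fragment, and account the consumed bytes with RX_USED_ADD() so the page can eventually be reclaimed. A condensed sketch of the fragment-attach step, assuming this helper and its length accounting (the driver itself also adjusts truesize further and handles the swivel and CRC offsets):

static void attach_rx_page(struct sk_buff *skb, cas_page_t *page,
			   int off, int hlen, int dlen)
{
	skb_frag_t *frag = skb_shinfo(skb)->frags;

	/* the small header goes into the linear area ... */
	memcpy(skb_put(skb, hlen), page_address(page->buffer) + off, hlen);

	/* ... while the payload stays in the receive page; the extra
	 * reference keeps the page alive until the stack frees the skb */
	get_page(page->buffer);
	skb_frag_fill_page_desc(frag, page->buffer, off + hlen, dlen);
	skb_shinfo(skb)->nr_frags = 1;
	skb->data_len = dlen;
	skb->len += dlen;
	skb->truesize += dlen;
}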
2168 cas_page_t **page = cp->rx_pages[ring];
2181 if (page_count(page[entry]->buffer) > 1) {
2196 list_add(&page[entry]->list, &cp->rx_inuse_list);
2200 page[entry] = new;
2230 * bytes but in a single page.
2232 * NOTE: RX page posting is done in this routine as well. while there's
2234 * really worthwhile due to the fact that the page posting will
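The NOTE above refers to the repost loop living in the same routine: exhausted ring slots are refilled (swapping in a fresh page when the old one is still referenced) and the hardware is then told how far the ring has been replenished. A rough sketch under several assumptions: the init_rxds field, the power-of-two RX_DESC_RINGN_SIZE() wrap, and the REG_RX_KICK register name mirror the driver's conventions but are not copied from the file.

static void repost_rxds(struct cas *cp, int ring, int entry, int count)
{
	cas_page_t **page = cp->rx_pages[ring];
	struct cas_rx_desc *rxd = cp->init_rxds[ring];	/* assumed field */
	int last = -1;

	while (count--) {
		if (page_count(page[entry]->buffer) > 1) {
			/* still held by an skb: park it, post a new page */
			cas_page_t *new = cas_page_alloc(cp, GFP_ATOMIC);

			if (!new)
				break;
			list_add(&page[entry]->list, &cp->rx_inuse_list);
			page[entry] = new;
		}
		rxd[entry].buffer = cpu_to_le64(page[entry]->dma_addr);
		last = entry;
		entry = (entry + 1) & (RX_DESC_RINGN_SIZE(ring) - 1);
	}

	/* tell the hardware about the last slot posted (ring 0 only here) */
	if (last >= 0 && ring == 0)
		writel(last, cp->regs + REG_RX_KICK);
}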
3352 /* get page size for rx buffers. */
3357 struct page *page = alloc_pages(GFP_ATOMIC,
3360 if (page) {
3361 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
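The probe above decides the receive page order once at init time with a single trial higher-order allocation that is immediately freed. A sketch of that fallback logic, with the helper name assumed (cp->page_order and cp->page_size are the fields the rest of the listing uses):

static void probe_rx_page_order(struct cas *cp)
{
	struct page *page = alloc_pages(GFP_ATOMIC,
					CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);

	cp->page_order = 0;
	if (page) {
		/* higher-order allocations work: free the probe page and
		 * use jumbo-sized receive buffers from here on */
		__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
		cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
	}
	cp->page_size = PAGE_SIZE << cp->page_order;
}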
3880 cas_page_t **page = cp->rx_pages[ring];
3885 if (page[i]) {
3886 cas_page_free(cp, page[i]);
3887 page[i] = NULL;
3920 cas_page_t **page = cp->rx_pages[ring];
3925 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)