Lines Matching refs:buffer

124 #undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */
442 /* cp->lock held. note: the last put_page will free the buffer */
447 __free_pages(page->buffer, cp->page_order);
473 page->buffer = alloc_pages(flags, cp->page_order);
474 if (!page->buffer)
476 page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
562 if (page_count(page->buffer) > 1)
1338 if (page_count(page->buffer) == 1)
1357 /* swap if buffer is in use */
1358 if (page_count(page0[index]->buffer) > 1) {
1387 rxd[i].buffer = cpu_to_le64(page->dma_addr);
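
A minimal sketch of the RX page-buffer pattern the lines above revolve around
(lines 442-562 and 1338-1387): an RX buffer is a high-order page that is
allocated, DMA-mapped for device writes, and freed only when its last page
reference is dropped. The struct and function names (rx_page,
cas_page_alloc_sketch) and the error handling are illustrative, not the
driver's own code.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

struct rx_page {
	struct page *buffer;	/* backing page, like page->buffer above */
	dma_addr_t dma_addr;	/* bus address posted to the RX descriptor */
};

/* allocate an RX page buffer and map it for device writes */
static int cas_page_alloc_sketch(struct device *dev, struct rx_page *p,
				 int page_order, gfp_t flags)
{
	p->buffer = alloc_pages(flags, page_order);
	if (!p->buffer)
		return -ENOMEM;

	p->dma_addr = dma_map_page(dev, p->buffer, 0,
				   PAGE_SIZE << page_order, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, p->dma_addr)) {
		__free_pages(p->buffer, page_order);
		return -ENOMEM;
	}
	return 0;
}

The bus address then lands in a descriptor exactly as at line 1387
(rxd[i].buffer = cpu_to_le64(page->dma_addr)), and page_count(p->buffer) is
what later tells the driver whether anything else still references the page
(lines 562, 1338, 1358).
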
1850 daddr = le64_to_cpu(txd->buffer);
1857 /* tiny buffer may follow */
1949 memcpy(p, page_address(page->buffer) + off, i);
1983 memcpy(p, page_address(page->buffer) + off,
2001 skb_frag_fill_page_desc(frag, page->buffer, off, hlen - swivel);
2025 skb_frag_fill_page_desc(frag, page->buffer, 0, hlen);
2031 crcaddr = page_address(page->buffer) + off + hlen;
2053 memcpy(p, page_address(page->buffer) + off, i);
2071 memcpy(p, page_address(page->buffer), dlen + cp->crc_size);
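
The memcpy(..., page_address(page->buffer) + off, ...) lines and the
skb_frag_fill_page_desc() lines above are the two ways received data ends up
in an skb: small amounts are copied out of the DMA page, larger amounts are
attached by reference as a page fragment. A hedged sketch with an assumed
copy-break threshold; the driver's exact offset, swivel and CRC handling, and
the DMA sync that would precede touching the page, are omitted.

#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* assumed threshold below which copying beats attaching a fragment */
#define RX_COPYBREAK_SKETCH	256

static void rx_attach_data_sketch(struct sk_buff *skb, struct page *page,
				  int off, int dlen)
{
	if (dlen <= RX_COPYBREAK_SKETCH) {
		/* small packet: copy straight out of the mapped page */
		memcpy(skb_put(skb, dlen), page_address(page) + off, dlen);
	} else {
		/* larger packet: hand the page itself to the skb */
		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];

		get_page(page);		/* skb now holds a page reference */
		skb_frag_fill_page_desc(frag, page, off, dlen);
		skb_shinfo(skb)->nr_frags = 1;
		skb->data_len += dlen;
		skb->len      += dlen;
		skb->truesize += dlen;
	}
}

Taking the extra page reference in the fragment path is what makes
page_count(page->buffer) > 1 mean "still in use" in the tests at lines 562,
1358 and 2181.
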
2122 * do any additional locking here. stick the buffer
2133 /* put rx descriptor back on ring. if a buffer is in use by a higher
2144 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2180 /* make a new buffer if it's still in use */
2181 if (page_count(page[entry]->buffer) > 1) {
2198 cp->init_rxds[ring][entry].buffer =
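
Lines 2133-2198 describe re-posting an RX descriptor: the page can only be
handed back to the device once the stack has dropped its reference, otherwise
a replacement page is mapped and its bus address written into the ring
instead. A sketch of that test, reusing struct rx_page and
cas_page_alloc_sketch from the earlier sketch; the slot layout is assumed and
the failure path is deliberately simplified.

#include <linux/slab.h>

/* re-post one RX descriptor slot, swapping in a fresh page if needed */
static void rx_repost_sketch(struct device *dev, struct rx_page **slot,
			     __le64 *rxd_buffer, int page_order, gfp_t flags)
{
	struct rx_page *cur = *slot;

	/* page still referenced by an skb? then it cannot be reused yet;
	 * "make a new buffer if it's still in use" (line 2180)
	 */
	if (page_count(cur->buffer) > 1) {
		struct rx_page *fresh = kzalloc(sizeof(*fresh), flags);

		if (fresh && cas_page_alloc_sketch(dev, fresh, page_order,
						   flags) == 0)
			*slot = fresh;
		else
			kfree(fresh);	/* allocation failed: keep old page */
	}

	/* either way, the descriptor gets a valid bus address back,
	 * as at lines 2144 and 2198
	 */
	*rxd_buffer = cpu_to_le64((*slot)->dma_addr);
}
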
2226 * small packets: header + data should be copied into a single buffer.
2227 * large packets: header and data in a single buffer.
2228 * split packets: header in a separate buffer from data.
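
For the third case (split packets), the data that does not fit alongside the
header is attached page by page as further fragments. A minimal sketch; the
pages array, the usable per-buffer length and the length bookkeeping are
assumptions for illustration.

#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

/* attach the data pages of a split packet as additional skb fragments */
static void rx_add_split_frags_sketch(struct sk_buff *skb,
				      struct page **pages, int npages,
				      int buf_len, int first_off, int dlen)
{
	int i;

	for (i = 0; i < npages && dlen > 0; i++) {
		int off = i ? 0 : first_off;
		int chunk = min_t(int, dlen, buf_len - off);
		skb_frag_t *frag =
			&skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags];

		get_page(pages[i]);
		skb_frag_fill_page_desc(frag, pages[i], off, chunk);
		skb_shinfo(skb)->nr_frags++;
		skb->data_len += chunk;
		skb->len      += chunk;
		skb->truesize += chunk;
		dlen -= chunk;
	}
}
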
2694 txd->buffer = cpu_to_le64(mapping);
3850 /* first buffer is never a tiny buffer and so
3853 daddr = le64_to_cpu(txd[ent].buffer);
3862 /* next buffer might be a tiny buffer.
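
On the transmit side, lines 2694 and 3850-3862 show the matching pattern: the
bus address written into txd->buffer at transmit time is recovered with
le64_to_cpu() at completion time and unmapped, while a "tiny" inline buffer
that may follow a frame's first descriptor carries no mapping of its own and
is simply skipped. A sketch with an assumed descriptor layout; the is_tiny
flag stands in for whatever bookkeeping the driver uses to mark tiny buffers.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct tx_desc_sketch {
	__le64 control;		/* assumed: length and flag bits */
	__le64 buffer;		/* bus address written at transmit time */
};

/* unmap one completed TX descriptor; tiny buffers have nothing to unmap */
static void tx_unmap_one_sketch(struct device *dev,
				struct tx_desc_sketch *txd, size_t dlen,
				bool is_tiny)
{
	dma_addr_t daddr;

	if (is_tiny)
		return;		/* inline copy inside a pre-mapped block */

	daddr = le64_to_cpu(txd->buffer);
	dma_unmap_page(dev, daddr, dlen, DMA_TO_DEVICE);
}
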