Lines Matching refs:rx_skbuff
266 struct sk_buff **rx_skbuff;
566 new_skb_list[new] = lp->rx_skbuff[new];
570 struct sk_buff *rx_skbuff;
572 rx_skbuff = new_skb_list[new];
573 if (!rx_skbuff) {
579 skb_reserve(rx_skbuff, NET_IP_ALIGN);
582 dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
596 if (lp->rx_skbuff[new]) {
602 dev_kfree_skb(lp->rx_skbuff[new]);
606 kfree(lp->rx_skbuff);
618 lp->rx_skbuff = new_skb_list;
650 if (lp->rx_skbuff[i]) {
656 dev_kfree_skb_any(lp->rx_skbuff[i]);
658 lp->rx_skbuff[i] = NULL;
1079 skb = lp->rx_skbuff[x];
1089 skb = lp->rx_skbuff[x];
1232 skb = lp->rx_skbuff[entry];
1238 lp->rx_skbuff[entry] = newskb;
1259 (unsigned char *)(lp->rx_skbuff[entry]->data),
2039 lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
2041 if (!lp->rx_skbuff)
2054 kfree(lp->rx_skbuff);
2055 lp->rx_skbuff = NULL;
2357 struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
2358 if (rx_skbuff == NULL) {
2359 lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
2360 rx_skbuff = lp->rx_skbuff[i];
2361 if (!rx_skbuff) {
2367 skb_reserve(rx_skbuff, NET_IP_ALIGN);
2373 dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
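
The matches above trace the life cycle of the lp->rx_skbuff pointer array in what appears to be the pcnet32 Ethernet driver: the array of skb pointers is kcalloc()'d when the rings are set up (2039), each entry is filled with a NET_IP_ALIGN-reserved skb that is DMA-mapped for the device (2357-2373, and again on ring resize at 566-582), received skbs are swapped out in the Rx path (1232-1259), and everything is freed and NULLed on teardown (650-658, 2054-2055). The sketch below consolidates that pattern for reference. It is illustrative only, not the driver's code: struct rx_demo_private, demo_alloc_rx_skbuff(), demo_init_rx_entry(), demo_purge_rx_skbuff() and the rx_dma_addr companion array are invented names, the PKT_BUF_SKB/PKT_BUF_SIZE values are assumed, and the real functions also program the Rx descriptor ring and handle more error paths than shown here.

	/*
	 * Minimal sketch of the rx_skbuff life cycle suggested by the
	 * matches above: allocate the pointer array, set up each entry,
	 * tear everything down.  Names marked "demo" are invented.
	 */
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/pci.h>
	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	#define PKT_BUF_SKB	1544			/* assumed Rx buffer size */
	#define PKT_BUF_SIZE	(PKT_BUF_SKB - NET_IP_ALIGN)

	struct rx_demo_private {			/* reduced stand-in for lp */
		struct pci_dev *pci_dev;
		unsigned int rx_ring_size;
		struct sk_buff **rx_skbuff;		/* one skb pointer per Rx entry */
		dma_addr_t *rx_dma_addr;		/* matching DMA handle per entry */
	};

	/* Allocate the skb-pointer array and its DMA-address companion
	 * (cf. the kcalloc() at 2039). */
	static int demo_alloc_rx_skbuff(struct rx_demo_private *lp)
	{
		lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
					GFP_KERNEL);
		lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
					  GFP_KERNEL);
		if (!lp->rx_skbuff || !lp->rx_dma_addr) {
			kfree(lp->rx_skbuff);
			kfree(lp->rx_dma_addr);
			lp->rx_skbuff = NULL;
			lp->rx_dma_addr = NULL;
			return -ENOMEM;
		}
		return 0;
	}

	/* Fill one entry: allocate, align, and DMA-map the receive buffer
	 * (cf. netdev_alloc_skb()/skb_reserve()/dma_map_single() at 2357-2373). */
	static int demo_init_rx_entry(struct net_device *dev,
				      struct rx_demo_private *lp, int i)
	{
		struct sk_buff *rx_skbuff = lp->rx_skbuff[i];

		if (!rx_skbuff) {
			rx_skbuff = netdev_alloc_skb(dev, PKT_BUF_SKB);
			if (!rx_skbuff)
				return -ENOMEM;
			lp->rx_skbuff[i] = rx_skbuff;
			skb_reserve(rx_skbuff, NET_IP_ALIGN);
		}

		lp->rx_dma_addr[i] = dma_map_single(&lp->pci_dev->dev,
						    rx_skbuff->data, PKT_BUF_SIZE,
						    DMA_FROM_DEVICE);
		if (dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) {
			dev_kfree_skb(rx_skbuff);
			lp->rx_skbuff[i] = NULL;
			return -ENOMEM;
		}
		return 0;
	}

	/* Tear down: unmap and free every buffer, then drop the arrays
	 * (cf. the purge loop at 650-658 and the kfree() at 2054-2055). */
	static void demo_purge_rx_skbuff(struct rx_demo_private *lp)
	{
		unsigned int i;

		for (i = 0; i < lp->rx_ring_size; i++) {
			if (lp->rx_skbuff[i]) {
				dma_unmap_single(&lp->pci_dev->dev,
						 lp->rx_dma_addr[i],
						 PKT_BUF_SIZE, DMA_FROM_DEVICE);
				dev_kfree_skb_any(lp->rx_skbuff[i]);
			}
			lp->rx_skbuff[i] = NULL;
		}
		kfree(lp->rx_skbuff);
		kfree(lp->rx_dma_addr);
		lp->rx_skbuff = NULL;
		lp->rx_dma_addr = NULL;
	}

The skb_reserve(rx_skbuff, NET_IP_ALIGN) visible at 579 and 2367 offsets the packet data so that, once the 14-byte Ethernet header is stripped, the IP header lands on an aligned boundary; that is the usual reason drivers reserve these bytes before handing the buffer to the device with dma_map_single().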