Lines Matching refs:entry
915 /* Mark the last entry as wrapping the ring. */
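Line 915 sits in the ring setup: the descriptors form a circular list, so after chaining them the final entry is pointed back at the head of the ring. A minimal sketch of that wrap; struct epic_tx_desc, tx_ring_dma and the per-descriptor next pointer follow the driver's naming style but are assumptions here, not quoted source:

        /* Sketch: chain Tx descriptors by bus address, then wrap the tail. */
        for (i = 0; i < TX_RING_SIZE; i++) {
                ep->tx_skbuff[i] = NULL;
                ep->tx_ring[i].txstatus = 0;    /* driver owns every slot at start */
                ep->tx_ring[i].next = ep->tx_ring_dma +
                                      (i + 1) * sizeof(struct epic_tx_desc);
        }
        /* Mark the last entry as wrapping the ring. */
        ep->tx_ring[TX_RING_SIZE - 1].next = ep->tx_ring_dma;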
948 int entry, free_count;
958 /* Calculate the next Tx descriptor entry. */
961 entry = ep->cur_tx % TX_RING_SIZE;
963 ep->tx_skbuff[entry] = skb;
964 ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
974 /* Leave room for an additional entry. */
978 ep->tx_ring[entry].buflength = ctrl_word | skb->len;
979 ep->tx_ring[entry].txstatus =
993 skb->len, entry, ctrl_word, er32(TxSTAT));
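Lines 948-993 are the transmit path: pick the next slot from cur_tx, remember the skb for later cleanup, DMA-map the data, fill in the length/control word, and only then flip ownership to the NIC. A condensed sketch of that producer side, assuming the field names shown in the matches and eliding the free_count/ctrl_word bookkeeping:

        /* Sketch: producer side of the Tx ring (control-word details elided). */
        int entry = ep->cur_tx % TX_RING_SIZE;

        ep->tx_skbuff[entry] = skb;             /* kept so the completion path can free it */
        ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
                                                    skb->data, skb->len,
                                                    DMA_TO_DEVICE);
        ep->tx_ring[entry].buflength = ctrl_word | skb->len;
        ep->tx_ring[entry].txstatus = DescOwn;  /* ownership flips last; the real code
                                                   also encodes interrupt/threshold bits */
        ep->cur_tx++;
        if (ep->cur_tx - ep->dirty_tx >= TX_RING_SIZE - 1)
                netif_stop_queue(dev);          /* leave room for an additional entry */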
1031 int entry = dirty_tx % TX_RING_SIZE;
1032 int txstatus = ep->tx_ring[entry].txstatus;
1040 dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
1045 skb = ep->tx_skbuff[entry];
1047 ep->tx_ring[entry].bufaddr, skb->len,
1050 ep->tx_skbuff[entry] = NULL;
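Lines 1031-1050 are the matching consumer, run from the interrupt handler: dirty_tx trails cur_tx, and each descriptor the hardware has released gets its buffer unmapped and its skb freed. A sketch under the same naming assumptions, with error accounting elided:

        /* Sketch: Tx completion walk; stop at the first descriptor the NIC still owns. */
        unsigned int dirty_tx;

        for (dirty_tx = ep->dirty_tx; ep->cur_tx - dirty_tx > 0; dirty_tx++) {
                int entry = dirty_tx % TX_RING_SIZE;
                int txstatus = ep->tx_ring[entry].txstatus;
                struct sk_buff *skb;

                if (txstatus & DescOwn)
                        break;                  /* hardware hasn't finished this one */

                dev->stats.tx_packets++;
                dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;

                skb = ep->tx_skbuff[entry];
                dma_unmap_single(&ep->pci_dev->dev,
                                 ep->tx_ring[entry].bufaddr, skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_irq(skb);         /* interrupt context, so the _irq variant */
                ep->tx_skbuff[entry] = NULL;
        }
        ep->dirty_tx = dirty_tx;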
1142 int entry = ep->cur_rx % RX_RING_SIZE;
1147 netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
1148 ep->rx_ring[entry].rxstatus);
1153 /* If we own the next entry, it's a new packet. Send it up. */
1154 while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
1155 int status = ep->rx_ring[entry].rxstatus;
1190 ep->rx_ring[entry].bufaddr,
1193 skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
1196 ep->rx_ring[entry].bufaddr,
1201 ep->rx_ring[entry].bufaddr,
1204 skb_put(skb = ep->rx_skbuff[entry], pkt_len);
1205 ep->rx_skbuff[entry] = NULL;
1213 entry = (++ep->cur_rx) % RX_RING_SIZE;
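Lines 1142-1213 are the receive loop: descriptors are processed until DescOwn shows the hardware still owns the next one; small frames are copied into a fresh skb around a dma_sync pair (the rx_copybreak path), while larger frames hand the mapped ring buffer itself up the stack and clear rx_skbuff[entry]. A simplified sketch; the length extraction and the copybreak threshold are assumptions, and status/error checks are elided:

        /* Sketch: Rx consume loop; a clear DescOwn bit means the frame is ours. */
        int entry = ep->cur_rx % RX_RING_SIZE;

        while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
                int status = ep->rx_ring[entry].rxstatus;
                short pkt_len = status >> 16;   /* frame length sits in the upper bits (simplified) */
                struct sk_buff *skb;

                if (pkt_len < rx_copybreak &&
                    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                        /* Copybreak: copy the small frame out, keep the buffer mapped. */
                        skb_reserve(skb, 2);
                        dma_sync_single_for_cpu(&ep->pci_dev->dev,
                                                ep->rx_ring[entry].bufaddr,
                                                ep->rx_buf_sz, DMA_FROM_DEVICE);
                        skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
                        skb_put(skb, pkt_len);
                        dma_sync_single_for_device(&ep->pci_dev->dev,
                                                   ep->rx_ring[entry].bufaddr,
                                                   ep->rx_buf_sz, DMA_FROM_DEVICE);
                } else {
                        /* No copy: unmap and pass the ring buffer itself upstream. */
                        dma_unmap_single(&ep->pci_dev->dev,
                                         ep->rx_ring[entry].bufaddr,
                                         ep->rx_buf_sz, DMA_FROM_DEVICE);
                        skb_put(skb = ep->rx_skbuff[entry], pkt_len);
                        ep->rx_skbuff[entry] = NULL;
                }
                skb->protocol = eth_type_trans(skb, dev);
                netif_receive_skb(skb);

                entry = (++ep->cur_rx) % RX_RING_SIZE;
        }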
1218 entry = ep->dirty_rx % RX_RING_SIZE;
1219 if (ep->rx_skbuff[entry] == NULL) {
1221 skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
1225 ep->rx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
1232 ep->rx_ring[entry].rxstatus = DescOwn;
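Lines 1218-1232 close the loop: the refill pass walks dirty_rx up toward cur_rx, allocates a replacement skb for every slot emptied above, maps it, and only then writes DescOwn so the NIC can reuse the descriptor. Sketch, same naming assumptions as above:

        /* Sketch: Rx refill; re-arm each slot consumed by the loop above. */
        for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
                struct sk_buff *skb;

                entry = ep->dirty_rx % RX_RING_SIZE;
                if (ep->rx_skbuff[entry] == NULL) {
                        skb = ep->rx_skbuff[entry] =
                                netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
                        if (skb == NULL)
                                break;          /* out of memory; retry on the next pass */
                        skb_reserve(skb, 2);    /* 16-byte align the IP header */
                        ep->rx_ring[entry].bufaddr =
                                dma_map_single(&ep->pci_dev->dev, skb->data,
                                               ep->rx_buf_sz, DMA_FROM_DEVICE);
                }
                /* Ownership goes back to the hardware last. */
                ep->rx_ring[entry].rxstatus = DescOwn;
        }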