Lines matching refs: yp (yellowfin.c)

183 the 'yp->tx_full' flag.
187 empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
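
Note: the comment fragments at driver lines 183 and 187 describe the free-running producer/consumer counters behind the Tx queue: 'cur_tx' advances as frames are queued, 'dirty_tx' advances as they complete, and 'yp->tx_full' gates the producer. Below is a minimal, self-contained sketch of that convention, assuming nothing beyond standard C; the names tx_queue, QUEUE_LIMIT and in_flight are illustrative and do not appear in yellowfin.c.

    #include <stdbool.h>
    #include <stdio.h>

    #define QUEUE_LIMIT 12                  /* stands in for TX_QUEUE_SIZE */

    struct tx_queue {
            unsigned int cur_tx;            /* producer: bumped when a frame is queued */
            unsigned int dirty_tx;          /* consumer: bumped when a frame completes */
            bool tx_full;                   /* producer stopped, waiting for slack */
    };

    /* Both counters run freely and are reduced modulo the ring size only when
     * indexing, so unsigned subtraction gives the in-flight count even after
     * the counters wrap. */
    static unsigned int in_flight(const struct tx_queue *q)
    {
            return q->cur_tx - q->dirty_tx;
    }

    int main(void)
    {
            struct tx_queue q = { 0, 0, false };

            /* Producer: queue frames until the limit is hit, then mark full. */
            while (in_flight(&q) < QUEUE_LIMIT)
                    q.cur_tx++;
            q.tx_full = true;

            /* Consumer: retire some frames, then clear tx_full once there is
             * comfortable slack (mirrors the "< TX_QUEUE_SIZE - 4" style test). */
            q.dirty_tx += 6;
            if (q.tx_full && in_flight(&q) < QUEUE_LIMIT - 4)
                    q.tx_full = false;

            printf("in flight %u, tx_full %d\n", in_flight(&q), (int)q.tx_full);
            return 0;
    }
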
571 struct yellowfin_private *yp = netdev_priv(dev);
572 const int irq = yp->pci_dev->irq;
573 void __iomem *ioaddr = yp->base;
587 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
588 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
609 yp->tx_threshold = 32;
610 iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
613 dev->if_port = yp->default_port;
618 if (yp->drv_flags & IsGigabit) {
620 yp->full_duplex = 1;
625 iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
640 timer_setup(&yp->timer, yellowfin_timer, 0);
641 yp->timer.expires = jiffies + 3*HZ;
642 add_timer(&yp->timer);
653 struct yellowfin_private *yp = from_timer(yp, t, timer);
654 struct net_device *dev = pci_get_drvdata(yp->pci_dev);
655 void __iomem *ioaddr = yp->base;
663 if (yp->mii_cnt) {
664 int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
665 int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
666 int negotiated = lpa & yp->advertising;
669 yp->phys[0], bmsr, lpa);
671 yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
673 iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
681 yp->timer.expires = jiffies + next_tick;
682 add_timer(&yp->timer);
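
Note: the yellowfin_timer fragments above (driver lines 653-682) re-read the PHY's MII_BMSR/MII_LPA registers, AND the link-partner bits with the locally advertised ones, and rewrite the duplex bit in the Cnfg register. The sketch below shows one plausible duplex resolution of the negotiated bits; it is a stand-in for, not a quote of, the mii_duplex() call on line 671, and the LPA_* values are the standard ANLPAR bit positions.

    #include <stdio.h>

    /* Standard link-partner ability bits (ANLPAR register). */
    #define LPA_10FULL    0x0040
    #define LPA_100HALF   0x0080
    #define LPA_100FULL   0x0100
    #define LPA_100BASE4  0x0200

    /* negotiated = lpa & advertising, as in the timer routine above. */
    static int resolve_full_duplex(unsigned int duplex_lock, unsigned int negotiated)
    {
            if (duplex_lock)                        /* duplex forced by the user (treated here as full) */
                    return 1;
            if (negotiated & LPA_100FULL)           /* highest common mode is 100-FD */
                    return 1;
            if (negotiated & (LPA_100HALF | LPA_100BASE4))
                    return 0;                       /* a 100 Mb/s half-duplex mode wins */
            return (negotiated & LPA_10FULL) != 0;  /* otherwise 10 Mb/s, check the FD bit */
    }

    int main(void)
    {
            /* e.g. both ends advertise 100FULL|100HALF|10FULL|10HALF */
            unsigned int negotiated = 0x01e0 & 0x01e0;

            printf("full duplex: %d\n", resolve_full_duplex(0, negotiated));
            return 0;
    }
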
687 struct yellowfin_private *yp = netdev_priv(dev);
688 void __iomem *ioaddr = yp->base;
691 yp->cur_tx, yp->dirty_tx,
698 pr_warn(" Rx ring %p: ", yp->rx_ring);
700 pr_cont(" %08x", yp->rx_ring[i].result_status);
702 pr_warn(" Tx ring %p: ", yp->tx_ring);
705 yp->tx_status[i].tx_errs,
706 yp->tx_ring[i].result_status);
715 iowrite32(0x10001000, yp->base + TxCtrl);
716 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
726 struct yellowfin_private *yp = netdev_priv(dev);
729 yp->tx_full = 0;
730 yp->cur_rx = yp->cur_tx = 0;
731 yp->dirty_tx = 0;
733 yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
736 yp->rx_ring[i].dbdma_cmd =
737 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
738 yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
743 struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
744 yp->rx_skbuff[i] = skb;
748 yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
750 yp->rx_buf_sz,
755 dev_kfree_skb(yp->rx_skbuff[j]);
758 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
759 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
765 yp->tx_skbuff[i] = NULL;
766 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
767 yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
771 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
777 yp->tx_skbuff[i] = NULL;
779 yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
780 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
783 if (yp->flags & FullTxStatus) {
784 yp->tx_ring[j].dbdma_cmd =
785 cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
786 yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
787 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
791 yp->tx_ring[j].dbdma_cmd =
793 yp->tx_ring[j].request_cnt = 2;
795 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
797 &(yp->tx_status[0].tx_errs) -
798 &(yp->tx_status[0]));
800 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
804 yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
807 yp->tx_tail_desc = &yp->tx_status[0];
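
Note: the init-ring and transmit fragments above use a pair of descriptors per Tx slot: an even descriptor carries the frame (hence the entry<<1 indexing later in the listing) and the following odd descriptor is a CMD_TXSTATUS entry the chip writes back. A small sketch of that even/odd layout follows, with illustrative names rather than the driver's yellowfin_desc.

    #include <stdio.h>

    #define DEMO_TX_RING_SIZE 16

    struct demo_desc {
            unsigned int dbdma_cmd;         /* command / length word */
            unsigned int addr;              /* DMA address of the buffer or status slot */
            unsigned int branch_addr;       /* link to the next descriptor */
            unsigned int result_status;     /* written back by the chip */
    };

    /* Two descriptors per logical slot, hence the doubled array. */
    static struct demo_desc demo_tx_ring[2 * DEMO_TX_RING_SIZE];

    static struct demo_desc *data_desc(unsigned int entry)
    {
            return &demo_tx_ring[(entry % DEMO_TX_RING_SIZE) << 1];       /* even: frame data */
    }

    static struct demo_desc *status_desc(unsigned int entry)
    {
            return &demo_tx_ring[((entry % DEMO_TX_RING_SIZE) << 1) + 1]; /* odd: status write-back */
    }

    int main(void)
    {
            unsigned int entry = 5;

            printf("slot %u -> data desc %td, status desc %td\n", entry,
                   data_desc(entry) - demo_tx_ring, status_desc(entry) - demo_tx_ring);
            return 0;
    }
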
814 struct yellowfin_private *yp = netdev_priv(dev);
824 entry = yp->cur_tx % TX_RING_SIZE;
832 yp->tx_skbuff[entry] = NULL;
838 yp->tx_skbuff[entry] = skb;
841 yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
844 yp->tx_ring[entry].result_status = 0;
847 yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
848 yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
851 yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
852 yp->tx_ring[entry].dbdma_cmd =
855 yp->cur_tx++;
857 yp->tx_ring[entry<<1].request_cnt = len;
858 yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
864 yp->cur_tx++;
866 unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
867 yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
871 yp->tx_ring[entry<<1].dbdma_cmd =
879 iowrite32(0x10001000, yp->base + TxCtrl);
881 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
884 yp->tx_full = 1;
888 yp->cur_tx, entry);
898 struct yellowfin_private *yp;
903 yp = netdev_priv(dev);
904 ioaddr = yp->base;
906 spin_lock (&yp->lock);
925 for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
926 int entry = yp->dirty_tx % TX_RING_SIZE;
929 if (yp->tx_ring[entry].result_status == 0)
931 skb = yp->tx_skbuff[entry];
935 dma_unmap_single(&yp->pci_dev->dev,
936 le32_to_cpu(yp->tx_ring[entry].addr),
939 yp->tx_skbuff[entry] = NULL;
941 if (yp->tx_full &&
942 yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
944 yp->tx_full = 0;
948 if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
949 unsigned dirty_tx = yp->dirty_tx;
951 for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
955 u16 tx_errs = yp->tx_status[entry].tx_errs;
962 yp->tx_status[entry].tx_cnt,
963 yp->tx_status[entry].tx_errs,
964 yp->tx_status[entry].total_tx_cnt,
965 yp->tx_status[entry].paused);
969 skb = yp->tx_skbuff[entry];
993 dma_unmap_single(&yp->pci_dev->dev,
994 yp->tx_ring[entry << 1].addr,
997 yp->tx_skbuff[entry] = NULL;
999 yp->tx_status[entry].tx_errs = 0;
1003 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
1005 dirty_tx, yp->cur_tx, yp->tx_full);
1010 if (yp->tx_full &&
1011 yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1013 yp->tx_full = 0;
1017 yp->dirty_tx = dirty_tx;
1018 yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1037 spin_unlock (&yp->lock);
1045 struct yellowfin_private *yp = netdev_priv(dev);
1046 int entry = yp->cur_rx % RX_RING_SIZE;
1047 int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1051 entry, yp->rx_ring[entry].result_status);
1053 entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1054 yp->rx_ring[entry].result_status);
1059 struct yellowfin_desc *desc = &yp->rx_ring[entry];
1060 struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1068 dma_sync_single_for_cpu(&yp->pci_dev->dev,
1070 yp->rx_buf_sz, DMA_FROM_DEVICE);
1089 } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
1099 } else if ( !(yp->drv_flags & IsGigabit) &&
1109 } else if ((yp->flags & HasMACAddrBug) &&
1110 !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1113 !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1123 (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1135 dma_unmap_single(&yp->pci_dev->dev,
1136 le32_to_cpu(yp->rx_ring[entry].addr),
1137 yp->rx_buf_sz,
1139 yp->rx_skbuff[entry] = NULL;
1147 dma_sync_single_for_device(&yp->pci_dev->dev,
1149 yp->rx_buf_sz,
1157 entry = (++yp->cur_rx) % RX_RING_SIZE;
1161 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1162 entry = yp->dirty_rx % RX_RING_SIZE;
1163 if (yp->rx_skbuff[entry] == NULL) {
1164 struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
1167 yp->rx_skbuff[entry] = skb;
1169 yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
1171 yp->rx_buf_sz,
1174 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1175 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
1177 yp->rx_ring[entry - 1].dbdma_cmd =
1178 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1180 yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1182 | yp->rx_buf_sz);
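
Note: the refill loop above (driver lines 1161-1182) walks every slot between dirty_rx and cur_rx, allocates a replacement buffer where one was handed up the stack, and re-arms the descriptor. A stand-alone model of that walk follows, assuming malloc() in place of netdev_alloc_skb()/dma_map_single(); all names are illustrative.

    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_RX_RING_SIZE 64

    struct demo_rx_ring {
            void *rx_buf[DEMO_RX_RING_SIZE];
            unsigned int cur_rx;            /* next slot the receive path will consume */
            unsigned int dirty_rx;          /* oldest consumed slot not yet refilled */
    };

    static void demo_rx_refill(struct demo_rx_ring *r, size_t buf_sz)
    {
            /* Same loop shape as the driver: free-running counters, reduced
             * modulo the ring size only when indexing. */
            for (; r->cur_rx - r->dirty_rx > 0; r->dirty_rx++) {
                    unsigned int entry = r->dirty_rx % DEMO_RX_RING_SIZE;

                    if (r->rx_buf[entry] == NULL) {
                            r->rx_buf[entry] = malloc(buf_sz + 2);  /* +2: IP header alignment slack */
                            if (r->rx_buf[entry] == NULL)
                                    break;                          /* better luck next round */
                    }
                    /* The driver re-arms the slot here: clear result_status and
                     * rewrite the previous slot's dbdma_cmd to CMD_RX_BUF. */
            }
    }

    int main(void)
    {
            static struct demo_rx_ring r = { .cur_rx = 8, .dirty_rx = 0 };

            demo_rx_refill(&r, 1536);
            printf("refilled up to dirty_rx=%u\n", r.dirty_rx);
            return 0;
    }
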
1200 struct yellowfin_private *yp = netdev_priv(dev);
1201 void __iomem *ioaddr = yp->base;
1212 yp->cur_tx, yp->dirty_tx,
1213 yp->cur_rx, yp->dirty_rx);
1223 del_timer(&yp->timer);
1228 (unsigned long long)yp->tx_ring_dma);
1231 ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1232 i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1233 yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1234 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
1237 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1238 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1241 (unsigned long long)yp->rx_ring_dma);
1244 ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1245 i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1246 yp->rx_ring[i].result_status);
1248 if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1254 get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1262 free_irq(yp->pci_dev->irq, dev);
1266 yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1267 yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1268 if (yp->rx_skbuff[i]) {
1269 dev_kfree_skb(yp->rx_skbuff[i]);
1271 yp->rx_skbuff[i] = NULL;
1274 dev_kfree_skb(yp->tx_skbuff[i]);
1275 yp->tx_skbuff[i] = NULL;
1292 struct yellowfin_private *yp = netdev_priv(dev);
1293 void __iomem *ioaddr = yp->base;
1315 if (yp->drv_flags & HasMulticastBug) {