Lines matching refs:yp (references to 'yp', the per-device struct yellowfin_private * used throughout the yellowfin driver)
183 the 'yp->tx_full' flag.
187 empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
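The two comment fragments above (source lines 183 and 187) describe the Tx ring accounting that the matches further down implement: the transmit path advances yp->cur_tx and sets yp->tx_full when the ring fills, while the interrupt handler reaps completed entries by advancing yp->dirty_tx and clears yp->tx_full once enough slots are free again. The following is a minimal, self-contained sketch of that producer/consumer bookkeeping; the struct, function names, and constant values are illustrative stand-ins rather than the driver's literal code, and only the field names (cur_tx, dirty_tx, tx_full, result_status) and the TX_RING_SIZE/TX_QUEUE_SIZE macros are taken from the matching lines.

    /* Illustrative sketch only, not the driver's literal code. */
    #include <stdbool.h>

    #define TX_RING_SIZE  16   /* illustrative ring size */
    #define TX_QUEUE_SIZE 12   /* illustrative queue limit, < TX_RING_SIZE */

    struct tx_ring_state {
        unsigned int cur_tx;    /* next slot the producer will use */
        unsigned int dirty_tx;  /* oldest slot not yet reaped */
        bool tx_full;           /* set when no free slot remains */
        unsigned int result_status[TX_RING_SIZE]; /* 0 = still owned by hw */
    };

    /* Producer side (the start_xmit path): claim a slot, then either keep
     * the queue open or mark it full. */
    static void tx_queue_packet(struct tx_ring_state *yp)
    {
        unsigned int entry = yp->cur_tx % TX_RING_SIZE;

        yp->result_status[entry] = 0;   /* hand the descriptor to the hardware */
        yp->cur_tx++;

        if (yp->cur_tx - yp->dirty_tx >= TX_QUEUE_SIZE)
            yp->tx_full = true;         /* caller would stop the queue here */
    }

    /* Consumer side (the interrupt handler): reap completed descriptors by
     * advancing dirty_tx, then clear tx_full once enough slots are free
     * again (a few entries of hysteresis, as in the matches below). */
    static void tx_reap_completed(struct tx_ring_state *yp)
    {
        while (yp->cur_tx - yp->dirty_tx > 0) {
            unsigned int entry = yp->dirty_tx % TX_RING_SIZE;

            if (yp->result_status[entry] == 0)
                break;                  /* hardware has not finished this one */
            yp->dirty_tx++;
        }

        if (yp->tx_full && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4)
            yp->tx_full = false;        /* caller would wake the queue here */
    }
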
573 struct yellowfin_private *yp = netdev_priv(dev);
574 const int irq = yp->pci_dev->irq;
575 void __iomem *ioaddr = yp->base;
589 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
590 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
611 yp->tx_threshold = 32;
612 iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
615 dev->if_port = yp->default_port;
620 if (yp->drv_flags & IsGigabit) {
622 yp->full_duplex = 1;
627 iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
642 timer_setup(&yp->timer, yellowfin_timer, 0);
643 yp->timer.expires = jiffies + 3*HZ;
644 add_timer(&yp->timer);
655 struct yellowfin_private *yp = from_timer(yp, t, timer);
656 struct net_device *dev = pci_get_drvdata(yp->pci_dev);
657 void __iomem *ioaddr = yp->base;
665 if (yp->mii_cnt) {
666 int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
667 int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
668 int negotiated = lpa & yp->advertising;
671 yp->phys[0], bmsr, lpa);
673 yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
675 iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
683 yp->timer.expires = jiffies + next_tick;
684 add_timer(&yp->timer);
689 struct yellowfin_private *yp = netdev_priv(dev);
690 void __iomem *ioaddr = yp->base;
693 yp->cur_tx, yp->dirty_tx,
700 pr_warn(" Rx ring %p: ", yp->rx_ring);
702 pr_cont(" %08x", yp->rx_ring[i].result_status);
704 pr_warn(" Tx ring %p: ", yp->tx_ring);
707 yp->tx_status[i].tx_errs,
708 yp->tx_ring[i].result_status);
717 iowrite32(0x10001000, yp->base + TxCtrl);
718 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
728 struct yellowfin_private *yp = netdev_priv(dev);
731 yp->tx_full = 0;
732 yp->cur_rx = yp->cur_tx = 0;
733 yp->dirty_tx = 0;
735 yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
738 yp->rx_ring[i].dbdma_cmd =
739 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
740 yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
745 struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
746 yp->rx_skbuff[i] = skb;
750 yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
752 yp->rx_buf_sz,
757 dev_kfree_skb(yp->rx_skbuff[j]);
760 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
761 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
767 yp->tx_skbuff[i] = NULL;
768 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
769 yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
773 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
779 yp->tx_skbuff[i] = 0;
781 yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
782 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
785 if (yp->flags & FullTxStatus) {
786 yp->tx_ring[j].dbdma_cmd =
787 cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
788 yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
789 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
793 yp->tx_ring[j].dbdma_cmd =
795 yp->tx_ring[j].request_cnt = 2;
797 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
799 &(yp->tx_status[0].tx_errs) -
800 &(yp->tx_status[0]));
802 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
806 yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
809 yp->tx_tail_desc = &yp->tx_status[0];
816 struct yellowfin_private *yp = netdev_priv(dev);
826 entry = yp->cur_tx % TX_RING_SIZE;
834 yp->tx_skbuff[entry] = NULL;
840 yp->tx_skbuff[entry] = skb;
843 yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
846 yp->tx_ring[entry].result_status = 0;
849 yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
850 yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
853 yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
854 yp->tx_ring[entry].dbdma_cmd =
857 yp->cur_tx++;
859 yp->tx_ring[entry<<1].request_cnt = len;
860 yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
866 yp->cur_tx++;
868 unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
869 yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
873 yp->tx_ring[entry<<1].dbdma_cmd =
881 iowrite32(0x10001000, yp->base + TxCtrl);
883 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
886 yp->tx_full = 1;
890 yp->cur_tx, entry);
900 struct yellowfin_private *yp;
905 yp = netdev_priv(dev);
906 ioaddr = yp->base;
908 spin_lock (&yp->lock);
927 for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
928 int entry = yp->dirty_tx % TX_RING_SIZE;
931 if (yp->tx_ring[entry].result_status == 0)
933 skb = yp->tx_skbuff[entry];
937 dma_unmap_single(&yp->pci_dev->dev,
938 le32_to_cpu(yp->tx_ring[entry].addr),
941 yp->tx_skbuff[entry] = NULL;
943 if (yp->tx_full &&
944 yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
946 yp->tx_full = 0;
950 if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
951 unsigned dirty_tx = yp->dirty_tx;
953 for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
957 u16 tx_errs = yp->tx_status[entry].tx_errs;
964 yp->tx_status[entry].tx_cnt,
965 yp->tx_status[entry].tx_errs,
966 yp->tx_status[entry].total_tx_cnt,
967 yp->tx_status[entry].paused);
971 skb = yp->tx_skbuff[entry];
995 dma_unmap_single(&yp->pci_dev->dev,
996 yp->tx_ring[entry << 1].addr,
999 yp->tx_skbuff[entry] = 0;
1001 yp->tx_status[entry].tx_errs = 0;
1005 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
1007 dirty_tx, yp->cur_tx, yp->tx_full);
1012 if (yp->tx_full &&
1013 yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1015 yp->tx_full = 0;
1019 yp->dirty_tx = dirty_tx;
1020 yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1039 spin_unlock (&yp->lock);
1047 struct yellowfin_private *yp = netdev_priv(dev);
1048 int entry = yp->cur_rx % RX_RING_SIZE;
1049 int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1053 entry, yp->rx_ring[entry].result_status);
1055 entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1056 yp->rx_ring[entry].result_status);
1061 struct yellowfin_desc *desc = &yp->rx_ring[entry];
1062 struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1070 dma_sync_single_for_cpu(&yp->pci_dev->dev,
1072 yp->rx_buf_sz, DMA_FROM_DEVICE);
1091 } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
1101 } else if ( !(yp->drv_flags & IsGigabit) &&
1111 } else if ((yp->flags & HasMACAddrBug) &&
1112 !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1115 !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1125 (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1137 dma_unmap_single(&yp->pci_dev->dev,
1138 le32_to_cpu(yp->rx_ring[entry].addr),
1139 yp->rx_buf_sz,
1141 yp->rx_skbuff[entry] = NULL;
1149 dma_sync_single_for_device(&yp->pci_dev->dev,
1151 yp->rx_buf_sz,
1159 entry = (++yp->cur_rx) % RX_RING_SIZE;
1163 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1164 entry = yp->dirty_rx % RX_RING_SIZE;
1165 if (yp->rx_skbuff[entry] == NULL) {
1166 struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
1169 yp->rx_skbuff[entry] = skb;
1171 yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
1173 yp->rx_buf_sz,
1176 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1177 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
1179 yp->rx_ring[entry - 1].dbdma_cmd =
1180 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1182 yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1184 | yp->rx_buf_sz);
1202 struct yellowfin_private *yp = netdev_priv(dev);
1203 void __iomem *ioaddr = yp->base;
1214 yp->cur_tx, yp->dirty_tx,
1215 yp->cur_rx, yp->dirty_rx);
1225 del_timer(&yp->timer);
1230 (unsigned long long)yp->tx_ring_dma);
1233 ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1234 i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1235 yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1236 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
1239 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1240 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1243 (unsigned long long)yp->rx_ring_dma);
1246 ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1247 i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1248 yp->rx_ring[i].result_status);
1250 if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1256 get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1264 free_irq(yp->pci_dev->irq, dev);
1268 yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1269 yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1270 if (yp->rx_skbuff[i]) {
1271 dev_kfree_skb(yp->rx_skbuff[i]);
1273 yp->rx_skbuff[i] = NULL;
1276 dev_kfree_skb(yp->tx_skbuff[i]);
1277 yp->tx_skbuff[i] = NULL;
1294 struct yellowfin_private *yp = netdev_priv(dev);
1295 void __iomem *ioaddr = yp->base;
1317 if (yp->drv_flags & HasMulticastBug) {