Lines matching defs:hmp (uses of the struct hamachi_private *hmp private-data pointer in the Hamachi Ethernet driver)
273 the 'hmp->tx_full' flag.
277 empty by incrementing the dirty_tx mark. Iff the 'hmp->tx_full' flag is set, it
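The comment fragments at 273 and 277 describe the driver's Tx flow control: the sender sets tx_full when the ring fills, and the interrupt handler advances dirty_tx and clears the flag. A minimal standalone sketch of that two-counter handshake (plain C; ring_queue/ring_reclaim are hypothetical names, and only the cur_tx/dirty_tx/tx_full fields mirror the driver):

    #include <stdio.h>

    #define RING_SIZE 16

    struct ring {
        unsigned int cur_tx;    /* producer: next slot to fill */
        unsigned int dirty_tx;  /* consumer: next slot to reclaim */
        int tx_full;            /* set when the producer must stop */
    };

    static int ring_queue(struct ring *r)
    {
        /* counters run freely; occupancy is the unsigned difference,
         * which stays correct across wraparound */
        if (r->cur_tx - r->dirty_tx >= RING_SIZE - 4) {
            r->tx_full = 1;              /* producer stops the queue */
            return -1;
        }
        r->cur_tx++;                     /* slot cur_tx % RING_SIZE used */
        return 0;
    }

    static void ring_reclaim(struct ring *r)
    {
        while (r->cur_tx - r->dirty_tx > 0) {
            r->dirty_tx++;               /* hardware finished this slot */
            if (r->tx_full && r->cur_tx - r->dirty_tx < RING_SIZE - 4)
                r->tx_full = 0;          /* wake the queue, as hamachi does */
        }
    }

    int main(void)
    {
        struct ring r = { 0, 0, 0 };

        while (ring_queue(&r) == 0)
            ;
        printf("stopped with %u in flight\n", r.cur_tx - r.dirty_tx);
        ring_reclaim(&r);
        printf("tx_full=%d after reclaim\n", r.tx_full);
        return 0;
    }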
584 struct hamachi_private *hmp;
642 hmp = netdev_priv(dev);
643 spin_lock_init(&hmp->lock);
645 hmp->mii_if.dev = dev;
646 hmp->mii_if.mdio_read = mdio_read;
647 hmp->mii_if.mdio_write = mdio_write;
648 hmp->mii_if.phy_id_mask = 0x1f;
649 hmp->mii_if.reg_num_mask = 0x1f;
655 hmp->tx_ring = ring_space;
656 hmp->tx_ring_dma = ring_dma;
662 hmp->rx_ring = ring_space;
663 hmp->rx_ring_dma = ring_dma;
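Lines 655-663 store the two halves of each dma_alloc_coherent() pair: a CPU virtual address for the driver and a bus address for the NIC. A sketch of that allocation pattern reassembled from the fragments (alloc_rings is a hypothetical helper; TX_TOTAL_SIZE, RX_TOTAL_SIZE and struct hamachi_private are the driver's own):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int alloc_rings(struct pci_dev *pdev, struct hamachi_private *hmp)
    {
        void *ring_space;
        dma_addr_t ring_dma;

        ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
                                        &ring_dma, GFP_KERNEL);
        if (!ring_space)
            return -ENOMEM;
        hmp->tx_ring = ring_space;      /* CPU virtual address */
        hmp->tx_ring_dma = ring_dma;    /* bus address the NIC will use */

        ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
                                        &ring_dma, GFP_KERNEL);
        if (!ring_space) {              /* unwind as lines 785-786 do */
            dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, hmp->tx_ring,
                              hmp->tx_ring_dma);
            return -ENOMEM;
        }
        hmp->rx_ring = ring_space;
        hmp->rx_ring_dma = ring_dma;
        return 0;
    }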
690 hmp->base = ioaddr;
693 hmp->chip_id = chip_id;
694 hmp->pci_dev = pdev;
698 hmp->option = option;
700 hmp->mii_if.full_duplex = 1;
702 hmp->mii_if.full_duplex = 0;
703 hmp->default_port = option & 15;
704 if (hmp->default_port)
705 hmp->mii_if.force_media = 1;
708 hmp->mii_if.full_duplex = 1;
711 if (hmp->mii_if.full_duplex || (option & 0x080))
712 hmp->duplex_lock = 1;
724 hmp->rx_int_var = rx_int_var >= 0 ? rx_int_var :
726 hmp->tx_int_var = tx_int_var >= 0 ? tx_int_var :
732 dev->ethtool_ops = (chip_tbl[hmp->chip_id].flags & CanHaveMII) ?
754 if (chip_tbl[hmp->chip_id].flags & CanHaveMII) {
760 hmp->phys[phy_idx++] = phy;
761 hmp->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
764 dev->name, phy, mii_status, hmp->mii_if.advertising);
767 hmp->mii_cnt = phy_idx;
768 if (hmp->mii_cnt > 0)
769 hmp->mii_if.phy_id = hmp->phys[0];
771 memset(&hmp->mii_if, 0, sizeof(hmp->mii_if));
782 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, hmp->rx_ring,
783 hmp->rx_ring_dma);
785 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, hmp->tx_ring,
786 hmp->tx_ring_dma);
819 struct hamachi_private *hmp = netdev_priv(dev);
820 void __iomem *ioaddr = hmp->base;
837 struct hamachi_private *hmp = netdev_priv(dev);
838 void __iomem *ioaddr = hmp->base;
857 struct hamachi_private *hmp = netdev_priv(dev);
858 void __iomem *ioaddr = hmp->base;
863 i = request_irq(hmp->pci_dev->irq, hamachi_interrupt, IRQF_SHARED,
872 writel(hmp->rx_ring_dma, ioaddr + RxPtr);
873 writel(hmp->rx_ring_dma >> 32, ioaddr + RxPtr + 4);
874 writel(hmp->tx_ring_dma, ioaddr + TxPtr);
875 writel(hmp->tx_ring_dma >> 32, ioaddr + TxPtr + 4);
877 writel(hmp->rx_ring_dma, ioaddr + RxPtr);
878 writel(hmp->tx_ring_dma, ioaddr + TxPtr);
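At 872-878 the ring base addresses are programmed into the NIC: on 64-bit builds both halves are written to adjacent registers, while the 32-bit path at 877-878 writes only the low word. A sketch of the split, assuming the driver's RxPtr/TxPtr register layout (write_ring_ptr is a hypothetical helper):

    #include <linux/io.h>
    #include <linux/kernel.h>   /* lower_32_bits()/upper_32_bits() */

    static void write_ring_ptr(void __iomem *ioaddr, unsigned int reg,
                               dma_addr_t ring_dma)
    {
        writel(lower_32_bits(ring_dma), ioaddr + reg);
        /* the driver compiles this second write only in 64-bit mode */
        writel(upper_32_bits(ring_dma), ioaddr + reg + 4);
    }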
918 dev->if_port = hmp->default_port;
923 if (hmp->duplex_lock != 1)
924 hmp->mii_if.full_duplex = 1;
948 rx_int_var = hmp->rx_int_var;
949 tx_int_var = hmp->tx_int_var;
988 timer_setup(&hmp->timer, hamachi_timer, 0);
989 hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
990 add_timer(&hmp->timer);
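988-990 arm the media timer with timer_setup(), and 1028 below recovers the private struct in the callback with from_timer(). A self-contained sketch of the pairing (my_priv/my_timer_fn are hypothetical; hamachi embeds the timer_list in struct hamachi_private the same way, and the ~2.4 s initial expiry matches line 989):

    #include <linux/timer.h>

    struct my_priv {
        struct timer_list timer;
        /* ... driver state ... */
    };

    static void my_timer_fn(struct timer_list *t)
    {
        /* recover the containing struct from the timer pointer */
        struct my_priv *priv = from_timer(priv, t, timer);

        /* ... periodic link check ..., then re-arm as hamachi_timer does */
        mod_timer(&priv->timer, jiffies + 10 * HZ);
    }

    static void my_timer_start(struct my_priv *priv)
    {
        timer_setup(&priv->timer, my_timer_fn, 0);
        priv->timer.expires = jiffies + (24 * HZ) / 10;  /* ~2.4 s */
        add_timer(&priv->timer);
    }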
997 struct hamachi_private *hmp = netdev_priv(dev);
1001 for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) {
1002 int entry = hmp->dirty_tx % TX_RING_SIZE;
1005 if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
1008 skb = hmp->tx_skbuff[entry];
1010 dma_unmap_single(&hmp->pci_dev->dev,
1011 leXX_to_cpu(hmp->tx_ring[entry].addr),
1014 hmp->tx_skbuff[entry] = NULL;
1016 hmp->tx_ring[entry].status_n_length = 0;
1018 hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
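1001-1018 are the Tx reclaim loop: stop at the first descriptor the NIC still owns (DescOwn), otherwise unmap, free and zero the slot. The fragments reassembled into one sketch (reclaim_tx is a hypothetical name; the fields and flags are the driver's):

    static void reclaim_tx(struct hamachi_private *hmp)
    {
        for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) {
            int entry = hmp->dirty_tx % TX_RING_SIZE;
            struct sk_buff *skb;

            if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
                break;                  /* NIC still owns this one */
            skb = hmp->tx_skbuff[entry];
            if (skb) {
                dma_unmap_single(&hmp->pci_dev->dev,
                                 leXX_to_cpu(hmp->tx_ring[entry].addr),
                                 skb->len, DMA_TO_DEVICE);
                dev_kfree_skb(skb);
                hmp->tx_skbuff[entry] = NULL;
            }
            hmp->tx_ring[entry].status_n_length = 0;
        }
        /* the wrap marker must survive the zeroing above (line 1018) */
        hmp->tx_ring[TX_RING_SIZE - 1].status_n_length |=
            cpu_to_le32(DescEndRing);
    }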
1028 struct hamachi_private *hmp = from_timer(hmp, t, timer);
1029 struct net_device *dev = hmp->mii_if.dev;
1030 void __iomem *ioaddr = hmp->base;
1047 hmp->timer.expires = RUN_AT(next_tick);
1048 add_timer(&hmp->timer);
1054 struct hamachi_private *hmp = netdev_priv(dev);
1055 void __iomem *ioaddr = hmp->base;
1061 printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring);
1064 le32_to_cpu(hmp->rx_ring[i].status_n_length));
1066 printk(KERN_DEBUG" Tx ring %p: ", hmp->tx_ring);
1069 le32_to_cpu(hmp->tx_ring[i].status_n_length));
1085 hmp->rx_ring[i].status_n_length &= cpu_to_le32(~DescOwn);
1094 hmp->tx_ring[i].status_n_length =
1096 (hmp->tx_ring[i].status_n_length &
1099 hmp->tx_ring[i].status_n_length &= cpu_to_le32(0x0000ffff);
1100 skb = hmp->tx_skbuff[i];
1102 dma_unmap_single(&hmp->pci_dev->dev,
1103 leXX_to_cpu(hmp->tx_ring[i].addr),
1106 hmp->tx_skbuff[i] = NULL;
1115 hmp->tx_full = 0;
1116 hmp->cur_rx = hmp->cur_tx = 0;
1117 hmp->dirty_rx = hmp->dirty_tx = 0;
1122 struct sk_buff *skb = hmp->rx_skbuff[i];
1125 dma_unmap_single(&hmp->pci_dev->dev,
1126 leXX_to_cpu(hmp->rx_ring[i].addr),
1127 hmp->rx_buf_sz, DMA_FROM_DEVICE);
1129 hmp->rx_skbuff[i] = NULL;
1136 skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz);
1137 hmp->rx_skbuff[i] = skb;
1141 hmp->rx_ring[i].addr = cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev,
1143 hmp->rx_buf_sz,
1145 hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
1146 DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
1148 hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1150 hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
1168 struct hamachi_private *hmp = netdev_priv(dev);
1171 hmp->tx_full = 0;
1172 hmp->cur_rx = hmp->cur_tx = 0;
1173 hmp->dirty_rx = hmp->dirty_tx = 0;
1180 hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
1185 hmp->rx_ring[i].status_n_length = 0;
1186 hmp->rx_skbuff[i] = NULL;
1190 struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
1191 hmp->rx_skbuff[i] = skb;
1195 hmp->rx_ring[i].addr = cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev,
1197 hmp->rx_buf_sz,
1200 hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
1201 DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
1203 hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1204 hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
1207 hmp->tx_skbuff[i] = NULL;
1208 hmp->tx_ring[i].status_n_length = 0;
1211 hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
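1168-1211 initialize both rings: each Rx descriptor is handed to the NIC (DescOwn) with its usable length, Tx slots start empty, and the last descriptor of each ring carries DescEndRing so the NIC wraps. Reassembled as one sketch (init_rings_sketch is a hypothetical name; hmp->rx_buf_sz is set from the MTU at line 1180, whose continuation is elided in the listing, so it is used as-is here):

    static void init_rings_sketch(struct net_device *dev,
                                  struct hamachi_private *hmp)
    {
        int i;

        hmp->tx_full = 0;
        hmp->cur_rx = hmp->cur_tx = 0;
        hmp->dirty_rx = hmp->dirty_tx = 0;

        for (i = 0; i < RX_RING_SIZE; i++) {
            struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);

            hmp->rx_skbuff[i] = skb;
            if (!skb)
                break;
            skb_reserve(skb, 2);        /* 16-byte align the IP header */
            hmp->rx_ring[i].addr =
                cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev, skb->data,
                                           hmp->rx_buf_sz, DMA_FROM_DEVICE));
            /* give the slot to the NIC, with its usable length */
            hmp->rx_ring[i].status_n_length =
                cpu_to_le32(DescOwn | DescEndPacket | DescIntr |
                            (hmp->rx_buf_sz - 2));
        }
        hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
        hmp->rx_ring[RX_RING_SIZE - 1].status_n_length |=
            cpu_to_le32(DescEndRing);

        for (i = 0; i < TX_RING_SIZE; i++) {
            hmp->tx_skbuff[i] = NULL;
            hmp->tx_ring[i].status_n_length = 0;
        }
        hmp->tx_ring[TX_RING_SIZE - 1].status_n_length |=
            cpu_to_le32(DescEndRing);
    }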
1218 struct hamachi_private *hmp = netdev_priv(dev);
1227 if (hmp->tx_full) {
1229 printk(KERN_WARNING "%s: Hamachi transmit queue full at slot %d.\n", dev->name, hmp->cur_tx);
1233 status = readw(hmp->base + TxStatus);
1235 writew(0x0001, hmp->base + TxCmd);
1243 entry = hmp->cur_tx % TX_RING_SIZE;
1245 hmp->tx_skbuff[entry] = skb;
1247 hmp->tx_ring[entry].addr = cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev,
1262 hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
1265 hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn |
1267 hmp->cur_tx++;
1273 status = readw(hmp->base + TxStatus);
1275 writew(0x0001, hmp->base + TxCmd);
1286 if ((hmp->cur_tx - hmp->dirty_tx) < (TX_RING_SIZE - 4))
1289 hmp->tx_full = 1;
1295 dev->name, hmp->cur_tx, entry);
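1218-1295 queue one packet: map the skb, build the descriptor (the last slot also gets DescEndRing, per 1262-1265), advance cur_tx, kick TxCmd, and stop the queue once fewer than four free slots remain (1286-1289). A simplified sketch (xmit_sketch is a hypothetical name; the TxStatus pre-check at 1273 before the kick is elided):

    static netdev_tx_t xmit_sketch(struct sk_buff *skb, struct net_device *dev)
    {
        struct hamachi_private *hmp = netdev_priv(dev);
        unsigned int entry = hmp->cur_tx % TX_RING_SIZE;

        hmp->tx_skbuff[entry] = skb;
        hmp->tx_ring[entry].addr =
            cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev, skb->data,
                                       skb->len, DMA_TO_DEVICE));
        /* last slot also carries DescEndRing (lines 1262-1265) */
        hmp->tx_ring[entry].status_n_length =
            cpu_to_le32(DescOwn | DescEndPacket | DescIntr | skb->len |
                        (entry == TX_RING_SIZE - 1 ? DescEndRing : 0));
        hmp->cur_tx++;

        writew(0x0001, hmp->base + TxCmd);      /* wake the transmitter */

        /* stop the queue when fewer than 4 free slots remain */
        if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4) {
            netif_start_queue(dev);
        } else {
            hmp->tx_full = 1;
            netif_stop_queue(dev);              /* reclaim will wake us */
        }
        return NETDEV_TX_OK;
    }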
1305 struct hamachi_private *hmp = netdev_priv(dev);
1306 void __iomem *ioaddr = hmp->base;
1317 spin_lock(&hmp->lock);
1339 if (hmp->tx_full) {
1340 for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) {
1341 int entry = hmp->dirty_tx % TX_RING_SIZE;
1344 if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn))
1346 skb = hmp->tx_skbuff[entry];
1349 dma_unmap_single(&hmp->pci_dev->dev,
1350 leXX_to_cpu(hmp->tx_ring[entry].addr),
1354 hmp->tx_skbuff[entry] = NULL;
1356 hmp->tx_ring[entry].status_n_length = 0;
1358 hmp->tx_ring[TX_RING_SIZE-1].status_n_length |=
1362 if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4) {
1364 hmp->tx_full = 0;
1402 spin_unlock(&hmp->lock);
1410 struct hamachi_private *hmp = netdev_priv(dev);
1411 int entry = hmp->cur_rx % RX_RING_SIZE;
1412 int boguscnt = (hmp->dirty_rx + RX_RING_SIZE) - hmp->cur_rx;
1416 entry, hmp->rx_ring[entry].status_n_length);
1421 struct hamachi_desc *desc = &(hmp->rx_ring[entry]);
1429 dma_sync_single_for_cpu(&hmp->pci_dev->dev,
1431 hmp->rx_buf_sz, DMA_FROM_DEVICE);
1432 buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
1442 dev->name, hmp->cur_rx, data_size, desc_status);
1444 dev->name, desc, &hmp->rx_ring[hmp->cur_rx % RX_RING_SIZE]);
1447 le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0xffff0000,
1448 le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0x0000ffff,
1449 le32_to_cpu(hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length));
1498 dma_sync_single_for_cpu(&hmp->pci_dev->dev,
1499 leXX_to_cpu(hmp->rx_ring[entry].addr),
1500 hmp->rx_buf_sz,
1505 hmp->rx_skbuff[entry]->data, pkt_len);
1508 skb_put_data(skb, hmp->rx_ring_dma
1511 dma_sync_single_for_device(&hmp->pci_dev->dev,
1512 leXX_to_cpu(hmp->rx_ring[entry].addr),
1513 hmp->rx_buf_sz,
1516 dma_unmap_single(&hmp->pci_dev->dev,
1517 leXX_to_cpu(hmp->rx_ring[entry].addr),
1518 hmp->rx_buf_sz,
1520 skb = hmp->rx_skbuff[entry]; skb_put(skb, pkt_len);
1521 hmp->rx_skbuff[entry] = NULL;
1586 entry = (++hmp->cur_rx) % RX_RING_SIZE;
1590 for (; hmp->cur_rx - hmp->dirty_rx > 0; hmp->dirty_rx++) {
1593 entry = hmp->dirty_rx % RX_RING_SIZE;
1594 desc = &(hmp->rx_ring[entry]);
1595 if (hmp->rx_skbuff[entry] == NULL) {
1596 struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);
1598 hmp->rx_skbuff[entry] = skb;
1602 desc->addr = cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev,
1604 hmp->rx_buf_sz,
1607 desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
1618 if (readw(hmp->base + RxStatus) & 0x0002)
1619 writew(0x0001, hmp->base + RxCmd);
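1590-1619 refill reaped Rx slots and, per 1618-1619, poke RxCmd if the receive engine idled waiting for descriptors. Reassembled sketch (refill_rx is a hypothetical name):

    static void refill_rx(struct net_device *dev, struct hamachi_private *hmp)
    {
        for (; hmp->cur_rx - hmp->dirty_rx > 0; hmp->dirty_rx++) {
            int entry = hmp->dirty_rx % RX_RING_SIZE;
            struct hamachi_desc *desc = &hmp->rx_ring[entry];

            if (hmp->rx_skbuff[entry] == NULL) {
                struct sk_buff *skb =
                    netdev_alloc_skb(dev, hmp->rx_buf_sz + 2);

                hmp->rx_skbuff[entry] = skb;
                if (!skb)
                    break;              /* retry on the next pass */
                skb_reserve(skb, 2);
                desc->addr =
                    cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev,
                                               skb->data, hmp->rx_buf_sz,
                                               DMA_FROM_DEVICE));
            }
            desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
            /* hand it back to the NIC; last slot keeps the wrap marker */
            desc->status_n_length |=
                cpu_to_le32(DescOwn | DescEndPacket | DescIntr |
                            (entry == RX_RING_SIZE - 1 ? DescEndRing : 0));
        }
        /* restart Rx if it stalled for lack of descriptors (1618-1619) */
        if (readw(hmp->base + RxStatus) & 0x0002)
            writew(0x0001, hmp->base + RxCmd);
    }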
1628 struct hamachi_private *hmp = netdev_priv(dev);
1629 void __iomem *ioaddr = hmp->base;
1662 struct hamachi_private *hmp = netdev_priv(dev);
1663 void __iomem *ioaddr = hmp->base;
1674 dev->name, hmp->cur_tx, hmp->dirty_tx, hmp->cur_rx, hmp->dirty_rx);
1687 (int)hmp->tx_ring_dma);
1690 readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ',
1691 i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr);
1693 (int)hmp->rx_ring_dma);
1696 readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
1697 i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
1699 if (*(u8 *)hmp->rx_skbuff[i]->data != 0x69) {
1701 hmp->rx_skbuff[i]->data;
1713 free_irq(hmp->pci_dev->irq, dev);
1715 del_timer_sync(&hmp->timer);
1719 skb = hmp->rx_skbuff[i];
1720 hmp->rx_ring[i].status_n_length = 0;
1722 dma_unmap_single(&hmp->pci_dev->dev,
1723 leXX_to_cpu(hmp->rx_ring[i].addr),
1724 hmp->rx_buf_sz, DMA_FROM_DEVICE);
1726 hmp->rx_skbuff[i] = NULL;
1728 hmp->rx_ring[i].addr = cpu_to_leXX(0xBADF00D0); /* An invalid address. */
1731 skb = hmp->tx_skbuff[i];
1733 dma_unmap_single(&hmp->pci_dev->dev,
1734 leXX_to_cpu(hmp->tx_ring[i].addr),
1737 hmp->tx_skbuff[i] = NULL;
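1719-1737: on close, every posted buffer is unmapped and freed, and 1728 poisons the descriptor address with 0xBADF00D0 so a straggling DMA access shows up as a fault instead of silent corruption. Sketch of the Rx half (free_rx_bufs is a hypothetical name):

    static void free_rx_bufs(struct hamachi_private *hmp)
    {
        int i;

        for (i = 0; i < RX_RING_SIZE; i++) {
            struct sk_buff *skb = hmp->rx_skbuff[i];

            hmp->rx_ring[i].status_n_length = 0;
            if (skb) {
                dma_unmap_single(&hmp->pci_dev->dev,
                                 leXX_to_cpu(hmp->rx_ring[i].addr),
                                 hmp->rx_buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb(skb);
                hmp->rx_skbuff[i] = NULL;
            }
            /* an invalid address, per the driver's comment at 1728 */
            hmp->rx_ring[i].addr = cpu_to_leXX(0xBADF00D0);
        }
    }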
1748 struct hamachi_private *hmp = netdev_priv(dev);
1749 void __iomem *ioaddr = hmp->base;
1784 struct hamachi_private *hmp = netdev_priv(dev);
1785 void __iomem *ioaddr = hmp->base;
1925 struct hamachi_private *hmp = netdev_priv(dev);
1927 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, hmp->rx_ring,
1928 hmp->rx_ring_dma);
1929 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, hmp->tx_ring,
1930 hmp->tx_ring_dma);
1932 iounmap(hmp->base);
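1925-1932 unwind probe in reverse: free both coherent rings, then unmap the register window. As one sketch (remove_sketch is a hypothetical name; netdev unregistration and PCI disable are elided here as they are in the listing):

    static void remove_sketch(struct pci_dev *pdev, struct net_device *dev)
    {
        struct hamachi_private *hmp = netdev_priv(dev);

        dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, hmp->rx_ring,
                          hmp->rx_ring_dma);
        dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, hmp->tx_ring,
                          hmp->tx_ring_dma);
        iounmap(hmp->base);             /* undo the probe-time mapping */
    }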