Lines Matching refs:vp
584 /* Chip features we care about in vp->capabilities, read from the EEPROM. */
660 static void window_set(struct vortex_private *vp, int window)
662 if (window != vp->window) {
663 iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
664 vp->window = window;
670 window_read ## size(struct vortex_private *vp, int window, int addr) \
674 spin_lock_irqsave(&vp->window_lock, flags); \
675 window_set(vp, window); \
676 ret = ioread ## size(vp->ioaddr + addr); \
677 spin_unlock_irqrestore(&vp->window_lock, flags); \
681 window_write ## size(struct vortex_private *vp, u ## size value, \
685 spin_lock_irqsave(&vp->window_lock, flags); \
686 window_set(vp, window); \
687 iowrite ## size(value, vp->ioaddr + addr); \
688 spin_unlock_irqrestore(&vp->window_lock, flags); \
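The macros at 660-688 generate the windowed register accessors used throughout the rest of this listing: window_set() latches the active register window via the SelectWindow command, and each generated read/write helper takes window_lock around the select-then-access pair. A minimal sketch of what the 16-bit pair expands to, assuming only the fields visible above (window, window_lock, ioaddr):

    /* Sketch of the window_read16()/window_write16() expansion: select the
     * register window (only if it changed), then do the MMIO access, all under
     * window_lock so a concurrent caller cannot switch windows mid-access. */
    static u16 window_read16_sketch(struct vortex_private *vp, int window, int addr)
    {
            unsigned long flags;
            u16 ret;

            spin_lock_irqsave(&vp->window_lock, flags);
            window_set(vp, window);         /* iowrite16(SelectWindow + window, ...) if needed */
            ret = ioread16(vp->ioaddr + addr);
            spin_unlock_irqrestore(&vp->window_lock, flags);
            return ret;
    }

    static void window_write16_sketch(struct vortex_private *vp, u16 value,
                                      int window, int addr)
    {
            unsigned long flags;

            spin_lock_irqsave(&vp->window_lock, flags);
            window_set(vp, window);
            iowrite16(value, vp->ioaddr + addr);
            spin_unlock_irqrestore(&vp->window_lock, flags);
    }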
700 #define VORTEX_PCI(vp) \
701 ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
709 #define VORTEX_EISA(vp) \
710 ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
758 static void mdio_sync(struct vortex_private *vp, int bits);
760 static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
930 struct vortex_private *vp;
941 vp = netdev_priv(dev);
942 ioaddr = vp->ioaddr;
1091 struct vortex_private *vp;
1118 dev = alloc_etherdev(sizeof(*vp));
1124 vp = netdev_priv(dev);
1147 vp->enable_wol = 1;
1163 vp->ioaddr = ioaddr;
1164 vp->large_frames = mtu > 1500;
1165 vp->drv_flags = vci->drv_flags;
1166 vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
1167 vp->io_size = vci->io_size;
1168 vp->card_idx = card_idx;
1169 vp->window = -1;
1199 spin_lock_init(&vp->lock);
1200 spin_lock_init(&vp->mii_lock);
1201 spin_lock_init(&vp->window_lock);
1202 vp->gendev = gendev;
1203 vp->mii.dev = dev;
1204 vp->mii.mdio_read = mdio_read;
1205 vp->mii.mdio_write = mdio_write;
1206 vp->mii.phy_id_mask = 0x1f;
1207 vp->mii.reg_num_mask = 0x1f;
1210 vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
1212 &vp->rx_ring_dma, GFP_KERNEL);
1214 if (!vp->rx_ring)
1217 vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
1218 vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
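Lines 1210-1218 carve both descriptor rings out of one coherent DMA allocation: the Tx ring starts immediately after RX_RING_SIZE Rx descriptors, and its bus address is the same fixed offset from rx_ring_dma. A sketch of that layout (the error label name is an assumption):

    /* Sketch: one dma_alloc_coherent() block holds both rings, so a single free
     * covers both and the Tx ring pointer/DMA address are plain offsets into it. */
    vp->rx_ring = dma_alloc_coherent(gendev,
                                     sizeof(struct boom_rx_desc) * RX_RING_SIZE +
                                     sizeof(struct boom_tx_desc) * TX_RING_SIZE,
                                     &vp->rx_ring_dma, GFP_KERNEL);
    if (!vp->rx_ring)
            goto free_device;               /* label name is an assumption */

    vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
    vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;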
1227 vp->media_override = 7;
1229 vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
1230 if (vp->media_override != 7)
1231 vp->medialock = 1;
1232 vp->full_duplex = (option & 0x200) ? 1 : 0;
1233 vp->bus_master = (option & 16) ? 1 : 0;
1237 vp->full_duplex = 1;
1239 vp->enable_wol = 1;
1243 vp->full_duplex = 1;
1245 vp->flow_ctrl = 1;
1247 vp->enable_wol = 1;
1250 vp->mii.force_media = vp->full_duplex;
1251 vp->options = option;
1265 window_write16(vp, base + i, 0, Wn0EepromCmd);
1269 if ((window_read16(vp, 0, Wn0EepromCmd) &
1273 eeprom[i] = window_read16(vp, 0, Wn0EepromData);
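Lines 1265-1273 read the configuration EEPROM through window 0: issue the read command for one word, poll Wn0EepromCmd until the busy bit clears, then fetch the word from Wn0EepromData. A sketch of the loop; the 0x8000 busy bit, the 162 us delay and the word count are assumptions filled in around the matched lines:

    /* Sketch: read one EEPROM word per iteration, polling the busy bit. */
    for (i = 0; i < 0x40; i++) {
            int timer;

            window_write16(vp, base + i, 0, Wn0EepromCmd);  /* 'base' carries the read opcode */
            for (timer = 10; timer >= 0; timer--) {
                    udelay(162);                            /* assumed worst-case read time */
                    if ((window_read16(vp, 0, Wn0EepromCmd) & 0x8000) == 0)
                            break;                          /* busy bit cleared */
            }
            eeprom[i] = window_read16(vp, 0, Wn0EepromData);
    }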
1298 window_write8(vp, dev->dev_addr[i], 2, i);
1307 step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
1318 vp->cb_fn_base = pci_iomap(pdev, 2, 0);
1319 if (!vp->cb_fn_base) {
1328 vp->cb_fn_base);
1331 n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
1332 if (vp->drv_flags & INVERT_LED_PWR)
1334 if (vp->drv_flags & INVERT_MII_PWR)
1336 window_write16(vp, n, 2, Wn2_ResetOptions);
1337 if (vp->drv_flags & WNO_XCVR_PWR) {
1338 window_write16(vp, 0x0800, 0, 0);
1343 vp->info1 = eeprom[13];
1344 vp->info2 = eeprom[15];
1345 vp->capabilities = eeprom[16];
1347 if (vp->info1 & 0x8000) {
1348 vp->full_duplex = 1;
1356 vp->available_media = window_read16(vp, 3, Wn3_Options);
1357 if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
1358 vp->available_media = 0x40;
1359 config = window_read32(vp, 3, Wn3_Config);
1362 config, window_read16(vp, 3, Wn3_Options));
1371 vp->default_media = XCVR(config);
1372 if (vp->default_media == XCVR_NWAY)
1373 vp->has_nway = 1;
1374 vp->autoselect = AUTOSELECT(config);
1377 if (vp->media_override != 7) {
1379 print_name, vp->media_override,
1380 media_tbl[vp->media_override].name);
1381 dev->if_port = vp->media_override;
1383 dev->if_port = vp->default_media;
1385 if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
1389 if (vp->drv_flags & EXTRA_PREAMBLE)
1391 mdio_sync(vp, 32);
1408 vp->phys[phy_idx++] = phyx;
1420 vp->phys[0] = 24;
1422 vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE);
1423 if (vp->full_duplex) {
1425 vp->advertising &= ~0x02A0;
1426 mdio_write(dev, vp->phys[0], 4, vp->advertising);
1429 vp->mii.phy_id = vp->phys[0];
1432 if (vp->capabilities & CapBusMaster) {
1433 vp->full_bus_master_tx = 1;
1436 (vp->info2 & 1) ? "early" : "whole-frame" );
1438 vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
1439 vp->bus_master = 0; /* AKPM: vortex only */
1443 if (vp->full_bus_master_tx) {
1447 ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
1465 vp->pm_state_valid = 1;
1477 vp->rx_ring, vp->rx_ring_dma);
1488 struct vortex_private *vp = netdev_priv(dev);
1489 void __iomem *ioaddr = vp->ioaddr;
1515 struct vortex_private *vp = netdev_priv(dev);
1518 dev->name, (vp->full_duplex) ? "full" : "half");
1521 window_write16(vp,
1522 ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1523 (vp->large_frames ? 0x40 : 0) |
1524 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
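Lines 1515-1524 compose the Window 3 MAC control word from the negotiated duplex, the large-frames flag and the flow-control state. A sketch of the same composition; the 0x20 and 0x40 bits follow the matched lines, the 0x100 flow-control bit is an assumption:

    /* Sketch: full duplex = bit 5, large frames = bit 6, flow control = bit 8
     * (the last only when both ends run full duplex and advertise pause). */
    window_write16(vp,
                   ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
                   (vp->large_frames ? 0x40 : 0) |
                   ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
                    0x100 : 0),
                   3, Wn3_MAC_Ctrl);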
1531 struct vortex_private *vp = netdev_priv(dev);
1537 if (mii_check_media(&vp->mii, ok_to_print, init)) {
1538 vp->full_duplex = vp->mii.full_duplex;
1548 struct vortex_private *vp = netdev_priv(dev);
1549 void __iomem *ioaddr = vp->ioaddr;
1553 if (VORTEX_PCI(vp)) {
1554 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
1555 if (vp->pm_state_valid)
1556 pci_restore_state(VORTEX_PCI(vp));
1557 err = pci_enable_device(VORTEX_PCI(vp));
1565 config = window_read32(vp, 3, Wn3_Config);
1567 if (vp->media_override != 7) {
1569 dev->name, vp->media_override,
1570 media_tbl[vp->media_override].name);
1571 dev->if_port = vp->media_override;
1572 } else if (vp->autoselect) {
1573 if (vp->has_nway) {
1581 while (! (vp->available_media & media_tbl[dev->if_port].mask))
1588 dev->if_port = vp->default_media;
1594 timer_setup(&vp->timer, vortex_timer, 0);
1595 mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
1601 vp->full_duplex = vp->mii.force_media;
1605 window_write32(vp, config, 3, Wn3_Config);
1608 mdio_read(dev, vp->phys[0], MII_BMSR);
1609 mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
1610 vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
1611 vp->mii.full_duplex = vp->full_duplex;
1629 dev->name, dev->irq, window_read16(vp, 4, Wn4_Media));
1634 window_write8(vp, dev->dev_addr[i], 2, i);
1636 window_write16(vp, 0, 2, i);
1638 if (vp->cb_fn_base) {
1639 unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
1640 if (vp->drv_flags & INVERT_LED_PWR)
1642 if (vp->drv_flags & INVERT_MII_PWR)
1644 window_write16(vp, n, 2, Wn2_ResetOptions);
1651 window_write16(vp,
1652 (window_read16(vp, 4, Wn4_Media) &
1661 window_read8(vp, 6, i);
1662 window_read16(vp, 6, 10);
1663 window_read16(vp, 6, 12);
1665 window_read8(vp, 4, 12);
1667 window_write16(vp, 0x0040, 4, Wn4_NetDiag);
1669 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1670 vp->cur_rx = 0;
1674 iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
1676 if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
1677 vp->cur_tx = vp->dirty_tx = 0;
1678 if (vp->drv_flags & IS_BOOMERANG)
1682 vp->rx_ring[i].status = 0;
1684 vp->tx_skbuff[i] = NULL;
1696 vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
1697 (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
1698 (vp->full_bus_master_rx ? UpComplete : RxComplete) |
1699 (vp->bus_master ? DMADone : 0);
1700 vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
1701 (vp->full_bus_master_rx ? 0 : RxComplete) |
1703 | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
1704 iowrite16(vp->status_enable, ioaddr + EL3_CMD);
1708 iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
1709 if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
1710 iowrite32(0x8000, vp->cb_fn_base + 4);
1720 struct vortex_private *vp = netdev_priv(dev);
1731 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1736 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
1737 vp->rx_ring[i].status = 0; /* Clear complete bit. */
1738 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
1742 vp->rx_skbuff[i] = skb;
1747 dma = dma_map_single(vp->gendev, skb->data,
1749 if (dma_mapping_error(vp->gendev, dma))
1751 vp->rx_ring[i].addr = cpu_to_le32(dma);
1759 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
1768 if (vp->rx_skbuff[i]) {
1769 dev_kfree_skb(vp->rx_skbuff[i]);
1770 vp->rx_skbuff[i] = NULL;
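Lines 1731-1770 fill the Boomerang Rx ring: each descriptor's next pointer chains to the following descriptor's bus address, every buffer is mapped with dma_map_single(), and line 1759 closes the ring by pointing the last descriptor back at rx_ring_dma; on failure the already-queued buffers are freed as lines 1768-1770 show. A sketch of the loop; the skb allocation helper and the error label name are assumptions:

    /* Sketch: populate the Rx ring, chaining each descriptor to the next and
     * closing the loop at the end. */
    for (i = 0; i < RX_RING_SIZE; i++) {
            struct sk_buff *skb;
            dma_addr_t dma;

            vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma +
                                              sizeof(struct boom_rx_desc) * (i + 1));
            vp->rx_ring[i].status = 0;                      /* clear complete bit */
            vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);

            skb = netdev_alloc_skb(dev, PKT_BUF_SZ);        /* helper name is an assumption */
            vp->rx_skbuff[i] = skb;
            if (!skb)
                    goto err_free_skbs;                     /* partial ring: free what was queued */

            dma = dma_map_single(vp->gendev, skb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
            if (dma_mapping_error(vp->gendev, dma))
                    goto err_free_skbs;
            vp->rx_ring[i].addr = cpu_to_le32(dma);
    }
    /* Wrap the ring: the last descriptor points back at the first. */
    vp->rx_ring[i - 1].next = cpu_to_le32(vp->rx_ring_dma);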
1784 struct vortex_private *vp = from_timer(vp, t, timer);
1785 struct net_device *dev = vp->mii.dev;
1786 void __iomem *ioaddr = vp->ioaddr;
1797 media_status = window_read16(vp, 4, Wn4_Media);
1830 if (vp->medialock)
1836 spin_lock_irq(&vp->lock);
1840 } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
1842 dev->if_port = vp->default_media;
1852 window_write16(vp,
1857 config = window_read32(vp, 3, Wn3_Config);
1859 window_write32(vp, config, 3, Wn3_Config);
1867 spin_unlock_irq(&vp->lock);
1875 mod_timer(&vp->timer, RUN_AT(next_tick));
1876 if (vp->deferred)
1882 struct vortex_private *vp = netdev_priv(dev);
1883 void __iomem *ioaddr = vp->ioaddr;
1889 window_read16(vp, 4, Wn4_NetDiag),
1890 window_read16(vp, 4, Wn4_Media),
1892 window_read16(vp, 4, Wn4_FIFODiag));
1910 if (vp->full_bus_master_tx) {
1912 if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
1913 iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
1915 if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) {
1919 if (vp->drv_flags & IS_BOOMERANG)
1939 struct vortex_private *vp = netdev_priv(dev);
1940 void __iomem *ioaddr = vp->ioaddr;
1963 if (tx_status & 0x08) vp->xstats.tx_max_collisions++;
1967 } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */
1990 (window_read16(vp, 5, 10) & ~StatsFull),
1992 vp->intr_enable &= ~StatsFull;
1997 iowrite16(vp->status_enable, ioaddr + EL3_CMD);
1998 iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
2002 fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
2006 if (vp->full_bus_master_tx) {
2035 if (!vp->full_bus_master_tx)
2043 struct vortex_private *vp = netdev_priv(dev);
2044 void __iomem *ioaddr = vp->ioaddr;
2049 if (vp->bus_master) {
2052 vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
2054 if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
2060 spin_lock_irq(&vp->window_lock);
2061 window_set(vp, 7);
2062 iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
2064 spin_unlock_irq(&vp->window_lock);
2065 vp->tx_skb = skb;
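Lines 2049-2065 set up the single-buffer Vortex Tx download: map the skb, program the master address in window 7, remember the skb, then kick the download engine. A sketch of the whole step; Wn7_MasterLen and StartDMADown are assumed register/command names not shown in the listing, and the dword rounding of the length matches the unmap at line 2305:

    /* Sketch: Vortex (non-ring) bus-master transmit of one linear skb. */
    if (vp->bus_master) {
            unsigned int len = (skb->len + 3) & ~3;         /* dword-align the DMA length */

            vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len, DMA_TO_DEVICE);
            if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
                    dev_kfree_skb_any(skb);
                    dev->stats.tx_dropped++;
                    return NETDEV_TX_OK;
            }

            spin_lock_irq(&vp->window_lock);
            window_set(vp, 7);
            iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
            iowrite16(len, ioaddr + Wn7_MasterLen);         /* register name is an assumption */
            spin_unlock_irq(&vp->window_lock);
            vp->tx_skb = skb;
            iowrite16(StartDMADown, ioaddr + EL3_CMD);      /* command name is an assumption */
    }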
2111 struct vortex_private *vp = netdev_priv(dev);
2112 void __iomem *ioaddr = vp->ioaddr;
2114 int entry = vp->cur_tx % TX_RING_SIZE;
2116 struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
2123 dev->name, vp->cur_tx);
2132 if (vp->handling_irq)
2135 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
2143 vp->tx_skbuff[entry] = skb;
2145 vp->tx_ring[entry].next = 0;
2148 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2150 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
2153 dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
2155 if (dma_mapping_error(vp->gendev, dma_addr))
2158 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
2159 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
2163 dma_addr = dma_map_single(vp->gendev, skb->data,
2165 if (dma_mapping_error(vp->gendev, dma_addr))
2168 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
2169 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
2174 dma_addr = skb_frag_dma_map(vp->gendev, frag,
2178 if (dma_mapping_error(vp->gendev, dma_addr)) {
2180 dma_unmap_page(vp->gendev,
2181 le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
2182 le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
2185 dma_unmap_single(vp->gendev,
2186 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
2187 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
2193 vp->tx_ring[entry].frag[i+1].addr =
2197 vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
2199 vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
2203 dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
2204 if (dma_mapping_error(vp->gendev, dma_addr))
2206 vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
2207 vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
2208 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2211 spin_lock_irqsave(&vp->lock, flags);
2214 prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
2216 iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
2217 vp->queued_packet++;
2220 vp->cur_tx++;
2223 if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
2235 spin_unlock_irqrestore(&vp->lock, flags);
2239 dev_err(vp->gendev, "Error mapping dma buffer\n");
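Lines 2135, 2223 and 2459 all lean on the same bookkeeping: cur_tx and dirty_tx are free-running counters, so their difference is the number of descriptors the chip still owns, independent of wraparound. A sketch of the two checks boomerang_start_xmit makes with them:

    /* Sketch: refuse the packet when no slot is free, and stop the queue
     * pre-emptively once this packet took the last slot; the completion path
     * (line 2459) wakes the queue again when a slot opens up. */
    if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
            netif_stop_queue(dev);
            return NETDEV_TX_BUSY;          /* no room for this descriptor */
    }
    /* ... fill vp->tx_ring[vp->cur_tx % TX_RING_SIZE] and hand it to the NIC ... */
    vp->cur_tx++;
    if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)
            netif_stop_queue(dev);          /* ring now full until a DownComplete IRQ */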
2254 struct vortex_private *vp = netdev_priv(dev);
2261 ioaddr = vp->ioaddr;
2273 status |= vp->deferred;
2274 vp->deferred = 0;
2284 spin_lock(&vp->window_lock);
2285 window_set(vp, 7);
2305 dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
2307 bytes_compl += vp->tx_skb->len;
2308 dev_consume_skb_irq(vp->tx_skb); /* Release the transferred buffer */
2328 spin_unlock(&vp->window_lock);
2330 spin_lock(&vp->window_lock);
2331 window_set(vp, 7);
2339 vp->deferred |= status;
2340 iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
2342 iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2345 mod_timer(&vp->timer, jiffies + 1*HZ);
2353 spin_unlock(&vp->window_lock);
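Lines 2339-2345 are the interrupt-storm throttle in vortex_interrupt: when the per-IRQ work budget runs out, the still-pending sources are parked in vp->deferred, masked out of the enabled set, acked, and the one-second timer (lines 2345/1876) restores the full mask outside interrupt context. A sketch; the budget variable name is an assumption:

    /* Sketch: defer noisy interrupt sources and let the timer re-enable them. */
    if (--work_done < 0) {
            vp->deferred |= status;
            iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
                      ioaddr + EL3_CMD);
            iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
            mod_timer(&vp->timer, jiffies + 1*HZ);  /* vortex_timer undoes the deferral */
    }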
2370 struct vortex_private *vp = netdev_priv(dev);
2377 ioaddr = vp->ioaddr;
2379 vp->handling_irq = 1;
2397 status |= vp->deferred;
2398 vp->deferred = 0;
2416 unsigned int dirty_tx = vp->dirty_tx;
2419 while (vp->cur_tx - dirty_tx > 0) {
2423 vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
2426 if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
2430 if (vp->tx_skbuff[entry]) {
2431 struct sk_buff *skb = vp->tx_skbuff[entry];
2434 dma_unmap_single(vp->gendev,
2435 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
2436 le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
2440 dma_unmap_page(vp->gendev,
2441 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
2442 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
2445 dma_unmap_single(vp->gendev,
2446 le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
2451 vp->tx_skbuff[entry] = NULL;
2458 vp->dirty_tx = dirty_tx;
2459 if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
2475 vp->deferred |= status;
2476 iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
2478 iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2481 mod_timer(&vp->timer, jiffies + 1*HZ);
2486 if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
2487 iowrite32(0x8000, vp->cb_fn_base + 4);
2496 vp->handling_irq = 0;
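Lines 2416-2459 are the Tx-completion walk in boomerang_interrupt: advance dirty_tx while the chip has finished a descriptor, unmap and free the attached skb, then wake the queue once at least one slot is free. A sketch simplified to the linear (non-scatter-gather) unmap path shown at line 2445:

    /* Sketch: reclaim completed Tx descriptors. */
    while (vp->cur_tx - dirty_tx > 0) {
            int entry = dirty_tx % TX_RING_SIZE;

            if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
                    break;                          /* chip still owns this descriptor */

            if (vp->tx_skbuff[entry]) {
                    struct sk_buff *skb = vp->tx_skbuff[entry];

                    dma_unmap_single(vp->gendev,
                                     le32_to_cpu(vp->tx_ring[entry].addr),
                                     skb->len, DMA_TO_DEVICE);
                    dev_consume_skb_irq(skb);       /* delivered, not dropped */
                    vp->tx_skbuff[entry] = NULL;
            }
            dirty_tx++;
    }
    vp->dirty_tx = dirty_tx;
    if (netif_queue_stopped(dev) && vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)
            netif_wake_queue(dev);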
2504 struct vortex_private *vp = netdev_priv(dev);
2508 spin_lock_irqsave(&vp->lock, flags);
2510 if (vp->full_bus_master_rx)
2515 spin_unlock_irqrestore(&vp->lock, flags);
2522 struct vortex_private *vp = netdev_priv(dev);
2523 void __iomem *ioaddr = vp->ioaddr;
2553 if (vp->bus_master &&
2555 dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
2562 dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
2591 struct vortex_private *vp = netdev_priv(dev);
2592 int entry = vp->cur_rx % RX_RING_SIZE;
2593 void __iomem *ioaddr = vp->ioaddr;
2600 while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
2618 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
2629 dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
2631 skb_put_data(skb, vp->rx_skbuff[entry]->data,
2633 dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
2634 vp->rx_copy++;
2645 newdma = dma_map_single(vp->gendev, newskb->data,
2647 if (dma_mapping_error(vp->gendev, newdma)) {
2654 skb = vp->rx_skbuff[entry];
2655 vp->rx_skbuff[entry] = newskb;
2656 vp->rx_ring[entry].addr = cpu_to_le32(newdma);
2658 dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
2659 vp->rx_nocopy++;
2668 vp->rx_csumhits++;
2676 vp->rx_ring[entry].status = 0; /* Clear complete bit. */
2678 entry = (++vp->cur_rx) % RX_RING_SIZE;
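Lines 2618-2659 show the copy-break decision in boomerang_rx: small frames are copied into a fresh skb (with dma_sync calls bracketing the copy) so the mapped ring buffer stays in place, while large frames are handed up and replaced by a newly mapped buffer; if the replacement cannot be allocated or mapped, the old buffer is recycled and the frame dropped. A sketch; rx_copybreak is taken to be the usual copy-threshold module parameter and netdev_alloc_skb_ip_align() is an assumed allocation helper:

    /* Sketch: copy small frames, flip buffers for large ones. */
    struct sk_buff *skb = NULL;
    dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);

    if (pkt_len < rx_copybreak) {
            skb = netdev_alloc_skb_ip_align(dev, pkt_len);
            if (skb) {
                    dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
                    skb_put_data(skb, vp->rx_skbuff[entry]->data, pkt_len);
                    dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
                    vp->rx_copy++;
            }
    } else {
            struct sk_buff *newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);

            if (newskb) {
                    dma_addr_t newdma = dma_map_single(vp->gendev, newskb->data,
                                                       PKT_BUF_SZ, DMA_FROM_DEVICE);
                    if (!dma_mapping_error(vp->gendev, newdma)) {
                            skb = vp->rx_skbuff[entry];
                            skb_put(skb, pkt_len);
                            vp->rx_skbuff[entry] = newskb;
                            vp->rx_ring[entry].addr = cpu_to_le32(newdma);
                            dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
                            vp->rx_nocopy++;
                    } else {
                            dev_kfree_skb(newskb);  /* keep the old buffer, drop this frame */
                    }
            }
    }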
2686 struct vortex_private *vp = netdev_priv(dev);
2687 void __iomem *ioaddr = vp->ioaddr;
2692 del_timer_sync(&vp->timer);
2711 if (vp->full_bus_master_rx)
2713 if (vp->full_bus_master_tx)
2716 if (final_down && VORTEX_PCI(vp)) {
2717 vp->pm_state_valid = 1;
2718 pci_save_state(VORTEX_PCI(vp));
2726 struct vortex_private *vp = netdev_priv(dev);
2727 void __iomem *ioaddr = vp->ioaddr;
2738 dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
2742 if (vp->rx_csumhits &&
2743 (vp->drv_flags & HAS_HWCKSM) == 0 &&
2744 (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
2752 if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
2754 if (vp->rx_skbuff[i]) {
2755 dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
2757 dev_kfree_skb(vp->rx_skbuff[i]);
2758 vp->rx_skbuff[i] = NULL;
2761 if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
2763 if (vp->tx_skbuff[i]) {
2764 struct sk_buff *skb = vp->tx_skbuff[i];
2769 dma_unmap_single(vp->gendev,
2770 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
2771 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
2774 dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
2777 vp->tx_skbuff[i] = NULL;
2789 struct vortex_private *vp = netdev_priv(dev);
2790 void __iomem *ioaddr = vp->ioaddr;
2792 if (vp->full_bus_master_tx) {
2797 vp->full_bus_master_tx,
2798 vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
2799 vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
2802 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
2808 length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
2810 length = le32_to_cpu(vp->tx_ring[i].length);
2813 i, &vp->tx_ring[i], length,
2814 le32_to_cpu(vp->tx_ring[i].status));
2824 struct vortex_private *vp = netdev_priv(dev);
2825 void __iomem *ioaddr = vp->ioaddr;
2829 spin_lock_irqsave (&vp->lock, flags);
2831 spin_unlock_irqrestore (&vp->lock, flags);
2845 struct vortex_private *vp = netdev_priv(dev);
2849 dev->stats.tx_carrier_errors += window_read8(vp, 6, 0);
2850 dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1);
2851 dev->stats.tx_window_errors += window_read8(vp, 6, 4);
2852 dev->stats.rx_fifo_errors += window_read8(vp, 6, 5);
2853 dev->stats.tx_packets += window_read8(vp, 6, 6);
2854 dev->stats.tx_packets += (window_read8(vp, 6, 9) &
2856 /* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */
2860 dev->stats.rx_bytes += window_read16(vp, 6, 10);
2861 dev->stats.tx_bytes += window_read16(vp, 6, 12);
2863 vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2);
2864 vp->xstats.tx_single_collisions += window_read8(vp, 6, 3);
2865 vp->xstats.tx_deferred += window_read8(vp, 6, 8);
2866 vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12);
2868 dev->stats.collisions = vp->xstats.tx_multiple_collisions
2869 + vp->xstats.tx_single_collisions
2870 + vp->xstats.tx_max_collisions;
2873 u8 up = window_read8(vp, 4, 13);
2881 struct vortex_private *vp = netdev_priv(dev);
2883 return mii_nway_restart(&vp->mii);
2889 struct vortex_private *vp = netdev_priv(dev);
2891 mii_ethtool_get_link_ksettings(&vp->mii, cmd);
2899 struct vortex_private *vp = netdev_priv(dev);
2901 return mii_ethtool_set_link_ksettings(&vp->mii, cmd);
2927 struct vortex_private *vp = netdev_priv(dev);
2928 void __iomem *ioaddr = vp->ioaddr;
2931 spin_lock_irqsave(&vp->lock, flags);
2933 spin_unlock_irqrestore(&vp->lock, flags);
2935 data[0] = vp->xstats.tx_deferred;
2936 data[1] = vp->xstats.tx_max_collisions;
2937 data[2] = vp->xstats.tx_multiple_collisions;
2938 data[3] = vp->xstats.tx_single_collisions;
2939 data[4] = vp->xstats.rx_bad_ssd;
2958 struct vortex_private *vp = netdev_priv(dev);
2961 if (VORTEX_PCI(vp)) {
2962 strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
2965 if (VORTEX_EISA(vp))
2966 strlcpy(info->bus_info, dev_name(vp->gendev),
2976 struct vortex_private *vp = netdev_priv(dev);
2978 if (!VORTEX_PCI(vp))
2984 if (vp->enable_wol)
2990 struct vortex_private *vp = netdev_priv(dev);
2992 if (!VORTEX_PCI(vp))
2999 vp->enable_wol = 1;
3001 vp->enable_wol = 0;
3030 struct vortex_private *vp = netdev_priv(dev);
3033 if(VORTEX_PCI(vp))
3034 state = VORTEX_PCI(vp)->current_state;
3039 pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
3040 err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
3042 pci_set_power_state(VORTEX_PCI(vp), state);
3054 struct vortex_private *vp = netdev_priv(dev);
3055 void __iomem *ioaddr = vp->ioaddr;
3080 struct vortex_private *vp = netdev_priv(dev);
3083 if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
3091 window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize);
3095 window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType);
3099 vp->large_frames = dev->mtu > 1500 || enable;
3101 mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl);
3102 if (vp->large_frames)
3106 window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl);
3126 static void mdio_delay(struct vortex_private *vp)
3128 window_read32(vp, 4, Wn4_PhysicalMgmt);
3140 static void mdio_sync(struct vortex_private *vp, int bits)
3144 window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
3145 mdio_delay(vp);
3146 window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
3148 mdio_delay(vp);
3155 struct vortex_private *vp = netdev_priv(dev);
3159 spin_lock_bh(&vp->mii_lock);
3162 mdio_sync(vp, 32);
3167 window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
3168 mdio_delay(vp);
3169 window_write16(vp, dataval | MDIO_SHIFT_CLK,
3171 mdio_delay(vp);
3175 window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
3176 mdio_delay(vp);
3178 ((window_read16(vp, 4, Wn4_PhysicalMgmt) &
3180 window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
3182 mdio_delay(vp);
3185 spin_unlock_bh(&vp->mii_lock);
3192 struct vortex_private *vp = netdev_priv(dev);
3196 spin_lock_bh(&vp->mii_lock);
3199 mdio_sync(vp, 32);
3204 window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
3205 mdio_delay(vp);
3206 window_write16(vp, dataval | MDIO_SHIFT_CLK,
3208 mdio_delay(vp);
3212 window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
3213 mdio_delay(vp);
3214 window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
3216 mdio_delay(vp);
3219 spin_unlock_bh(&vp->mii_lock);
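Lines 3126-3219 bit-bang clause-22 MDIO frames through Wn4_PhysicalMgmt: mdio_delay() is just a dummy window read for timing, mdio_sync() clocks out idle ones, and mdio_read()/mdio_write() shift a command word out MSB-first, raising MDIO_SHIFT_CLK after each data bit. A sketch of the frame encodings and the shift loop; MDIO_DATA_WRITE0 and the command constants are assumptions consistent with clause-22 MDIO, the other names follow the matched lines:

    /* Sketch: management frames as shifted out by mdio_read()/mdio_write(). */
    int read_cmd  = (0xf6 << 10) | (phy_id << 5) | location;               /* start + read op */
    int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value; /* start + write op */

    /* Shift the read command out, one bit per clock cycle, MSB first. */
    for (i = 14; i >= 0; i--) {
            int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;

            window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);               /* data bit, clock low */
            mdio_delay(vp);
            window_write16(vp, dataval | MDIO_SHIFT_CLK, 4, Wn4_PhysicalMgmt); /* clock high */
            mdio_delay(vp);
    }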
3226 struct vortex_private *vp = netdev_priv(dev);
3227 void __iomem *ioaddr = vp->ioaddr;
3229 device_set_wakeup_enable(vp->gendev, vp->enable_wol);
3231 if (vp->enable_wol) {
3233 window_write16(vp, 2, 7, 0x0c);
3238 if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
3239 pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));
3241 vp->enable_wol = 0;
3245 if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
3249 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
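Lines 3226-3249 arm wake-on-LAN before the card is put into D3hot: a magic-packet wake event is programmed through window 7, the receiver is left able to see the WOL frame, and pci_enable_wake() is asked for D3hot wakeup, with WOL disabled again if the platform refuses. A sketch of the chip-side steps the listing only shows in part; the Rx filter/enable commands and the event bit meanings are assumptions:

    /* Sketch: 1 = downloaded filter, 2 = magic packet, 4 = link status. */
    window_write16(vp, 2, 7, 0x0c);                 /* wake on magic packet */
    /* The Rx path must still accept frames for the magic packet to match. */
    iowrite16(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD);
    iowrite16(RxEnable, ioaddr + EL3_CMD);

    if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
            pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));
            vp->enable_wol = 0;
            return;
    }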
3257 struct vortex_private *vp;
3264 vp = netdev_priv(dev);
3266 if (vp->cb_fn_base)
3267 pci_iounmap(pdev, vp->cb_fn_base);
3272 if (vp->pm_state_valid)
3277 iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
3278 vp->ioaddr + EL3_CMD);
3280 pci_iounmap(pdev, vp->ioaddr);
3285 vp->rx_ring, vp->rx_ring_dma);