Lines Matching defs:vptr

86 static void velocity_set_power_state(struct velocity_info *vptr, char state)
88 void *addr = vptr->mac_regs;
90 if (vptr->pdev)
91 pci_set_power_state(vptr->pdev, state);
499 * @vptr: velocity to program
504 static void velocity_init_cam_filter(struct velocity_info *vptr)
506 struct mac_regs __iomem *regs = vptr->mac_regs;
514 memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
515 memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
516 mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
517 mac_set_cam_mask(regs, vptr->mCAMmask);
520 for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
522 vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
526 mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
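velocity_init_cam_filter() rebuilds the hardware CAM filter from scratch on every change: both eight-byte masks are cleared and pushed to the MAC, then one CAM slot is claimed per VLAN found in active_vlans before the VLAN mask is written back. A minimal user-space model of the mask arithmetic, keeping only the i / 8, i % 8 split from the loop above (register writes and the VID programming itself are omitted):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CAM_MASK_BYTES 8                /* matches the memset of sizeof(u8) * 8 */

    /* Claim one CAM slot per active VLAN, mirroring the i / 8, i % 8 split.
     * The real function also programs the VID into the CAM entry itself;
     * only the mask arithmetic is modeled here. */
    static void rebuild_vlan_cam_mask(uint8_t mask[CAM_MASK_BYTES], int nvids)
    {
        memset(mask, 0, CAM_MASK_BYTES);    /* start from an empty filter */
        for (int i = 0; i < nvids && i < CAM_MASK_BYTES * 8; i++)
            mask[i / 8] |= 0x1 << (i % 8);
    }

    int main(void)
    {
        uint8_t mask[CAM_MASK_BYTES];

        rebuild_vlan_cam_mask(mask, 3);
        printf("mask[0] = 0x%02x\n", (unsigned)mask[0]);  /* 0x07: slots 0..2 in use */
        return 0;
    }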
532 struct velocity_info *vptr = netdev_priv(dev);
534 spin_lock_irq(&vptr->lock);
535 set_bit(vid, vptr->active_vlans);
536 velocity_init_cam_filter(vptr);
537 spin_unlock_irq(&vptr->lock);
544 struct velocity_info *vptr = netdev_priv(dev);
546 spin_lock_irq(&vptr->lock);
547 clear_bit(vid, vptr->active_vlans);
548 velocity_init_cam_filter(vptr);
549 spin_unlock_irq(&vptr->lock);
553 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
555 vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
560 * @vptr: velocity we are resetting
565 static void velocity_rx_reset(struct velocity_info *vptr)
568 struct mac_regs __iomem *regs = vptr->mac_regs;
571 velocity_init_rx_ring_indexes(vptr);
576 for (i = 0; i < vptr->options.numrx; ++i)
577 vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
579 writew(vptr->options.numrx, &regs->RBRDU);
580 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
582 writew(vptr->options.numrx - 1, &regs->RDCSize);
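velocity_rx_reset() re-initializes the ring indexes, flips every receive descriptor back to hardware ownership, and re-arms RBRDU, RDBaseLo and RDCSize. A sketch of the ownership handoff, with a placeholder flag value and a deliberately simplified descriptor:

    #include <stdint.h>

    #define OWNED_BY_NIC 0x80000000u        /* placeholder, not the hardware value */

    struct rx_desc { uint32_t len; };       /* simplified: len carries the owner bit */

    /* Return every descriptor in the ring to hardware ownership. */
    static void rx_ring_give_all(struct rx_desc *ring, int numrx)
    {
        for (int i = 0; i < numrx; ++i)
            ring[i].len |= OWNED_BY_NIC;
        /* the driver then writes numrx to RBRDU and the DMA base to RDBaseLo */
    }

    int main(void)
    {
        struct rx_desc ring[4] = { { 0 } };

        rx_ring_give_all(ring, 4);
        return ring[0].len == OWNED_BY_NIC ? 0 : 1;
    }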
587 * @vptr: velocity adapter
593 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
597 switch (vptr->options.spd_dpx) {
617 vptr->mii_status = status;
789 * @vptr: velocity interface
794 static void set_mii_flow_control(struct velocity_info *vptr)
797 switch (vptr->options.flow_cntl) {
799 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
800 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
804 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
805 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
809 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
810 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
814 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
815 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
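set_mii_flow_control() drives the two PAUSE bits in MII_ADVERTISE through the MII_REG_BITS_ON/OFF read-modify-write helpers; the four cases shown cover every CAP/ASYM combination. A model of that bit manipulation using the standard mii.h values; which flow_cntl option selects which combination is not visible in the listing, so the pairing in main() is illustrative only:

    #include <stdint.h>

    /* Bit values from linux/mii.h */
    #define ADVERTISE_PAUSE_CAP  0x0400     /* symmetric pause supported */
    #define ADVERTISE_PAUSE_ASYM 0x0800     /* asymmetric pause supported */

    /* Read-modify-write shape of MII_REG_BITS_ON / MII_REG_BITS_OFF. */
    static void mii_bits_on(uint16_t *reg, uint16_t bits)  { *reg |= bits; }
    static void mii_bits_off(uint16_t *reg, uint16_t bits) { *reg &= (uint16_t)~bits; }

    /* Apply one of the four CAP/ASYM combinations to the advertisement. */
    static void set_pause_adv(uint16_t *adv, int cap, int asym)
    {
        (cap  ? mii_bits_on : mii_bits_off)(adv, ADVERTISE_PAUSE_CAP);
        (asym ? mii_bits_on : mii_bits_off)(adv, ADVERTISE_PAUSE_ASYM);
    }

    int main(void)
    {
        uint16_t adv = 0;

        set_pause_adv(&adv, 0, 1);          /* the 799/800 case: CAP off, ASYM on */
        return adv == ADVERTISE_PAUSE_ASYM ? 0 : 1;
    }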
824 * @vptr: velocity
828 static void mii_set_auto_on(struct velocity_info *vptr)
830 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
831 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
833 MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
872 * @vptr: velocity adapter
879 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
881 struct mac_regs __iomem *regs = vptr->mac_regs;
883 vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
886 set_mii_flow_control(vptr);
888 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
889 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
895 netdev_info(vptr->netdev, "Velocity is in AUTO mode\n");
899 MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
900 MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
901 MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
904 mii_set_auto_on(vptr);
930 netdev_info(vptr->netdev,
932 if (vptr->rev_id < REV_ID_VT3216_A0)
936 netdev_info(vptr->netdev,
939 if (vptr->rev_id < REV_ID_VT3216_A0)
943 velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
949 velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
956 /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
957 velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
970 velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
972 mii_set_auto_on(vptr);
973 /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
975 /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
976 /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
982 * @vptr: velocity to report on
988 static void velocity_print_link_status(struct velocity_info *vptr)
994 if (vptr->mii_status & VELOCITY_LINK_FAIL) {
995 netdev_notice(vptr->netdev, "failed to detect cable link\n");
999 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1002 if (vptr->mii_status & VELOCITY_SPEED_1000)
1004 else if (vptr->mii_status & VELOCITY_SPEED_100)
1009 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1016 switch (vptr->options.spd_dpx) {
1043 netdev_notice(vptr->netdev, "Link %s speed %sM bps %s duplex\n",
1049 * @vptr: velocity to configure
1054 static void enable_flow_control_ability(struct velocity_info *vptr)
1057 struct mac_regs __iomem *regs = vptr->mac_regs;
1059 switch (vptr->options.flow_cntl) {
1101 * @vptr: velocity to reset
1106 static int velocity_soft_reset(struct velocity_info *vptr)
1108 struct mac_regs __iomem *regs = vptr->mac_regs;
1138 struct velocity_info *vptr = netdev_priv(dev);
1139 struct mac_regs __iomem *regs = vptr->mac_regs;
1148 } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
1154 int offset = MCAM_SIZE - vptr->multicast_limit;
1155 mac_get_cam_mask(regs, vptr->mCAMmask);
1160 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
1164 mac_set_cam_mask(regs, vptr->mCAMmask);
1180 * @vptr: velocity adapter
1185 static void mii_init(struct velocity_info *vptr, u32 mii_status)
1189 switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
1192 MII_ADVERTISE, vptr->mac_regs);
1193 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1195 vptr->mac_regs);
1198 vptr->mac_regs);
1199 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1205 MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1211 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1212 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1214 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1218 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1225 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1231 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1232 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1234 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1242 MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
1246 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1251 velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
1254 velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
1260 * @vptr: velocity adapter
1265 static void setup_queue_timers(struct velocity_info *vptr)
1268 if (vptr->rev_id >= REV_ID_VT3216_A0) {
1272 if (vptr->mii_status & (VELOCITY_SPEED_1000 |
1274 txqueue_timer = vptr->options.txqueue_timer;
1275 rxqueue_timer = vptr->options.rxqueue_timer;
1278 writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
1279 writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
1285 * @vptr: velocity adapter
1290 static void setup_adaptive_interrupts(struct velocity_info *vptr)
1292 struct mac_regs __iomem *regs = vptr->mac_regs;
1293 u16 tx_intsup = vptr->options.tx_intsup;
1294 u16 rx_intsup = vptr->options.rx_intsup;
1297 vptr->int_mask = INT_MASK_DEF;
1302 vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
1311 vptr->int_mask &= ~ISR_PRXI;
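setup_adaptive_interrupts() starts int_mask at INT_MASK_DEF and, when transmit or receive interrupt suppression is configured, strips the per-packet completion sources from it so that the coalescing timers batch interrupts instead. The mask arithmetic, with placeholder bit values standing in for the chip's:

    #include <stdint.h>

    /* Placeholder interrupt-source bits, not the chip's actual values. */
    #define ISR_PTXI     (1u << 0)          /* per-packet TX complete */
    #define ISR_PRXI     (1u << 4)          /* per-packet RX complete */
    #define ISR_OTHER    (1u << 8)          /* everything else, collapsed */
    #define INT_MASK_DEF (ISR_PTXI | ISR_PRXI | ISR_OTHER)

    /* Suppress per-packet interrupts when coalescing is in effect. */
    static uint32_t build_int_mask(uint16_t tx_intsup, uint16_t rx_intsup)
    {
        uint32_t mask = INT_MASK_DEF;

        if (tx_intsup)                      /* driver also programs a threshold */
            mask &= ~ISR_PTXI;
        if (rx_intsup)
            mask &= ~ISR_PRXI;
        return mask;
    }

    int main(void)
    {
        return build_int_mask(1, 1) == ISR_OTHER ? 0 : 1;
    }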
1322 * @vptr: velocity to init
1328 static void velocity_init_registers(struct velocity_info *vptr,
1331 struct mac_regs __iomem *regs = vptr->mac_regs;
1332 struct net_device *netdev = vptr->netdev;
1346 velocity_rx_reset(vptr);
1350 mii_status = velocity_get_opt_media_mode(vptr);
1351 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1352 velocity_print_link_status(vptr);
1353 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1357 enable_flow_control_ability(vptr);
1371 velocity_soft_reset(vptr);
1374 if (!vptr->no_eeprom) {
1384 mac_set_rx_thresh(regs, vptr->options.rx_thresh);
1385 mac_set_dma_length(regs, vptr->options.DMA_length);
1396 velocity_init_cam_filter(vptr);
1408 setup_adaptive_interrupts(vptr);
1410 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
1411 writew(vptr->options.numrx - 1, &regs->RDCSize);
1415 writew(vptr->options.numtx - 1, &regs->TDCSize);
1417 for (i = 0; i < vptr->tx.numq; i++) {
1418 writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
1422 init_flow_control_register(vptr);
1427 mii_status = velocity_get_opt_media_mode(vptr);
1430 mii_init(vptr, mii_status);
1432 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1433 velocity_print_link_status(vptr);
1434 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1438 enable_flow_control_ability(vptr);
1440 mac_write_int_mask(vptr->int_mask, regs);
1446 static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1448 struct mac_regs __iomem *regs = vptr->mac_regs;
1455 if (vptr->rx.filled < 4)
1460 unusable = vptr->rx.filled & 0x0003;
1461 dirty = vptr->rx.dirty - unusable;
1462 for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1463 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1464 vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1467 writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1468 vptr->rx.filled = unusable;
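velocity_give_many_rx_descs() returns refilled descriptors to the NIC only in multiples of four: the low two bits of rx.filled stay host-owned, and the loop walks backwards from rx.dirty setting OWNED_BY_NIC on the aligned batch before writing the batch size to RBRDU. A runnable model of that index arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define OWNED_BY_NIC 0x80000000u        /* placeholder, not the hardware bit */

    struct rx_desc { uint32_t len; };

    struct rx_ring {
        struct rx_desc *ring;
        int numrx, dirty, filled;
    };

    /* Hand back filled descriptors in multiples of 4, as the driver does. */
    static int give_many(struct rx_ring *rx)
    {
        if (rx->filled < 4)
            return 0;                        /* too few to bother the NIC */

        int unusable = rx->filled & 0x0003;  /* remainder stays host-owned */
        int dirty = rx->dirty - unusable;

        for (int avail = rx->filled & 0xfffc; avail; avail--) {
            dirty = (dirty > 0) ? dirty - 1 : rx->numrx - 1;  /* walk back */
            rx->ring[dirty].len |= OWNED_BY_NIC;
        }

        int handed = rx->filled & 0xfffc;    /* the count written to RBRDU */
        rx->filled = unusable;
        return handed;
    }

    int main(void)
    {
        struct rx_desc ring[8] = { { 0 } };
        struct rx_ring rx = { ring, 8, 6, 6 };

        printf("handed back %d descriptors\n", give_many(&rx));  /* 4 */
        return 0;
    }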
1473 * @vptr: Velocity to set up
1478 static int velocity_init_dma_rings(struct velocity_info *vptr)
1480 struct velocity_opt *opt = &vptr->options;
1493 pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
1496 dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
1497 vptr->netdev->name);
1501 vptr->rx.ring = pool;
1502 vptr->rx.pool_dma = pool_dma;
1507 for (i = 0; i < vptr->tx.numq; i++) {
1508 vptr->tx.rings[i] = pool;
1509 vptr->tx.pool_dma[i] = pool_dma;
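velocity_init_dma_rings() makes a single dma_alloc_coherent() call sized for the RX ring plus tx.numq TX rings and then carves it up, handing rx.ring the front of the pool and offsetting each TX queue's ring pointer and DMA handle into the remainder. A user-space sketch of the carve-up with malloc() standing in for the coherent allocation:

    #include <stdint.h>
    #include <stdlib.h>

    struct rx_desc { uint32_t w[4]; };       /* simplified descriptor layouts */
    struct tx_desc { uint32_t w[16]; };

    #define TXQ_MAX 2

    struct rings {
        struct rx_desc *rx_ring;
        struct tx_desc *tx_rings[TXQ_MAX];
    };

    /* One allocation carved into the RX ring plus per-queue TX rings.
     * malloc() stands in for dma_alloc_coherent(); the sketch does not
     * track the DMA handles the driver offsets in lock-step. */
    static int init_rings(struct rings *r, int numrx, int numtx, int numq)
    {
        size_t rx_size = numrx * sizeof(struct rx_desc);
        size_t tx_size = numtx * sizeof(struct tx_desc);
        char *pool = malloc(rx_size + tx_size * numq);

        if (!pool)
            return -1;

        r->rx_ring = (struct rx_desc *)pool;
        pool += rx_size;
        for (int i = 0; i < numq; i++) {
            r->tx_rings[i] = (struct tx_desc *)pool;
            pool += tx_size;
        }
        return 0;
    }

    int main(void)
    {
        struct rings r;

        return init_rings(&r, 64, 64, TXQ_MAX);  /* freed on exit in this sketch */
    }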
1517 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1519 vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1524 * @vptr: velocity
1532 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1534 struct rx_desc *rd = &(vptr->rx.ring[idx]);
1535 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1537 rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
1547 rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
1548 vptr->rx.buf_sz, DMA_FROM_DEVICE);
1555 rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1562 static int velocity_rx_refill(struct velocity_info *vptr)
1564 int dirty = vptr->rx.dirty, done = 0;
1567 struct rx_desc *rd = vptr->rx.ring + dirty;
1573 if (!vptr->rx.info[dirty].skb) {
1574 if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1578 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1579 } while (dirty != vptr->rx.curr);
1582 vptr->rx.dirty = dirty;
1583 vptr->rx.filled += done;
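velocity_rx_refill() walks from rx.dirty toward rx.curr, allocating a buffer for each empty slot and stopping at the first allocation failure; the caller adds the return value to rx.filled so the descriptors can later be handed back in the aligned batches shown above. The loop's index handling, modeled:

    #include <stdio.h>

    /* Refill from dirty up to curr, stopping at the first allocation failure;
     * alloc_buf() stands in for velocity_alloc_rx_buf() (the driver only
     * calls it for slots whose skb pointer is NULL). Returns slots filled. */
    static int rx_refill(int *dirty_p, int curr, int numrx,
                         int (*alloc_buf)(int idx))
    {
        int dirty = *dirty_p, done = 0;

        do {
            if (alloc_buf(dirty) < 0)
                break;                       /* leave the hole for the next pass */
            done++;
            dirty = (dirty < numrx - 1) ? dirty + 1 : 0;  /* ring wrap */
        } while (dirty != curr);

        *dirty_p = dirty;
        return done;
    }

    static int always_ok(int idx) { (void)idx; return 0; }

    int main(void)
    {
        int dirty = 6;

        printf("filled %d slots\n", rx_refill(&dirty, 2, 8, always_ok));  /* 4: 6,7,0,1 */
        return 0;
    }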
1591 * @vptr: velocity to clean up
1596 static void velocity_free_rd_ring(struct velocity_info *vptr)
1600 if (vptr->rx.info == NULL)
1603 for (i = 0; i < vptr->options.numrx; i++) {
1604 struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1605 struct rx_desc *rd = vptr->rx.ring + i;
1611 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
1619 kfree(vptr->rx.info);
1620 vptr->rx.info = NULL;
1625 * @vptr: velocity to configure
1630 static int velocity_init_rd_ring(struct velocity_info *vptr)
1634 vptr->rx.info = kcalloc(vptr->options.numrx,
1636 if (!vptr->rx.info)
1639 velocity_init_rx_ring_indexes(vptr);
1641 if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1642 netdev_err(vptr->netdev, "failed to allocate RX buffer\n");
1643 velocity_free_rd_ring(vptr);
1654 * @vptr: velocity
1660 static int velocity_init_td_ring(struct velocity_info *vptr)
1665 for (j = 0; j < vptr->tx.numq; j++) {
1667 vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1670 if (!vptr->tx.infos[j]) {
1672 kfree(vptr->tx.infos[j]);
1676 vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1683 * @vptr: Velocity to free from
1687 static void velocity_free_dma_rings(struct velocity_info *vptr)
1689 const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1690 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1692 dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
1695 static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1699 velocity_set_rxbufsize(vptr, mtu);
1701 ret = velocity_init_dma_rings(vptr);
1705 ret = velocity_init_rd_ring(vptr);
1709 ret = velocity_init_td_ring(vptr);
1716 velocity_free_rd_ring(vptr);
1718 velocity_free_dma_rings(vptr);
1724 * @vptr: velocity
1731 static void velocity_free_tx_buf(struct velocity_info *vptr,
1748 dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
1758 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1761 struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1770 dma_unmap_single(vptr->dev, td_info->skb_dma[i],
1782 * @vptr: velocity
1787 static void velocity_free_td_ring(struct velocity_info *vptr)
1791 for (j = 0; j < vptr->tx.numq; j++) {
1792 if (vptr->tx.infos[j] == NULL)
1794 for (i = 0; i < vptr->options.numtx; i++)
1795 velocity_free_td_ring_entry(vptr, j, i);
1797 kfree(vptr->tx.infos[j]);
1798 vptr->tx.infos[j] = NULL;
1802 static void velocity_free_rings(struct velocity_info *vptr)
1804 velocity_free_td_ring(vptr);
1805 velocity_free_rd_ring(vptr);
1806 velocity_free_dma_rings(vptr);
1811 * @vptr: velocity
1820 static void velocity_error(struct velocity_info *vptr, int status)
1824 struct mac_regs __iomem *regs = vptr->mac_regs;
1826 netdev_err(vptr->netdev, "TD structure error TDindex=%hx\n",
1830 netif_stop_queue(vptr->netdev);
1837 struct mac_regs __iomem *regs = vptr->mac_regs;
1840 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1841 vptr->mii_status = check_connection_type(regs);
1848 if (vptr->rev_id < REV_ID_VT3216_A0) {
1849 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1857 if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1862 setup_queue_timers(vptr);
1870 vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1871 netif_carrier_on(vptr->netdev);
1873 vptr->mii_status |= VELOCITY_LINK_FAIL;
1874 netif_carrier_off(vptr->netdev);
1877 velocity_print_link_status(vptr);
1878 enable_flow_control_ability(vptr);
1887 if (vptr->mii_status & VELOCITY_LINK_FAIL)
1888 netif_stop_queue(vptr->netdev);
1890 netif_wake_queue(vptr->netdev);
1894 velocity_update_hw_mibs(vptr);
1896 mac_rx_queue_wake(vptr->mac_regs);
1901 * @vptr: Velocity
1907 static int velocity_tx_srv(struct velocity_info *vptr)
1915 struct net_device_stats *stats = &vptr->netdev->stats;
1917 for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1918 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1919 idx = (idx + 1) % vptr->options.numtx) {
1924 td = &(vptr->tx.rings[qnum][idx]);
1925 tdinfo = &(vptr->tx.infos[qnum][idx]);
1948 velocity_free_tx_buf(vptr, tdinfo, td);
1949 vptr->tx.used[qnum]--;
1951 vptr->tx.tail[qnum] = idx;
1953 if (AVAIL_TD(vptr, qnum) < 1)
1960 if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
1961 (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1962 netif_wake_queue(vptr->netdev);
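velocity_tx_srv() reclaims completed transmit descriptors per queue: it advances from tx.tail modulo numtx while tx.used is nonzero, stops at the first descriptor still owned by the NIC, frees each completed buffer, and finally wakes the net queue if space opened up and the link is good. The reclaim loop's shape, modeled:

    #include <stdio.h>

    /* Reclaim completed TX slots from tail while 'used' is nonzero, modulo
     * the ring size; still_owned() stands in for the OWNED_BY_NIC test and
     * free_buf() for velocity_free_tx_buf(). */
    static int tx_reclaim(int *tail, int *used, int numtx,
                          int (*still_owned)(int idx), void (*free_buf)(int idx))
    {
        int idx, works = 0;

        for (idx = *tail; *used > 0; idx = (idx + 1) % numtx) {
            if (still_owned(idx))
                break;                  /* hardware has not finished this one */
            free_buf(idx);
            (*used)--;
            works++;
        }
        *tail = idx;
        return works;
    }

    static int done(int idx) { (void)idx; return 0; }
    static void drop(int idx) { (void)idx; }

    int main(void)
    {
        int tail = 5, used = 3;

        printf("reclaimed %d\n", tx_reclaim(&tail, &used, 8, done, drop));  /* 3 */
        return 0;
    }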
1995 * @vptr: velocity adapter
2003 struct velocity_info *vptr)
2009 new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
2023 * @vptr: velocity we are handling
2030 static inline void velocity_iph_realign(struct velocity_info *vptr,
2033 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2041 * @vptr: velocity we are handling
2047 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2049 struct net_device_stats *stats = &vptr->netdev->stats;
2050 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2051 struct rx_desc *rd = &(vptr->rx.ring[idx]);
2057 netdev_err(vptr->netdev, "received frame spans multiple RDs\n");
2067 dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2068 vptr->rx.buf_sz, DMA_FROM_DEVICE);
2072 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2073 velocity_iph_realign(vptr, skb, pkt_len);
2075 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
2078 dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
2079 vptr->rx.buf_sz, DMA_FROM_DEVICE);
2083 skb->protocol = eth_type_trans(skb, vptr->netdev);
2100 * @vptr: velocity
2107 static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2109 struct net_device_stats *stats = &vptr->netdev->stats;
2110 int rd_curr = vptr->rx.curr;
2114 struct rx_desc *rd = vptr->rx.ring + rd_curr;
2116 if (!vptr->rx.info[rd_curr].skb)
2128 if (velocity_receive_frame(vptr, rd_curr) < 0)
2142 if (rd_curr >= vptr->options.numrx)
2147 vptr->rx.curr = rd_curr;
2149 if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2150 velocity_give_many_rx_descs(vptr);
2158 struct velocity_info *vptr = container_of(napi,
2167 rx_done = velocity_rx_srv(vptr, budget);
2168 spin_lock_irqsave(&vptr->lock, flags);
2169 velocity_tx_srv(vptr);
2173 mac_enable_int(vptr->mac_regs);
2175 spin_unlock_irqrestore(&vptr->lock, flags);
2193 struct velocity_info *vptr = netdev_priv(dev);
2196 spin_lock(&vptr->lock);
2197 isr_status = mac_read_isr(vptr->mac_regs);
2201 spin_unlock(&vptr->lock);
2206 mac_write_isr(vptr->mac_regs, isr_status);
2208 if (likely(napi_schedule_prep(&vptr->napi))) {
2209 mac_disable_int(vptr->mac_regs);
2210 __napi_schedule(&vptr->napi);
2214 velocity_error(vptr, isr_status);
2216 spin_unlock(&vptr->lock);
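velocity_poll() and the interrupt handler above follow the usual NAPI handshake: the ISR acknowledges the status with mac_write_isr(), masks device interrupts, and schedules the poll; the poll services RX then TX under the lock and re-enables interrupts once it completes under budget. A condensed model of that hand-off with stubbed hardware helpers:

    /* NAPI hand-off shape; the *_hw() helpers stub the driver's mac_* calls. */
    static int irq_pending_hw(void)  { return 1; }
    static void ack_irq_hw(void)     {}              /* mac_write_isr()   */
    static void mask_irq_hw(void)    {}              /* mac_disable_int() */
    static void unmask_irq_hw(void)  {}              /* mac_enable_int()  */

    static int poll_scheduled;                       /* napi_schedule_prep() state */

    static void intr_model(void)
    {
        if (!irq_pending_hw())
            return;                  /* shared line, not ours */
        ack_irq_hw();
        if (!poll_scheduled) {
            poll_scheduled = 1;
            mask_irq_hw();           /* quiet until the poll re-arms */
        }
    }

    static void poll_model(int budget, int rx_done)
    {
        if (rx_done < budget) {      /* under budget: complete and re-arm */
            poll_scheduled = 0;
            unmask_irq_hw();
        }
    }

    int main(void)
    {
        intr_model();
        poll_model(64, 10);
        return poll_scheduled;       /* 0: interrupts re-enabled */
    }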
2233 struct velocity_info *vptr = netdev_priv(dev);
2236 ret = velocity_init_rings(vptr, dev->mtu);
2241 velocity_set_power_state(vptr, PCI_D0);
2243 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2249 velocity_set_power_state(vptr, PCI_D3hot);
2250 velocity_free_rings(vptr);
2254 velocity_give_many_rx_descs(vptr);
2256 mac_enable_int(vptr->mac_regs);
2258 napi_enable(&vptr->napi);
2259 vptr->flags |= VELOCITY_FLAGS_OPENED;
2266 * @vptr: velocity to deactivate
2271 static void velocity_shutdown(struct velocity_info *vptr)
2273 struct mac_regs __iomem *regs = vptr->mac_regs;
2293 struct velocity_info *vptr = netdev_priv(dev);
2314 tmp_vptr->pdev = vptr->pdev;
2315 tmp_vptr->dev = vptr->dev;
2316 tmp_vptr->options = vptr->options;
2317 tmp_vptr->tx.numq = vptr->tx.numq;
2323 napi_disable(&vptr->napi);
2325 spin_lock_irqsave(&vptr->lock, flags);
2328 velocity_shutdown(vptr);
2330 rx = vptr->rx;
2331 tx = vptr->tx;
2333 vptr->rx = tmp_vptr->rx;
2334 vptr->tx = tmp_vptr->tx;
2341 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2343 velocity_give_many_rx_descs(vptr);
2345 napi_enable(&vptr->napi);
2347 mac_enable_int(vptr->mac_regs);
2350 spin_unlock_irqrestore(&vptr->lock, flags);
2390 struct velocity_info *vptr = netdev_priv(dev);
2391 struct mac_regs __iomem *regs = vptr->mac_regs;
2401 if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2405 spin_lock_irqsave(&vptr->lock, flags);
2406 err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2407 spin_unlock_irqrestore(&vptr->lock, flags);
2408 check_connection_type(vptr->mac_regs);
2429 struct velocity_info *vptr = netdev_priv(dev);
2436 velocity_set_power_state(vptr, PCI_D0);
2449 velocity_set_power_state(vptr, PCI_D3hot);
2467 struct velocity_info *vptr = netdev_priv(dev);
2473 spin_lock_irq(&vptr->lock);
2474 velocity_update_hw_mibs(vptr);
2475 spin_unlock_irq(&vptr->lock);
2477 dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2478 dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2479 dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2482 dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2486 dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2506 struct velocity_info *vptr = netdev_priv(dev);
2508 napi_disable(&vptr->napi);
2510 velocity_shutdown(vptr);
2512 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2513 velocity_get_ip(vptr);
2517 velocity_free_rings(vptr);
2519 vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2534 struct velocity_info *vptr = netdev_priv(dev);
2557 spin_lock_irqsave(&vptr->lock, flags);
2559 index = vptr->tx.curr[qnum];
2560 td_ptr = &(vptr->tx.rings[qnum][index]);
2561 tdinfo = &(vptr->tx.infos[qnum][index]);
2571 tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
2582 tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
2614 prev = vptr->options.numtx - 1;
2616 vptr->tx.used[qnum]++;
2617 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2619 if (AVAIL_TD(vptr, qnum) < 1)
2622 td_ptr = &(vptr->tx.rings[qnum][prev]);
2624 mac_tx_queue_wake(vptr->mac_regs, qnum);
2626 spin_unlock_irqrestore(&vptr->lock, flags);
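velocity_xmit() claims slot tx.curr, advances curr modulo numtx, bumps used, and stops the queue when AVAIL_TD() reports the ring full; only after the new descriptor is fully built does it touch the previous one (prev wraps to numtx - 1 at slot 0) and kick the queue, so the NIC never chases a half-built chain. The index bookkeeping, modeled:

    /* Enqueue-index bookkeeping for one TX queue (model). */
    struct txq { int curr, used, numtx; };

    /* Returns the slot used and computes 'prev', which wraps at slot 0. */
    static int tx_enqueue_slot(struct txq *q, int *prev)
    {
        int index = q->curr;

        *prev = index ? index - 1 : q->numtx - 1;   /* the 2614 wrap case */
        q->used++;
        q->curr = (index + 1) % q->numtx;
        return index;
    }

    int main(void)
    {
        struct txq q = { 0, 0, 4 };
        int prev;

        tx_enqueue_slot(&q, &prev);
        return prev == 3 ? 0 : 1;   /* first slot: prev wraps to numtx - 1 */
    }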
2650 * @vptr: Velocity info
2656 static void velocity_init_info(struct velocity_info *vptr,
2659 vptr->chip_id = info->chip_id;
2660 vptr->tx.numq = info->txqueue;
2661 vptr->multicast_limit = MCAM_SIZE;
2662 spin_lock_init(&vptr->lock);
2667 * @vptr: velocity device
2672 static int velocity_get_pci_info(struct velocity_info *vptr)
2674 struct pci_dev *pdev = vptr->pdev;
2678 vptr->ioaddr = pci_resource_start(pdev, 0);
2679 vptr->memaddr = pci_resource_start(pdev, 1);
2703 * @vptr: velocity device
2707 static int velocity_get_platform_info(struct velocity_info *vptr)
2712 if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
2713 vptr->no_eeprom = 1;
2715 ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
2717 dev_err(vptr->dev, "unable to find memory address\n");
2721 vptr->memaddr = res.start;
2724 dev_err(vptr->dev, "memory region is too small.\n");
2733 * @vptr: velocity
2738 static void velocity_print_info(struct velocity_info *vptr)
2740 netdev_info(vptr->netdev, "%s - Ethernet Address: %pM\n",
2741 get_chip_name(vptr->chip_id), vptr->netdev->dev_addr);
2746 struct velocity_info *vptr = netdev_priv(dev);
2747 struct mac_regs __iomem *regs = vptr->mac_regs;
2767 struct velocity_info *vptr;
2786 vptr = netdev_priv(netdev);
2793 vptr->netdev = netdev;
2794 vptr->dev = dev;
2796 velocity_init_info(vptr, info);
2799 vptr->pdev = to_pci_dev(dev);
2801 ret = velocity_get_pci_info(vptr);
2805 vptr->pdev = NULL;
2806 ret = velocity_get_platform_info(vptr);
2811 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2817 vptr->mac_regs = regs;
2818 vptr->rev_id = readb(&regs->rev_id);
2826 velocity_get_options(&vptr->options, velocity_nics);
2832 vptr->options.flags &= info->flags;
2838 vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2840 vptr->wol_opts = vptr->options.wol_opts;
2841 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2843 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2847 netif_napi_add(netdev, &vptr->napi, velocity_poll,
2866 vptr->mii_status |= VELOCITY_LINK_FAIL;
2869 velocity_print_info(vptr);
2870 dev_set_drvdata(vptr->dev, netdev);
2874 velocity_set_power_state(vptr, PCI_D3hot);
2880 netif_napi_del(&vptr->napi);
2898 struct velocity_info *vptr = netdev_priv(netdev);
2901 netif_napi_del(&vptr->napi);
2902 iounmap(vptr->mac_regs);
3008 * @vptr: velocity to set WOL status on
3015 static int velocity_set_wol(struct velocity_info *vptr)
3017 struct mac_regs __iomem *regs = vptr->mac_regs;
3018 enum speed_opt spd_dpx = vptr->options.spd_dpx;
3032 if (vptr->wol_opts & VELOCITY_WOL_PHY)
3036 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3039 if (vptr->wol_opts & VELOCITY_WOL_ARP) {
3050 memcpy(arp->ar_tip, vptr->ip_addr, 4);
3070 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
3071 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3072 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
3074 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
3077 if (vptr->mii_status & VELOCITY_SPEED_1000)
3078 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
3102 * @vptr: velocity
3110 static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3112 struct mac_regs __iomem *regs = vptr->mac_regs;
3130 struct velocity_info *vptr = netdev_priv(netdev);
3133 if (!netif_running(vptr->netdev))
3136 netif_device_detach(vptr->netdev);
3138 spin_lock_irqsave(&vptr->lock, flags);
3139 if (vptr->pdev)
3140 pci_save_state(vptr->pdev);
3142 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3143 velocity_get_ip(vptr);
3144 velocity_save_context(vptr, &vptr->context);
3145 velocity_shutdown(vptr);
3146 velocity_set_wol(vptr);
3147 if (vptr->pdev)
3148 pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
3149 velocity_set_power_state(vptr, PCI_D3hot);
3151 velocity_save_context(vptr, &vptr->context);
3152 velocity_shutdown(vptr);
3153 if (vptr->pdev)
3154 pci_disable_device(vptr->pdev);
3155 velocity_set_power_state(vptr, PCI_D3hot);
3158 spin_unlock_irqrestore(&vptr->lock, flags);
3164 * @vptr: velocity
3170 static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3172 struct mac_regs __iomem *regs = vptr->mac_regs;
3200 struct velocity_info *vptr = netdev_priv(netdev);
3204 if (!netif_running(vptr->netdev))
3207 velocity_set_power_state(vptr, PCI_D0);
3209 if (vptr->pdev) {
3210 pci_enable_wake(vptr->pdev, PCI_D0, 0);
3211 pci_restore_state(vptr->pdev);
3214 mac_wol_reset(vptr->mac_regs);
3216 spin_lock_irqsave(&vptr->lock, flags);
3217 velocity_restore_context(vptr, &vptr->context);
3218 velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3219 mac_disable_int(vptr->mac_regs);
3221 velocity_tx_srv(vptr);
3223 for (i = 0; i < vptr->tx.numq; i++) {
3224 if (vptr->tx.used[i])
3225 mac_tx_queue_wake(vptr->mac_regs, i);
3228 mac_enable_int(vptr->mac_regs);
3229 spin_unlock_irqrestore(&vptr->lock, flags);
3230 netif_device_attach(vptr->netdev);
3272 struct velocity_info *vptr = netdev_priv(dev);
3274 if (vptr->ethtool_ops_nesting == U32_MAX)
3276 if (!vptr->ethtool_ops_nesting++ && !netif_running(dev))
3277 velocity_set_power_state(vptr, PCI_D0);
3291 struct velocity_info *vptr = netdev_priv(dev);
3293 if (!--vptr->ethtool_ops_nesting && !netif_running(dev))
3294 velocity_set_power_state(vptr, PCI_D3hot);
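The ethtool begin/complete hooks bracket every ethtool call with a nesting counter: the first caller powers the device up to PCI_D0 if the interface is down, and only the last one out drops it back to PCI_D3hot. A model of the counter logic, with stubbed power transitions:

    #include <limits.h>

    static void power_up(void)   {}    /* velocity_set_power_state(vptr, PCI_D0)    */
    static void power_down(void) {}    /* velocity_set_power_state(vptr, PCI_D3hot) */

    static unsigned int nesting;       /* vptr->ethtool_ops_nesting */

    /* First overlapping ethtool call powers the device up if it isn't running. */
    static int ethtool_begin(int running)
    {
        if (nesting == UINT_MAX)
            return -1;                  /* refuse to overflow the counter */
        if (!nesting++ && !running)
            power_up();
        return 0;
    }

    /* Last call powers it back down, again only if the interface is down. */
    static void ethtool_complete(int running)
    {
        if (!--nesting && !running)
            power_down();
    }

    int main(void)
    {
        ethtool_begin(0);
        ethtool_begin(0);               /* nested: no second power-up */
        ethtool_complete(0);
        ethtool_complete(0);            /* last out: powers down */
        return (int)nesting;
    }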
3300 struct velocity_info *vptr = netdev_priv(dev);
3301 struct mac_regs __iomem *regs = vptr->mac_regs;
3305 status = check_connection_type(vptr->mac_regs);
3317 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3326 switch (vptr->options.spd_dpx) {
3375 struct velocity_info *vptr = netdev_priv(dev);
3381 curr_status = check_connection_type(vptr->mac_regs);
3411 vptr->options.spd_dpx = spd_dpx;
3413 velocity_set_media_mode(vptr, new_status);
3421 struct velocity_info *vptr = netdev_priv(dev);
3425 if (vptr->pdev)
3426 strlcpy(info->bus_info, pci_name(vptr->pdev),
3434 struct velocity_info *vptr = netdev_priv(dev);
3438 if (vptr->wol_opts & VELOCITY_WOL_PHY)
3441 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3443 if (vptr->wol_opts & VELOCITY_WOL_ARP)
3445 memcpy(&wol->sopass, vptr->wol_passwd, 6);
3450 struct velocity_info *vptr = netdev_priv(dev);
3454 vptr->wol_opts = VELOCITY_WOL_MAGIC;
3458 vptr->wol_opts|=VELOCITY_WOL_PHY;
3459 vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3464 vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3465 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3468 vptr->wol_opts |= VELOCITY_WOL_UCAST;
3469 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3472 vptr->wol_opts |= VELOCITY_WOL_ARP;
3473 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3475 memcpy(vptr->wol_passwd, wol->sopass, 6);
3525 struct velocity_info *vptr = netdev_priv(dev);
3527 ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3528 ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3530 ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3531 ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3539 struct velocity_info *vptr = netdev_priv(dev);
3554 vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3555 vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3557 set_pending_timer_val(&vptr->options.rxqueue_timer,
3559 set_pending_timer_val(&vptr->options.txqueue_timer,
3563 spin_lock_irqsave(&vptr->lock, flags);
3564 mac_disable_int(vptr->mac_regs);
3565 setup_adaptive_interrupts(vptr);
3566 setup_queue_timers(vptr);
3568 mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3569 mac_clear_isr(vptr->mac_regs);
3570 mac_enable_int(vptr->mac_regs);
3571 spin_unlock_irqrestore(&vptr->lock, flags);
3634 struct velocity_info *vptr = netdev_priv(dev);
3635 u32 *p = vptr->mib_counter;
3638 spin_lock_irq(&vptr->lock);
3639 velocity_update_hw_mibs(vptr);
3640 spin_unlock_irq(&vptr->lock);