Lines matching refs:ndev in the Renesas Ethernet AVB driver (ravb_main.c). Each entry below is prefixed with its line number in the source file.

53 void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
56 ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
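
ravb_modify() at line 53 is the driver's read-modify-write primitive: read the register, clear the bits in 'clear', OR in 'set', and write the result back. A minimal user-space sketch of the same pattern, with a plain variable standing in for a memory-mapped register (rmw_modify() is illustrative, not a driver symbol):

    #include <stdint.h>
    #include <stdio.h>

    /* Read-modify-write: drop the bits in 'clear', then OR in 'set'. */
    static void rmw_modify(volatile uint32_t *reg, uint32_t clear, uint32_t set)
    {
        *reg = (*reg & ~clear) | set;
    }

    int main(void)
    {
        volatile uint32_t ccc = 0x00000002;  /* made-up register value */

        rmw_modify(&ccc, 0x3, 0x1);          /* e.g. rewrite a 2-bit field */
        printf("0x%08x\n", (unsigned)ccc);   /* prints 0x00000001 */
        return 0;
    }
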
59 int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
64 if ((ravb_read(ndev, reg) & mask) == value)
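
ravb_wait() at line 59 polls a register until the masked bits equal 'value', failing with -ETIMEDOUT when a retry budget runs out. A bounded-polling sketch of that contract (the retry count and usleep() are illustrative stand-ins for the driver's loop bound and udelay()):

    #include <errno.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Poll until (*reg & mask) == value, or give up after a bounded
     * number of tries so a wedged device cannot hang the caller. */
    static int wait_reg(volatile uint32_t *reg, uint32_t mask, uint32_t value)
    {
        for (int i = 0; i < 10000; i++) {
            if ((*reg & mask) == value)
                return 0;
            usleep(10);
        }
        return -ETIMEDOUT;
    }

    int main(void)
    {
        volatile uint32_t csr = 0x1;
        return wait_reg(&csr, 0x3, 0x1);  /* succeeds immediately */
    }
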
71 static int ravb_config(struct net_device *ndev)
76 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
78 error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
80 netdev_err(ndev, "failed to switch device to config mode\n");
85 static void ravb_set_rate(struct net_device *ndev)
87 struct ravb_private *priv = netdev_priv(ndev);
91 ravb_write(ndev, GECMR_SPEED_100, GECMR);
94 ravb_write(ndev, GECMR_SPEED_1000, GECMR);
112 static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
115 ether_addr_copy(ndev->dev_addr, mac);
117 u32 mahr = ravb_read(ndev, MAHR);
118 u32 malr = ravb_read(ndev, MALR);
120 ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
121 ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
122 ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
123 ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
124 ndev->dev_addr[4] = (malr >> 8) & 0xFF;
125 ndev->dev_addr[5] = (malr >> 0) & 0xFF;
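
Lines 117-125 recover the station address from the MAHR/MALR register pair: MAHR carries the first four bytes, most significant first, and the low 16 bits of MALR carry the last two. A self-contained sketch of the unpacking (register values are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mahr = 0x02112233;  /* hypothetical MAHR contents */
        uint32_t malr = 0x00004455;  /* hypothetical MALR contents */
        uint8_t mac[6];

        mac[0] = (mahr >> 24) & 0xFF;
        mac[1] = (mahr >> 16) & 0xFF;
        mac[2] = (mahr >>  8) & 0xFF;
        mac[3] = (mahr >>  0) & 0xFF;
        mac[4] = (malr >>  8) & 0xFF;
        mac[5] = (malr >>  0) & 0xFF;

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",  /* 02:11:22:33:44:55 */
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }
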
134 ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
161 return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
174 static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
176 struct ravb_private *priv = netdev_priv(ndev);
198 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
218 static void ravb_ring_free(struct net_device *ndev, int q)
220 struct ravb_private *priv = netdev_priv(ndev);
229 if (!dma_mapping_error(ndev->dev.parent,
231 dma_unmap_single(ndev->dev.parent,
238 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
244 ravb_tx_free(ndev, q, false);
248 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
273 static void ravb_ring_format(struct net_device *ndev, int q)
275 struct ravb_private *priv = netdev_priv(ndev);
297 dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
303 if (dma_mapping_error(ndev->dev.parent, dma_addr))
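
Lines 297-303 map each receive buffer for DMA and test the handle with dma_mapping_error() before the descriptor is handed to hardware; a failed mapping must leave the descriptor in a state the DMAC will skip. A user-space stand-in for that check-before-publish pattern (try_map(), BAD_HANDLE and struct desc are illustrative, not driver types):

    #include <stdint.h>
    #include <stdio.h>

    #define BAD_HANDLE 0  /* models a mapping that dma_mapping_error() flags */

    struct desc { uintptr_t dptr; uint16_t size; };

    static uintptr_t try_map(void *buf) { return (uintptr_t)buf; }

    /* Publish a buffer only if mapping succeeded; otherwise neuter the
     * descriptor (zero size) so the hardware never touches it. */
    static void publish(struct desc *d, void *buf, uint16_t len)
    {
        uintptr_t h = try_map(buf);

        d->dptr = h;
        d->size = (h == BAD_HANDLE) ? 0 : len;
    }

    int main(void)
    {
        struct desc d;
        char buf[2048];

        publish(&d, buf, sizeof(buf));
        printf("size=%u\n", d.size);
        return 0;
    }
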
337 static int ravb_ring_init(struct net_device *ndev, int q)
339 struct ravb_private *priv = netdev_priv(ndev);
354 skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1);
371 priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
382 priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
391 ravb_ring_free(ndev, q);
397 static void ravb_emac_init(struct net_device *ndev)
400 ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
403 ravb_write(ndev, ECMR_ZPF | ECMR_DM |
404 (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
407 ravb_set_rate(ndev);
410 ravb_write(ndev,
411 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
412 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
413 ravb_write(ndev,
414 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
417 ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
420 ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
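
Lines 410-414 are the inverse of ravb_read_mac_address(): when the E-MAC is initialized, the six address bytes are packed back into MAHR (bytes 0-3) and MALR (bytes 4-5). Round-tripping the earlier example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

        uint32_t mahr = ((uint32_t)mac[0] << 24) | (mac[1] << 16) |
                        (mac[2] << 8) | mac[3];
        uint32_t malr = (mac[4] << 8) | mac[5];

        /* prints MAHR=0x02112233 MALR=0x00004455 */
        printf("MAHR=0x%08x MALR=0x%08x\n", mahr, malr);
        return 0;
    }
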
424 static int ravb_dmac_init(struct net_device *ndev)
426 struct ravb_private *priv = netdev_priv(ndev);
430 error = ravb_config(ndev);
434 error = ravb_ring_init(ndev, RAVB_BE);
437 error = ravb_ring_init(ndev, RAVB_NC);
439 ravb_ring_free(ndev, RAVB_BE);
444 ravb_ring_format(ndev, RAVB_BE);
445 ravb_ring_format(ndev, RAVB_NC);
448 ravb_write(ndev,
452 ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
455 ravb_write(ndev, TCCR_TFEN, TCCR);
460 ravb_write(ndev, 0, DIL);
462 ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
465 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
467 ravb_write(ndev, 0, RIC1);
469 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
471 ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
474 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
479 static void ravb_get_tx_tstamp(struct net_device *ndev)
481 struct ravb_private *priv = netdev_priv(ndev);
490 count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
492 tfa2 = ravb_read(ndev, TFA2);
494 ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
496 ravb_read(ndev, TFA1);
513 ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
533 static bool ravb_rx(struct net_device *ndev, int *quota, int q)
535 struct ravb_private *priv = netdev_priv(ndev);
583 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
601 skb->protocol = eth_type_trans(skb, ndev);
602 if (ndev->features & NETIF_F_RXCSUM)
620 skb = netdev_alloc_skb(ndev,
626 dma_addr = dma_map_single(ndev->dev.parent, skb->data,
633 if (dma_mapping_error(ndev->dev.parent, dma_addr))
648 static void ravb_rcv_snd_disable(struct net_device *ndev)
651 ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
654 static void ravb_rcv_snd_enable(struct net_device *ndev)
657 ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
661 static int ravb_stop_dma(struct net_device *ndev)
666 error = ravb_wait(ndev, TCCR,
671 error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
677 ravb_rcv_snd_disable(ndev);
680 error = ravb_wait(ndev, CSR, CSR_RPO, 0);
685 return ravb_config(ndev);
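
ravb_stop_dma() (lines 661-685) spells out the shutdown order: drain the transmit queues (TCCR/CSR), disable RX and TX at the E-MAC, wait for in-flight receive to finish (CSR_RPO), and only then drop the DMAC back to config mode via ravb_config(). A sketch of that quiesce-then-reconfigure sequence with stub accessors (all names illustrative):

    #include <errno.h>

    static int  tx_drained(void)        { return 1; }  /* TCCR/CSR TPO poll */
    static int  rx_drained(void)        { return 1; }  /* CSR_RPO poll */
    static void mac_rx_tx_disable(void) { }            /* clear ECMR.RE/TE */
    static int  enter_config_mode(void) { return 0; }  /* CCC_OPC_CONFIG */

    /* Same ordering as the driver: never switch modes with traffic
     * still in flight in either direction. */
    static int stop_dma(void)
    {
        if (!tx_drained())
            return -EBUSY;
        mac_rx_tx_disable();
        if (!rx_drained())
            return -EBUSY;
        return enter_config_mode();
    }

    int main(void) { return stop_dma(); }
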
689 static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
691 struct ravb_private *priv = netdev_priv(ndev);
694 ecsr = ravb_read(ndev, ECSR);
695 ravb_write(ndev, ecsr, ECSR); /* clear interrupt */
700 ndev->stats.tx_carrier_errors++;
705 psr = ravb_read(ndev, PSR);
710 ravb_rcv_snd_disable(ndev);
713 ravb_rcv_snd_enable(ndev);
720 struct net_device *ndev = dev_id;
721 struct ravb_private *priv = netdev_priv(ndev);
724 ravb_emac_interrupt_unlocked(ndev);
730 static void ravb_error_interrupt(struct net_device *ndev)
732 struct ravb_private *priv = netdev_priv(ndev);
735 eis = ravb_read(ndev, EIS);
736 ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
738 ris2 = ravb_read(ndev, RIS2);
739 ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
756 static bool ravb_queue_interrupt(struct net_device *ndev, int q)
758 struct ravb_private *priv = netdev_priv(ndev);
759 u32 ris0 = ravb_read(ndev, RIS0);
760 u32 ric0 = ravb_read(ndev, RIC0);
761 u32 tis = ravb_read(ndev, TIS);
762 u32 tic = ravb_read(ndev, TIC);
768 ravb_write(ndev, ric0 & ~BIT(q), RIC0);
769 ravb_write(ndev, tic & ~BIT(q), TIC);
771 ravb_write(ndev, BIT(q), RID0);
772 ravb_write(ndev, BIT(q), TID);
776 netdev_warn(ndev,
779 netdev_warn(ndev,
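
ravb_queue_interrupt() (lines 756-779) masks the per-queue RX/TX source before scheduling NAPI: on parts with separate disable registers it writes BIT(q) to RID0/TID, otherwise it clears the enable bit through a RIC0/TIC read-modify-write; the netdev_warn() calls fire if NAPI was somehow already scheduled with the interrupt still enabled. A toy model of mask-before-schedule (stand-in names, not the kernel API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t irq_enable = 0x3;   /* models RIC0/TIC for two queues */

    /* Mask the queue's source first; the poll loop re-enables it once
     * the backlog is drained, so no events are lost in between. */
    static bool queue_interrupt(int q, uint32_t status)
    {
        uint32_t bit = 1u << q;

        if (!(status & bit) || !(irq_enable & bit))
            return false;               /* not ours, or already masked */

        irq_enable &= ~bit;
        printf("napi_schedule(q=%d)\n", q);
        return true;
    }

    int main(void)
    {
        queue_interrupt(0, 0x1);
        return 0;
    }
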
788 static bool ravb_timestamp_interrupt(struct net_device *ndev)
790 u32 tis = ravb_read(ndev, TIS);
793 ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
794 ravb_get_tx_tstamp(ndev);
802 struct net_device *ndev = dev_id;
803 struct ravb_private *priv = netdev_priv(ndev);
809 iss = ravb_read(ndev, ISS);
816 if (ravb_timestamp_interrupt(ndev))
821 if (ravb_queue_interrupt(ndev, q))
828 ravb_emac_interrupt_unlocked(ndev);
834 ravb_error_interrupt(ndev);
840 ravb_ptp_interrupt(ndev);
851 struct net_device *ndev = dev_id;
852 struct ravb_private *priv = netdev_priv(ndev);
858 iss = ravb_read(ndev, ISS);
861 if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
866 ravb_error_interrupt(ndev);
872 ravb_ptp_interrupt(ndev);
882 struct net_device *ndev = dev_id;
883 struct ravb_private *priv = netdev_priv(ndev);
889 if (ravb_queue_interrupt(ndev, q))
908 struct net_device *ndev = napi->dev;
909 struct ravb_private *priv = netdev_priv(ndev);
917 ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
918 if (ravb_rx(ndev, &quota, q))
924 ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
925 ravb_tx_free(ndev, q, true);
926 netif_wake_subqueue(ndev, q);
934 ravb_modify(ndev, RIC0, mask, mask);
935 ravb_modify(ndev, TIC, mask, mask);
937 ravb_write(ndev, mask, RIE0);
938 ravb_write(ndev, mask, TIE);
945 if (priv->rx_over_errors != ndev->stats.rx_over_errors)
946 ndev->stats.rx_over_errors = priv->rx_over_errors;
947 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
948 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
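
ravb_poll() (lines 908-948) is the other half of that handshake: acknowledge the RX/TX status bits, let ravb_rx() consume up to 'quota' frames, reclaim transmitted descriptors, and re-enable the per-queue interrupts only when the quota was not exhausted. A budget-limited poll loop in miniature (everything here is illustrative):

    #include <stdio.h>

    static int pending = 130;           /* frames waiting, for illustration */

    /* Consume at most 'budget' frames; returning less than the budget
     * signals "done" and lets interrupts be re-enabled. */
    static int poll(int budget)
    {
        int done = 0;

        while (done < budget && pending > 0) {
            pending--;                  /* "receive" one frame */
            done++;
        }
        if (done < budget)
            printf("backlog drained: re-enable RX/TX interrupts\n");
        return done;
    }

    int main(void)
    {
        /* 64 matches the weight passed to netif_napi_add() at line 2219 */
        while (poll(64) == 64)
            printf("budget exhausted, poll again\n");
        return 0;
    }
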
954 static void ravb_adjust_link(struct net_device *ndev)
956 struct ravb_private *priv = netdev_priv(ndev);
957 struct phy_device *phydev = ndev->phydev;
965 ravb_rcv_snd_disable(ndev);
971 ravb_set_rate(ndev);
974 ravb_modify(ndev, ECMR, ECMR_TXF, 0);
986 ravb_rcv_snd_enable(ndev);
1000 static int ravb_phy_init(struct net_device *ndev)
1002 struct device_node *np = ndev->dev.parent->of_node;
1003 struct ravb_private *priv = netdev_priv(ndev);
1028 phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
1031 netdev_err(ndev, "failed to connect PHY\n");
1042 netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
1046 netdev_info(ndev, "limited PHY to 100Mbit/s\n");
1073 static int ravb_phy_start(struct net_device *ndev)
1077 error = ravb_phy_init(ndev);
1081 phy_start(ndev->phydev);
1086 static u32 ravb_get_msglevel(struct net_device *ndev)
1088 struct ravb_private *priv = netdev_priv(ndev);
1093 static void ravb_set_msglevel(struct net_device *ndev, u32 value)
1095 struct ravb_private *priv = netdev_priv(ndev);
1146 static void ravb_get_ethtool_stats(struct net_device *ndev,
1149 struct ravb_private *priv = netdev_priv(ndev);
1175 static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1184 static void ravb_get_ringparam(struct net_device *ndev,
1187 struct ravb_private *priv = netdev_priv(ndev);
1195 static int ravb_set_ringparam(struct net_device *ndev,
1198 struct ravb_private *priv = netdev_priv(ndev);
1209 if (netif_running(ndev)) {
1210 netif_device_detach(ndev);
1213 ravb_ptp_stop(ndev);
1215 error = ravb_stop_dma(ndev);
1217 netdev_err(ndev,
1221 synchronize_irq(ndev->irq);
1224 ravb_ring_free(ndev, RAVB_BE);
1225 ravb_ring_free(ndev, RAVB_NC);
1232 if (netif_running(ndev)) {
1233 error = ravb_dmac_init(ndev);
1235 netdev_err(ndev,
1241 ravb_emac_init(ndev);
1245 ravb_ptp_init(ndev, priv->pdev);
1247 netif_device_attach(ndev);
1253 static int ravb_get_ts_info(struct net_device *ndev,
1256 struct ravb_private *priv = netdev_priv(ndev);
1275 static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1277 struct ravb_private *priv = netdev_priv(ndev);
1283 static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1285 struct ravb_private *priv = netdev_priv(ndev);
1315 struct net_device *ndev, struct device *dev,
1321 name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
1324 error = request_irq(irq, handler, 0, name, ndev);
1326 netdev_err(ndev, "cannot request IRQ %s\n", name);
1332 static int ravb_open(struct net_device *ndev)
1334 struct ravb_private *priv = netdev_priv(ndev);
1343 error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
1344 ndev->name, ndev);
1346 netdev_err(ndev, "cannot request IRQ\n");
1350 error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
1354 error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
1359 ndev, dev, "ch0:rx_be");
1363 ndev, dev, "ch18:tx_be");
1367 ndev, dev, "ch1:rx_nc");
1371 ndev, dev, "ch19:tx_nc");
1377 error = ravb_dmac_init(ndev);
1380 ravb_emac_init(ndev);
1384 ravb_ptp_init(ndev, priv->pdev);
1387 error = ravb_phy_start(ndev);
1391 netif_tx_start_all_queues(ndev);
1398 ravb_ptp_stop(ndev);
1402 free_irq(priv->tx_irqs[RAVB_NC], ndev);
1404 free_irq(priv->rx_irqs[RAVB_NC], ndev);
1406 free_irq(priv->tx_irqs[RAVB_BE], ndev);
1408 free_irq(priv->rx_irqs[RAVB_BE], ndev);
1410 free_irq(priv->emac_irq, ndev);
1412 free_irq(ndev->irq, ndev);
1420 static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1422 struct ravb_private *priv = netdev_priv(ndev);
1424 netif_err(priv, tx_err, ndev,
1426 ravb_read(ndev, ISS));
1429 ndev->stats.tx_errors++;
1438 struct net_device *ndev = priv->ndev;
1447 netif_tx_stop_all_queues(ndev);
1451 ravb_ptp_stop(ndev);
1454 if (ravb_stop_dma(ndev)) {
1463 ravb_rcv_snd_enable(ndev);
1467 ravb_ring_free(ndev, RAVB_BE);
1468 ravb_ring_free(ndev, RAVB_NC);
1471 error = ravb_dmac_init(ndev);
1477 netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
1481 ravb_emac_init(ndev);
1486 ravb_ptp_init(ndev, priv->pdev);
1488 netif_tx_start_all_queues(ndev);
1495 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1497 struct ravb_private *priv = netdev_priv(ndev);
1511 netif_err(priv, tx_queued, ndev,
1513 netif_stop_subqueue(ndev, q);
1545 dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
1547 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1556 dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
1558 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1565 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
1567 if (dma_mapping_error(ndev->dev.parent, dma_addr))
1579 dma_unmap_single(ndev->dev.parent, dma_addr,
1605 ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
1610 !ravb_tx_free(ndev, q, true))
1611 netif_stop_subqueue(ndev, q);
1618 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
1626 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
1635 static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
1637 struct ravb_private *priv = netdev_priv(ndev);
1640 nstats = &ndev->stats;
1645 nstats->tx_dropped += ravb_read(ndev, TROCR);
1646 ravb_write(ndev, 0, TROCR); /* (write clear) */
1669 static void ravb_set_rx_mode(struct net_device *ndev)
1671 struct ravb_private *priv = netdev_priv(ndev);
1675 ravb_modify(ndev, ECMR, ECMR_PRM,
1676 ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
1681 static int ravb_close(struct net_device *ndev)
1683 struct device_node *np = ndev->dev.parent->of_node;
1684 struct ravb_private *priv = netdev_priv(ndev);
1687 netif_tx_stop_all_queues(ndev);
1690 ravb_write(ndev, 0, RIC0);
1691 ravb_write(ndev, 0, RIC2);
1692 ravb_write(ndev, 0, TIC);
1696 ravb_ptp_stop(ndev);
1699 if (ravb_stop_dma(ndev) < 0)
1700 netdev_err(ndev,
1711 if (ndev->phydev) {
1712 phy_stop(ndev->phydev);
1713 phy_disconnect(ndev->phydev);
1721 free_irq(priv->tx_irqs[RAVB_NC], ndev);
1722 free_irq(priv->rx_irqs[RAVB_NC], ndev);
1723 free_irq(priv->tx_irqs[RAVB_BE], ndev);
1724 free_irq(priv->rx_irqs[RAVB_BE], ndev);
1725 free_irq(priv->emac_irq, ndev);
1727 free_irq(ndev->irq, ndev);
1733 ravb_ring_free(ndev, RAVB_BE);
1734 ravb_ring_free(ndev, RAVB_NC);
1739 static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
1741 struct ravb_private *priv = netdev_priv(ndev);
1763 static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
1765 struct ravb_private *priv = netdev_priv(ndev);
1808 static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1810 struct phy_device *phydev = ndev->phydev;
1812 if (!netif_running(ndev))
1820 return ravb_hwtstamp_get(ndev, req);
1822 return ravb_hwtstamp_set(ndev, req);
1828 static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
1830 struct ravb_private *priv = netdev_priv(ndev);
1832 ndev->mtu = new_mtu;
1834 if (netif_running(ndev)) {
1836 ravb_emac_init(ndev);
1839 netdev_update_features(ndev);
1844 static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
1846 struct ravb_private *priv = netdev_priv(ndev);
1852 ravb_rcv_snd_disable(ndev);
1855 ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
1858 ravb_rcv_snd_enable(ndev);
1863 static int ravb_set_features(struct net_device *ndev,
1866 netdev_features_t changed = ndev->features ^ features;
1869 ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
1871 ndev->features = features;
1946 static int ravb_set_gti(struct net_device *ndev)
1948 struct ravb_private *priv = netdev_priv(ndev);
1949 struct device *dev = ndev->dev.parent;
1966 ravb_write(ndev, inc, GTI);
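
ravb_set_gti() (lines 1946-1966) programs the gPTP timer increment. Assuming the usual 32.20 fixed-point encoding, the GTI value is the clock period in nanoseconds scaled by 2^20, i.e. (10^9 << 20) / rate for a clock of 'rate' Hz, with rates whose increment falls outside the register's valid range rejected. A worked computation under that assumption:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rate = 133330000;                     /* example clock, Hz */
        uint64_t inc  = (1000000000ULL << 20) / rate;  /* period(ns) * 2^20 */

        /* 1e9 / 133.33e6 ~= 7.5 ns, so inc ~= 7.5 * 2^20 ~= 7.86e6 */
        printf("GTI = %llu (%.3f ns/tick)\n",
               (unsigned long long)inc, inc / (double)(1 << 20));
        return 0;
    }
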
1971 static void ravb_set_config_mode(struct net_device *ndev)
1973 struct ravb_private *priv = netdev_priv(ndev);
1976 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
1978 ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
1980 ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
1993 static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
1995 struct ravb_private *priv = netdev_priv(ndev);
2031 static void ravb_set_delay_mode(struct net_device *ndev)
2033 struct ravb_private *priv = netdev_priv(ndev);
2040 ravb_modify(ndev, APSR, APSR_DM, set);
2048 struct net_device *ndev;
2066 ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
2068 if (!ndev)
2071 ndev->features = NETIF_F_RXCSUM;
2072 ndev->hw_features = NETIF_F_RXCSUM;
2080 ndev->base_addr = res->start;
2092 ndev->irq = irq;
2094 SET_NETDEV_DEV(ndev, &pdev->dev);
2096 priv = netdev_priv(ndev);
2097 priv->ndev = ndev;
2153 ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
2154 ndev->min_mtu = ETH_MIN_MTU;
2160 ndev->netdev_ops = &ravb_netdev_ops;
2161 ndev->ethtool_ops = &ravb_ethtool_ops;
2164 ravb_set_config_mode(ndev);
2167 error = ravb_set_gti(ndev);
2172 ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
2175 ravb_parse_delay_mode(np, ndev);
2176 ravb_set_delay_mode(ndev);
2181 priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
2192 ravb_write(ndev, priv->desc_bat_dma, DBAT);
2199 ravb_ptp_init(ndev, pdev);
2205 ravb_read_mac_address(ndev, of_get_mac_address(np));
2206 if (!is_valid_ether_addr(ndev->dev_addr)) {
2209 eth_hw_addr_random(ndev);
2219 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
2220 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
2223 error = register_netdev(ndev);
2230 netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
2231 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2233 platform_set_drvdata(pdev, ndev);
2242 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
2247 ravb_ptp_stop(ndev);
2249 free_netdev(ndev);
2259 struct net_device *ndev = platform_get_drvdata(pdev);
2260 struct ravb_private *priv = netdev_priv(ndev);
2264 ravb_ptp_stop(ndev);
2267 ravb_write(ndev, CCC_OPC_RESET, CCC);
2268 unregister_netdev(ndev);
2272 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
2276 free_netdev(ndev);
2282 static int ravb_wol_setup(struct net_device *ndev)
2284 struct ravb_private *priv = netdev_priv(ndev);
2287 ravb_write(ndev, 0, RIC0);
2288 ravb_write(ndev, 0, RIC2);
2289 ravb_write(ndev, 0, TIC);
2295 ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
2298 ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
2303 static int ravb_wol_restore(struct net_device *ndev)
2305 struct ravb_private *priv = netdev_priv(ndev);
2312 ravb_modify(ndev, ECMR, ECMR_MPDE, 0);
2314 ret = ravb_close(ndev);
2323 struct net_device *ndev = dev_get_drvdata(dev);
2324 struct ravb_private *priv = netdev_priv(ndev);
2327 if (!netif_running(ndev))
2330 netif_device_detach(ndev);
2333 ret = ravb_wol_setup(ndev);
2335 ret = ravb_close(ndev);
2342 struct net_device *ndev = dev_get_drvdata(dev);
2343 struct ravb_private *priv = netdev_priv(ndev);
2348 ravb_write(ndev, CCC_OPC_RESET, CCC);
2356 ravb_set_config_mode(ndev);
2359 ret = ravb_set_gti(ndev);
2364 ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
2367 ravb_set_delay_mode(ndev);
2370 ravb_write(ndev, priv->desc_bat_dma, DBAT);
2372 if (netif_running(ndev)) {
2374 ret = ravb_wol_restore(ndev);
2378 ret = ravb_open(ndev);
2381 ravb_set_rx_mode(ndev);
2382 netif_device_attach(ndev);