Lines matching refs: ag (Atheros ag71xx Ethernet driver; each entry is prefixed with its line number in the source file)
403 static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
405 return ag->dcfg->type == type;
408 static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
410 iowrite32(value, ag->mac_base + reg);
412 (void)ioread32(ag->mac_base + reg);
415 static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
417 return ioread32(ag->mac_base + reg);
420 static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
424 r = ag->mac_base + reg;
430 static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
434 r = ag->mac_base + reg;
440 static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
442 ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
445 static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
447 ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
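The accessor fragments above (lines 403-447) show the driver's MMIO pattern: every write is followed by a dummy read-back so the posted write is flushed before the caller proceeds, and the interrupt mask helpers are thin wrappers over generic set-bits/clear-bits helpers. A sketch of the two bit helpers, with the read-modify-write bodies (which the match list elides) filled in as an assumption:

	static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
	{
		void __iomem *r;

		r = ag->mac_base + reg;
		/* read-modify-write OR, then flush the posted write */
		iowrite32(ioread32(r) | mask, r);
		(void)ioread32(r);
	}

	static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
	{
		void __iomem *r;

		r = ag->mac_base + reg;
		/* same pattern with AND-NOT to clear bits */
		iowrite32(ioread32(r) & ~mask, r);
		(void)ioread32(r);
	}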
453 struct ag71xx *ag = netdev_priv(ndev);
456 strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
463 struct ag71xx *ag = netdev_priv(ndev);
465 return phylink_ethtool_ksettings_get(ag->phylink, kset);
471 struct ag71xx *ag = netdev_priv(ndev);
473 return phylink_ethtool_ksettings_set(ag->phylink, kset);
478 struct ag71xx *ag = netdev_priv(ndev);
480 return phylink_ethtool_nway_reset(ag->phylink);
486 struct ag71xx *ag = netdev_priv(ndev);
488 phylink_ethtool_get_pauseparam(ag->phylink, pause);
494 struct ag71xx *ag = netdev_priv(ndev);
496 return phylink_ethtool_set_pauseparam(ag->phylink, pause);
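Every ethtool operation in this block follows the same two-line shape: recover the private struct from the net_device and forward to the phylink equivalent. One representative wrapper, reconstructed from lines 461-465 (the function signature is inferred from the phylink call):

	static int ag71xx_get_link_ksettings(struct net_device *ndev,
					     struct ethtool_link_ksettings *kset)
	{
		struct ag71xx *ag = netdev_priv(ndev);

		/* phylink owns all link state; the MAC driver just forwards */
		return phylink_ethtool_ksettings_get(ag->phylink, kset);
	}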
519 struct ag71xx *ag = netdev_priv(ndev);
523 *data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset)
554 static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
556 struct net_device *ndev = ag->ndev;
564 busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
571 netif_err(ag, link, ndev, "MDIO operation timed out\n");
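ag71xx_mdio_wait_busy() (lines 554-571) is a bounded busy-wait on the MII indicator register. A reconstruction, assuming a fixed retry count and microsecond delay (the AG71XX_MDIO_RETRY and AG71XX_MDIO_DELAY names are inferred, not visible in the match list):

	static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
	{
		struct net_device *ndev = ag->ndev;
		int i;

		for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
			u32 busy;

			udelay(AG71XX_MDIO_DELAY);

			/* MII_IND reads non-zero while an MDIO cycle is in flight */
			busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
			if (!busy)
				return 0;

			udelay(AG71XX_MDIO_DELAY);
		}

		netif_err(ag, link, ndev, "MDIO operation timed out\n");

		return -ETIMEDOUT;
	}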
578 struct ag71xx *ag = bus->priv;
581 err = ag71xx_mdio_wait_busy(ag);
585 ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
588 ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);
590 err = ag71xx_mdio_wait_busy(ag);
594 val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
596 ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);
598 netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
607 struct ag71xx *ag = bus->priv;
609 netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
612 ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
614 ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);
616 return ag71xx_mdio_wait_busy(ag);
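The read path at lines 578-598 is a classic command/status MDIO cycle: wait for idle, latch the PHY and register address, issue MII_CMD_READ, wait again, then fetch the result and cancel the read. A sketch of that sequence (the MII_ADDR_SHIFT address packing is an assumption):

	static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
	{
		struct ag71xx *ag = bus->priv;
		int err, val;

		err = ag71xx_mdio_wait_busy(ag);
		if (err)
			return err;

		ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
			  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
		ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);

		err = ag71xx_mdio_wait_busy(ag);
		if (err)
			return err;

		val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
		/* drop the READ command so the next cycle starts clean */
		ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);

		netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
			  addr, reg, val);

		return val & 0xffff;
	}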
631 static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
637 ref_clock = clk_get_rate(ag->clk_mdio);
641 if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
644 } else if (ag71xx_is(ag, AR7240)) {
667 struct ag71xx *ag = bus->priv;
671 err = ag71xx_mdio_get_divider(ag, &t);
675 ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
678 ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
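The MDIO bus reset at lines 667-678 programs the clock divider with the MII_CFG_RESET bit held, then releases it. A sketch with assumed settle delays between the two writes:

	static int ag71xx_mdio_reset(struct mii_bus *bus)
	{
		struct ag71xx *ag = bus->priv;
		u32 t;
		int err;

		err = ag71xx_mdio_get_divider(ag, &t);
		if (err)
			return err;

		/* write the divider with RESET asserted, wait, then deassert */
		ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
		usleep_range(100, 200);

		ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
		usleep_range(100, 200);

		return 0;
	}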
684 static int ag71xx_mdio_probe(struct ag71xx *ag)
686 struct device *dev = &ag->pdev->dev;
687 struct net_device *ndev = ag->ndev;
693 ag->mii_bus = NULL;
695 ag->clk_mdio = devm_clk_get(dev, "mdio");
696 if (IS_ERR(ag->clk_mdio)) {
697 netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
698 return PTR_ERR(ag->clk_mdio);
701 err = clk_prepare_enable(ag->clk_mdio);
703 netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
713 ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
714 if (IS_ERR(ag->mdio_reset)) {
715 netif_err(ag, probe, ndev, "Failed to get mdio reset.\n");
716 err = PTR_ERR(ag->mdio_reset);
724 mii_bus->priv = ag;
726 snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
728 if (!IS_ERR(ag->mdio_reset)) {
729 reset_control_assert(ag->mdio_reset);
731 reset_control_deassert(ag->mdio_reset);
741 ag->mii_bus = mii_bus;
746 clk_disable_unprepare(ag->clk_mdio);
750 static void ag71xx_mdio_remove(struct ag71xx *ag)
752 if (ag->mii_bus)
753 mdiobus_unregister(ag->mii_bus);
754 clk_disable_unprepare(ag->clk_mdio);
757 static void ag71xx_hw_stop(struct ag71xx *ag)
760 ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
761 ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
762 ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
765 static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
770 timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start);
774 if (!netif_carrier_ok(ag->ndev))
777 rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
781 tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
782 rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
790 static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
792 struct ag71xx_ring *ring = &ag->tx_ring;
794 struct net_device *ndev = ag->ndev;
801 netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");
813 if (ag->dcfg->tx_hang_workaround &&
814 ag71xx_check_dma_stuck(ag)) {
815 schedule_delayed_work(&ag->restart_work,
838 ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
843 netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);
848 ag->ndev->stats.tx_bytes += bytes_compl;
849 ag->ndev->stats.tx_packets += sent;
851 netdev_completed_queue(ag->ndev, sent, bytes_compl);
853 netif_wake_queue(ag->ndev);
856 cancel_delayed_work(&ag->restart_work);
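The TX completion path (lines 790-856) drains finished descriptors, reports the totals to byte-queue limits via netdev_completed_queue(), and wakes the queue. The hang workaround at lines 813-815 is the interesting part: on affected SoCs the TX engine can wedge with descriptors still marked busy, so the driver schedules a full MAC restart rather than spinning. A sketch of that branch inside the completion loop (the ag71xx_desc_empty() check and the HZ / 2 delay are assumptions):

	if (!flush && !ag71xx_desc_empty(desc)) {
		/* descriptor still owned by hardware */
		if (ag->dcfg->tx_hang_workaround &&
		    ag71xx_check_dma_stuck(ag)) {
			/* TX DMA looks wedged: restart the MAC instead
			 * of waiting for a completion that never comes */
			schedule_delayed_work(&ag->restart_work, HZ / 2);
		}
		break;
	}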
861 static void ag71xx_dma_wait_stop(struct ag71xx *ag)
863 struct net_device *ndev = ag->ndev;
871 rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
872 tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
877 netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
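ag71xx_dma_wait_stop() polls the RXE/TXE enable bits until both engines report stopped, with a bounded number of millisecond sleeps (the retry count and mdelay() interval below are assumptions):

	static void ag71xx_dma_wait_stop(struct ag71xx *ag)
	{
		struct net_device *ndev = ag->ndev;
		int i;

		for (i = 0; i < 10; i++) {
			u32 rx, tx;

			mdelay(1);

			/* both enable bits clear means both engines idle */
			rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
			tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
			if (!rx && !tx)
				return;
		}

		netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
	}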
880 static void ag71xx_dma_reset(struct ag71xx *ag)
882 struct net_device *ndev = ag->ndev;
887 ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
888 ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
893 ag71xx_dma_wait_stop(ag);
896 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
897 ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);
901 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
902 ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
906 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
907 ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);
909 val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
911 netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
914 val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
920 netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
924 static void ag71xx_hw_setup(struct ag71xx *ag)
929 ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);
931 ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
935 ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);
938 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
939 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
940 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
941 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
942 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
950 static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
957 ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);
960 ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
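The MAC address is split across two registers at lines 950-960; only the 32-bit packing is elided by the match list. A sketch of the likely byte order (the exact octet placement is an assumption inferred from the two-register split):

	static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
	{
		u32 t;

		/* MAC_ADDR1 carries the low four octets ... */
		t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
		  | (((u32)mac[3]) << 8) | ((u32)mac[2]);
		ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

		/* ... MAC_ADDR2 the remaining two, in the high halfword */
		t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
		ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
	}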
963 static void ag71xx_fast_reset(struct ag71xx *ag)
965 struct net_device *dev = ag->ndev;
969 ag71xx_hw_stop(ag);
971 mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
972 rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
974 ag71xx_tx_packets(ag, true, 0);
976 reset_control_assert(ag->mac_reset);
978 reset_control_deassert(ag->mac_reset);
981 ag71xx_dma_reset(ag);
982 ag71xx_hw_setup(ag);
983 ag->tx_ring.curr = 0;
984 ag->tx_ring.dirty = 0;
985 netdev_reset_queue(ag->ndev);
988 ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
989 ag71xx_max_frame_len(ag->ndev->mtu));
991 ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
992 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
993 ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);
995 ag71xx_hw_set_macaddr(ag, dev->dev_addr);
998 static void ag71xx_hw_start(struct ag71xx *ag)
1001 ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
1004 ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
1006 netif_wake_queue(ag->ndev);
1012 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1017 if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
1018 ag71xx_fast_reset(ag);
1020 if (ag->tx_ring.desc_split) {
1021 ag->fifodata[2] &= 0xffff;
1022 ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
1025 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
1031 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1033 ag71xx_hw_stop(ag);
1042 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1047 cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
1051 ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
1054 fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
1073 ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
1074 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
1075 ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
1077 cfg1 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG1);
1084 ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, cfg1);
1086 ag71xx_hw_start(ag);
1095 static int ag71xx_phylink_setup(struct ag71xx *ag)
1099 ag->phylink_config.dev = &ag->ndev->dev;
1100 ag->phylink_config.type = PHYLINK_NETDEV;
1101 ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
1104 if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
1105 ag71xx_is(ag, AR9340) ||
1106 ag71xx_is(ag, QCA9530) ||
1107 (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
1109 ag->phylink_config.supported_interfaces);
1111 if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
1112 (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
1113 (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
1115 ag->phylink_config.supported_interfaces);
1117 if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
1119 ag->phylink_config.supported_interfaces);
1121 if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
1123 ag->phylink_config.supported_interfaces);
1125 if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
1126 (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
1128 ag->phylink_config.supported_interfaces);
1130 phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
1131 ag->phy_if_mode, &ag71xx_phylink_mac_ops);
1135 ag->phylink = phylink;
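Lines 1104-1128 encode a per-SoC, per-MAC capability table: each condition gates a __set_bit() on phylink's supported_interfaces mask, though the bit-setting halves fall outside the match. One branch reconstructed as an example (PHY_INTERFACE_MODE_MII is an assumption for which mode this particular branch enables):

	if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
	    ag71xx_is(ag, AR9340) ||
	    ag71xx_is(ag, QCA9530) ||
	    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
		__set_bit(PHY_INTERFACE_MODE_MII,
			  ag->phylink_config.supported_interfaces);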
1139 static void ag71xx_ring_tx_clean(struct ag71xx *ag)
1141 struct ag71xx_ring *ring = &ag->tx_ring;
1144 struct net_device *ndev = ag->ndev;
1171 static void ag71xx_ring_tx_init(struct ag71xx *ag)
1173 struct ag71xx_ring *ring = &ag->tx_ring;
1193 netdev_reset_queue(ag->ndev);
1196 static void ag71xx_ring_rx_clean(struct ag71xx *ag)
1198 struct ag71xx_ring *ring = &ag->rx_ring;
1207 dma_unmap_single(&ag->pdev->dev,
1209 ag->rx_buf_size, DMA_FROM_DEVICE);
1214 static int ag71xx_buffer_size(struct ag71xx *ag)
1216 return ag->rx_buf_size +
1220 static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
1224 struct ag71xx_ring *ring = &ag->rx_ring;
1230 data = alloc(ag71xx_buffer_size(ag));
1235 buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
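ag71xx_fill_rx_buf() takes an allocator callback so the same helper can serve both the initial ring fill (line 1264) and the NAPI-time refill (line 1298), which use different allocation strategies. A sketch, assuming a hypothetical ag71xx_ring_desc() descriptor-lookup helper:

	static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
				       int offset,
				       void *(*alloc)(unsigned int size))
	{
		struct ag71xx_ring *ring = &ag->rx_ring;
		struct ag71xx_desc *desc;
		void *data;

		desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);

		data = alloc(ag71xx_buffer_size(ag));
		if (!data)
			return false;

		buf->rx.rx_buf = data;
		buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data,
						  ag->rx_buf_size, DMA_FROM_DEVICE);
		/* hardware sees the DMA address plus the headroom offset */
		desc->data = (u32)buf->rx.dma_addr + offset;
		return true;
	}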
1241 static int ag71xx_ring_rx_init(struct ag71xx *ag)
1243 struct ag71xx_ring *ring = &ag->rx_ring;
1244 struct net_device *ndev = ag->ndev;
1257 netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
1264 if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
1282 static int ag71xx_ring_rx_refill(struct ag71xx *ag)
1284 struct ag71xx_ring *ring = &ag->rx_ring;
1286 int offset = ag->rx_buf_offset;
1298 !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
1309 netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
1315 static int ag71xx_rings_init(struct ag71xx *ag)
1317 struct ag71xx_ring *tx = &ag->tx_ring;
1318 struct ag71xx_ring *rx = &ag->rx_ring;
1328 tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
1341 ag71xx_ring_tx_init(ag);
1342 return ag71xx_ring_rx_init(ag);
1345 static void ag71xx_rings_free(struct ag71xx *ag)
1347 struct ag71xx_ring *tx = &ag->tx_ring;
1348 struct ag71xx_ring *rx = &ag->rx_ring;
1354 dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
1365 static void ag71xx_rings_cleanup(struct ag71xx *ag)
1367 ag71xx_ring_rx_clean(ag);
1368 ag71xx_ring_tx_clean(ag);
1369 ag71xx_rings_free(ag);
1371 netdev_reset_queue(ag->ndev);
1374 static void ag71xx_hw_init(struct ag71xx *ag)
1376 ag71xx_hw_stop(ag);
1378 ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
1381 reset_control_assert(ag->mac_reset);
1383 reset_control_deassert(ag->mac_reset);
1386 ag71xx_hw_setup(ag);
1388 ag71xx_dma_reset(ag);
1391 static int ag71xx_hw_enable(struct ag71xx *ag)
1395 ret = ag71xx_rings_init(ag);
1399 napi_enable(&ag->napi);
1400 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
1401 ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
1402 netif_start_queue(ag->ndev);
1407 static void ag71xx_hw_disable(struct ag71xx *ag)
1409 netif_stop_queue(ag->ndev);
1411 ag71xx_hw_stop(ag);
1412 ag71xx_dma_reset(ag);
1414 napi_disable(&ag->napi);
1415 del_timer_sync(&ag->oom_timer);
1417 ag71xx_rings_cleanup(ag);
1422 struct ag71xx *ag = netdev_priv(ndev);
1426 ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
1428 netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
1434 ag->rx_buf_size =
1438 ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
1439 ag71xx_hw_set_macaddr(ag, ndev->dev_addr);
1441 ret = ag71xx_hw_enable(ag);
1445 phylink_start(ag->phylink);
1450 ag71xx_rings_cleanup(ag);
1451 phylink_disconnect_phy(ag->phylink);
1457 struct ag71xx *ag = netdev_priv(ndev);
1459 phylink_stop(ag->phylink);
1460 phylink_disconnect_phy(ag->phylink);
1461 ag71xx_hw_disable(ag);
1519 struct ag71xx *ag = netdev_priv(ndev);
1524 ring = &ag->tx_ring;
1529 netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
1533 dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
1541 skb->len & ag->dcfg->desc_pktlen_mask);
1564 netif_dbg(ag, tx_err, ndev, "tx queue full\n");
1568 netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");
1571 ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);
1576 dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);
1587 struct ag71xx *ag = from_timer(ag, t, oom_timer);
1589 napi_schedule(&ag->napi);
1594 struct ag71xx *ag = netdev_priv(ndev);
1596 netif_err(ag, tx_err, ndev, "tx timeout\n");
1598 schedule_delayed_work(&ag->restart_work, 1);
1603 struct ag71xx *ag = container_of(work, struct ag71xx,
1607 ag71xx_hw_disable(ag);
1608 ag71xx_hw_enable(ag);
1610 phylink_stop(ag->phylink);
1611 phylink_start(ag->phylink);
1616 static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
1618 struct net_device *ndev = ag->ndev;
1625 ring = &ag->rx_ring;
1626 pktlen_mask = ag->dcfg->desc_pktlen_mask;
1627 offset = ag->rx_buf_offset;
1631 netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
1650 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
1655 dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
1656 ag->rx_buf_size, DMA_FROM_DEVICE);
1661 skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
1686 ag71xx_ring_rx_refill(ag);
1692 netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
1700 struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
1701 struct ag71xx_ring *rx_ring = &ag->rx_ring;
1703 struct net_device *ndev = ag->ndev;
1707 tx_done = ag71xx_tx_packets(ag, false, limit);
1709 netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
1710 rx_done = ag71xx_rx_packets(ag, limit);
1715 status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
1717 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
1721 ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
1728 status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
1732 netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d, limit=%d\n",
1738 ag71xx_int_enable(ag, AG71XX_INT_POLL);
1743 netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
1748 netif_err(ag, rx_err, ndev, "out of memory\n");
1750 mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
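The poll function follows the standard NAPI contract visible in lines 1700-1750: process up to the budget, and only when the budget is not exhausted (and the status registers show no pending work) complete NAPI and unmask the interrupt. The exit paths, sketched (napi_complete_done() here is an assumption; the driver may use plain napi_complete()):

	if (rx_done < limit) {
		/* no pending RX/TX work left: leave polling mode and
		 * let the interrupt handler (lines 1779-1781) rearm NAPI */
		napi_complete_done(napi, rx_done);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		return rx_done;
	}

	/* budget exhausted: stay in polling mode, the core will
	 * call ag71xx_poll() again without a new interrupt */
	return limit;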
1758 struct ag71xx *ag;
1761 ag = netdev_priv(ndev);
1762 status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
1769 ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
1770 netif_err(ag, intr, ndev, "TX BUS error\n");
1773 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
1774 netif_err(ag, intr, ndev, "RX BUS error\n");
1779 ag71xx_int_disable(ag, AG71XX_INT_POLL);
1780 netif_dbg(ag, intr, ndev, "enable polling mode\n");
1781 napi_schedule(&ag->napi);
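The interrupt handler (lines 1758-1781) is the other half of the NAPI handshake: it acknowledges fatal bus errors directly, but for normal RX/TX work it masks the poll interrupts and defers to softirq context. A sketch of the handoff (the dev_id-to-net_device recovery is an assumption):

	static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
	{
		struct net_device *ndev = dev_id;
		struct ag71xx *ag = netdev_priv(ndev);
		u32 status;

		status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
		if (!status)
			return IRQ_NONE;

		if (likely(status & AG71XX_INT_POLL)) {
			/* mask RX/TX interrupts until ag71xx_poll() runs dry */
			ag71xx_int_disable(ag, AG71XX_INT_POLL);
			napi_schedule(&ag->napi);
		}

		return IRQ_HANDLED;
	}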
1789 struct ag71xx *ag = netdev_priv(ndev);
1792 ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
1820 struct ag71xx *ag;
1825 ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
1837 ag = netdev_priv(ndev);
1838 ag->mac_idx = -1;
1841 ag->mac_idx = i;
1844 if (ag->mac_idx < 0) {
1845 netif_err(ag, probe, ndev, "unknown mac idx\n");
1849 ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
1850 if (IS_ERR(ag->clk_eth)) {
1851 netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
1852 return PTR_ERR(ag->clk_eth);
1857 ag->pdev = pdev;
1858 ag->ndev = ndev;
1859 ag->dcfg = dcfg;
1860 ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
1861 memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));
1863 ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
1864 if (IS_ERR(ag->mac_reset)) {
1865 netif_err(ag, probe, ndev, "missing mac reset\n");
1866 return PTR_ERR(ag->mac_reset);
1869 ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1870 if (!ag->mac_base)
1877 netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
1885 INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
1886 timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);
1889 ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
1894 ag->rx_buf_offset = NET_SKB_PAD;
1895 if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
1896 ag->rx_buf_offset += NET_IP_ALIGN;
1898 if (ag71xx_is(ag, AR7100)) {
1899 ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
1902 ag->tx_ring.order = ag71xx_ring_size_order(tx_size);
1904 ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
1906 &ag->stop_desc_dma, GFP_KERNEL);
1907 if (!ag->stop_desc)
1910 ag->stop_desc->data = 0;
1911 ag->stop_desc->ctrl = 0;
1912 ag->stop_desc->next = (u32)ag->stop_desc_dma;
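Note on lines 1904-1912: the three assignments build a one-entry descriptor ring that loops back to itself (next holds the descriptor's own DMA address). This is the parking target used by ag71xx_dma_reset() at lines 896-897: pointing TX_DESC and RX_DESC at a self-linked dummy descriptor gives a possibly-still-running DMA engine somewhere harmless to spin instead of chasing freed ring memory.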
1916 netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
1920 err = of_get_phy_mode(np, &ag->phy_if_mode);
1922 netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
1926 netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
1929 err = clk_prepare_enable(ag->clk_eth);
1931 netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
1935 ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
1937 ag71xx_hw_init(ag);
1939 err = ag71xx_mdio_probe(ag);
1945 err = ag71xx_phylink_setup(ag);
1947 netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
1953 netif_err(ag, probe, ndev, "unable to register net device\n");
1958 netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode: %s\n",
1959 (unsigned long)ag->mac_base, ndev->irq,
1960 phy_modes(ag->phy_if_mode));
1965 ag71xx_mdio_remove(ag);
1967 clk_disable_unprepare(ag->clk_eth);
1974 struct ag71xx *ag;
1979 ag = netdev_priv(ndev);
1981 ag71xx_mdio_remove(ag);
1982 clk_disable_unprepare(ag->clk_eth);