Lines Matching defs:ag
401 static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
403 return ag->dcfg->type == type;
406 static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
408 iowrite32(value, ag->mac_base + reg);
410 (void)ioread32(ag->mac_base + reg);
413 static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
415 return ioread32(ag->mac_base + reg);
418 static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
422 r = ag->mac_base + reg;
428 static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
432 r = ag->mac_base + reg;
438 static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
440 ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
443 static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
445 ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
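The matches above come from the driver's low-level MMIO helpers. A plausible reconstruction of the full set, assuming standard iowrite32()/ioread32() accessors; the read-back after each write (visible at line 410 above) flushes the posted write:

static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
{
	iowrite32(value, ag->mac_base + reg);
	(void)ioread32(ag->mac_base + reg);	/* flush the posted write */
}

static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
{
	return ioread32(ag->mac_base + reg);
}

static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r = ag->mac_base + reg;

	iowrite32(ioread32(r) | mask, r);	/* set bits in mask */
	(void)ioread32(r);
}

static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r = ag->mac_base + reg;

	iowrite32(ioread32(r) & ~mask, r);	/* clear bits in mask */
	(void)ioread32(r);
}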
451 struct ag71xx *ag = netdev_priv(ndev);
454 strlcpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
461 struct ag71xx *ag = netdev_priv(ndev);
463 return phylink_ethtool_ksettings_get(ag->phylink, kset);
469 struct ag71xx *ag = netdev_priv(ndev);
471 return phylink_ethtool_ksettings_set(ag->phylink, kset);
476 struct ag71xx *ag = netdev_priv(ndev);
478 return phylink_ethtool_nway_reset(ag->phylink);
484 struct ag71xx *ag = netdev_priv(ndev);
486 phylink_ethtool_get_pauseparam(ag->phylink, pause);
492 struct ag71xx *ag = netdev_priv(ndev);
494 return phylink_ethtool_set_pauseparam(ag->phylink, pause);
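Every ethtool callback matched above follows the same delegation pattern: recover the private struct with netdev_priv() and hand the request to phylink. One callback is sketched in full; the others differ only in the phylink helper they call (the function name here is illustrative):

static int ag71xx_get_link_ksettings(struct net_device *ndev,
				     struct ethtool_link_ksettings *kset)
{
	struct ag71xx *ag = netdev_priv(ndev);

	/* phylink owns link state; the MAC driver never touches
	 * PHY registers directly here
	 */
	return phylink_ethtool_ksettings_get(ag->phylink, kset);
}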
512 struct ag71xx *ag = netdev_priv(ndev);
516 *data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset)
541 static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
543 struct net_device *ndev = ag->ndev;
551 busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
558 netif_err(ag, link, ndev, "MDIO operation timed out\n");
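ag71xx_mdio_wait_busy() polls AG71XX_REG_MII_IND until the busy flag clears. A sketch of the loop, with AG71XX_MDIO_RETRY and AG71XX_MDIO_DELAY as assumed retry/delay constants:

static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
		u32 busy;

		udelay(AG71XX_MDIO_DELAY);

		busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
		if (!busy)
			return 0;
	}

	netif_err(ag, link, ndev, "MDIO operation timed out\n");
	return -ETIMEDOUT;
}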
565 struct ag71xx *ag = bus->priv;
568 err = ag71xx_mdio_wait_busy(ag);
572 ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
575 ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);
577 err = ag71xx_mdio_wait_busy(ag);
581 val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
583 ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);
585 netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
594 struct ag71xx *ag = bus->priv;
596 netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
599 ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
601 ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);
603 return ag71xx_mdio_wait_busy(ag);
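The MDIO read matched above is a five-step sequence: wait for idle, latch the PHY/register address, switch the engine into read mode, wait again, then fetch the result and drop back out of read mode. A condensed sketch; the MII_ADDR_SHIFT packing is an assumption about the register layout:

static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
{
	struct ag71xx *ag = bus->priv;
	int err, val;

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);	/* enter read mode */

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);	/* leave read mode */

	return val;
}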
618 static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
624 ref_clock = clk_get_rate(ag->clk_mdio);
628 if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
631 } else if (ag71xx_is(ag, AR7240)) {
654 struct ag71xx *ag = bus->priv;
658 err = ag71xx_mdio_get_divider(ag, &t);
662 ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
665 ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
671 static int ag71xx_mdio_probe(struct ag71xx *ag)
673 struct device *dev = &ag->pdev->dev;
674 struct net_device *ndev = ag->ndev;
680 ag->mii_bus = NULL;
682 ag->clk_mdio = devm_clk_get(dev, "mdio");
683 if (IS_ERR(ag->clk_mdio)) {
684 netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
685 return PTR_ERR(ag->clk_mdio);
688 err = clk_prepare_enable(ag->clk_mdio);
690 netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
700 ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
701 if (IS_ERR(ag->mdio_reset)) {
702 netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
703 err = PTR_ERR(ag->mdio_reset);
711 mii_bus->priv = ag;
713 snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
715 if (!IS_ERR(ag->mdio_reset)) {
716 reset_control_assert(ag->mdio_reset);
718 reset_control_deassert(ag->mdio_reset);
728 ag->mii_bus = mii_bus;
733 clk_disable_unprepare(ag->clk_mdio);
737 static void ag71xx_mdio_remove(struct ag71xx *ag)
739 if (ag->mii_bus)
740 mdiobus_unregister(ag->mii_bus);
741 clk_disable_unprepare(ag->clk_mdio);
744 static void ag71xx_hw_stop(struct ag71xx *ag)
747 ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
748 ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
749 ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
752 static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
757 timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
761 if (!netif_carrier_ok(ag->ndev))
764 rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
768 tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
769 rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
777 static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
779 struct ag71xx_ring *ring = &ag->tx_ring;
781 struct net_device *ndev = ag->ndev;
788 netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");
800 if (ag->dcfg->tx_hang_workaround &&
801 ag71xx_check_dma_stuck(ag)) {
802 schedule_delayed_work(&ag->restart_work,
825 ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
830 netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);
835 ag->ndev->stats.tx_bytes += bytes_compl;
836 ag->ndev->stats.tx_packets += sent;
838 netdev_completed_queue(ag->ndev, sent, bytes_compl);
840 netif_wake_queue(ag->ndev);
843 cancel_delayed_work(&ag->restart_work);
848 static void ag71xx_dma_wait_stop(struct ag71xx *ag)
850 struct net_device *ndev = ag->ndev;
858 rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
859 tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
864 netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
867 static void ag71xx_dma_reset(struct ag71xx *ag)
869 struct net_device *ndev = ag->ndev;
874 ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
875 ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
880 ag71xx_dma_wait_stop(ag);
883 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
884 ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);
888 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
889 ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
893 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
894 ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);
896 val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
898 netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
901 val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
907 netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
911 static void ag71xx_hw_setup(struct ag71xx *ag)
916 ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);
918 ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
922 ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);
925 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
926 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
927 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
928 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
929 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
937 static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
944 ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);
947 ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
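The two writes above split the six MAC address bytes across AG71XX_REG_MAC_ADDR1 and AG71XX_REG_MAC_ADDR2. A sketch of a byte packing consistent with that register pair; the exact byte order is an assumption:

static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
{
	u32 t;

	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
	  | (((u32)mac[3]) << 8) | ((u32)mac[2]);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}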
950 static void ag71xx_fast_reset(struct ag71xx *ag)
952 struct net_device *dev = ag->ndev;
956 ag71xx_hw_stop(ag);
958 mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
959 rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
961 ag71xx_tx_packets(ag, true);
963 reset_control_assert(ag->mac_reset);
965 reset_control_deassert(ag->mac_reset);
968 ag71xx_dma_reset(ag);
969 ag71xx_hw_setup(ag);
970 ag->tx_ring.curr = 0;
971 ag->tx_ring.dirty = 0;
972 netdev_reset_queue(ag->ndev);
975 ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
976 ag71xx_max_frame_len(ag->ndev->mtu));
978 ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
979 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
980 ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);
982 ag71xx_hw_set_macaddr(ag, dev->dev_addr);
985 static void ag71xx_hw_start(struct ag71xx *ag)
988 ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
991 ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
993 netif_wake_queue(ag->ndev);
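ag71xx_hw_start() is short enough to reconstruct almost entirely from its matches: enable the RX engine, unmask the initial interrupt set, and let the stack queue TX packets again.

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->ndev);
}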
999 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1004 if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
1005 ag71xx_fast_reset(ag);
1007 if (ag->tx_ring.desc_split) {
1008 ag->fifodata[2] &= 0xffff;
1009 ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
1012 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
1019 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1026 if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
1027 ag71xx_is(ag, AR9340) ||
1028 ag71xx_is(ag, QCA9530) ||
1029 (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
1033 if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
1034 (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
1035 (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
1039 if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
1043 if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
1047 if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
1048 (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
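The cluster above encodes which PHY interface modes each SoC/MAC pairing supports; it most likely lives in the phylink validate callback as a switch over state->interface. A condensed sketch of that shape (the case labels and the unsupported label are assumptions, and not all modes are shown):

switch (state->interface) {
case PHY_INTERFACE_MODE_MII:
	if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
	    ag71xx_is(ag, AR9340) ||
	    ag71xx_is(ag, QCA9530) ||
	    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
		break;
	goto unsupported;
case PHY_INTERFACE_MODE_SGMII:
	if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
		break;
	goto unsupported;
case PHY_INTERFACE_MODE_RMII:
	if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
		break;
	goto unsupported;
default:
	goto unsupported;
}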
1097 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1099 ag71xx_hw_stop(ag);
1108 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1113 cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
1117 ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
1120 fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
1139 ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
1140 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
1141 ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
1143 cfg1 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG1);
1150 ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, cfg1);
1152 ag71xx_hw_start(ag);
1164 static int ag71xx_phylink_setup(struct ag71xx *ag)
1168 ag->phylink_config.dev = &ag->ndev->dev;
1169 ag->phylink_config.type = PHYLINK_NETDEV;
1171 phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
1172 ag->phy_if_mode, &ag71xx_phylink_mac_ops);
1176 ag->phylink = phylink;
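ag71xx_phylink_setup() reconstructs almost verbatim from its matches; only the IS_ERR() check between phylink_create() and the assignment is filled in here as an assumption:

static int ag71xx_phylink_setup(struct ag71xx *ag)
{
	struct phylink *phylink;

	ag->phylink_config.dev = &ag->ndev->dev;
	ag->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
				 ag->phy_if_mode, &ag71xx_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	ag->phylink = phylink;
	return 0;
}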
1180 static void ag71xx_ring_tx_clean(struct ag71xx *ag)
1182 struct ag71xx_ring *ring = &ag->tx_ring;
1185 struct net_device *ndev = ag->ndev;
1212 static void ag71xx_ring_tx_init(struct ag71xx *ag)
1214 struct ag71xx_ring *ring = &ag->tx_ring;
1234 netdev_reset_queue(ag->ndev);
1237 static void ag71xx_ring_rx_clean(struct ag71xx *ag)
1239 struct ag71xx_ring *ring = &ag->rx_ring;
1248 dma_unmap_single(&ag->pdev->dev,
1250 ag->rx_buf_size, DMA_FROM_DEVICE);
1255 static int ag71xx_buffer_size(struct ag71xx *ag)
1257 return ag->rx_buf_size +
1261 static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
1265 struct ag71xx_ring *ring = &ag->rx_ring;
1271 data = alloc(ag71xx_buffer_size(ag));
1276 buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
1282 static int ag71xx_ring_rx_init(struct ag71xx *ag)
1284 struct ag71xx_ring *ring = &ag->rx_ring;
1285 struct net_device *ndev = ag->ndev;
1298 netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
1305 if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
1323 static int ag71xx_ring_rx_refill(struct ag71xx *ag)
1325 struct ag71xx_ring *ring = &ag->rx_ring;
1327 int offset = ag->rx_buf_offset;
1339 !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
1350 netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
1356 static int ag71xx_rings_init(struct ag71xx *ag)
1358 struct ag71xx_ring *tx = &ag->tx_ring;
1359 struct ag71xx_ring *rx = &ag->rx_ring;
1369 tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
1382 ag71xx_ring_tx_init(ag);
1383 return ag71xx_ring_rx_init(ag);
1386 static void ag71xx_rings_free(struct ag71xx *ag)
1388 struct ag71xx_ring *tx = &ag->tx_ring;
1389 struct ag71xx_ring *rx = &ag->rx_ring;
1395 dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
1406 static void ag71xx_rings_cleanup(struct ag71xx *ag)
1408 ag71xx_ring_rx_clean(ag);
1409 ag71xx_ring_tx_clean(ag);
1410 ag71xx_rings_free(ag);
1412 netdev_reset_queue(ag->ndev);
1415 static void ag71xx_hw_init(struct ag71xx *ag)
1417 ag71xx_hw_stop(ag);
1419 ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
1422 reset_control_assert(ag->mac_reset);
1424 reset_control_deassert(ag->mac_reset);
1427 ag71xx_hw_setup(ag);
1429 ag71xx_dma_reset(ag);
1432 static int ag71xx_hw_enable(struct ag71xx *ag)
1436 ret = ag71xx_rings_init(ag);
1440 napi_enable(&ag->napi);
1441 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
1442 ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
1443 netif_start_queue(ag->ndev);
1448 static void ag71xx_hw_disable(struct ag71xx *ag)
1450 netif_stop_queue(ag->ndev);
1452 ag71xx_hw_stop(ag);
1453 ag71xx_dma_reset(ag);
1455 napi_disable(&ag->napi);
1456 del_timer_sync(&ag->oom_timer);
1458 ag71xx_rings_cleanup(ag);
1463 struct ag71xx *ag = netdev_priv(ndev);
1467 ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
1469 netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",

1475 ag->rx_buf_size =
1479 ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
1480 ag71xx_hw_set_macaddr(ag, ndev->dev_addr);
1482 ret = ag71xx_hw_enable(ag);
1486 phylink_start(ag->phylink);
1491 ag71xx_rings_cleanup(ag);
1492 phylink_disconnect_phy(ag->phylink);
1498 struct ag71xx *ag = netdev_priv(ndev);
1500 phylink_stop(ag->phylink);
1501 phylink_disconnect_phy(ag->phylink);
1502 ag71xx_hw_disable(ag);
1560 struct ag71xx *ag = netdev_priv(ndev);
1565 ring = &ag->tx_ring;
1570 netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
1574 dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
1582 skb->len & ag->dcfg->desc_pktlen_mask);
1605 netif_dbg(ag, tx_err, ndev, "tx queue full\n");
1609 netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");
1612 ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);
1617 dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);
1628 struct ag71xx *ag = from_timer(ag, t, oom_timer);
1630 napi_schedule(&ag->napi);
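The out-of-memory timer gives RX refill a second chance: when buffer allocation fails in the poll loop, mod_timer() (line 1791 below) arms it, and the handler simply reschedules NAPI so the refill is retried from the poll path. from_timer() recovers the containing ag71xx from the timer_list pointer.

static void ag71xx_oom_timer_handler(struct timer_list *t)
{
	struct ag71xx *ag = from_timer(ag, t, oom_timer);

	/* retry the RX refill from the NAPI poll path */
	napi_schedule(&ag->napi);
}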
1635 struct ag71xx *ag = netdev_priv(ndev);
1637 netif_err(ag, tx_err, ndev, "tx timeout\n");
1639 schedule_delayed_work(&ag->restart_work, 1);
1644 struct ag71xx *ag = container_of(work, struct ag71xx,
1648 ag71xx_hw_disable(ag);
1649 ag71xx_hw_enable(ag);
1651 phylink_stop(ag->phylink);
1652 phylink_start(ag->phylink);
1657 static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
1659 struct net_device *ndev = ag->ndev;
1666 ring = &ag->rx_ring;
1667 pktlen_mask = ag->dcfg->desc_pktlen_mask;
1668 offset = ag->rx_buf_offset;
1672 netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
1691 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
1696 dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
1697 ag->rx_buf_size, DMA_FROM_DEVICE);
1702 skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
1727 ag71xx_ring_rx_refill(ag);
1733 netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
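A condensed sketch of the RX completion loop the matches above belong to: unmap the DMA buffer, hand it to the stack zero-copy via build_skb(), then refill the ring. Error paths and statistics are trimmed, and ag71xx_ring_desc(), ag71xx_desc_empty() and ring_mask are assumed ring helpers:

while (done < limit) {
	unsigned int i = ring->curr & ring_mask;
	struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
	struct sk_buff *skb;
	int pktlen;

	if (ag71xx_desc_empty(desc))
		break;

	pktlen = (desc->ctrl & pktlen_mask) - ETH_FCS_LEN;

	dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
			 ag->rx_buf_size, DMA_FROM_DEVICE);

	/* wrap the existing buffer in an skb instead of copying */
	skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
	if (skb) {
		skb_reserve(skb, offset);
		skb_put(skb, pktlen);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
	}

	ring->buf[i].rx.rx_buf = NULL;
	ring->curr++;
	done++;
}

ag71xx_ring_rx_refill(ag);	/* top the ring back up */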
1741 struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
1742 struct ag71xx_ring *rx_ring = &ag->rx_ring;
1744 struct net_device *ndev = ag->ndev;
1748 tx_done = ag71xx_tx_packets(ag, false);
1750 netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
1751 rx_done = ag71xx_rx_packets(ag, limit);
1756 status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
1758 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
1762 ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
1769 status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
1773 netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d, limit=%d\n",
1779 ag71xx_int_enable(ag, AG71XX_INT_POLL);
1784 netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
1789 netif_err(ag, rx_err, ndev, "out of memory\n");
1791 mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
1799 struct ag71xx *ag;
1802 ag = netdev_priv(ndev);
1803 status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
1810 ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
1811 netif_err(ag, intr, ndev, "TX BUS error\n");
1814 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
1815 netif_err(ag, intr, ndev, "RX BUS error\n");
1820 ag71xx_int_disable(ag, AG71XX_INT_POLL);
1821 netif_dbg(ag, intr, ndev, "enable polling mode\n");
1822 napi_schedule(&ag->napi);
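The interrupt handler's job is mostly handoff: on a poll-class interrupt it masks further RX/TX interrupts and defers all ring work to NAPI, which re-enables them at line 1779 above once the rings are drained. A condensed sketch (the bus-error handling visible above is trimmed):

static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ag71xx *ag = netdev_priv(ndev);
	u32 status;

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	if (!status)
		return IRQ_NONE;

	if (status & AG71XX_INT_POLL) {
		/* mask RX/TX interrupts; ag71xx_poll() re-enables them */
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		napi_schedule(&ag->napi);
	}

	return IRQ_HANDLED;
}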
1830 struct ag71xx *ag = netdev_priv(ndev);
1833 ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
1862 struct ag71xx *ag;
1867 ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
1879 ag = netdev_priv(ndev);
1880 ag->mac_idx = -1;
1883 ag->mac_idx = i;
1886 if (ag->mac_idx < 0) {
1887 netif_err(ag, probe, ndev, "unknown mac idx\n");
1891 ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
1892 if (IS_ERR(ag->clk_eth)) {
1893 netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
1894 return PTR_ERR(ag->clk_eth);
1899 ag->pdev = pdev;
1900 ag->ndev = ndev;
1901 ag->dcfg = dcfg;
1902 ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
1903 memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));
1905 ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
1906 if (IS_ERR(ag->mac_reset)) {
1907 netif_err(ag, probe, ndev, "missing mac reset\n");
1908 return PTR_ERR(ag->mac_reset);
1911 ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1912 if (!ag->mac_base)
1919 netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
1927 INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
1928 timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);
1931 ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
1936 ag->rx_buf_offset = NET_SKB_PAD;
1937 if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
1938 ag->rx_buf_offset += NET_IP_ALIGN;
1940 if (ag71xx_is(ag, AR7100)) {
1941 ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
1944 ag->tx_ring.order = ag71xx_ring_size_order(tx_size);
1946 ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
1948 &ag->stop_desc_dma, GFP_KERNEL);
1949 if (!ag->stop_desc)
1952 ag->stop_desc->data = 0;
1953 ag->stop_desc->ctrl = 0;
1954 ag->stop_desc->next = (u32)ag->stop_desc_dma;
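The stop descriptor allocated above is a parking spot for the DMA engine: an empty descriptor whose next pointer refers back to itself, so pointing AG71XX_REG_TX_DESC/AG71XX_REG_RX_DESC at it during ag71xx_dma_reset() (lines 883-884 above) leaves the engine spinning on a harmless one-entry chain. The initialization, reconstructed from the matches:

ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
				    sizeof(struct ag71xx_desc),
				    &ag->stop_desc_dma, GFP_KERNEL);
if (!ag->stop_desc)
	return -ENOMEM;

ag->stop_desc->data = 0;
ag->stop_desc->ctrl = 0;
/* self-referencing next pointer: the engine loops here forever */
ag->stop_desc->next = (u32)ag->stop_desc_dma;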
1960 netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
1964 err = of_get_phy_mode(np, &ag->phy_if_mode);
1966 netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
1970 netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
1972 err = clk_prepare_enable(ag->clk_eth);
1974 netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
1978 ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
1980 ag71xx_hw_init(ag);
1982 err = ag71xx_mdio_probe(ag);
1988 err = ag71xx_phylink_setup(ag);
1990 netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
1996 netif_err(ag, probe, ndev, "unable to register net device\n");
2001 netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode: %s\n",
2002 (unsigned long)ag->mac_base, ndev->irq,
2003 phy_modes(ag->phy_if_mode));
2008 ag71xx_mdio_remove(ag);
2010 clk_disable_unprepare(ag->clk_eth);
2017 struct ag71xx *ag;
2022 ag = netdev_priv(ndev);
2024 ag71xx_mdio_remove(ag);
2025 clk_disable_unprepare(ag->clk_eth);