Lines matching references to fep (the driver's struct fec_enet_private pointer) in drivers/net/ethernet/freescale/fec_main.c

333 struct fec_enet_private *fep = netdev_priv(ndev);
341 txq = fep->tx_queue[0];
385 struct fec_enet_private *fep = netdev_priv(ndev);
411 if (fep->bufdesc_ex) {
414 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
419 if (fep->bufdesc_ex) {
420 if (fep->quirks & FEC_QUIRK_HAS_AVB)
431 if (((unsigned long) bufaddr) & fep->tx_align ||
432 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
436 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
440 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
442 if (dma_mapping_error(&fep->pdev->dev, addr)) {
462 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
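The three matches at 440, 442 and 462 are the streaming-DMA contract for TX buffers: map, test the cookie, and unmap only what was actually mapped on the error path. A minimal sketch of that contract (the helper name is hypothetical, not the driver's):

#include <linux/dma-mapping.h>

/* Map a TX buffer for device reads.  'dev' plays the role of
 * &fep->pdev->dev in the listing above. */
static int xmit_map_buf(struct device *dev, void *buf, size_t len,
			dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* Check before use; on failure nothing was mapped, so
	 * dma_unmap_single() must not be called for this cookie. */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*handle = addr;
	return 0;
}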
471 struct fec_enet_private *fep = netdev_priv(ndev);
507 if (((unsigned long) bufaddr) & fep->tx_align ||
508 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
512 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
517 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
518 if (dma_mapping_error(&fep->pdev->dev, addr)) {
528 dma_unmap_single(&fep->pdev->dev, addr,
535 if (fep->bufdesc_ex) {
538 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
545 if (fep->bufdesc_ex) {
550 fep->hwts_tx_en))
553 if (fep->quirks & FEC_QUIRK_HAS_AVB)
601 struct fec_enet_private *fep = netdev_priv(ndev);
612 if (((unsigned long) data) & fep->tx_align ||
613 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
617 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
621 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
622 if (dma_mapping_error(&fep->pdev->dev, addr)) {
632 if (fep->bufdesc_ex) {
633 if (fep->quirks & FEC_QUIRK_HAS_AVB)
646 if (fep->bufdesc_ex)
660 struct fec_enet_private *fep = netdev_priv(ndev);
674 if (((unsigned long)bufaddr) & fep->tx_align ||
675 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
679 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
682 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
684 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
695 if (fep->bufdesc_ex) {
696 if (fep->quirks & FEC_QUIRK_HAS_AVB)
713 struct fec_enet_private *fep = netdev_priv(ndev);
779 if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
796 struct fec_enet_private *fep = netdev_priv(ndev);
804 txq = fep->tx_queue[queue];
825 struct fec_enet_private *fep = netdev_priv(dev);
832 for (q = 0; q < fep->num_rx_queues; q++) {
834 rxq = fep->rx_queue[q];
854 for (q = 0; q < fep->num_tx_queues; q++) {
856 txq = fep->tx_queue[q];
865 dma_unmap_single(&fep->pdev->dev,
886 struct fec_enet_private *fep = netdev_priv(ndev);
889 for (i = 0; i < fep->num_rx_queues; i++)
890 writel(0, fep->rx_queue[i]->bd.reg_desc_active);
895 struct fec_enet_private *fep = netdev_priv(ndev);
900 for (i = 0; i < fep->num_rx_queues; i++) {
901 rxq = fep->rx_queue[i];
902 writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
903 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
908 fep->hwp + FEC_RCMR(i));
911 for (i = 0; i < fep->num_tx_queues; i++) {
912 txq = fep->tx_queue[i];
913 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
918 fep->hwp + FEC_DMA_CFG(i));
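Lines 900-918 point each hardware queue at its descriptor ring: the ring's bus address goes into a per-queue base register, while the CPU keeps the kernel virtual address. A sketch of that split, assuming a made-up register offset (the driver's real macro is FEC_R_DES_START(i), line 902):

#include <linux/dma-mapping.h>
#include <linux/io.h>

#define SKETCH_R_DES_START(q)	(0x180 + (q) * 0x4)	/* hypothetical offset */

static void *rx_ring_setup(struct device *dev, void __iomem *hwp,
			   size_t bd_size, int queue)
{
	dma_addr_t bd_dma;
	void *cbd_base = dmam_alloc_coherent(dev, bd_size, &bd_dma,
					     GFP_KERNEL);

	if (!cbd_base)
		return NULL;

	/* The MAC only ever sees the bus address (32-bit here, matching
	 * the DMA_BIT_MASK(32) set at line 3345). */
	writel(bd_dma, hwp + SKETCH_R_DES_START(queue));
	return cbd_base;
}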
924 struct fec_enet_private *fep = netdev_priv(ndev);
928 for (i = 0; i < fep->num_tx_queues; i++) {
929 txq = fep->tx_queue[i];
948 struct fec_enet_private *fep = netdev_priv(ndev);
958 if (fep->quirks & FEC_QUIRK_HAS_AVB) {
959 writel(0, fep->hwp + FEC_ECNTRL);
961 writel(1, fep->hwp + FEC_ECNTRL);
971 fep->hwp + FEC_ADDR_LOW);
973 fep->hwp + FEC_ADDR_HIGH);
976 writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
986 if (fep->full_duplex == DUPLEX_FULL) {
988 writel(0x04, fep->hwp + FEC_X_CNTRL);
992 writel(0x0, fep->hwp + FEC_X_CNTRL);
996 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
999 if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1000 val = readl(fep->hwp + FEC_RACC);
1003 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
1008 writel(val, fep->hwp + FEC_RACC);
1009 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
1017 if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1022 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
1023 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1024 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
1025 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
1027 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1043 if (fep->quirks & FEC_QUIRK_USE_GASKET) {
1046 writel(0, fep->hwp + FEC_MIIGSK_ENR);
1047 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1055 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1059 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1062 writel(2, fep->hwp + FEC_MIIGSK_ENR);
1069 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
1070 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1075 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
1076 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
1077 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
1078 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
1081 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
1087 writel(rcntl, fep->hwp + FEC_R_CNTRL);
1092 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1093 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1096 if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1100 writel(1 << 8, fep->hwp + FEC_X_WMRK);
1103 if (fep->bufdesc_ex)
1108 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
1112 writel(ecntl, fep->hwp + FEC_ECNTRL);
1115 if (fep->bufdesc_ex)
1119 if (fep->link)
1120 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1122 writel(0, fep->hwp + FEC_IMASK);
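Most of fec_restart() (lines 948-1122) is quirk-gated register setup; the RACC block at 999-1008 is a typical read-modify-write. The same pattern sketched below, with hypothetical bit names (the real ones live in the driver's fec.h):

#include <linux/bits.h>
#include <linux/io.h>

#define SKETCH_RACC_IPDIS	BIT(1)	/* hypothetical bit names */
#define SKETCH_RACC_PRODIS	BIT(2)

static void set_rx_csum(void __iomem *racc_reg, bool has_racc, bool csum_on)
{
	u32 val;

	if (!has_racc)		/* FEC_QUIRK_HAS_RACC gate, line 999 */
		return;

	val = readl(racc_reg);
	if (csum_on)
		val |= SKETCH_RACC_IPDIS | SKETCH_RACC_PRODIS;
	else
		val &= ~(SKETCH_RACC_IPDIS | SKETCH_RACC_PRODIS);
	writel(val, racc_reg);
}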
1129 static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
1131 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
1132 struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
1150 struct fec_enet_private *fep = netdev_priv(ndev);
1151 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
1155 if (fep->link) {
1156 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1158 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1166 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1167 if (fep->quirks & FEC_QUIRK_HAS_AVB) {
1168 writel(0, fep->hwp + FEC_ECNTRL);
1170 writel(1, fep->hwp + FEC_ECNTRL);
1173 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1175 writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
1176 val = readl(fep->hwp + FEC_ECNTRL);
1178 writel(val, fep->hwp + FEC_ECNTRL);
1179 fec_enet_stop_mode(fep, true);
1181 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1184 if (fep->quirks & FEC_QUIRK_ENET_MAC &&
1185 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1186 writel(2, fep->hwp + FEC_ECNTRL);
1187 writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
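Lines 1155-1158 are the graceful-stop handshake: request a transmit stop in X_CNTRL, then watch IEVENT for the GRA acknowledgement before resetting the MAC. A bounded-poll version of that handshake (the bit values are sketch assumptions, and the driver loops with a fixed delay rather than using iopoll):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define SKETCH_XCNTRL_GTS	BIT(0)	/* graceful transmit stop, assumed */
#define SKETCH_IEVENT_GRA	BIT(28)	/* stop acknowledged, assumed */

static int graceful_tx_stop(void __iomem *x_cntrl, void __iomem *ievent)
{
	u32 ev;

	writel(SKETCH_XCNTRL_GTS, x_cntrl);
	/* Poll every 10 us, give up after 1 ms. */
	return readl_poll_timeout_atomic(ievent, ev, ev & SKETCH_IEVENT_GRA,
					 10, 1000);
}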
1195 struct fec_enet_private *fep = netdev_priv(ndev);
1201 schedule_work(&fep->tx_timeout_work);
1206 struct fec_enet_private *fep =
1208 struct net_device *ndev = fep->netdev;
1212 napi_disable(&fep->napi);
1217 napi_enable(&fep->napi);
1223 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
1229 spin_lock_irqsave(&fep->tmreg_lock, flags);
1230 ns = timecounter_cyc2time(&fep->tc, ts);
1231 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
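fec_enet_hwtstamp() (lines 1223-1231) turns a raw cycle count from the descriptor into nanoseconds via the timecounter, under the same lock that guards PTP adjustments. A self-contained sketch of that conversion feeding an skb timestamp:

#include <linux/ktime.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timecounter.h>

static void hw_ts_to_skb(struct timecounter *tc, spinlock_t *lock,
			 u32 cycles, struct skb_shared_hwtstamps *hwts)
{
	unsigned long flags;
	u64 ns;

	/* tmreg_lock in the driver; the timecounter state is shared
	 * with the PTP clock ops. */
	spin_lock_irqsave(lock, flags);
	ns = timecounter_cyc2time(tc, cycles);
	spin_unlock_irqrestore(lock, flags);

	memset(hwts, 0, sizeof(*hwts));
	hwts->hwtstamp = ns_to_ktime(ns);
}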
1240 struct fec_enet_private *fep;
1249 fep = netdev_priv(ndev);
1251 txq = fep->tx_queue[queue_id];
1271 dma_unmap_single(&fep->pdev->dev,
1304 fep->hwts_tx_en) &&
1305 fep->bufdesc_ex) {
1309 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1348 struct fec_enet_private *fep = netdev_priv(ndev);
1352 for (i = fep->num_tx_queues - 1; i >= 0; i--)
1359 struct fec_enet_private *fep = netdev_priv(ndev);
1362 off = ((unsigned long)skb->data) & fep->rx_align;
1364 skb_reserve(skb, fep->rx_align + 1 - off);
1366 bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
1367 if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
1379 struct fec_enet_private *fep = netdev_priv(ndev);
1382 if (length > fep->rx_copybreak)
1389 dma_sync_single_for_cpu(&fep->pdev->dev,
1391 FEC_ENET_RX_FRSIZE - fep->rx_align,
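fec_enet_copybreak() (lines 1379-1391) keeps small frames off the expensive unmap/remap path: the packet is copied into a fresh skb and only a CPU-side sync is done, so the original buffer stays mapped and goes straight back to the ring. A sketch of that decision (helper name hypothetical; the driver syncs the full buffer size rather than just len):

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *maybe_copybreak(struct net_device *ndev,
				       struct device *dma_dev,
				       dma_addr_t buf, const void *va,
				       int len, int copybreak)
{
	struct sk_buff *skb;

	if (len > copybreak)
		return NULL;		/* caller unmaps and uses the buffer */

	skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(dma_dev, buf, len, DMA_FROM_DEVICE);
	skb_reserve(skb, NET_IP_ALIGN);
	skb_put_data(skb, va, len);
	dma_sync_single_for_device(dma_dev, buf, len, DMA_FROM_DEVICE);
	return skb;		/* buffer ownership returns to the ring */
}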
1410 struct fec_enet_private *fep = netdev_priv(ndev);
1424 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
1429 rxq = fep->rx_queue[queue_id];
1442 writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
1490 dma_unmap_single(&fep->pdev->dev,
1492 FEC_ENET_RX_FRSIZE - fep->rx_align,
1504 if (fep->quirks & FEC_QUIRK_HAS_RACC)
1510 if (fep->bufdesc_ex)
1516 fep->bufdesc_ex &&
1532 if (fep->hwts_rx_en && fep->bufdesc_ex)
1533 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1536 if (fep->bufdesc_ex &&
1537 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1553 napi_gro_receive(&fep->napi, skb);
1556 dma_sync_single_for_device(&fep->pdev->dev,
1558 FEC_ENET_RX_FRSIZE - fep->rx_align,
1572 if (fep->bufdesc_ex) {
1600 struct fec_enet_private *fep = netdev_priv(ndev);
1604 for (i = fep->num_rx_queues - 1; i >= 0; i--)
1610 static bool fec_enet_collect_events(struct fec_enet_private *fep)
1614 int_events = readl(fep->hwp + FEC_IEVENT);
1619 writel(int_events, fep->hwp + FEC_IEVENT);
1628 struct fec_enet_private *fep = netdev_priv(ndev);
1631 if (fec_enet_collect_events(fep) && fep->link) {
1634 if (napi_schedule_prep(&fep->napi)) {
1636 writel(0, fep->hwp + FEC_IMASK);
1637 __napi_schedule(&fep->napi);
1647 struct fec_enet_private *fep = netdev_priv(ndev);
1653 } while ((done < budget) && fec_enet_collect_events(fep));
1657 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
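Lines 1631-1637 and 1647-1657 are the two halves of one interrupt scheme: the hard IRQ masks everything and schedules NAPI; the poll loop drains work and unmasks only once it finishes under budget. Sketched with a hypothetical private struct and helpers standing in for fec_enet_collect_events() and the RX/TX processing:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

#define SKETCH_IMASK_ALL	0xffffffff	/* hypothetical FEC_DEFAULT_IMASK */

struct sketch_priv {
	struct napi_struct napi;
	void __iomem *imask_reg;
};

static bool event_pending(struct sketch_priv *sp);		/* hypothetical */
static int do_rx_tx_work(struct sketch_priv *sp, int budget);	/* hypothetical */

static irqreturn_t sketch_irq(int irq, void *dev_id)
{
	struct sketch_priv *sp = dev_id;

	if (!event_pending(sp))
		return IRQ_NONE;

	if (napi_schedule_prep(&sp->napi)) {
		writel(0, sp->imask_reg);		/* mask, as at line 1636 */
		__napi_schedule(&sp->napi);
	}
	return IRQ_HANDLED;
}

static int sketch_poll(struct napi_struct *napi, int budget)
{
	struct sketch_priv *sp = container_of(napi, struct sketch_priv, napi);
	int done = do_rx_tx_work(sp, budget);

	if (done < budget && napi_complete_done(napi, done))
		writel(SKETCH_IMASK_ALL, sp->imask_reg); /* unmask, as at 1657 */
	return done;
}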
1666 struct fec_enet_private *fep = netdev_priv(ndev);
1667 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
1682 struct device_node *np = fep->pdev->dev.of_node;
1708 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
1710 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1719 dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
1721 dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
1730 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
1740 struct fec_enet_private *fep = netdev_priv(ndev);
1750 fep->link = 0;
1752 if (!fep->link) {
1753 fep->link = phy_dev->link;
1757 if (fep->full_duplex != phy_dev->duplex) {
1758 fep->full_duplex = phy_dev->duplex;
1762 if (phy_dev->speed != fep->speed) {
1763 fep->speed = phy_dev->speed;
1770 napi_disable(&fep->napi);
1775 napi_enable(&fep->napi);
1778 if (fep->link) {
1780 napi_disable(&fep->napi);
1784 napi_enable(&fep->napi);
1785 fep->link = phy_dev->link;
1794 static int fec_enet_mdio_wait(struct fec_enet_private *fep)
1799 ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
1803 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
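fec_enet_mdio_wait() (lines 1794-1803) polls IEVENT for the MII-done bit and then writes that bit back, because IEVENT is write-one-to-clear. The same wait-and-ack, with the bit position and timeouts taken as assumptions:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define SKETCH_EV_MII	BIT(23)		/* MII transfer done, assumed */

static int mdio_wait(void __iomem *ievent)
{
	u32 ev;
	int ret;

	ret = readl_poll_timeout_atomic(ievent, ev, ev & SKETCH_EV_MII,
					2, 30000);
	if (!ret)
		writel(SKETCH_EV_MII, ievent);	/* ack: W1C register */
	return ret;
}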
1810 struct fec_enet_private *fep = bus->priv;
1811 struct device *dev = &fep->pdev->dev;
1827 fep->hwp + FEC_MII_DATA);
1830 ret = fec_enet_mdio_wait(fep);
1832 netdev_err(fep->netdev, "MDIO address write timeout\n");
1848 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
1851 ret = fec_enet_mdio_wait(fep);
1853 netdev_err(fep->netdev, "MDIO read timeout\n");
1857 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1869 struct fec_enet_private *fep = bus->priv;
1870 struct device *dev = &fep->pdev->dev;
1886 fep->hwp + FEC_MII_DATA);
1889 ret = fec_enet_mdio_wait(fep);
1891 netdev_err(fep->netdev, "MDIO address write timeout\n");
1904 fep->hwp + FEC_MII_DATA);
1907 ret = fec_enet_mdio_wait(fep);
1909 netdev_err(fep->netdev, "MDIO write timeout\n");
1920 struct fec_enet_private *fep = netdev_priv(ndev);
1925 } else if (fep->phy_node) {
1933 phy_dev = of_phy_find_device(fep->phy_node);
1941 struct fec_enet_private *fep = netdev_priv(ndev);
1945 ret = clk_prepare_enable(fep->clk_enet_out);
1949 if (fep->clk_ptp) {
1950 mutex_lock(&fep->ptp_clk_mutex);
1951 ret = clk_prepare_enable(fep->clk_ptp);
1953 mutex_unlock(&fep->ptp_clk_mutex);
1956 fep->ptp_clk_on = true;
1958 mutex_unlock(&fep->ptp_clk_mutex);
1961 ret = clk_prepare_enable(fep->clk_ref);
1967 clk_disable_unprepare(fep->clk_enet_out);
1968 if (fep->clk_ptp) {
1969 mutex_lock(&fep->ptp_clk_mutex);
1970 clk_disable_unprepare(fep->clk_ptp);
1971 fep->ptp_clk_on = false;
1972 mutex_unlock(&fep->ptp_clk_mutex);
1974 clk_disable_unprepare(fep->clk_ref);
1980 if (fep->clk_ptp) {
1981 mutex_lock(&fep->ptp_clk_mutex);
1982 clk_disable_unprepare(fep->clk_ptp);
1983 fep->ptp_clk_on = false;
1984 mutex_unlock(&fep->ptp_clk_mutex);
1987 clk_disable_unprepare(fep->clk_enet_out);
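fec_enet_clk_enable() (lines 1941-1987) is an enable/unwind ladder: every clock enabled before a failure gets disabled again in reverse order, and the ptp_clk_on flag only changes under ptp_clk_mutex. The shape of the ladder, reduced to two clocks (clk_prepare_enable() tolerates a NULL clock, which is why the optional enet_out needs no guard):

#include <linux/clk.h>
#include <linux/mutex.h>

static int clks_enable(struct clk *enet_out, struct clk *ptp,
		       struct mutex *ptp_lock, bool *ptp_on)
{
	int ret;

	ret = clk_prepare_enable(enet_out);
	if (ret)
		return ret;

	if (ptp) {
		mutex_lock(ptp_lock);
		ret = clk_prepare_enable(ptp);
		if (ret) {
			mutex_unlock(ptp_lock);
			goto failed_clk_ptp;
		}
		*ptp_on = true;
		mutex_unlock(ptp_lock);
	}
	return 0;

failed_clk_ptp:
	clk_disable_unprepare(enet_out);
	return ret;
}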
1994 struct fec_enet_private *fep = netdev_priv(ndev);
1999 int dev_id = fep->dev_id;
2001 if (fep->phy_node) {
2002 phy_dev = of_phy_connect(ndev, fep->phy_node,
2004 fep->phy_interface);
2012 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
2016 strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
2029 fep->phy_interface);
2038 if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
2049 fep->link = 0;
2050 fep->full_duplex = 0;
2061 struct fec_enet_private *fep = netdev_priv(ndev);
2084 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
2087 fep->mii_bus = fec0_mii_bus;
2110 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
2111 if (fep->quirks & FEC_QUIRK_ENET_MAC)
2116 clk_get_rate(fep->clk_ipg));
2133 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2135 fep->phy_speed = mii_speed << 1 | holdtime << 8;
2138 fep->phy_speed |= BIT(7);
2140 if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
2149 writel(0, fep->hwp + FEC_MII_DATA);
2152 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2155 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2157 fep->mii_bus = mdiobus_alloc();
2158 if (fep->mii_bus == NULL) {
2163 fep->mii_bus->name = "fec_enet_mii_bus";
2164 fep->mii_bus->read = fec_enet_mdio_read;
2165 fep->mii_bus->write = fec_enet_mdio_write;
2166 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2167 pdev->name, fep->dev_id + 1);
2168 fep->mii_bus->priv = fep;
2169 fep->mii_bus->parent = &pdev->dev;
2171 err = of_mdiobus_register(fep->mii_bus, node);
2179 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2180 fec0_mii_bus = fep->mii_bus;
2185 mdiobus_free(fep->mii_bus);
2191 static void fec_enet_mii_remove(struct fec_enet_private *fep)
2194 mdiobus_unregister(fep->mii_bus);
2195 mdiobus_free(fep->mii_bus);
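fec_enet_mii_init() (lines 2157-2185) follows the stock mdiobus life cycle: allocate, fill in accessors and a unique id, register against the DT node, and free on any failure; fec_enet_mii_remove() (2194-2195) is the mirror image. A sketch with hypothetical read/write callbacks:

#include <linux/err.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

static int sketch_mdio_read(struct mii_bus *bus, int addr, int regnum);
static int sketch_mdio_write(struct mii_bus *bus, int addr, int regnum,
			     u16 value);

static struct mii_bus *mdio_bus_create(struct platform_device *pdev,
				       struct device_node *node, void *priv,
				       int dev_id)
{
	struct mii_bus *bus = mdiobus_alloc();

	if (!bus)
		return ERR_PTR(-ENOMEM);

	bus->name = "sketch_mii_bus";
	bus->read = sketch_mdio_read;
	bus->write = sketch_mdio_write;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, dev_id + 1);
	bus->priv = priv;
	bus->parent = &pdev->dev;

	if (of_mdiobus_register(bus, node)) {
		/* Registration failed: the bus was never live, so a
		 * plain free (not unregister) is correct here. */
		mdiobus_free(bus);
		return ERR_PTR(-ENODEV);
	}
	return bus;
}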
2202 struct fec_enet_private *fep = netdev_priv(ndev);
2204 strlcpy(info->driver, fep->pdev->dev.driver->name,
2211 struct fec_enet_private *fep = netdev_priv(ndev);
2215 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
2299 struct fec_enet_private *fep = netdev_priv(ndev);
2300 u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
2301 struct device *dev = &fep->pdev->dev;
2335 !(fep->quirks & FEC_QUIRK_HAS_FRREG))
2349 struct fec_enet_private *fep = netdev_priv(ndev);
2351 if (fep->bufdesc_ex) {
2359 if (fep->ptp_clock)
2360 info->phc_index = ptp_clock_index(fep->ptp_clock);
2380 struct fec_enet_private *fep = netdev_priv(ndev);
2382 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2383 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2390 struct fec_enet_private *fep = netdev_priv(ndev);
2401 fep->pause_flag = 0;
2404 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2405 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2416 napi_disable(&fep->napi);
2421 napi_enable(&fep->napi);
2497 struct fec_enet_private *fep = netdev_priv(dev);
2501 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2507 struct fec_enet_private *fep = netdev_priv(dev);
2512 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
2540 struct fec_enet_private *fep = netdev_priv(dev);
2544 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
2547 writel(0, fep->hwp + fec_stats[i].offset);
2550 writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
2570 struct fec_enet_private *fep = netdev_priv(ndev);
2572 return us * (fep->itr_clk_rate / 64000) / 1000;
2578 struct fec_enet_private *fep = netdev_priv(ndev);
2582 if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
2583 !fep->tx_time_itr || !fep->tx_pkts_itr)
2593 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
2594 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
2595 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
2596 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
2601 writel(tx_itr, fep->hwp + FEC_TXIC0);
2602 writel(rx_itr, fep->hwp + FEC_RXIC0);
2603 if (fep->quirks & FEC_QUIRK_HAS_AVB) {
2604 writel(tx_itr, fep->hwp + FEC_TXIC1);
2605 writel(rx_itr, fep->hwp + FEC_RXIC1);
2606 writel(tx_itr, fep->hwp + FEC_TXIC2);
2607 writel(rx_itr, fep->hwp + FEC_RXIC2);
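Line 2572 is the microsecond-to-tick conversion behind the coalescing registers: the ITR timer evidently ticks at itr_clk_rate / 64, so

	ticks = us * (itr_clk_rate / 64000) / 1000

e.g. assuming a 66 MHz clock, 66000000 / 64000 = 1031 (integer division), so a 100 us budget becomes 100 * 1031 / 1000 = 103 ticks. As a function:

static unsigned int us_to_itr_ticks(unsigned int us, unsigned long itr_clk_rate)
{
	/* Same integer arithmetic as line 2572; a little precision is
	 * traded for staying within integer range. */
	return us * (itr_clk_rate / 64000) / 1000;
}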
2614 struct fec_enet_private *fep = netdev_priv(ndev);
2616 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2619 ec->rx_coalesce_usecs = fep->rx_time_itr;
2620 ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
2622 ec->tx_coalesce_usecs = fep->tx_time_itr;
2623 ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
2631 struct fec_enet_private *fep = netdev_priv(ndev);
2632 struct device *dev = &fep->pdev->dev;
2635 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2660 fep->rx_time_itr = ec->rx_coalesce_usecs;
2661 fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
2663 fep->tx_time_itr = ec->tx_coalesce_usecs;
2664 fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
2688 struct fec_enet_private *fep = netdev_priv(netdev);
2693 *(u32 *)data = fep->rx_copybreak;
2707 struct fec_enet_private *fep = netdev_priv(netdev);
2712 fep->rx_copybreak = *(u32 *)data;
2725 struct fec_enet_private *fep = netdev_priv(ndev);
2727 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
2729 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
2738 struct fec_enet_private *fep = netdev_priv(ndev);
2740 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
2748 fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
2749 if (fep->irq[0] > 0)
2750 enable_irq_wake(fep->irq[0]);
2752 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
2753 if (fep->irq[0] > 0)
2754 disable_irq_wake(fep->irq[0]);
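fec_enet_set_wol() (lines 2738-2754) keeps the wake capability of irq[0] in step with the WAKE_MAGIC flag. The toggle itself, factored out:

#include <linux/interrupt.h>

static void wol_set_irq_wake(int irq, bool enable)
{
	if (irq <= 0)		/* same guard as lines 2749/2753 */
		return;
	if (enable)
		enable_irq_wake(irq);
	else
		disable_irq_wake(irq);
}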
2788 struct fec_enet_private *fep = netdev_priv(ndev);
2797 if (fep->bufdesc_ex) {
2815 struct fec_enet_private *fep = netdev_priv(ndev);
2823 for (q = 0; q < fep->num_rx_queues; q++) {
2824 rxq = fep->rx_queue[q];
2830 dma_unmap_single(&fep->pdev->dev,
2832 FEC_ENET_RX_FRSIZE - fep->rx_align,
2840 for (q = 0; q < fep->num_tx_queues; q++) {
2841 txq = fep->tx_queue[q];
2854 struct fec_enet_private *fep = netdev_priv(ndev);
2858 for (i = 0; i < fep->num_tx_queues; i++)
2859 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
2860 txq = fep->tx_queue[i];
2861 dma_free_coherent(&fep->pdev->dev,
2867 for (i = 0; i < fep->num_rx_queues; i++)
2868 kfree(fep->rx_queue[i]);
2869 for (i = 0; i < fep->num_tx_queues; i++)
2870 kfree(fep->tx_queue[i]);
2875 struct fec_enet_private *fep = netdev_priv(ndev);
2880 for (i = 0; i < fep->num_tx_queues; i++) {
2887 fep->tx_queue[i] = txq;
2889 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
2895 txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
2905 for (i = 0; i < fep->num_rx_queues; i++) {
2906 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
2908 if (!fep->rx_queue[i]) {
2913 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
2914 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
2926 struct fec_enet_private *fep = netdev_priv(ndev);
2932 rxq = fep->rx_queue[queue];
2947 if (fep->bufdesc_ex) {
2968 struct fec_enet_private *fep = netdev_priv(ndev);
2973 txq = fep->tx_queue[queue];
2983 if (fep->bufdesc_ex) {
3004 struct fec_enet_private *fep = netdev_priv(ndev);
3007 for (i = 0; i < fep->num_rx_queues; i++)
3011 for (i = 0; i < fep->num_tx_queues; i++)
3020 struct fec_enet_private *fep = netdev_priv(ndev);
3024 ret = pm_runtime_resume_and_get(&fep->pdev->dev);
3028 pinctrl_pm_select_default_state(&fep->pdev->dev);
3066 if (fep->quirks & FEC_QUIRK_ERR006687)
3069 napi_enable(&fep->napi);
3073 device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
3083 pm_runtime_mark_last_busy(&fep->pdev->dev);
3084 pm_runtime_put_autosuspend(&fep->pdev->dev);
3085 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3092 struct fec_enet_private *fep = netdev_priv(ndev);
3097 napi_disable(&fep->napi);
3104 if (fep->quirks & FEC_QUIRK_ERR006687)
3110 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3111 pm_runtime_mark_last_busy(&fep->pdev->dev);
3112 pm_runtime_put_autosuspend(&fep->pdev->dev);
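fec_enet_open()/fec_enet_close() (lines 3020-3112) bracket the interface's active life with runtime PM: open takes a reference and selects the default pin state, close selects the sleep state and drops the reference with autosuspend so the clocks can gate after the configured delay. The bracket, reduced to its PM calls:

#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>

static int sketch_open_pm(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev);	/* line 3024 */

	if (ret < 0)
		return ret;
	pinctrl_pm_select_default_state(dev);		/* line 3028 */
	return 0;
}

static void sketch_close_pm(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);		/* line 3110 */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);		/* line 3112 */
}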
3133 struct fec_enet_private *fep = netdev_priv(ndev);
3140 tmp = readl(fep->hwp + FEC_R_CNTRL);
3142 writel(tmp, fep->hwp + FEC_R_CNTRL);
3146 tmp = readl(fep->hwp + FEC_R_CNTRL);
3148 writel(tmp, fep->hwp + FEC_R_CNTRL);
3154 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3155 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3176 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3177 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
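set_multicast_list (lines 3133-3177) programs a 64-bit hash filter split across two 32-bit registers: all-multi sets every bit (3154-3155), otherwise each address lights one bit derived from a CRC of the MAC address. A sketch of the bit selection, where the exact CRC variant and the use of the top six bits are assumptions inferred from the two 32-bit registers:

#include <linux/bits.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>

static void hash_mc_addr(const u8 *addr, u32 *hash_high, u32 *hash_low)
{
	/* Assumed: little-endian CRC-32 seeded with ~0; the top 6 bits
	 * select one of the 64 filter positions. */
	unsigned int bit = crc32_le(~0, addr, ETH_ALEN) >> 26;

	if (bit >= 32)
		*hash_high |= BIT(bit - 32);
	else
		*hash_low |= BIT(bit);
}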
3184 struct fec_enet_private *fep = netdev_priv(ndev);
3203 fep->hwp + FEC_ADDR_LOW);
3205 fep->hwp + FEC_ADDR_HIGH);
3220 struct fec_enet_private *fep = netdev_priv(dev);
3223 if (fep->irq[i] > 0) {
3224 disable_irq(fep->irq[i]);
3225 fec_enet_interrupt(fep->irq[i], dev);
3226 enable_irq(fep->irq[i]);
3235 struct fec_enet_private *fep = netdev_priv(netdev);
3243 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3245 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3252 struct fec_enet_private *fep = netdev_priv(netdev);
3256 napi_disable(&fep->napi);
3263 napi_enable(&fep->napi);
3274 struct fec_enet_private *fep = netdev_priv(ndev);
3277 if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
3325 struct fec_enet_private *fep = netdev_priv(ndev);
3330 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
3337 fep->rx_align = 0xf;
3338 fep->tx_align = 0xf;
3340 fep->rx_align = 0x3;
3341 fep->tx_align = 0x3;
3345 ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
3347 dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
3355 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
3358 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
3371 for (i = 0; i < fep->num_rx_queues; i++) {
3372 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
3381 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
3387 for (i = 0; i < fep->num_tx_queues; i++) {
3388 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
3397 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
3409 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
3410 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
3412 if (fep->quirks & FEC_QUIRK_HAS_VLAN)
3416 if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
3422 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3425 if (fep->quirks & FEC_QUIRK_HAS_AVB) {
3426 fep->tx_align = 0;
3427 fep->rx_align = 0x3f;
3434 if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
3556 static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
3570 dev_dbg(&fep->pdev->dev, "no stop mode property\n");
3574 fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
3575 if (IS_ERR(fep->stop_gpr.gpr)) {
3576 dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
3577 ret = PTR_ERR(fep->stop_gpr.gpr);
3578 fep->stop_gpr.gpr = NULL;
3582 fep->stop_gpr.reg = out_val[1];
3583 fep->stop_gpr.bit = out_val[2];
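fec_enet_init_stop_mode() (lines 3556-3583) resolves a syscon phandle into a regmap plus a register offset and bit index, so that fec_enet_stop_mode() (line 1129) only has to flip that one bit. The flip itself would reduce to a single regmap_update_bits() call, sketched here:

#include <linux/bits.h>
#include <linux/regmap.h>

static void stop_mode_set(struct regmap *gpr, u32 reg, u32 bit, bool enabled)
{
	/* gpr/reg/bit correspond to fep->stop_gpr.{gpr,reg,bit}. */
	regmap_update_bits(gpr, reg, BIT(bit), enabled ? BIT(bit) : 0);
}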
3594 struct fec_enet_private *fep;
3619 fep = netdev_priv(ndev);
3626 fep->quirks = dev_info->quirks;
3628 fep->netdev = ndev;
3629 fep->num_rx_queues = num_rx_qs;
3630 fep->num_tx_queues = num_tx_qs;
3634 if (fep->quirks & FEC_QUIRK_HAS_GBIT)
3635 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
3641 fep->hwp = devm_platform_ioremap_resource(pdev, 0);
3642 if (IS_ERR(fep->hwp)) {
3643 ret = PTR_ERR(fep->hwp);
3647 fep->pdev = pdev;
3648 fep->dev_id = dev_id++;
3655 fep->quirks |= FEC_QUIRK_ERR006687;
3658 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
3660 ret = fec_enet_init_stop_mode(fep, np);
3674 fep->phy_node = phy_node;
3680 fep->phy_interface = pdata->phy;
3682 fep->phy_interface = PHY_INTERFACE_MODE_MII;
3684 fep->phy_interface = interface;
3687 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
3688 if (IS_ERR(fep->clk_ipg)) {
3689 ret = PTR_ERR(fep->clk_ipg);
3693 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
3694 if (IS_ERR(fep->clk_ahb)) {
3695 ret = PTR_ERR(fep->clk_ahb);
3699 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
3702 fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
3703 if (IS_ERR(fep->clk_enet_out))
3704 fep->clk_enet_out = NULL;
3706 fep->ptp_clk_on = false;
3707 mutex_init(&fep->ptp_clk_mutex);
3710 fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
3711 if (IS_ERR(fep->clk_ref))
3712 fep->clk_ref = NULL;
3714 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
3715 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
3716 if (IS_ERR(fep->clk_ptp)) {
3717 fep->clk_ptp = NULL;
3718 fep->bufdesc_ex = false;
3725 ret = clk_prepare_enable(fep->clk_ipg);
3728 ret = clk_prepare_enable(fep->clk_ahb);
3732 fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
3733 if (!IS_ERR(fep->reg_phy)) {
3734 ret = regulator_enable(fep->reg_phy);
3741 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
3745 fep->reg_phy = NULL;
3759 if (fep->bufdesc_ex)
3780 fep->irq[i] = irq;
3798 device_init_wakeup(&ndev->dev, fep->wol_flag &
3801 if (fep->bufdesc_ex && fep->ptp_clock)
3802 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
3804 fep->rx_copybreak = COPYBREAK_DEFAULT;
3805 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
3813 fec_enet_mii_remove(fep);
3821 if (fep->reg_phy)
3822 regulator_disable(fep->reg_phy);
3824 clk_disable_unprepare(fep->clk_ahb);
3826 clk_disable_unprepare(fep->clk_ipg);
3846 struct fec_enet_private *fep = netdev_priv(ndev);
3856 cancel_work_sync(&fep->tx_timeout_work);
3859 fec_enet_mii_remove(fep);
3860 if (fep->reg_phy)
3861 regulator_disable(fep->reg_phy);
3865 of_node_put(fep->phy_node);
3871 clk_disable_unprepare(fep->clk_ahb);
3872 clk_disable_unprepare(fep->clk_ipg);
3884 struct fec_enet_private *fep = netdev_priv(ndev);
3888 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
3889 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
3891 napi_disable(&fep->napi);
3897 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
3898 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3902 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
3903 regulator_disable(fep->reg_phy);
3908 if (fep->clk_enet_out || fep->reg_phy)
3909 fep->link = 0;
3917 struct fec_enet_private *fep = netdev_priv(ndev);
3921 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
3922 ret = regulator_enable(fep->reg_phy);
3934 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
3935 fec_enet_stop_mode(fep, false);
3937 val = readl(fep->hwp + FEC_ECNTRL);
3939 writel(val, fep->hwp + FEC_ECNTRL);
3940 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
3942 pinctrl_pm_select_default_state(&fep->pdev->dev);
3948 napi_enable(&fep->napi);
3956 if (fep->reg_phy)
3957 regulator_disable(fep->reg_phy);
3964 struct fec_enet_private *fep = netdev_priv(ndev);
3966 clk_disable_unprepare(fep->clk_ahb);
3967 clk_disable_unprepare(fep->clk_ipg);
3975 struct fec_enet_private *fep = netdev_priv(ndev);
3978 ret = clk_prepare_enable(fep->clk_ahb);
3981 ret = clk_prepare_enable(fep->clk_ipg);
3988 clk_disable_unprepare(fep->clk_ahb);
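The runtime-PM callbacks (lines 3964-3988) just gate the two bus clocks: suspend disables ahb then ipg, resume re-enables them and unwinds ahb if ipg fails. The resume side, which carries the only error handling:

#include <linux/clk.h>

static int rt_resume_clks(struct clk *ahb, struct clk *ipg)
{
	int ret = clk_prepare_enable(ahb);	/* line 3978 */

	if (ret)
		return ret;
	ret = clk_prepare_enable(ipg);		/* line 3981 */
	if (ret)
		clk_disable_unprepare(ahb);	/* line 3988 */
	return ret;
}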