Lines Matching refs:fep
80 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
384 struct fec_enet_private *fep = netdev_priv(ndev);
392 txq = fep->tx_queue[0];
432 fec_enet_create_page_pool(struct fec_enet_private *fep,
435 struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
440 .nid = dev_to_node(&fep->pdev->dev),
441 .dev = &fep->pdev->dev,
455 err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
479 struct fec_enet_private *fep = netdev_priv(ndev);
505 if (fep->bufdesc_ex) {
508 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
513 if (fep->bufdesc_ex) {
514 if (fep->quirks & FEC_QUIRK_HAS_AVB)
526 if (((unsigned long) bufaddr) & fep->tx_align ||
527 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
531 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
535 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
537 if (dma_mapping_error(&fep->pdev->dev, addr)) {
557 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
566 struct fec_enet_private *fep = netdev_priv(ndev);
602 if (((unsigned long) bufaddr) & fep->tx_align ||
603 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
607 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
612 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
613 if (dma_mapping_error(&fep->pdev->dev, addr)) {
623 dma_unmap_single(&fep->pdev->dev, addr,
630 if (fep->bufdesc_ex) {
633 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
640 if (fep->bufdesc_ex) {
645 fep->hwts_tx_en))
648 if (fep->quirks & FEC_QUIRK_HAS_AVB)
694 struct fec_enet_private *fep = netdev_priv(ndev);
705 if (((unsigned long) data) & fep->tx_align ||
706 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
710 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
714 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
715 if (dma_mapping_error(&fep->pdev->dev, addr)) {
725 if (fep->bufdesc_ex) {
726 if (fep->quirks & FEC_QUIRK_HAS_AVB)
739 if (fep->bufdesc_ex)
753 struct fec_enet_private *fep = netdev_priv(ndev);
767 if (((unsigned long)bufaddr) & fep->tx_align ||
768 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
772 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
775 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
777 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
788 if (fep->bufdesc_ex) {
789 if (fep->quirks & FEC_QUIRK_HAS_AVB)
806 struct fec_enet_private *fep = netdev_priv(ndev);
872 if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
889 struct fec_enet_private *fep = netdev_priv(ndev);
897 txq = fep->tx_queue[queue];
918 struct fec_enet_private *fep = netdev_priv(dev);
925 for (q = 0; q < fep->num_rx_queues; q++) {
927 rxq = fep->rx_queue[q];
947 for (q = 0; q < fep->num_tx_queues; q++) {
949 txq = fep->tx_queue[q];
959 dma_unmap_single(&fep->pdev->dev,
967 dma_unmap_single(&fep->pdev->dev,
997 struct fec_enet_private *fep = netdev_priv(ndev);
1000 for (i = 0; i < fep->num_rx_queues; i++)
1001 writel(0, fep->rx_queue[i]->bd.reg_desc_active);
1006 struct fec_enet_private *fep = netdev_priv(ndev);
1011 for (i = 0; i < fep->num_rx_queues; i++) {
1012 rxq = fep->rx_queue[i];
1013 writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
1014 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
1019 fep->hwp + FEC_RCMR(i));
1022 for (i = 0; i < fep->num_tx_queues; i++) {
1023 txq = fep->tx_queue[i];
1024 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
1029 fep->hwp + FEC_DMA_CFG(i));
1041 struct fec_enet_private *fep = netdev_priv(ndev);
1050 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
1051 ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
1052 writel(0, fep->hwp + FEC_ECNTRL);
1054 writel(1, fep->hwp + FEC_ECNTRL);
1064 fep->hwp + FEC_ADDR_LOW);
1066 fep->hwp + FEC_ADDR_HIGH);
1069 writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
1076 if (fep->full_duplex == DUPLEX_FULL) {
1078 writel(0x04, fep->hwp + FEC_X_CNTRL);
1082 writel(0x0, fep->hwp + FEC_X_CNTRL);
1086 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1089 if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1090 u32 val = readl(fep->hwp + FEC_RACC);
1094 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
1099 writel(val, fep->hwp + FEC_RACC);
1100 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
1108 if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1113 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
1114 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1115 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
1116 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
1118 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1134 if (fep->quirks & FEC_QUIRK_USE_GASKET) {
1137 writel(0, fep->hwp + FEC_MIIGSK_ENR);
1138 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1146 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1150 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1153 writel(2, fep->hwp + FEC_MIIGSK_ENR);
1160 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
1161 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1166 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
1167 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
1168 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
1169 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
1172 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
1178 writel(rcntl, fep->hwp + FEC_R_CNTRL);
1183 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1184 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1187 if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1191 writel(1 << 8, fep->hwp + FEC_X_WMRK);
1194 if (fep->bufdesc_ex)
1197 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1198 fep->rgmii_txc_dly)
1200 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1201 fep->rgmii_rxc_dly)
1206 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
1210 writel(ecntl, fep->hwp + FEC_ECNTRL);
1213 if (fep->bufdesc_ex)
1217 if (fep->link)
1218 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1220 writel(0, fep->hwp + FEC_IMASK);
1223 if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
1227 static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
1234 return imx_scu_get_handle(&fep->ipc_handle);
1237 static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
1239 struct device_node *np = fep->pdev->dev.of_node;
1243 if (!np || !fep->ipc_handle)
1252 imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
1255 static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
1257 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
1258 struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
1271 fec_enet_ipg_stop_set(fep, enabled);
1277 struct fec_enet_private *fep = netdev_priv(ndev);
1279 writel(0, fep->hwp + FEC_IMASK);
1284 struct fec_enet_private *fep = netdev_priv(ndev);
1286 writel(0, fep->hwp + FEC_IMASK);
1287 writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
1293 struct fec_enet_private *fep = netdev_priv(ndev);
1294 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
1298 if (fep->link) {
1299 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1301 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1309 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1310 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
1311 writel(0, fep->hwp + FEC_ECNTRL);
1313 writel(1, fep->hwp + FEC_ECNTRL);
1317 val = readl(fep->hwp + FEC_ECNTRL);
1319 writel(val, fep->hwp + FEC_ECNTRL);
1321 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1322 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1325 if (fep->quirks & FEC_QUIRK_ENET_MAC &&
1326 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1327 writel(2, fep->hwp + FEC_ECNTRL);
1328 writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
1336 struct fec_enet_private *fep = netdev_priv(ndev);
1342 schedule_work(&fep->tx_timeout_work);
1347 struct fec_enet_private *fep =
1349 struct net_device *ndev = fep->netdev;
1353 napi_disable(&fep->napi);
1358 napi_enable(&fep->napi);
1364 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
1370 spin_lock_irqsave(&fep->tmreg_lock, flags);
1371 ns = timecounter_cyc2time(&fep->tc, ts);
1372 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
1381 struct fec_enet_private *fep;
1393 fep = netdev_priv(ndev);
1395 txq = fep->tx_queue[queue_id];
1416 dma_unmap_single(&fep->pdev->dev,
1435 dma_unmap_single(&fep->pdev->dev,
1488 fep->hwts_tx_en) && fep->bufdesc_ex) {
1492 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1536 struct fec_enet_private *fep = netdev_priv(ndev);
1540 for (i = fep->num_tx_queues - 1; i >= 0; i--)
1560 fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
1585 err = xdp_do_redirect(fep->netdev, xdp, prog);
1594 err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
1604 bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
1617 trace_xdp_exception(fep->netdev, prog, act);
1632 struct fec_enet_private *fep = netdev_priv(ndev);
1644 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
1645 struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
1657 if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1666 rxq = fep->rx_queue[queue_id];
1680 writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
1715 dma_sync_single_for_cpu(&fep->pdev->dev,
1727 ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
1758 if (fep->bufdesc_ex)
1764 fep->bufdesc_ex &&
1780 if (fep->hwts_rx_en && fep->bufdesc_ex)
1781 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1784 if (fep->bufdesc_ex &&
1785 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1801 napi_gro_receive(&fep->napi, skb);
1810 if (fep->bufdesc_ex) {
1842 struct fec_enet_private *fep = netdev_priv(ndev);
1846 for (i = fep->num_rx_queues - 1; i >= 0; i--)
1852 static bool fec_enet_collect_events(struct fec_enet_private *fep)
1856 int_events = readl(fep->hwp + FEC_IEVENT);
1861 writel(int_events, fep->hwp + FEC_IEVENT);
1870 struct fec_enet_private *fep = netdev_priv(ndev);
1873 if (fec_enet_collect_events(fep) && fep->link) {
1876 if (napi_schedule_prep(&fep->napi)) {
1878 writel(0, fep->hwp + FEC_IMASK);
1879 __napi_schedule(&fep->napi);
1889 struct fec_enet_private *fep = netdev_priv(ndev);
1895 } while ((done < budget) && fec_enet_collect_events(fep));
1899 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1908 struct fec_enet_private *fep = netdev_priv(ndev);
1924 struct device_node *np = fep->pdev->dev.of_node;
1942 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
1954 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
1956 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1965 dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
1967 dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
1973 eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
1985 struct fec_enet_private *fep = netdev_priv(ndev);
1995 fep->link = 0;
1997 if (!fep->link) {
1998 fep->link = phy_dev->link;
2002 if (fep->full_duplex != phy_dev->duplex) {
2003 fep->full_duplex = phy_dev->duplex;
2007 if (phy_dev->speed != fep->speed) {
2008 fep->speed = phy_dev->speed;
2015 napi_disable(&fep->napi);
2020 napi_enable(&fep->napi);
2023 if (fep->link) {
2025 napi_disable(&fep->napi);
2029 napi_enable(&fep->napi);
2030 fep->link = phy_dev->link;
2039 static int fec_enet_mdio_wait(struct fec_enet_private *fep)
2044 ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
2048 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2055 struct fec_enet_private *fep = bus->priv;
2056 struct device *dev = &fep->pdev->dev;
2071 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
2074 ret = fec_enet_mdio_wait(fep);
2076 netdev_err(fep->netdev, "MDIO read timeout\n");
2080 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
2092 struct fec_enet_private *fep = bus->priv;
2093 struct device *dev = &fep->pdev->dev;
2106 fep->hwp + FEC_MII_DATA);
2109 ret = fec_enet_mdio_wait(fep);
2111 netdev_err(fep->netdev, "MDIO address write timeout\n");
2120 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
2123 ret = fec_enet_mdio_wait(fep);
2125 netdev_err(fep->netdev, "MDIO read timeout\n");
2129 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
2141 struct fec_enet_private *fep = bus->priv;
2142 struct device *dev = &fep->pdev->dev;
2157 fep->hwp + FEC_MII_DATA);
2160 ret = fec_enet_mdio_wait(fep);
2162 netdev_err(fep->netdev, "MDIO write timeout\n");
2173 struct fec_enet_private *fep = bus->priv;
2174 struct device *dev = &fep->pdev->dev;
2187 fep->hwp + FEC_MII_DATA);
2190 ret = fec_enet_mdio_wait(fep);
2192 netdev_err(fep->netdev, "MDIO address write timeout\n");
2200 fep->hwp + FEC_MII_DATA);
2203 ret = fec_enet_mdio_wait(fep);
2205 netdev_err(fep->netdev, "MDIO write timeout\n");
2216 struct fec_enet_private *fep = netdev_priv(ndev);
2221 } else if (fep->phy_node) {
2229 phy_dev = of_phy_find_device(fep->phy_node);
2237 struct fec_enet_private *fep = netdev_priv(ndev);
2241 ret = clk_prepare_enable(fep->clk_enet_out);
2245 if (fep->clk_ptp) {
2246 mutex_lock(&fep->ptp_clk_mutex);
2247 ret = clk_prepare_enable(fep->clk_ptp);
2249 mutex_unlock(&fep->ptp_clk_mutex);
2252 fep->ptp_clk_on = true;
2254 mutex_unlock(&fep->ptp_clk_mutex);
2257 ret = clk_prepare_enable(fep->clk_ref);
2261 ret = clk_prepare_enable(fep->clk_2x_txclk);
2267 clk_disable_unprepare(fep->clk_enet_out);
2268 if (fep->clk_ptp) {
2269 mutex_lock(&fep->ptp_clk_mutex);
2270 clk_disable_unprepare(fep->clk_ptp);
2271 fep->ptp_clk_on = false;
2272 mutex_unlock(&fep->ptp_clk_mutex);
2274 clk_disable_unprepare(fep->clk_ref);
2275 clk_disable_unprepare(fep->clk_2x_txclk);
2281 if (fep->clk_ref)
2282 clk_disable_unprepare(fep->clk_ref);
2284 if (fep->clk_ptp) {
2285 mutex_lock(&fep->ptp_clk_mutex);
2286 clk_disable_unprepare(fep->clk_ptp);
2287 fep->ptp_clk_on = false;
2288 mutex_unlock(&fep->ptp_clk_mutex);
2291 clk_disable_unprepare(fep->clk_enet_out);
2296 static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
2304 dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps");
2307 fep->rgmii_txc_dly = true;
2314 dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps");
2317 fep->rgmii_rxc_dly = true;
2326 struct fec_enet_private *fep = netdev_priv(ndev);
2331 int dev_id = fep->dev_id;
2333 if (fep->phy_node) {
2334 phy_dev = of_phy_connect(ndev, fep->phy_node,
2336 fep->phy_interface);
2344 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
2348 strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
2361 fep->phy_interface);
2370 if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
2381 fep->link = 0;
2382 fep->full_duplex = 0;
2395 struct fec_enet_private *fep = netdev_priv(ndev);
2418 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
2421 fep->mii_bus = fec0_mii_bus;
2444 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
2445 if (fep->quirks & FEC_QUIRK_ENET_MAC)
2450 clk_get_rate(fep->clk_ipg));
2467 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2469 fep->phy_speed = mii_speed << 1 | holdtime << 8;
2472 fep->phy_speed |= BIT(7);
2474 if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
2483 writel(0, fep->hwp + FEC_MII_DATA);
2486 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2489 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2491 fep->mii_bus = mdiobus_alloc();
2492 if (fep->mii_bus == NULL) {
2497 fep->mii_bus->name = "fec_enet_mii_bus";
2498 fep->mii_bus->read = fec_enet_mdio_read_c22;
2499 fep->mii_bus->write = fec_enet_mdio_write_c22;
2500 if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
2501 fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
2502 fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
2504 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2505 pdev->name, fep->dev_id + 1);
2506 fep->mii_bus->priv = fep;
2507 fep->mii_bus->parent = &pdev->dev;
2509 err = of_mdiobus_register(fep->mii_bus, node);
2517 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2518 fec0_mii_bus = fep->mii_bus;
2523 mdiobus_free(fep->mii_bus);
2529 static void fec_enet_mii_remove(struct fec_enet_private *fep)
2532 mdiobus_unregister(fep->mii_bus);
2533 mdiobus_free(fep->mii_bus);
2540 struct fec_enet_private *fep = netdev_priv(ndev);
2542 strscpy(info->driver, fep->pdev->dev.driver->name,
2549 struct fec_enet_private *fep = netdev_priv(ndev);
2553 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
2637 struct fec_enet_private *fep = netdev_priv(ndev);
2638 u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
2639 struct device *dev = &fep->pdev->dev;
2673 !(fep->quirks & FEC_QUIRK_HAS_FRREG))
2687 struct fec_enet_private *fep = netdev_priv(ndev);
2689 if (fep->bufdesc_ex) {
2697 if (fep->ptp_clock)
2698 info->phc_index = ptp_clock_index(fep->ptp_clock);
2718 struct fec_enet_private *fep = netdev_priv(ndev);
2720 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2721 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2728 struct fec_enet_private *fep = netdev_priv(ndev);
2739 fep->pause_flag = 0;
2742 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2743 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2754 napi_disable(&fep->napi);
2759 napi_enable(&fep->napi);
2845 struct fec_enet_private *fep = netdev_priv(dev);
2849 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2852 static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
2858 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2859 rxq = fep->rx_queue[i];
2868 static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
2875 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2876 rxq = fep->rx_queue[i];
2891 struct fec_enet_private *fep = netdev_priv(dev);
2896 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
2899 fec_enet_get_xdp_stats(fep, data);
2902 fec_enet_page_pool_stats(fep, data);
2947 struct fec_enet_private *fep = netdev_priv(dev);
2952 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
2955 writel(0, fep->hwp + fec_stats[i].offset);
2957 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2958 rxq = fep->rx_queue[i];
2964 writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
2984 struct fec_enet_private *fep = netdev_priv(ndev);
2986 return us * (fep->itr_clk_rate / 64000) / 1000;
2992 struct fec_enet_private *fep = netdev_priv(ndev);
2996 if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
2997 !fep->tx_time_itr || !fep->tx_pkts_itr)
3007 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
3008 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
3009 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
3010 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
3015 writel(tx_itr, fep->hwp + FEC_TXIC0);
3016 writel(rx_itr, fep->hwp + FEC_RXIC0);
3017 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
3018 writel(tx_itr, fep->hwp + FEC_TXIC1);
3019 writel(rx_itr, fep->hwp + FEC_RXIC1);
3020 writel(tx_itr, fep->hwp + FEC_TXIC2);
3021 writel(rx_itr, fep->hwp + FEC_RXIC2);
3030 struct fec_enet_private *fep = netdev_priv(ndev);
3032 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3035 ec->rx_coalesce_usecs = fep->rx_time_itr;
3036 ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
3038 ec->tx_coalesce_usecs = fep->tx_time_itr;
3039 ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
3049 struct fec_enet_private *fep = netdev_priv(ndev);
3050 struct device *dev = &fep->pdev->dev;
3053 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3078 fep->rx_time_itr = ec->rx_coalesce_usecs;
3079 fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
3081 fep->tx_time_itr = ec->tx_coalesce_usecs;
3082 fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
3094 struct fec_enet_private *fep = netdev_priv(ndev);
3096 return us * (fep->clk_ref_rate / 1000) / 1000;
3101 struct fec_enet_private *fep = netdev_priv(ndev);
3102 struct ethtool_eee *p = &fep->eee;
3122 writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
3123 writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
3131 struct fec_enet_private *fep = netdev_priv(ndev);
3132 struct ethtool_eee *p = &fep->eee;
3134 if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3151 struct fec_enet_private *fep = netdev_priv(ndev);
3152 struct ethtool_eee *p = &fep->eee;
3155 if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3178 struct fec_enet_private *fep = netdev_priv(ndev);
3180 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
3182 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
3191 struct fec_enet_private *fep = netdev_priv(ndev);
3193 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
3201 fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
3203 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
3237 struct fec_enet_private *fep = netdev_priv(ndev);
3243 for (q = 0; q < fep->num_rx_queues; q++) {
3244 rxq = fep->rx_queue[q];
3257 for (q = 0; q < fep->num_tx_queues; q++) {
3258 txq = fep->tx_queue[q];
3286 struct fec_enet_private *fep = netdev_priv(ndev);
3290 for (i = 0; i < fep->num_tx_queues; i++)
3291 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
3292 txq = fep->tx_queue[i];
3293 dma_free_coherent(&fep->pdev->dev,
3299 for (i = 0; i < fep->num_rx_queues; i++)
3300 kfree(fep->rx_queue[i]);
3301 for (i = 0; i < fep->num_tx_queues; i++)
3302 kfree(fep->tx_queue[i]);
3307 struct fec_enet_private *fep = netdev_priv(ndev);
3312 for (i = 0; i < fep->num_tx_queues; i++) {
3319 fep->tx_queue[i] = txq;
3321 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
3326 txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
3336 for (i = 0; i < fep->num_rx_queues; i++) {
3337 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
3339 if (!fep->rx_queue[i]) {
3344 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
3345 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
3357 struct fec_enet_private *fep = netdev_priv(ndev);
3364 rxq = fep->rx_queue[queue];
3367 err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
3385 if (fep->bufdesc_ex) {
3406 struct fec_enet_private *fep = netdev_priv(ndev);
3411 txq = fep->tx_queue[queue];
3421 if (fep->bufdesc_ex) {
3442 struct fec_enet_private *fep = netdev_priv(ndev);
3445 for (i = 0; i < fep->num_rx_queues; i++)
3449 for (i = 0; i < fep->num_tx_queues; i++)
3458 struct fec_enet_private *fep = netdev_priv(ndev);
3462 ret = pm_runtime_resume_and_get(&fep->pdev->dev);
3466 pinctrl_pm_select_default_state(&fep->pdev->dev);
3504 if (fep->quirks & FEC_QUIRK_ERR006687)
3507 if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3508 cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
3510 napi_enable(&fep->napi);
3514 device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
3524 pm_runtime_mark_last_busy(&fep->pdev->dev);
3525 pm_runtime_put_autosuspend(&fep->pdev->dev);
3526 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3533 struct fec_enet_private *fep = netdev_priv(ndev);
3538 napi_disable(&fep->napi);
3545 if (fep->quirks & FEC_QUIRK_ERR006687)
3551 if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3552 cpu_latency_qos_remove_request(&fep->pm_qos_req);
3554 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3555 pm_runtime_mark_last_busy(&fep->pdev->dev);
3556 pm_runtime_put_autosuspend(&fep->pdev->dev);
3577 struct fec_enet_private *fep = netdev_priv(ndev);
3584 tmp = readl(fep->hwp + FEC_R_CNTRL);
3586 writel(tmp, fep->hwp + FEC_R_CNTRL);
3590 tmp = readl(fep->hwp + FEC_R_CNTRL);
3592 writel(tmp, fep->hwp + FEC_R_CNTRL);
3598 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3599 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3620 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3621 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3628 struct fec_enet_private *fep = netdev_priv(ndev);
3647 fep->hwp + FEC_ADDR_LOW);
3649 fep->hwp + FEC_ADDR_HIGH);
3664 struct fec_enet_private *fep = netdev_priv(dev);
3667 if (fep->irq[i] > 0) {
3668 disable_irq(fep->irq[i]);
3669 fec_enet_interrupt(fep->irq[i], dev);
3670 enable_irq(fep->irq[i]);
3679 struct fec_enet_private *fep = netdev_priv(netdev);
3687 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3689 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3696 struct fec_enet_private *fep = netdev_priv(netdev);
3700 napi_disable(&fep->napi);
3707 napi_enable(&fep->napi);
3718 struct fec_enet_private *fep = netdev_priv(ndev);
3721 if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
3741 struct fec_enet_private *fep = netdev_priv(dev);
3751 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
3758 napi_disable(&fep->napi);
3762 old_prog = xchg(&fep->xdp_prog, bpf->prog);
3769 napi_enable(&fep->napi);
3787 fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
3792 return (index % fep->num_tx_queues);
3795 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
3808 netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
3822 dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
3824 if (dma_mapping_error(&fep->pdev->dev, dma_addr))
3837 dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
3845 if (fep->bufdesc_ex)
3851 if (fep->bufdesc_ex) {
3854 if (fep->quirks & FEC_QUIRK_HAS_AVB)
3886 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
3894 queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3895 txq = fep->tx_queue[queue];
3896 nq = netdev_get_tx_queue(fep->netdev, queue);
3902 ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
3914 struct fec_enet_private *fep = netdev_priv(dev);
3922 queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3923 txq = fep->tx_queue[queue];
3924 nq = netdev_get_tx_queue(fep->netdev, queue);
3931 if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
3944 struct fec_enet_private *fep = netdev_priv(ndev);
3949 if (!fep->bufdesc_ex)
3961 struct fec_enet_private *fep = netdev_priv(ndev);
3966 if (!fep->bufdesc_ex)
4006 struct fec_enet_private *fep = netdev_priv(ndev);
4011 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
4018 fep->rx_align = 0xf;
4019 fep->tx_align = 0xf;
4021 fep->rx_align = 0x3;
4022 fep->tx_align = 0x3;
4024 fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4025 fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4026 fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
4027 fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;
4030 ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
4032 dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
4040 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
4043 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
4056 for (i = 0; i < fep->num_rx_queues; i++) {
4057 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
4066 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
4072 for (i = 0; i < fep->num_tx_queues; i++) {
4073 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
4082 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
4094 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
4095 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
4097 if (fep->quirks & FEC_QUIRK_HAS_VLAN)
4101 if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
4107 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
4110 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
4111 fep->tx_align = 0;
4112 fep->rx_align = 0x3f;
4117 if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
4123 if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
4241 struct fec_enet_private *fep = netdev_priv(ndev);
4243 if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2)
4244 fep->wake_irq = fep->irq[2];
4246 fep->wake_irq = fep->irq[0];
4249 static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
4263 dev_dbg(&fep->pdev->dev, "no stop mode property\n");
4267 fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
4268 if (IS_ERR(fep->stop_gpr.gpr)) {
4269 dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
4270 ret = PTR_ERR(fep->stop_gpr.gpr);
4271 fep->stop_gpr.gpr = NULL;
4275 fep->stop_gpr.reg = out_val[1];
4276 fep->stop_gpr.bit = out_val[2];
4287 struct fec_enet_private *fep;
4312 fep = netdev_priv(ndev);
4319 fep->quirks = dev_info->quirks;
4321 fep->netdev = ndev;
4322 fep->num_rx_queues = num_rx_qs;
4323 fep->num_tx_queues = num_tx_qs;
4327 if (fep->quirks & FEC_QUIRK_HAS_GBIT)
4328 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
4334 fep->hwp = devm_platform_ioremap_resource(pdev, 0);
4335 if (IS_ERR(fep->hwp)) {
4336 ret = PTR_ERR(fep->hwp);
4340 fep->pdev = pdev;
4341 fep->dev_id = dev_id++;
4348 fep->quirks |= FEC_QUIRK_ERR006687;
4350 ret = fec_enet_ipc_handle_init(fep);
4355 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
4357 ret = fec_enet_init_stop_mode(fep, np);
4371 fep->phy_node = phy_node;
4377 fep->phy_interface = pdata->phy;
4379 fep->phy_interface = PHY_INTERFACE_MODE_MII;
4381 fep->phy_interface = interface;
4384 ret = fec_enet_parse_rgmii_delay(fep, np);
4388 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
4389 if (IS_ERR(fep->clk_ipg)) {
4390 ret = PTR_ERR(fep->clk_ipg);
4394 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
4395 if (IS_ERR(fep->clk_ahb)) {
4396 ret = PTR_ERR(fep->clk_ahb);
4400 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
4403 fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
4404 if (IS_ERR(fep->clk_enet_out)) {
4405 ret = PTR_ERR(fep->clk_enet_out);
4409 fep->ptp_clk_on = false;
4410 mutex_init(&fep->ptp_clk_mutex);
4413 fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
4414 if (IS_ERR(fep->clk_ref)) {
4415 ret = PTR_ERR(fep->clk_ref);
4418 fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
4421 if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
4422 fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
4423 if (IS_ERR(fep->clk_2x_txclk))
4424 fep->clk_2x_txclk = NULL;
4427 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
4428 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
4429 if (IS_ERR(fep->clk_ptp)) {
4430 fep->clk_ptp = NULL;
4431 fep->bufdesc_ex = false;
4438 ret = clk_prepare_enable(fep->clk_ipg);
4441 ret = clk_prepare_enable(fep->clk_ahb);
4445 fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
4446 if (!IS_ERR(fep->reg_phy)) {
4447 ret = regulator_enable(fep->reg_phy);
4454 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
4458 fep->reg_phy = NULL;
4472 if (fep->bufdesc_ex)
4493 fep->irq[i] = irq;
4514 device_init_wakeup(&ndev->dev, fep->wol_flag &
4517 if (fep->bufdesc_ex && fep->ptp_clock)
4518 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
4520 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
4528 fec_enet_mii_remove(fep);
4536 if (fep->reg_phy)
4537 regulator_disable(fep->reg_phy);
4539 clk_disable_unprepare(fep->clk_ahb);
4541 clk_disable_unprepare(fep->clk_ipg);
4563 struct fec_enet_private *fep = netdev_priv(ndev);
4573 cancel_work_sync(&fep->tx_timeout_work);
4576 fec_enet_mii_remove(fep);
4577 if (fep->reg_phy)
4578 regulator_disable(fep->reg_phy);
4582 of_node_put(fep->phy_node);
4588 clk_disable_unprepare(fep->clk_ahb);
4589 clk_disable_unprepare(fep->clk_ipg);
4600 struct fec_enet_private *fep = netdev_priv(ndev);
4605 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
4606 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
4608 napi_disable(&fep->napi);
4613 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
4615 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
4618 if (fep->wake_irq > 0) {
4619 disable_irq(fep->wake_irq);
4620 enable_irq_wake(fep->wake_irq);
4622 fec_enet_stop_mode(fep, true);
4627 fep->rpm_active = !pm_runtime_status_suspended(dev);
4628 if (fep->rpm_active) {
4638 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
4639 regulator_disable(fep->reg_phy);
4644 if (fep->clk_enet_out || fep->reg_phy)
4645 fep->link = 0;
4653 struct fec_enet_private *fep = netdev_priv(ndev);
4657 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
4658 ret = regulator_enable(fep->reg_phy);
4665 if (fep->rpm_active)
4673 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
4674 fec_enet_stop_mode(fep, false);
4675 if (fep->wake_irq) {
4676 disable_irq_wake(fep->wake_irq);
4677 enable_irq(fep->wake_irq);
4680 val = readl(fep->hwp + FEC_ECNTRL);
4682 writel(val, fep->hwp + FEC_ECNTRL);
4683 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
4685 pinctrl_pm_select_default_state(&fep->pdev->dev);
4691 napi_enable(&fep->napi);
4700 if (fep->reg_phy)
4701 regulator_disable(fep->reg_phy);
4708 struct fec_enet_private *fep = netdev_priv(ndev);
4710 clk_disable_unprepare(fep->clk_ahb);
4711 clk_disable_unprepare(fep->clk_ipg);
4719 struct fec_enet_private *fep = netdev_priv(ndev);
4722 ret = clk_prepare_enable(fep->clk_ahb);
4725 ret = clk_prepare_enable(fep->clk_ipg);
4732 clk_disable_unprepare(fep->clk_ahb);
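
The matches above appear to come from the Linux FEC ethernet driver (drivers/net/ethernet/freescale/fec_main.c), and nearly all of them follow one idiom: recover the driver's private state with netdev_priv(ndev), gate optional behaviour on the fep->quirks bitmask, and program the MAC through readl()/writel() on the fep->hwp MMIO base. The sketch below condenses that idiom; it is illustrative only, and sketch_priv, sketch_kick_mac, MY_FAKE_REG and MY_FAKE_QUIRK are invented names, not part of the driver.

	/*
	 * Illustrative sketch, not code from fec_main.c. The struct keeps
	 * only the two fields the idiom needs; in the real driver the
	 * private area is allocated by alloc_etherdev_mqs() so that
	 * netdev_priv() hands it back from the net_device.
	 */
	#include <linux/netdevice.h>
	#include <linux/io.h>
	#include <linux/bits.h>

	#define MY_FAKE_REG	0x084		/* hypothetical register offset */
	#define MY_FAKE_QUIRK	BIT(0)		/* hypothetical quirk flag */

	struct sketch_priv {
		void __iomem *hwp;	/* mapped MAC registers, cf. fep->hwp */
		u32 quirks;		/* per-SoC quirk bitmask, cf. fep->quirks */
	};

	static void sketch_kick_mac(struct net_device *ndev)
	{
		struct sketch_priv *fep = netdev_priv(ndev);	/* ndev -> private state */
		u32 val;

		/* Read-modify-write of a MAC register through the MMIO base. */
		val = readl(fep->hwp + MY_FAKE_REG);
		if (fep->quirks & MY_FAKE_QUIRK)
			val |= BIT(8);
		writel(val, fep->hwp + MY_FAKE_REG);
	}

Keeping every register access behind the single __iomem base pointer is what lets one driver serve each FEC variant; the per-SoC differences stay isolated in the quirks bitmask rather than in separate code paths, which is why `fep->quirks &` tests dominate the hit list above.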
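
A second pattern recurs in the TX paths matched above (e.g. the dma_map_single()/dma_mapping_error() pairs around file lines 535-537, 612-613 and 3822-3824): buffers are streamed to the device against &fep->pdev->dev, and a descriptor is only armed if the mapping succeeded. A minimal sketch of that check, with sketch_map_tx_buf() as an invented name:

	/*
	 * Illustrative sketch, not code from fec_main.c: map a TX buffer
	 * for DMA and refuse to use it if the mapping failed, mirroring
	 * the error handling in the matches above.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int sketch_map_tx_buf(struct device *dma_dev, void *buf,
				     size_t len, dma_addr_t *addr)
	{
		*addr = dma_map_single(dma_dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, *addr))
			return -ENOMEM;	/* caller must not arm the descriptor */
		return 0;
	}

The matching dma_unmap_single() calls in the completion paths (file lines 557, 623, 1416, 1435) are the other half of the contract: every successful mapping is unmapped exactly once before the buffer is reused or freed.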