Lines Matching refs:pp (struct mvneta_port *pp, the port-private context of the Marvell mvneta Ethernet driver, drivers/net/ethernet/marvell/mvneta.c; each entry below is the driver's own source line number followed by the matching line)

484 	struct mvneta_port	*pp;
762 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
764 writel(data, pp->base + offset);
768 static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
770 return readl(pp->base + offset);
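The two accessors above are the driver's single funnel to the hardware: every register touch in the rest of this listing goes through writel()/readl() on the ioremap()ed pp->base. A minimal user-space sketch of the same pattern, with a fake register file standing in for the mapped window (all names here are illustrative, not from the driver):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the ioremap()ed window; in the driver this is pp->base
     * and the accesses go through writel()/readl(). */
    struct fake_port {
            volatile uint32_t regs[64];     /* hypothetical register file */
    };

    static void reg_write(struct fake_port *pp, uint32_t offset, uint32_t data)
    {
            pp->regs[offset / 4] = data;    /* byte offset, 32-bit registers */
    }

    static uint32_t reg_read(struct fake_port *pp, uint32_t offset)
    {
            return pp->regs[offset / 4];
    }

    int main(void)
    {
            struct fake_port port = { { 0 } };

            reg_write(&port, 0x10, 0xdeadbeef);
            printf("0x%08" PRIx32 "\n", reg_read(&port, 0x10));
            return 0;
    }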
791 static void mvneta_mib_counters_clear(struct mvneta_port *pp)
797 mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
798 mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
799 mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
807 struct mvneta_port *pp = netdev_priv(dev);
820 cpu_stats = per_cpu_ptr(pp->stats, cpu);
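The two lines above are from mvneta_get_stats64() (driver line 807), which folds the per-CPU counters kept in pp->stats into one netdev-wide view. A sketch of that aggregation step, with the driver's u64_stats seqcount retry loop omitted and a plain struct standing in for mvneta_pcpu_stats:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The driver's mvneta_pcpu_stats carries more fields and is read under
     * a u64_stats seqcount; this keeps only the summation shape. */
    struct pcpu_stats {
            uint64_t rx_packets;
            uint64_t rx_bytes;
    };

    static void sum_stats(const struct pcpu_stats *per_cpu, int ncpus,
                          struct pcpu_stats *total)
    {
            int cpu;

            total->rx_packets = 0;
            total->rx_bytes = 0;
            for (cpu = 0; cpu < ncpus; cpu++) {
                    total->rx_packets += per_cpu[cpu].rx_packets;
                    total->rx_bytes += per_cpu[cpu].rx_bytes;
            }
    }

    int main(void)
    {
            const struct pcpu_stats stats[2] = { { 10, 1500 }, { 5, 600 } };
            struct pcpu_stats total;

            sum_stats(stats, 2, &total);
            printf("%" PRIu64 " packets, %" PRIu64 " bytes\n",
                   total.rx_packets, total.rx_bytes);
            return 0;
    }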
856 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
864 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
870 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
875 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
880 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
887 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
896 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
916 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
932 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
936 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
940 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
945 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
951 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
956 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
963 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
974 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1002 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
1008 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
1013 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
1017 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
1022 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1024 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1028 static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
1033 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1035 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1039 static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
1044 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1046 val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
1048 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1052 static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
1057 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1059 val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
1061 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1065 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
1072 dev_warn(pp->dev->dev.parent,
1078 val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
1080 mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
1084 static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
1090 win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
1092 if (pp->bm_win_id < 0) {
1096 pp->bm_win_id = i;
1103 i = pp->bm_win_id;
1106 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
1107 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
1110 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
1112 mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
1115 mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
1117 win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
1119 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
1122 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
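The window programming in mvneta_mbus_io_win_set() above encodes a 64 KiB-aligned base (only the top 16 bits kept, line 1112) and a size register holding (wsize - 1) with the low 16 bits masked off (line 1115). A sketch of that encoding; the target/attribute bits ORed into the base are an assumption about the low-bit layout:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Base keeps only its top 16 bits (64 KiB alignment); the size register
     * holds (wsize - 1) with the low 16 bits masked off, so windows are
     * 64 KiB-granular. */
    static uint32_t win_base_val(uint32_t base, uint32_t target_attr)
    {
            return (base & 0xffff0000) | target_attr;
    }

    static uint32_t win_size_val(uint32_t wsize)
    {
            return (wsize - 1) & 0xffff0000;
    }

    int main(void)
    {
            /* hypothetical 1 MiB window at 0x12340000 */
            printf("base=0x%08" PRIx32 " size=0x%08" PRIx32 "\n",
                   win_base_val(0x12340000, 0), win_size_val(0x100000));
            return 0;
    }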
1127 static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
1134 err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
1139 pp->bm_win_id = -1;
1142 err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
1145 netdev_info(pp->dev, "fail to configure mbus window to BM\n");
1155 struct mvneta_port *pp)
1160 if (!pp->neta_armada3700) {
1163 ret = mvneta_bm_port_mbus_init(pp);
1169 netdev_info(pp->dev, "missing long pool id\n");
1174 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
1175 MVNETA_BM_LONG, pp->id,
1176 MVNETA_RX_PKT_SIZE(pp->dev->mtu));
1177 if (!pp->pool_long) {
1178 netdev_info(pp->dev, "fail to obtain long pool for port\n");
1182 pp->pool_long->port_map |= 1 << pp->id;
1184 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
1185 pp->pool_long->id);
1192 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
1193 MVNETA_BM_SHORT, pp->id,
1195 if (!pp->pool_short) {
1196 netdev_info(pp->dev, "fail to obtain short pool for port\n");
1197 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1202 pp->pool_short->port_map |= 1 << pp->id;
1203 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
1204 pp->pool_short->id);
1211 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
1213 struct mvneta_bm_pool *bm_pool = pp->pool_long;
1218 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1237 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
1242 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1243 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
1245 pp->bm_priv = NULL;
1246 pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
1247 mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
1248 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1252 static void mvneta_port_up(struct mvneta_port *pp)
1260 struct mvneta_tx_queue *txq = &pp->txqs[queue];
1264 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1269 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1274 mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
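mvneta_port_up() builds one enable bitmap per direction and writes it to MVNETA_TXQ_CMD / MVNETA_RXQ_CMD: one bit per queue, set only for queues whose descriptor ring was actually allocated. A self-contained sketch of the bitmap construction:

    #include <stdint.h>
    #include <stdio.h>

    /* One bit per queue, set only if the queue's descriptor ring exists;
     * the result is what gets written to the TXQ/RXQ command register. */
    static uint32_t queue_enable_map(const int initialized[], int nqueues)
    {
            uint32_t q_map = 0;
            int q;

            for (q = 0; q < nqueues; q++)
                    if (initialized[q])
                            q_map |= 1u << q;
            return q_map;
    }

    int main(void)
    {
            const int init[8] = { 1, 1, 0, 0, 1, 0, 0, 0 };

            printf("q_map=0x%02x\n", queue_enable_map(init, 8));  /* 0x13 */
            return 0;
    }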
1278 static void mvneta_port_down(struct mvneta_port *pp)
1284 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
1288 mvreg_write(pp, MVNETA_RXQ_CMD,
1295 netdev_warn(pp->dev,
1302 val = mvreg_read(pp, MVNETA_RXQ_CMD);
1308 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
1311 mvreg_write(pp, MVNETA_TXQ_CMD,
1318 netdev_warn(pp->dev,
1326 val = mvreg_read(pp, MVNETA_TXQ_CMD);
1334 netdev_warn(pp->dev,
1341 val = mvreg_read(pp, MVNETA_PORT_STATUS);
1349 static void mvneta_port_enable(struct mvneta_port *pp)
1354 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1356 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1360 static void mvneta_port_disable(struct mvneta_port *pp)
1365 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1367 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1375 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1388 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
1392 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1405 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
1410 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1416 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
1419 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
1425 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
1430 struct mvneta_port *pp = arg;
1435 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1443 struct mvneta_port *pp = arg;
1448 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1449 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1450 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1455 struct mvneta_port *pp = arg;
1460 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1461 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1462 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1474 static void mvneta_defaults_set(struct mvneta_port *pp)
1482 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1485 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1486 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1489 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1499 if (!pp->neta_armada3700) {
1513 txq_map = (cpu == pp->rxq_def) ?
1521 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
1525 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1526 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1529 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1531 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1532 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1535 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1536 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1539 if (pp->bm_priv)
1545 mvreg_write(pp, MVNETA_ACC_MODE, val);
1547 if (pp->bm_priv)
1548 mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
1551 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1552 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1555 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1556 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1571 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1576 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1578 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1580 mvneta_set_ucast_table(pp, -1);
1581 mvneta_set_special_mcast_table(pp, -1);
1582 mvneta_set_other_mcast_table(pp, -1);
1585 mvreg_write(pp, MVNETA_INTR_ENABLE,
1589 mvneta_mib_counters_clear(pp);
1593 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1604 val = mvreg_read(pp, MVNETA_TX_MTU);
1607 mvreg_write(pp, MVNETA_TX_MTU, val);
1610 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1617 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1620 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1627 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1633 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1649 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1659 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1663 static void mvneta_mac_addr_set(struct mvneta_port *pp,
1674 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1675 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1679 mvneta_set_ucast_addr(pp, addr[5], queue);
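mvneta_mac_addr_set() splits the six-byte station address across the two registers written at lines 1674-1675: the first four bytes into MVNETA_MAC_ADDR_HIGH, the last two into MVNETA_MAC_ADDR_LOW. A sketch of the packing, with the byte order assumed from the usual high/low split in this MAC family:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* First four MAC bytes -> high register, last two -> low register. */
    static void mac_to_regs(const uint8_t addr[6], uint32_t *mac_h, uint32_t *mac_l)
    {
            *mac_h = ((uint32_t)addr[0] << 24) | ((uint32_t)addr[1] << 16) |
                     ((uint32_t)addr[2] << 8) | addr[3];
            *mac_l = ((uint32_t)addr[4] << 8) | addr[5];
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 };
            uint32_t h, l;

            mac_to_regs(mac, &h, &l);
            /* prints high=0x00504312 low=0x00003456 */
            printf("high=0x%08" PRIx32 " low=0x%08" PRIx32 "\n", h, l);
            return 0;
    }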
1685 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1688 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1695 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1701 clk_rate = clk_get_rate(pp->clk);
1704 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
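The RXQ time-coalescing register counts core-clock cycles, so mvneta_rx_time_coal_set() scales the requested delay by the rate it reads via clk_get_rate(pp->clk) at line 1701. A sketch of that microseconds-to-cycles conversion, under the assumption that the register unit is one core-clock cycle:

    #include <stdint.h>
    #include <stdio.h>

    /* Scale a delay in microseconds by the core-clock rate; clk_rate_hz
     * stands in for clk_get_rate(pp->clk). */
    static uint32_t usec_to_coal_cycles(unsigned long clk_rate_hz, uint32_t usecs)
    {
            return (uint32_t)((clk_rate_hz / 1000000) * usecs);
    }

    int main(void)
    {
            /* hypothetical 250 MHz core clock, 100 us coalescing window */
            printf("%u cycles\n", usec_to_coal_cycles(250000000UL, 100));
            return 0;
    }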
1708 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1713 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1718 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1734 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1743 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1748 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
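The sent-descriptor field of MVNETA_TXQ_UPDATE_REG is only 8 bits wide, which is why mvneta_txq_sent_desc_dec() issues one write per chunk of at most 255 descriptors (the in-loop write at line 1743, the tail write at line 1748). A sketch of the chunked acknowledgement, with a callback standing in for the register write:

    #include <stdint.h>
    #include <stdio.h>

    /* Acknowledge 'sent' descriptors in chunks of at most 255, one
     * register write per chunk, mirroring the loop/tail writes above. */
    static void ack_sent_descs(int sent, void (*hw_write)(uint32_t chunk))
    {
            while (sent > 0xff) {
                    hw_write(0xff);
                    sent -= 0xff;
            }
            hw_write((uint32_t)sent);
    }

    static void print_chunk(uint32_t chunk)
    {
            printf("ack %u descriptors\n", chunk);
    }

    int main(void)
    {
            ack_sent_descs(600, print_chunk);   /* 255 + 255 + 90 */
            return 0;
    }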
1752 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1758 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1768 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1774 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1778 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1813 static void mvneta_rx_error(struct mvneta_port *pp,
1816 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1826 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1830 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1834 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1838 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1845 static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
1847 if ((pp->dev->features & NETIF_F_RXCSUM) &&
1859 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1864 return &pp->txqs[queue];
1868 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1889 dma_unmap_single(pp->dev->dev.parent,
1913 static void mvneta_txq_done(struct mvneta_port *pp,
1916 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1919 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1923 mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
1935 static int mvneta_rx_refill(struct mvneta_port *pp,
1948 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
1986 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1991 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1993 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1995 if (pp->bm_priv) {
2002 bm_pool = &pp->bm_priv->bm_pools[pool_id];
2004 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2025 mvneta_update_stats(struct mvneta_port *pp,
2028 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2041 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
2050 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
2056 stats = this_cpu_ptr(pp->stats);
2072 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2091 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
2095 struct device *dev = pp->dev->dev.parent;
2174 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
2176 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2188 txq = &pp->txqs[cpu % txq_number];
2189 nq = netdev_get_tx_queue(pp->dev, txq->id);
2192 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false);
2200 mvneta_txq_pend_desc_add(pp, txq, 0);
2215 struct mvneta_port *pp = netdev_priv(dev);
2216 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2223 if (unlikely(test_bit(__MVNETA_DOWN, &pp->state)))
2229 txq = &pp->txqs[cpu % txq_number];
2230 nq = netdev_get_tx_queue(pp->dev, txq->id);
2234 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte,
2243 mvneta_txq_pend_desc_add(pp, txq, 0);
2257 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2264 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2269 sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2279 err = xdp_do_redirect(pp->dev, xdp, prog);
2281 mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2290 ret = mvneta_xdp_xmit_back(pp, xdp);
2292 mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2295 bpf_warn_invalid_xdp_action(pp->dev, prog, act);
2298 trace_xdp_exception(pp->dev, prog, act);
2301 mvneta_xdp_put_buff(pp, rxq, xdp, sync);
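mvneta_run_xdp() is the per-frame verdict switch: PASS falls through to skb construction, TX bounces the frame back out via mvneta_xdp_xmit_back() (line 2290), REDIRECT goes through xdp_do_redirect() (line 2279), and DROP/ABORTED recycle the buffer into the page pool (line 2301). A stub sketch of that dispatch shape; the enum values mirror the uapi XDP actions and the handlers are placeholders:

    #include <stdio.h>

    /* Values mirror the uapi enum xdp_action; handlers are placeholders. */
    enum xdp_verdict {
            VERDICT_ABORTED = 0,
            VERDICT_DROP,
            VERDICT_PASS,
            VERDICT_TX,
            VERDICT_REDIRECT,
    };

    static const char *handle_verdict(enum xdp_verdict act)
    {
            switch (act) {
            case VERDICT_PASS:
                    return "build an skb and hand it up the stack";
            case VERDICT_TX:
                    return "retransmit out of the same port";
            case VERDICT_REDIRECT:
                    return "hand off via xdp_do_redirect()";
            case VERDICT_DROP:
            case VERDICT_ABORTED:
            default:
                    return "recycle the buffer into the page pool";
            }
    }

    int main(void)
    {
            printf("XDP_TX -> %s\n", handle_verdict(VERDICT_TX));
            return 0;
    }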
2314 mvneta_swbm_rx_frame(struct mvneta_port *pp,
2322 struct net_device *dev = pp->dev;
2344 xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
2349 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
2356 struct net_device *dev = pp->dev;
2380 pp->rx_offset_correction, data_len);
2395 mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
2413 skb->ip_summed = mvneta_rx_csum(pp, desc_status);
2426 struct mvneta_port *pp, int budget,
2430 struct net_device *dev = pp->dev;
2440 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
2442 xdp_prog = READ_ONCE(pp->xdp_prog);
2461 mvneta_rx_error(pp, rx_desc);
2469 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
2479 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
2488 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2493 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
2496 skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
2498 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2500 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2520 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2526 mvneta_update_stats(pp, &ps);
2529 refill = mvneta_rx_refill_queue(pp, rxq);
2532 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2539 struct mvneta_port *pp, int rx_todo,
2542 struct net_device *dev = pp->dev;
2548 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2572 bm_pool = &pp->bm_priv->bm_pools[pool_id];
2578 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2581 mvneta_rx_error(pp, rx_desc);
2592 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2601 skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2608 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2622 stats = this_cpu_ptr(pp->stats);
2637 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2650 skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2656 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2665 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2670 static void mvneta_free_tso_hdrs(struct mvneta_port *pp,
2673 struct device *dev = pp->dev->dev.parent;
2686 static int mvneta_alloc_tso_hdrs(struct mvneta_port *pp,
2689 struct device *dev = pp->dev->dev.parent;
2698 mvneta_free_tso_hdrs(pp, txq);
2775 static void mvneta_release_descs(struct mvneta_port *pp,
2790 dma_unmap_single(pp->dev->dev.parent,
2808 struct mvneta_port *pp = netdev_priv(dev);
2857 mvneta_release_descs(pp, txq, first_desc, desc_count - 1);
2862 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2878 dma_map_single(pp->dev->dev.parent, addr,
2881 if (dma_mapping_error(pp->dev->dev.parent,
2906 mvneta_release_descs(pp, txq, first_desc, i - 1);
2913 struct mvneta_port *pp = netdev_priv(dev);
2915 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2963 if (mvneta_tx_frag_process(pp, skb, txq)) {
2977 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2987 mvneta_txq_pend_desc_add(pp, txq, frags);
3005 static void mvneta_txq_done_force(struct mvneta_port *pp,
3009 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3012 mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
3023 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
3030 txq = mvneta_tx_done_policy(pp, cause_tx_done);
3032 nq = netdev_get_tx_queue(pp->dev, txq->id);
3036 mvneta_txq_done(pp, txq);
3071 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
3084 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
3094 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
3106 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
3117 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
3127 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
3139 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
3145 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
3151 if (pp->mcast_count[crc_result] == 0) {
3152 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
3157 pp->mcast_count[crc_result]--;
3158 if (pp->mcast_count[crc_result] != 0) {
3159 netdev_info(pp->dev,
3161 pp->mcast_count[crc_result], crc_result);
3165 pp->mcast_count[crc_result]++;
3167 mvneta_set_other_mcast_addr(pp, crc_result, queue);
3173 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
3178 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
3180 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
3187 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
3188 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
3195 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
3196 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
3202 struct mvneta_port *pp = netdev_priv(dev);
3207 mvneta_rx_unicast_promisc_set(pp, 1);
3208 mvneta_set_ucast_table(pp, pp->rxq_def);
3209 mvneta_set_special_mcast_table(pp, pp->rxq_def);
3210 mvneta_set_other_mcast_table(pp, pp->rxq_def);
3213 mvneta_rx_unicast_promisc_set(pp, 0);
3214 mvneta_set_ucast_table(pp, -1);
3215 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
3219 mvneta_set_special_mcast_table(pp, pp->rxq_def);
3220 mvneta_set_other_mcast_table(pp, pp->rxq_def);
3223 mvneta_set_special_mcast_table(pp, -1);
3224 mvneta_set_other_mcast_table(pp, -1);
3228 mvneta_mcast_addr_set(pp, ha->addr,
3229 pp->rxq_def);
3239 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
3241 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
3242 napi_schedule(&pp->napi);
3252 disable_percpu_irq(port->pp->dev->irq);
3258 static void mvneta_link_change(struct mvneta_port *pp)
3260 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3262 phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
3277 struct mvneta_port *pp = netdev_priv(napi->dev);
3278 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
3280 if (!netif_running(pp->dev)) {
3286 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
3288 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
3290 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
3294 mvneta_link_change(pp);
3299 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
3306 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
3312 if (pp->bm_priv)
3313 rx_done = mvneta_rx_hwbm(napi, pp, budget,
3314 &pp->rxqs[rx_queue]);
3316 rx_done = mvneta_rx_swbm(napi, pp, budget,
3317 &pp->rxqs[rx_queue]);
3324 if (pp->neta_armada3700) {
3328 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
3334 enable_percpu_irq(pp->dev->irq, 0);
3338 if (pp->neta_armada3700)
3339 pp->cause_rx_tx = cause_rx_tx;
3346 static int mvneta_create_page_pool(struct mvneta_port *pp,
3349 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
3355 .dev = pp->dev->dev.parent,
3357 .offset = pp->rx_offset_correction,
3369 err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
3390 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
3395 err = mvneta_create_page_pool(pp, rxq, num);
3401 if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
3403 netdev_err(pp->dev,
3413 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
3419 static void mvneta_tx_reset(struct mvneta_port *pp)
3425 mvneta_txq_done_force(pp, &pp->txqs[queue]);
3427 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
3428 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
3431 static void mvneta_rx_reset(struct mvneta_port *pp)
3433 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
3434 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
3439 static int mvneta_rxq_sw_init(struct mvneta_port *pp,
3442 rxq->size = pp->rx_ring_size;
3445 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3456 static void mvneta_rxq_hw_init(struct mvneta_port *pp,
3460 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
3461 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
3464 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3465 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3467 if (!pp->bm_priv) {
3469 mvneta_rxq_offset_set(pp, rxq, 0);
3470 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
3472 MVNETA_RX_BUF_SIZE(pp->pkt_size));
3473 mvneta_rxq_bm_disable(pp, rxq);
3474 mvneta_rxq_fill(pp, rxq, rxq->size);
3477 mvneta_rxq_offset_set(pp, rxq,
3478 NET_SKB_PAD - pp->rx_offset_correction);
3480 mvneta_rxq_bm_enable(pp, rxq);
3482 mvneta_rxq_long_pool_set(pp, rxq);
3483 mvneta_rxq_short_pool_set(pp, rxq);
3484 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3489 static int mvneta_rxq_init(struct mvneta_port *pp,
3495 ret = mvneta_rxq_sw_init(pp, rxq);
3499 mvneta_rxq_hw_init(pp, rxq);
3505 static void mvneta_rxq_deinit(struct mvneta_port *pp,
3508 mvneta_rxq_drop_pkts(pp, rxq);
3511 dma_free_coherent(pp->dev->dev.parent,
3524 static int mvneta_txq_sw_init(struct mvneta_port *pp,
3529 txq->size = pp->tx_ring_size;
3539 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3552 err = mvneta_alloc_tso_hdrs(pp, txq);
3557 if (pp->neta_armada3700)
3562 cpu = pp->rxq_def % num_present_cpus();
3564 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3569 static void mvneta_txq_hw_init(struct mvneta_port *pp,
3573 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3574 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3577 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3578 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3580 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3584 static int mvneta_txq_init(struct mvneta_port *pp,
3589 ret = mvneta_txq_sw_init(pp, txq);
3593 mvneta_txq_hw_init(pp, txq);
3599 static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
3602 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3606 mvneta_free_tso_hdrs(pp, txq);
3608 dma_free_coherent(pp->dev->dev.parent,
3621 static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
3625 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3626 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3629 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3630 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3633 static void mvneta_txq_deinit(struct mvneta_port *pp,
3636 mvneta_txq_sw_deinit(pp, txq);
3637 mvneta_txq_hw_deinit(pp, txq);
3641 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3646 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3650 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3655 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3660 static int mvneta_setup_rxqs(struct mvneta_port *pp)
3665 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3668 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3670 mvneta_cleanup_rxqs(pp);
3679 static int mvneta_setup_txqs(struct mvneta_port *pp)
3684 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3686 netdev_err(pp->dev, "%s: can't create txq=%d\n",
3688 mvneta_cleanup_txqs(pp);
3696 static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
3700 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
3704 return phy_power_on(pp->comphy);
3707 static int mvneta_config_interface(struct mvneta_port *pp,
3712 if (pp->comphy) {
3716 ret = mvneta_comphy_init(pp, interface);
3721 mvreg_write(pp, MVNETA_SERDES_CFG,
3727 mvreg_write(pp, MVNETA_SERDES_CFG,
3732 mvreg_write(pp, MVNETA_SERDES_CFG,
3740 pp->phy_interface = interface;
3745 static void mvneta_start_dev(struct mvneta_port *pp)
3749 WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
3751 mvneta_max_rx_size_set(pp, pp->pkt_size);
3752 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3755 mvneta_port_enable(pp);
3757 if (!pp->neta_armada3700) {
3761 per_cpu_ptr(pp->ports, cpu);
3766 napi_enable(&pp->napi);
3770 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3772 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3776 phylink_start(pp->phylink);
3779 phylink_speed_up(pp->phylink);
3781 netif_tx_start_all_queues(pp->dev);
3783 clear_bit(__MVNETA_DOWN, &pp->state);
3786 static void mvneta_stop_dev(struct mvneta_port *pp)
3790 set_bit(__MVNETA_DOWN, &pp->state);
3792 if (device_may_wakeup(&pp->dev->dev))
3793 phylink_speed_down(pp->phylink, false);
3795 phylink_stop(pp->phylink);
3797 if (!pp->neta_armada3700) {
3800 per_cpu_ptr(pp->ports, cpu);
3805 napi_disable(&pp->napi);
3808 netif_carrier_off(pp->dev);
3810 mvneta_port_down(pp);
3811 netif_tx_stop_all_queues(pp->dev);
3814 mvneta_port_disable(pp);
3817 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3820 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3822 mvneta_tx_reset(pp);
3823 mvneta_rx_reset(pp);
3825 WARN_ON(phy_power_off(pp->comphy));
3830 struct mvneta_port *pp = arg;
3832 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3837 struct mvneta_port *pp = arg;
3839 disable_percpu_irq(pp->dev->irq);
3845 struct mvneta_port *pp = netdev_priv(dev);
3846 struct bpf_prog *prog = pp->xdp_prog;
3866 if (pp->bm_priv)
3867 mvneta_bm_update_mtu(pp, mtu);
3876 mvneta_stop_dev(pp);
3877 on_each_cpu(mvneta_percpu_disable, pp, true);
3879 mvneta_cleanup_txqs(pp);
3880 mvneta_cleanup_rxqs(pp);
3882 if (pp->bm_priv)
3883 mvneta_bm_update_mtu(pp, mtu);
3885 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3887 ret = mvneta_setup_rxqs(pp);
3893 ret = mvneta_setup_txqs(pp);
3899 on_each_cpu(mvneta_percpu_enable, pp, true);
3900 mvneta_start_dev(pp);
3910 struct mvneta_port *pp = netdev_priv(dev);
3912 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3916 pp->tx_csum_limit);
3923 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3927 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3928 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3940 struct mvneta_port *pp = netdev_priv(dev);
3948 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3951 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3981 struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
3984 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
4010 struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
4051 old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4055 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an);
4063 struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
4064 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4066 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
4068 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
4083 struct mvneta_port *pp = netdev_priv(ndev);
4085 return &pp->phylink_pcs;
4092 struct mvneta_port *pp = netdev_priv(ndev);
4095 if (pp->phy_interface != interface ||
4102 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4105 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4108 if (pp->phy_interface != interface)
4109 WARN_ON(phy_power_off(pp->comphy));
4113 unsigned long rate = clk_get_rate(pp->clk);
4115 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER,
4126 struct mvneta_port *pp = netdev_priv(ndev);
4127 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
4128 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
4129 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
4165 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
4167 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
4169 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
4172 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
4182 struct mvneta_port *pp = netdev_priv(ndev);
4187 clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
4189 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk);
4192 if (pp->phy_interface != interface)
4194 WARN_ON(mvneta_config_interface(pp, interface));
4200 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4202 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4208 static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
4212 lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
4217 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
4224 struct mvneta_port *pp = netdev_priv(ndev);
4227 mvneta_port_down(pp);
4230 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4233 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4236 pp->eee_active = false;
4237 mvneta_set_eee(pp, false);
4247 struct mvneta_port *pp = netdev_priv(ndev);
4251 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4270 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4276 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4282 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4285 mvneta_port_up(pp);
4287 if (phy && pp->eee_enabled) {
4288 pp->eee_active = phy_init_eee(phy, false) >= 0;
4289 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
4302 static int mvneta_mdio_probe(struct mvneta_port *pp)
4305 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
4308 netdev_err(pp->dev, "could not attach PHY: %d\n", err);
4310 phylink_ethtool_get_wol(pp->phylink, &wol);
4311 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
4315 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
4320 static void mvneta_mdio_remove(struct mvneta_port *pp)
4322 phylink_disconnect_phy(pp->phylink);
4329 static void mvneta_percpu_elect(struct mvneta_port *pp)
4336 if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
4337 elected_cpu = pp->rxq_def;
4351 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
4361 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
4364 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
4370 pp, true);
4377 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4379 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4384 if (pp->neta_armada3700)
4387 spin_lock(&pp->lock);
4392 if (pp->is_stopped) {
4393 spin_unlock(&pp->lock);
4396 netif_tx_stop_all_queues(pp->dev);
4405 per_cpu_ptr(pp->ports, other_cpu);
4412 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4419 mvneta_percpu_enable(pp);
4425 mvneta_percpu_elect(pp);
4428 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4429 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4432 netif_tx_start_all_queues(pp->dev);
4433 spin_unlock(&pp->lock);
4439 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4441 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4447 spin_lock(&pp->lock);
4449 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4450 spin_unlock(&pp->lock);
4455 mvneta_percpu_disable(pp);
4461 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4465 spin_lock(&pp->lock);
4466 mvneta_percpu_elect(pp);
4467 spin_unlock(&pp->lock);
4469 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4470 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4473 netif_tx_start_all_queues(pp->dev);
4479 struct mvneta_port *pp = netdev_priv(dev);
4482 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
4484 ret = mvneta_setup_rxqs(pp);
4488 ret = mvneta_setup_txqs(pp);
4493 if (pp->neta_armada3700)
4494 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
4495 dev->name, pp);
4497 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
4498 dev->name, pp->ports);
4500 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
4504 if (!pp->neta_armada3700) {
4508 on_each_cpu(mvneta_percpu_enable, pp, true);
4510 pp->is_stopped = false;
4515 &pp->node_online);
4520 &pp->node_dead);
4525 ret = mvneta_mdio_probe(pp);
4531 mvneta_start_dev(pp);
4536 if (!pp->neta_armada3700)
4538 &pp->node_dead);
4540 if (!pp->neta_armada3700)
4542 &pp->node_online);
4544 if (pp->neta_armada3700) {
4545 free_irq(pp->dev->irq, pp);
4547 on_each_cpu(mvneta_percpu_disable, pp, true);
4548 free_percpu_irq(pp->dev->irq, pp->ports);
4551 mvneta_cleanup_txqs(pp);
4553 mvneta_cleanup_rxqs(pp);
4560 struct mvneta_port *pp = netdev_priv(dev);
4562 if (!pp->neta_armada3700) {
4568 spin_lock(&pp->lock);
4569 pp->is_stopped = true;
4570 spin_unlock(&pp->lock);
4572 mvneta_stop_dev(pp);
4573 mvneta_mdio_remove(pp);
4576 &pp->node_online);
4578 &pp->node_dead);
4579 on_each_cpu(mvneta_percpu_disable, pp, true);
4580 free_percpu_irq(dev->irq, pp->ports);
4582 mvneta_stop_dev(pp);
4583 mvneta_mdio_remove(pp);
4584 free_irq(dev->irq, pp);
4587 mvneta_cleanup_rxqs(pp);
4588 mvneta_cleanup_txqs(pp);
4595 struct mvneta_port *pp = netdev_priv(dev);
4597 return phylink_mii_ioctl(pp->phylink, ifr, cmd);
4604 struct mvneta_port *pp = netdev_priv(dev);
4613 if (pp->bm_priv) {
4619 need_update = !!pp->xdp_prog != !!prog;
4623 old_prog = xchg(&pp->xdp_prog, prog);
4650 struct mvneta_port *pp = netdev_priv(ndev);
4652 return phylink_ethtool_ksettings_set(pp->phylink, cmd);
4660 struct mvneta_port *pp = netdev_priv(ndev);
4662 return phylink_ethtool_ksettings_get(pp->phylink, cmd);
4667 struct mvneta_port *pp = netdev_priv(dev);
4669 return phylink_ethtool_nway_reset(pp->phylink);
4679 struct mvneta_port *pp = netdev_priv(dev);
4683 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4686 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
4687 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
4691 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4693 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
4706 struct mvneta_port *pp = netdev_priv(dev);
4708 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
4709 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
4711 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
4734 struct mvneta_port *pp = netdev_priv(netdev);
4738 ring->rx_pending = pp->rx_ring_size;
4739 ring->tx_pending = pp->tx_ring_size;
4748 struct mvneta_port *pp = netdev_priv(dev);
4752 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
4755 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
4757 if (pp->tx_ring_size != ring->tx_pending)
4759 pp->tx_ring_size, ring->tx_pending);
4776 struct mvneta_port *pp = netdev_priv(dev);
4778 phylink_ethtool_get_pauseparam(pp->phylink, pause);
4784 struct mvneta_port *pp = netdev_priv(dev);
4786 return phylink_ethtool_set_pauseparam(pp->phylink, pause);
4793 struct mvneta_port *pp = netdev_priv(netdev);
4800 if (!pp->bm_priv) {
4808 mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
4826 stats = per_cpu_ptr(pp->stats, cpu);
4852 static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
4856 void __iomem *base = pp->base;
4861 mvneta_ethtool_update_pcpu_stats(pp, &stats);
4868 pp->ethtool_stats[i] += val;
4875 pp->ethtool_stats[i] += val;
4880 val = phylink_get_eee_err(pp->phylink);
4881 pp->ethtool_stats[i] += val;
4884 pp->ethtool_stats[i] = stats.skb_alloc_error;
4887 pp->ethtool_stats[i] = stats.refill_error;
4890 pp->ethtool_stats[i] = stats.ps.xdp_redirect;
4893 pp->ethtool_stats[i] = stats.ps.xdp_pass;
4896 pp->ethtool_stats[i] = stats.ps.xdp_drop;
4899 pp->ethtool_stats[i] = stats.ps.xdp_tx;
4902 pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
4905 pp->ethtool_stats[i] = stats.ps.xdp_xmit;
4908 pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
4916 static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
4922 if (pp->rxqs[i].page_pool)
4923 page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
4932 struct mvneta_port *pp = netdev_priv(dev);
4935 mvneta_ethtool_update_stats(pp);
4938 *data++ = pp->ethtool_stats[i];
4940 if (!pp->bm_priv)
4941 mvneta_ethtool_pp_stats(pp, data);
4948 struct mvneta_port *pp = netdev_priv(dev);
4950 if (!pp->bm_priv)
4979 static int mvneta_config_rss(struct mvneta_port *pp)
4984 netif_tx_stop_all_queues(pp->dev);
4986 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4988 if (!pp->neta_armada3700) {
4992 per_cpu_ptr(pp->ports, cpu);
4998 napi_synchronize(&pp->napi);
4999 napi_disable(&pp->napi);
5002 pp->rxq_def = pp->indir[0];
5005 mvneta_set_rx_mode(pp->dev);
5008 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
5009 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
5012 spin_lock(&pp->lock);
5013 mvneta_percpu_elect(pp);
5014 spin_unlock(&pp->lock);
5016 if (!pp->neta_armada3700) {
5020 per_cpu_ptr(pp->ports, cpu);
5025 napi_enable(&pp->napi);
5028 netif_tx_start_all_queues(pp->dev);
5036 struct mvneta_port *pp = netdev_priv(dev);
5039 if (pp->neta_armada3700)
5052 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
5054 return mvneta_config_rss(pp);
5060 struct mvneta_port *pp = netdev_priv(dev);
5063 if (pp->neta_armada3700)
5072 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
5080 struct mvneta_port *pp = netdev_priv(dev);
5082 phylink_ethtool_get_wol(pp->phylink, wol);
5088 struct mvneta_port *pp = netdev_priv(dev);
5091 ret = phylink_ethtool_set_wol(pp->phylink, wol);
5101 struct mvneta_port *pp = netdev_priv(dev);
5104 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
5106 eee->eee_enabled = pp->eee_enabled;
5107 eee->eee_active = pp->eee_active;
5108 eee->tx_lpi_enabled = pp->tx_lpi_enabled;
5111 return phylink_ethtool_get_eee(pp->phylink, eee);
5117 struct mvneta_port *pp = netdev_priv(dev);
5126 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
5129 mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
5131 pp->eee_enabled = eee->eee_enabled;
5132 pp->tx_lpi_enabled = eee->tx_lpi_enabled;
5134 mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
5136 return phylink_ethtool_set_eee(pp->phylink, eee);
5139 static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
5141 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
5144 static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
5146 u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);
5151 mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
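mvneta_map_vlan_prio_to_rxq() treats MVNETA_VLAN_PRIO_TO_RXQ as eight packed fields, one per VLAN priority, each naming an RXQ; the read-modify-write above clears one field and re-fills it. A sketch assuming 3-bit fields, sized to the eight hardware queues (the field width is an assumption, not taken from the listing):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Clear one priority's field, then fill in the new RXQ number. */
    static uint32_t set_prio_rxq(uint32_t val, unsigned int pri, unsigned int rxq)
    {
            val &= ~(0x7u << (pri * 3));
            val |= (uint32_t)rxq << (pri * 3);
            return val;
    }

    int main(void)
    {
            uint32_t reg = 0;

            reg = set_prio_rxq(reg, 5, 3);  /* steer VLAN priority 5 to RXQ 3 */
            printf("reg=0x%08" PRIx32 "\n", reg);   /* 0x00018000 */
            return 0;
    }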
5154 static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp)
5160 core_clk_rate = clk_get_rate(pp->clk);
5171 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
5173 mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
5176 mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles);
5181 static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp)
5183 u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
5186 mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
5189 static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue,
5212 mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val);
5220 struct mvneta_port *pp = netdev_priv(dev);
5232 mvneta_clear_rx_prio_map(pp);
5235 mvneta_disable_per_queue_rate_limit(pp);
5252 mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
5257 mvneta_disable_per_queue_rate_limit(pp);
5264 ret = mvneta_enable_per_queue_rate_limit(pp);
5275 ret = mvneta_setup_queue_rates(pp, txq,
5340 static int mvneta_init(struct device *dev, struct mvneta_port *pp)
5345 mvneta_port_disable(pp);
5348 mvneta_defaults_set(pp);
5350 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
5351 if (!pp->txqs)
5356 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5358 txq->size = pp->tx_ring_size;
5362 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
5363 if (!pp->rxqs)
5368 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5370 rxq->size = pp->rx_ring_size;
5374 = devm_kmalloc_array(pp->dev->dev.parent,
5386 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
5394 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
5395 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
5398 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
5408 mvreg_write(pp, MVNETA_WIN_BASE(i),
5413 mvreg_write(pp, MVNETA_WIN_SIZE(i),
5420 if (pp->neta_ac5)
5421 mvreg_write(pp, MVNETA_WIN_BASE(0),
5428 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
5433 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
5434 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
5438 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
5441 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
5457 struct mvneta_port *pp;
5478 pp = netdev_priv(dev);
5479 spin_lock_init(&pp->lock);
5480 pp->dn = dn;
5482 pp->rxq_def = rxq_def;
5483 pp->indir[0] = rxq_def;
5491 pp->phy_interface = phy_mode;
5500 pp->comphy = comphy;
5502 pp->base = devm_platform_ioremap_resource(pdev, 0);
5503 if (IS_ERR(pp->base))
5504 return PTR_ERR(pp->base);
5508 pp->neta_armada3700 = true;
5510 pp->neta_armada3700 = true;
5511 pp->neta_ac5 = true;
5518 pp->clk = devm_clk_get(&pdev->dev, "core");
5519 if (IS_ERR(pp->clk))
5520 pp->clk = devm_clk_get(&pdev->dev, NULL);
5521 if (IS_ERR(pp->clk)) {
5522 err = PTR_ERR(pp->clk);
5526 clk_prepare_enable(pp->clk);
5528 pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
5529 if (!IS_ERR(pp->clk_bus))
5530 clk_prepare_enable(pp->clk_bus);
5532 pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
5533 pp->phylink_pcs.neg_mode = true;
5535 pp->phylink_config.dev = &dev->dev;
5536 pp->phylink_config.type = PHYLINK_NETDEV;
5537 pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
5540 phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
5542 pp->phylink_config.supported_interfaces);
5548 pp->phylink_config.supported_interfaces);
5550 pp->phylink_config.supported_interfaces);
5552 pp->phylink_config.supported_interfaces);
5556 pp->phylink_config.supported_interfaces);
5561 pp->phylink_config.supported_interfaces);
5563 pp->phylink_config.supported_interfaces);
5566 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
5573 pp->phylink = phylink;
5576 pp->ports = alloc_percpu(struct mvneta_pcpu_port);
5577 if (!pp->ports) {
5583 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
5584 if (!pp->stats) {
5593 mvneta_get_mac_addr(pp, hw_mac_addr);
5617 pp->tx_csum_limit = tx_csum_limit;
5619 pp->dram_target_info = mv_mbus_dram_info();
5624 if (pp->dram_target_info || pp->neta_armada3700)
5625 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5627 pp->tx_ring_size = MVNETA_MAX_TXD;
5628 pp->rx_ring_size = MVNETA_MAX_RXD;
5630 pp->dev = dev;
5633 pp->id = global_port_id++;
5638 pp->bm_priv = mvneta_bm_get(bm_node);
5639 if (pp->bm_priv) {
5640 err = mvneta_bm_port_init(pdev, pp);
5644 mvneta_bm_put(pp->bm_priv);
5645 pp->bm_priv = NULL;
5652 pp->rx_offset_correction = max(0,
5659 if (!pp->bm_priv)
5660 pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5662 err = mvneta_init(&pdev->dev, pp);
5666 err = mvneta_port_power_up(pp, pp->phy_interface);
5675 if (pp->neta_armada3700) {
5676 netif_napi_add(dev, &pp->napi, mvneta_poll);
5680 per_cpu_ptr(pp->ports, cpu);
5683 port->pp = pp;
5691 if (!pp->bm_priv)
5714 platform_set_drvdata(pdev, pp->dev);
5719 if (pp->bm_priv) {
5720 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5721 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5722 1 << pp->id);
5723 mvneta_bm_put(pp->bm_priv);
5725 free_percpu(pp->stats);
5727 free_percpu(pp->ports);
5729 if (pp->phylink)
5730 phylink_destroy(pp->phylink);
5732 clk_disable_unprepare(pp->clk_bus);
5733 clk_disable_unprepare(pp->clk);
5743 struct mvneta_port *pp = netdev_priv(dev);
5746 clk_disable_unprepare(pp->clk_bus);
5747 clk_disable_unprepare(pp->clk);
5748 free_percpu(pp->ports);
5749 free_percpu(pp->stats);
5751 phylink_destroy(pp->phylink);
5753 if (pp->bm_priv) {
5754 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5755 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5756 1 << pp->id);
5757 mvneta_bm_put(pp->bm_priv);
5768 struct mvneta_port *pp = netdev_priv(dev);
5773 if (!pp->neta_armada3700) {
5774 spin_lock(&pp->lock);
5775 pp->is_stopped = true;
5776 spin_unlock(&pp->lock);
5779 &pp->node_online);
5781 &pp->node_dead);
5785 mvneta_stop_dev(pp);
5789 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5791 mvneta_rxq_drop_pkts(pp, rxq);
5795 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5797 mvneta_txq_hw_deinit(pp, txq);
5802 clk_disable_unprepare(pp->clk_bus);
5803 clk_disable_unprepare(pp->clk);
5812 struct mvneta_port *pp = netdev_priv(dev);
5815 clk_prepare_enable(pp->clk);
5816 if (!IS_ERR(pp->clk_bus))
5817 clk_prepare_enable(pp->clk_bus);
5818 if (pp->dram_target_info || pp->neta_armada3700)
5819 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5820 if (pp->bm_priv) {
5821 err = mvneta_bm_port_init(pdev, pp);
5824 pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5825 pp->bm_priv = NULL;
5828 mvneta_defaults_set(pp);
5829 err = mvneta_port_power_up(pp, pp->phy_interface);
5841 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5844 mvneta_rxq_hw_init(pp, rxq);
5848 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5851 mvneta_txq_hw_init(pp, txq);
5854 if (!pp->neta_armada3700) {
5855 spin_lock(&pp->lock);
5856 pp->is_stopped = false;
5857 spin_unlock(&pp->lock);
5859 &pp->node_online);
5861 &pp->node_dead);
5865 mvneta_start_dev(pp);