Lines matching refs:pp (references to the struct mvneta_port *pp port context in drivers/net/ethernet/marvell/mvneta.c)
447 struct mvneta_port *pp;
722 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
724 writel(data, pp->base + offset);
728 static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
730 return readl(pp->base + offset);
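The two helpers above are thin wrappers around writel()/readl() on pp->base, and nearly every other hit in this listing composes them into a read-modify-write of a port register. A minimal sketch of that recurring pattern; EXAMPLE_REG and EXAMPLE_BIT are hypothetical placeholders, not defines from the driver:

    /* Sketch only: EXAMPLE_REG and EXAMPLE_BIT are illustrative placeholders. */
    static void mvneta_example_set_bit(struct mvneta_port *pp, bool on)
    {
            u32 val;

            val = mvreg_read(pp, EXAMPLE_REG);      /* readl(pp->base + offset) */
            if (on)
                    val |= EXAMPLE_BIT;             /* modify in place */
            else
                    val &= ~EXAMPLE_BIT;
            mvreg_write(pp, EXAMPLE_REG, val);      /* writel() back */
    }
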
751 static void mvneta_mib_counters_clear(struct mvneta_port *pp)
757 mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
758 mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
759 mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
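The reads in mvneta_mib_counters_clear are dummy reads: the MIB counters on this controller are clear-on-read, so one pass over the counter block resets the statistics. A hedged sketch of the presumed surrounding loop; the end-offset constant is an assumption, not quoted from the file:

    /* Sketch: clear MIB statistics by reading every counter once.
     * MVNETA_MIB_LATE_COLLISION as the loop bound is an assumption. */
    int i;

    for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
            mvreg_read(pp, MVNETA_MIB_COUNTERS_BASE + i);
    mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
    mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
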
767 struct mvneta_port *pp = netdev_priv(dev);
780 cpu_stats = per_cpu_ptr(pp->stats, cpu);
816 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
824 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
830 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
835 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
840 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
847 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
856 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
876 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
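Both mvneta_rxq_non_occup_desc_add and mvneta_rxq_desc_num_update write MVNETA_RXQ_STATUS_UPDATE_REG more than once because each field in that register is only 8 bits wide: at most 255 processed/refilled descriptors can be reported per write, so larger updates are applied in chunks. A sketch of the chunking; the field-shift name is assumed:

    /* Sketch: report rx_done processed and rx_filled refilled descriptors,
     * at most 255 per field per write (shift name assumed). */
    static void example_rxq_desc_num_update(struct mvneta_port *pp,
                                            struct mvneta_rx_queue *rxq,
                                            int rx_done, int rx_filled)
    {
            while (rx_done > 0 || rx_filled > 0) {
                    int done = min(rx_done, 0xff);
                    int filled = min(rx_filled, 0xff);
                    u32 val = done |
                              (filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);

                    mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
                    rx_done -= done;
                    rx_filled -= filled;
            }
    }
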
892 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
896 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
900 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
905 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
911 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
916 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
923 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
934 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
962 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
968 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
973 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
977 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
982 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
984 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
988 static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
993 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
995 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
999 static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
1004 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1006 val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
1008 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1012 static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
1017 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1019 val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
1021 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1025 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
1032 dev_warn(pp->dev->dev.parent,
1038 val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
1040 mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
1044 static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
1050 win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
1052 if (pp->bm_win_id < 0) {
1056 pp->bm_win_id = i;
1063 i = pp->bm_win_id;
1066 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
1067 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
1070 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
1072 mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
1075 mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
1077 win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
1079 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
1082 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
1087 static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
1094 err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
1099 pp->bm_win_id = -1;
1102 err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
1105 netdev_info(pp->dev, "fail to configure mbus window to BM\n");
1115 struct mvneta_port *pp)
1120 if (!pp->neta_armada3700) {
1123 ret = mvneta_bm_port_mbus_init(pp);
1129 netdev_info(pp->dev, "missing long pool id\n");
1134 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
1135 MVNETA_BM_LONG, pp->id,
1136 MVNETA_RX_PKT_SIZE(pp->dev->mtu));
1137 if (!pp->pool_long) {
1138 netdev_info(pp->dev, "fail to obtain long pool for port\n");
1142 pp->pool_long->port_map |= 1 << pp->id;
1144 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
1145 pp->pool_long->id);
1152 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
1153 MVNETA_BM_SHORT, pp->id,
1155 if (!pp->pool_short) {
1156 netdev_info(pp->dev, "fail to obtain short pool for port\n");
1157 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1162 pp->pool_short->port_map |= 1 << pp->id;
1163 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
1164 pp->pool_short->id);
1171 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
1173 struct mvneta_bm_pool *bm_pool = pp->pool_long;
1178 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1197 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
1202 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1203 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
1205 pp->bm_priv = NULL;
1206 pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
1207 mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
1208 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1212 static void mvneta_port_up(struct mvneta_port *pp)
1220 struct mvneta_tx_queue *txq = &pp->txqs[queue];
1224 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1229 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1234 mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
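mvneta_port_up enables only the queues that were actually initialized: it builds a bitmap of queues whose descriptor rings exist and writes it to the TXQ/RXQ command registers. A sketch of the TX half, assuming the driver's txq_number queue count and the txq->descs test used elsewhere in this listing:

    /* Sketch: enable every TX queue that has a descriptor ring. */
    static void example_txqs_enable(struct mvneta_port *pp)
    {
            u32 q_map = 0;
            int queue;

            for (queue = 0; queue < txq_number; queue++) {
                    struct mvneta_tx_queue *txq = &pp->txqs[queue];

                    if (txq->descs)
                            q_map |= (1 << queue);
            }
            mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
    }
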
1238 static void mvneta_port_down(struct mvneta_port *pp)
1244 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
1248 mvreg_write(pp, MVNETA_RXQ_CMD,
1255 netdev_warn(pp->dev,
1262 val = mvreg_read(pp, MVNETA_RXQ_CMD);
1268 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
1271 mvreg_write(pp, MVNETA_TXQ_CMD,
1278 netdev_warn(pp->dev,
1286 val = mvreg_read(pp, MVNETA_TXQ_CMD);
1294 netdev_warn(pp->dev,
1301 val = mvreg_read(pp, MVNETA_PORT_STATUS);
1309 static void mvneta_port_enable(struct mvneta_port *pp)
1314 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1316 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1320 static void mvneta_port_disable(struct mvneta_port *pp)
1325 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1327 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1335 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1348 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
1352 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1365 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
1370 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1376 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
1379 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
1385 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
1390 struct mvneta_port *pp = arg;
1395 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1403 struct mvneta_port *pp = arg;
1408 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1409 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1410 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1415 struct mvneta_port *pp = arg;
1420 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1421 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1422 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1434 static void mvneta_defaults_set(struct mvneta_port *pp)
1442 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1445 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1446 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1449 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1459 if (!pp->neta_armada3700) {
1473 txq_map = (cpu == pp->rxq_def) ?
1481 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
1485 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1486 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1489 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1491 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1492 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1495 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1496 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1499 if (pp->bm_priv)
1505 mvreg_write(pp, MVNETA_ACC_MODE, val);
1507 if (pp->bm_priv)
1508 mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
1511 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1512 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1515 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1516 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1531 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1536 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1538 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1540 mvneta_set_ucast_table(pp, -1);
1541 mvneta_set_special_mcast_table(pp, -1);
1542 mvneta_set_other_mcast_table(pp, -1);
1545 mvreg_write(pp, MVNETA_INTR_ENABLE,
1549 mvneta_mib_counters_clear(pp);
1553 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1564 val = mvreg_read(pp, MVNETA_TX_MTU);
1567 mvreg_write(pp, MVNETA_TX_MTU, val);
1570 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1577 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1580 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1587 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1593 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1609 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1619 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1623 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1634 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1635 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1639 mvneta_set_ucast_addr(pp, addr[5], queue);
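mvneta_mac_addr_set packs the six address bytes into the two MAC address registers before updating the unicast filter table with the address's last nibble. A sketch of the packing, assuming the usual split (bytes 0..3 in the high register, bytes 4..5 in the low one):

    /* Sketch: pack addr[0..5] into the HIGH/LOW MAC registers
     * (byte layout assumed). */
    static void example_mac_addr_set_hw(struct mvneta_port *pp,
                                        const unsigned char *addr)
    {
            u32 mac_h, mac_l;

            mac_h = (addr[0] << 24) | (addr[1] << 16) |
                    (addr[2] << 8)  |  addr[3];
            mac_l = (addr[4] << 8)  |  addr[5];

            mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
            mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
    }
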
1645 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1648 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1655 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1661 clk_rate = clk_get_rate(pp->clk);
1664 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
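mvneta_rx_time_coal_set programs the RX time-coalescing threshold in core-clock cycles, which is why it queries clk_get_rate on pp->clk. A sketch of the conversion; the parameter name usec and the exact rounding are assumptions:

    /* Sketch: convert a coalescing time in microseconds to clock ticks
     * (rounding behaviour assumed). */
    static void example_rx_time_coal_set(struct mvneta_port *pp,
                                         struct mvneta_rx_queue *rxq,
                                         u32 usec)
    {
            unsigned long clk_rate = clk_get_rate(pp->clk);
            u32 val = (clk_rate / 1000000) * usec;

            mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
    }
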
1668 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1673 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1678 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1694 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1703 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1708 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1712 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1718 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1728 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1734 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1738 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1773 static void mvneta_rx_error(struct mvneta_port *pp,
1776 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1786 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1790 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1794 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1798 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1805 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1808 if ((pp->dev->features & NETIF_F_RXCSUM) &&
1823 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1828 return &pp->txqs[queue];
1832 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1848 dma_unmap_single(pp->dev->dev.parent,
1868 static void mvneta_txq_done(struct mvneta_port *pp,
1871 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1874 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1878 mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
1890 static int mvneta_rx_refill(struct mvneta_port *pp,
1903 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
1910 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1941 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1946 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1948 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1950 if (pp->bm_priv) {
1957 bm_pool = &pp->bm_priv->bm_pools[pool_id];
1959 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
1980 mvneta_update_stats(struct mvneta_port *pp,
1983 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1996 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
2005 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
2011 stats = this_cpu_ptr(pp->stats);
2027 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2041 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
2056 dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
2058 if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
2068 dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
2086 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
2088 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2100 txq = &pp->txqs[cpu % txq_number];
2101 nq = netdev_get_tx_queue(pp->dev, txq->id);
2104 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
2112 mvneta_txq_pend_desc_add(pp, txq, 0);
2127 struct mvneta_port *pp = netdev_priv(dev);
2128 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2135 if (unlikely(test_bit(__MVNETA_DOWN, &pp->state)))
2141 txq = &pp->txqs[cpu % txq_number];
2142 nq = netdev_get_tx_queue(pp->dev, txq->id);
2146 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
2156 mvneta_txq_pend_desc_add(pp, txq, 0);
2170 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2177 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2182 sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2192 err = xdp_do_redirect(pp->dev, xdp, prog);
2194 mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
2203 ret = mvneta_xdp_xmit_back(pp, xdp);
2205 mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
2211 trace_xdp_exception(pp->dev, prog, act);
2214 mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
2227 mvneta_swbm_rx_frame(struct mvneta_port *pp,
2235 struct net_device *dev = pp->dev;
2259 xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
2268 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
2275 struct net_device *dev = pp->dev;
2295 skb_frag_off_set(frag, pp->rx_offset_correction);
2306 mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2321 mvneta_rx_csum(pp, desc_status, skb);
2337 struct mvneta_port *pp, int budget,
2341 struct net_device *dev = pp->dev;
2351 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
2354 xdp_prog = READ_ONCE(pp->xdp_prog);
2373 mvneta_rx_error(pp, rx_desc);
2381 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
2391 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
2400 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
2405 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
2408 skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
2410 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2412 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
2433 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
2439 mvneta_update_stats(pp, &ps);
2442 refill = mvneta_rx_refill_queue(pp, rxq);
2445 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2452 struct mvneta_port *pp, int rx_todo,
2455 struct net_device *dev = pp->dev;
2461 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2485 bm_pool = &pp->bm_priv->bm_pools[pool_id];
2491 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2494 mvneta_rx_error(pp, rx_desc);
2505 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2514 mvneta_rx_csum(pp, rx_status, skb);
2521 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2535 stats = this_cpu_ptr(pp->stats);
2550 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2564 mvneta_rx_csum(pp, rx_status, skb);
2570 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2579 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2586 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
2594 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
2643 struct mvneta_port *pp = netdev_priv(dev);
2671 mvneta_tso_put_hdr(skb, pp, txq);
2699 dma_unmap_single(pp->dev->dev.parent,
2709 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2724 dma_map_single(pp->dev->dev.parent, addr,
2727 if (dma_mapping_error(pp->dev->dev.parent,
2754 dma_unmap_single(pp->dev->dev.parent,
2767 struct mvneta_port *pp = netdev_priv(dev);
2769 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2789 tx_cmd = mvneta_skb_tx_csum(pp, skb);
2817 if (mvneta_tx_frag_process(pp, skb, txq)) {
2831 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2841 mvneta_txq_pend_desc_add(pp, txq, frags);
2859 static void mvneta_txq_done_force(struct mvneta_port *pp,
2863 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2866 mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
2877 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
2884 txq = mvneta_tx_done_policy(pp, cause_tx_done);
2886 nq = netdev_get_tx_queue(pp->dev, txq->id);
2890 mvneta_txq_done(pp, txq);
2925 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2938 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2948 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2960 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2971 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2981 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2993 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2999 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
3005 if (pp->mcast_count[crc_result] == 0) {
3006 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
3011 pp->mcast_count[crc_result]--;
3012 if (pp->mcast_count[crc_result] != 0) {
3013 netdev_info(pp->dev,
3015 pp->mcast_count[crc_result], crc_result);
3019 pp->mcast_count[crc_result]++;
3021 mvneta_set_other_mcast_addr(pp, crc_result, queue);
3027 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
3032 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
3034 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
3041 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
3042 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
3049 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
3050 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
3056 struct mvneta_port *pp = netdev_priv(dev);
3061 mvneta_rx_unicast_promisc_set(pp, 1);
3062 mvneta_set_ucast_table(pp, pp->rxq_def);
3063 mvneta_set_special_mcast_table(pp, pp->rxq_def);
3064 mvneta_set_other_mcast_table(pp, pp->rxq_def);
3067 mvneta_rx_unicast_promisc_set(pp, 0);
3068 mvneta_set_ucast_table(pp, -1);
3069 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
3073 mvneta_set_special_mcast_table(pp, pp->rxq_def);
3074 mvneta_set_other_mcast_table(pp, pp->rxq_def);
3077 mvneta_set_special_mcast_table(pp, -1);
3078 mvneta_set_other_mcast_table(pp, -1);
3082 mvneta_mcast_addr_set(pp, ha->addr,
3083 pp->rxq_def);
3093 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
3095 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
3096 napi_schedule(&pp->napi);
3106 disable_percpu_irq(port->pp->dev->irq);
3112 static void mvneta_link_change(struct mvneta_port *pp)
3114 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3116 phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
3131 struct mvneta_port *pp = netdev_priv(napi->dev);
3132 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
3134 if (!netif_running(pp->dev)) {
3140 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
3142 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
3144 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
3148 mvneta_link_change(pp);
3153 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
3160 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
3166 if (pp->bm_priv)
3167 rx_done = mvneta_rx_hwbm(napi, pp, budget,
3168 &pp->rxqs[rx_queue]);
3170 rx_done = mvneta_rx_swbm(napi, pp, budget,
3171 &pp->rxqs[rx_queue]);
3178 if (pp->neta_armada3700) {
3182 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
3188 enable_percpu_irq(pp->dev->irq, 0);
3192 if (pp->neta_armada3700)
3193 pp->cause_rx_tx = cause_rx_tx;
3200 static int mvneta_create_page_pool(struct mvneta_port *pp,
3203 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
3209 .dev = pp->dev->dev.parent,
3211 .offset = pp->rx_offset_correction,
3223 err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
3243 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
3248 err = mvneta_create_page_pool(pp, rxq, num);
3254 if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
3256 netdev_err(pp->dev,
3266 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
3272 static void mvneta_tx_reset(struct mvneta_port *pp)
3278 mvneta_txq_done_force(pp, &pp->txqs[queue]);
3280 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
3281 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
3284 static void mvneta_rx_reset(struct mvneta_port *pp)
3286 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
3287 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
3292 static int mvneta_rxq_sw_init(struct mvneta_port *pp,
3295 rxq->size = pp->rx_ring_size;
3298 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3309 static void mvneta_rxq_hw_init(struct mvneta_port *pp,
3313 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
3314 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
3317 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3318 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3320 if (!pp->bm_priv) {
3322 mvneta_rxq_offset_set(pp, rxq, 0);
3323 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
3325 MVNETA_RX_BUF_SIZE(pp->pkt_size));
3326 mvneta_rxq_bm_disable(pp, rxq);
3327 mvneta_rxq_fill(pp, rxq, rxq->size);
3330 mvneta_rxq_offset_set(pp, rxq,
3331 NET_SKB_PAD - pp->rx_offset_correction);
3333 mvneta_rxq_bm_enable(pp, rxq);
3335 mvneta_rxq_long_pool_set(pp, rxq);
3336 mvneta_rxq_short_pool_set(pp, rxq);
3337 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3342 static int mvneta_rxq_init(struct mvneta_port *pp,
3348 ret = mvneta_rxq_sw_init(pp, rxq);
3352 mvneta_rxq_hw_init(pp, rxq);
3358 static void mvneta_rxq_deinit(struct mvneta_port *pp,
3361 mvneta_rxq_drop_pkts(pp, rxq);
3364 dma_free_coherent(pp->dev->dev.parent,
3377 static int mvneta_txq_sw_init(struct mvneta_port *pp,
3382 txq->size = pp->tx_ring_size;
3392 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3405 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
3412 if (pp->neta_armada3700)
3417 cpu = pp->rxq_def % num_present_cpus();
3419 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3424 static void mvneta_txq_hw_init(struct mvneta_port *pp,
3428 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3429 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3432 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3433 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3435 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3439 static int mvneta_txq_init(struct mvneta_port *pp,
3444 ret = mvneta_txq_sw_init(pp, txq);
3448 mvneta_txq_hw_init(pp, txq);
3454 static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
3457 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3462 dma_free_coherent(pp->dev->dev.parent,
3466 dma_free_coherent(pp->dev->dev.parent,
3478 static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
3482 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3483 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3486 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3487 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3490 static void mvneta_txq_deinit(struct mvneta_port *pp,
3493 mvneta_txq_sw_deinit(pp, txq);
3494 mvneta_txq_hw_deinit(pp, txq);
3498 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3503 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3507 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3512 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3517 static int mvneta_setup_rxqs(struct mvneta_port *pp)
3522 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3525 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3527 mvneta_cleanup_rxqs(pp);
3536 static int mvneta_setup_txqs(struct mvneta_port *pp)
3541 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3543 netdev_err(pp->dev, "%s: can't create txq=%d\n",
3545 mvneta_cleanup_txqs(pp);
3553 static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
3557 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
3561 return phy_power_on(pp->comphy);
3564 static int mvneta_config_interface(struct mvneta_port *pp,
3569 if (pp->comphy) {
3573 ret = mvneta_comphy_init(pp, interface);
3578 mvreg_write(pp, MVNETA_SERDES_CFG,
3584 mvreg_write(pp, MVNETA_SERDES_CFG,
3589 mvreg_write(pp, MVNETA_SERDES_CFG,
3597 pp->phy_interface = interface;
3602 static void mvneta_start_dev(struct mvneta_port *pp)
3606 WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
3608 mvneta_max_rx_size_set(pp, pp->pkt_size);
3609 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3612 mvneta_port_enable(pp);
3614 if (!pp->neta_armada3700) {
3618 per_cpu_ptr(pp->ports, cpu);
3623 napi_enable(&pp->napi);
3627 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3629 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3633 phylink_start(pp->phylink);
3636 phylink_speed_up(pp->phylink);
3638 netif_tx_start_all_queues(pp->dev);
3640 clear_bit(__MVNETA_DOWN, &pp->state);
3643 static void mvneta_stop_dev(struct mvneta_port *pp)
3647 set_bit(__MVNETA_DOWN, &pp->state);
3649 if (device_may_wakeup(&pp->dev->dev))
3650 phylink_speed_down(pp->phylink, false);
3652 phylink_stop(pp->phylink);
3654 if (!pp->neta_armada3700) {
3657 per_cpu_ptr(pp->ports, cpu);
3662 napi_disable(&pp->napi);
3665 netif_carrier_off(pp->dev);
3667 mvneta_port_down(pp);
3668 netif_tx_stop_all_queues(pp->dev);
3671 mvneta_port_disable(pp);
3674 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3677 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3679 mvneta_tx_reset(pp);
3680 mvneta_rx_reset(pp);
3682 WARN_ON(phy_power_off(pp->comphy));
3687 struct mvneta_port *pp = arg;
3689 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3694 struct mvneta_port *pp = arg;
3696 disable_percpu_irq(pp->dev->irq);
3702 struct mvneta_port *pp = netdev_priv(dev);
3711 if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
3719 if (pp->bm_priv)
3720 mvneta_bm_update_mtu(pp, mtu);
3729 mvneta_stop_dev(pp);
3730 on_each_cpu(mvneta_percpu_disable, pp, true);
3732 mvneta_cleanup_txqs(pp);
3733 mvneta_cleanup_rxqs(pp);
3735 if (pp->bm_priv)
3736 mvneta_bm_update_mtu(pp, mtu);
3738 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3740 ret = mvneta_setup_rxqs(pp);
3746 ret = mvneta_setup_txqs(pp);
3752 on_each_cpu(mvneta_percpu_enable, pp, true);
3753 mvneta_start_dev(pp);
3763 struct mvneta_port *pp = netdev_priv(dev);
3765 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3769 pp->tx_csum_limit);
3776 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3780 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3781 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3793 struct mvneta_port *pp = netdev_priv(dev);
3801 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3804 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3815 struct mvneta_port *pp = netdev_priv(ndev);
3836 if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
3840 if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
3868 struct mvneta_port *pp = netdev_priv(ndev);
3871 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3896 struct mvneta_port *pp = netdev_priv(ndev);
3897 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3899 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3901 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
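The two back-to-back writes in mvneta_mac_an_restart pulse the in-band autoneg restart request: the bit is set and then immediately cleared against the same saved register value. A sketch with the bit name assumed:

    /* Sketch: pulse the in-band AN restart request (bit name assumed). */
    static void example_an_restart(struct mvneta_port *pp)
    {
            u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

            mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
                        gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
            mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
                        gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
    }
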
3909 struct mvneta_port *pp = netdev_priv(ndev);
3910 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
3911 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
3912 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
3913 u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
3914 u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3979 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3991 if (pp->phy_interface != state->interface) {
3992 if (pp->comphy)
3993 WARN_ON(phy_power_off(pp->comphy));
3994 WARN_ON(mvneta_config_interface(pp, state->interface));
3998 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
4000 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
4002 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
4004 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
4006 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
4009 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
4015 static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
4019 lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
4024 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
4031 struct mvneta_port *pp = netdev_priv(ndev);
4034 mvneta_port_down(pp);
4037 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4040 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4043 pp->eee_active = false;
4044 mvneta_set_eee(pp, false);
4054 struct mvneta_port *pp = netdev_priv(ndev);
4058 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4077 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4083 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4089 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4092 mvneta_port_up(pp);
4094 if (phy && pp->eee_enabled) {
4095 pp->eee_active = phy_init_eee(phy, 0) >= 0;
4096 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
4109 static int mvneta_mdio_probe(struct mvneta_port *pp)
4112 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
4115 netdev_err(pp->dev, "could not attach PHY: %d\n", err);
4117 phylink_ethtool_get_wol(pp->phylink, &wol);
4118 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
4122 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
4127 static void mvneta_mdio_remove(struct mvneta_port *pp)
4129 phylink_disconnect_phy(pp->phylink);
4136 static void mvneta_percpu_elect(struct mvneta_port *pp)
4143 if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
4144 elected_cpu = pp->rxq_def;
4160 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
4170 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
4173 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
4179 pp, true);
4188 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4190 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4195 if (pp->neta_armada3700)
4198 spin_lock(&pp->lock);
4203 if (pp->is_stopped) {
4204 spin_unlock(&pp->lock);
4207 netif_tx_stop_all_queues(pp->dev);
4216 per_cpu_ptr(pp->ports, other_cpu);
4223 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4230 mvneta_percpu_enable(pp);
4236 mvneta_percpu_elect(pp);
4239 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4240 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4243 netif_tx_start_all_queues(pp->dev);
4244 spin_unlock(&pp->lock);
4250 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4252 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4258 spin_lock(&pp->lock);
4260 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4261 spin_unlock(&pp->lock);
4266 mvneta_percpu_disable(pp);
4272 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4276 spin_lock(&pp->lock);
4277 mvneta_percpu_elect(pp);
4278 spin_unlock(&pp->lock);
4280 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4281 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4284 netif_tx_start_all_queues(pp->dev);
4290 struct mvneta_port *pp = netdev_priv(dev);
4293 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
4295 ret = mvneta_setup_rxqs(pp);
4299 ret = mvneta_setup_txqs(pp);
4304 if (pp->neta_armada3700)
4305 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
4306 dev->name, pp);
4308 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
4309 dev->name, pp->ports);
4311 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
4315 if (!pp->neta_armada3700) {
4319 on_each_cpu(mvneta_percpu_enable, pp, true);
4321 pp->is_stopped = false;
4326 &pp->node_online);
4331 &pp->node_dead);
4336 ret = mvneta_mdio_probe(pp);
4342 mvneta_start_dev(pp);
4347 if (!pp->neta_armada3700)
4349 &pp->node_dead);
4351 if (!pp->neta_armada3700)
4353 &pp->node_online);
4355 if (pp->neta_armada3700) {
4356 free_irq(pp->dev->irq, pp);
4358 on_each_cpu(mvneta_percpu_disable, pp, true);
4359 free_percpu_irq(pp->dev->irq, pp->ports);
4362 mvneta_cleanup_txqs(pp);
4364 mvneta_cleanup_rxqs(pp);
4371 struct mvneta_port *pp = netdev_priv(dev);
4373 if (!pp->neta_armada3700) {
4379 spin_lock(&pp->lock);
4380 pp->is_stopped = true;
4381 spin_unlock(&pp->lock);
4383 mvneta_stop_dev(pp);
4384 mvneta_mdio_remove(pp);
4387 &pp->node_online);
4389 &pp->node_dead);
4390 on_each_cpu(mvneta_percpu_disable, pp, true);
4391 free_percpu_irq(dev->irq, pp->ports);
4393 mvneta_stop_dev(pp);
4394 mvneta_mdio_remove(pp);
4395 free_irq(dev->irq, pp);
4398 mvneta_cleanup_rxqs(pp);
4399 mvneta_cleanup_txqs(pp);
4406 struct mvneta_port *pp = netdev_priv(dev);
4408 return phylink_mii_ioctl(pp->phylink, ifr, cmd);
4415 struct mvneta_port *pp = netdev_priv(dev);
4423 if (pp->bm_priv) {
4429 need_update = !!pp->xdp_prog != !!prog;
4433 old_prog = xchg(&pp->xdp_prog, prog);
4460 struct mvneta_port *pp = netdev_priv(ndev);
4462 return phylink_ethtool_ksettings_set(pp->phylink, cmd);
4470 struct mvneta_port *pp = netdev_priv(ndev);
4472 return phylink_ethtool_ksettings_get(pp->phylink, cmd);
4477 struct mvneta_port *pp = netdev_priv(dev);
4479 return phylink_ethtool_nway_reset(pp->phylink);
4486 struct mvneta_port *pp = netdev_priv(dev);
4490 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4493 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
4494 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
4498 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4500 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
4510 struct mvneta_port *pp = netdev_priv(dev);
4512 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
4513 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
4515 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
4535 struct mvneta_port *pp = netdev_priv(netdev);
4539 ring->rx_pending = pp->rx_ring_size;
4540 ring->tx_pending = pp->tx_ring_size;
4546 struct mvneta_port *pp = netdev_priv(dev);
4550 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
4553 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
4555 if (pp->tx_ring_size != ring->tx_pending)
4557 pp->tx_ring_size, ring->tx_pending);
4574 struct mvneta_port *pp = netdev_priv(dev);
4576 phylink_ethtool_get_pauseparam(pp->phylink, pause);
4582 struct mvneta_port *pp = netdev_priv(dev);
4584 return phylink_ethtool_set_pauseparam(pp->phylink, pause);
4600 mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
4618 stats = per_cpu_ptr(pp->stats, cpu);
4644 static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
4648 void __iomem *base = pp->base;
4653 mvneta_ethtool_update_pcpu_stats(pp, &stats);
4660 pp->ethtool_stats[i] += val;
4667 pp->ethtool_stats[i] += val;
4672 val = phylink_get_eee_err(pp->phylink);
4673 pp->ethtool_stats[i] += val;
4676 pp->ethtool_stats[i] = stats.skb_alloc_error;
4679 pp->ethtool_stats[i] = stats.refill_error;
4682 pp->ethtool_stats[i] = stats.ps.xdp_redirect;
4685 pp->ethtool_stats[i] = stats.ps.xdp_pass;
4688 pp->ethtool_stats[i] = stats.ps.xdp_drop;
4691 pp->ethtool_stats[i] = stats.ps.xdp_tx;
4694 pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
4697 pp->ethtool_stats[i] = stats.ps.xdp_xmit;
4700 pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
4711 struct mvneta_port *pp = netdev_priv(dev);
4714 mvneta_ethtool_update_stats(pp);
4717 *data++ = pp->ethtool_stats[i];
4747 static int mvneta_config_rss(struct mvneta_port *pp)
4752 netif_tx_stop_all_queues(pp->dev);
4754 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4756 if (!pp->neta_armada3700) {
4760 per_cpu_ptr(pp->ports, cpu);
4766 napi_synchronize(&pp->napi);
4767 napi_disable(&pp->napi);
4770 pp->rxq_def = pp->indir[0];
4773 mvneta_set_rx_mode(pp->dev);
4776 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
4777 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
4780 spin_lock(&pp->lock);
4781 mvneta_percpu_elect(pp);
4782 spin_unlock(&pp->lock);
4784 if (!pp->neta_armada3700) {
4788 per_cpu_ptr(pp->ports, cpu);
4793 napi_enable(&pp->napi);
4796 netif_tx_start_all_queues(pp->dev);
4804 struct mvneta_port *pp = netdev_priv(dev);
4807 if (pp->neta_armada3700)
4820 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
4822 return mvneta_config_rss(pp);
4828 struct mvneta_port *pp = netdev_priv(dev);
4831 if (pp->neta_armada3700)
4840 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
4848 struct mvneta_port *pp = netdev_priv(dev);
4850 phylink_ethtool_get_wol(pp->phylink, wol);
4856 struct mvneta_port *pp = netdev_priv(dev);
4859 ret = phylink_ethtool_set_wol(pp->phylink, wol);
4869 struct mvneta_port *pp = netdev_priv(dev);
4872 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4874 eee->eee_enabled = pp->eee_enabled;
4875 eee->eee_active = pp->eee_active;
4876 eee->tx_lpi_enabled = pp->tx_lpi_enabled;
4879 return phylink_ethtool_get_eee(pp->phylink, eee);
4885 struct mvneta_port *pp = netdev_priv(dev);
4893 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4896 mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
4898 pp->eee_enabled = eee->eee_enabled;
4899 pp->tx_lpi_enabled = eee->tx_lpi_enabled;
4901 mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
4903 return phylink_ethtool_set_eee(pp->phylink, eee);
4948 static int mvneta_init(struct device *dev, struct mvneta_port *pp)
4953 mvneta_port_disable(pp);
4956 mvneta_defaults_set(pp);
4958 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
4959 if (!pp->txqs)
4964 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4966 txq->size = pp->tx_ring_size;
4970 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
4971 if (!pp->rxqs)
4976 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4978 rxq->size = pp->rx_ring_size;
4982 = devm_kmalloc_array(pp->dev->dev.parent,
4994 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
5002 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
5003 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
5006 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
5016 mvreg_write(pp, MVNETA_WIN_BASE(i),
5021 mvreg_write(pp, MVNETA_WIN_SIZE(i),
5032 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
5037 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
5038 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
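mvneta_conf_mbus_windows first clears all address-decoding windows, then apparently programs one window per DRAM chip-select from the mbus target info (with a single full-range window on Armada 3700, where no dram info is available). A sketch of the per-chip-select programming; the field packing and initial enable mask are assumptions based on the writes shown above:

    /* Sketch: one decoding window per DRAM chip-select (packing assumed). */
    u32 win_enable = 0x3f, win_protect = 0;
    int i;

    for (i = 0; i < dram->num_cs; i++) {
            const struct mbus_dram_window *cs = dram->cs + i;

            mvreg_write(pp, MVNETA_WIN_BASE(i),
                        (cs->base & 0xffff0000) |
                        (cs->mbus_attr << 8) |
                        dram->mbus_dram_target_id);
            mvreg_write(pp, MVNETA_WIN_SIZE(i),
                        (cs->size - 1) & 0xffff0000);

            win_enable &= ~(1 << i);
            win_protect |= 3 << (2 * i);
    }
    mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
    mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
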
5042 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
5045 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
5061 struct mvneta_port *pp;
5096 pp = netdev_priv(dev);
5097 spin_lock_init(&pp->lock);
5099 pp->phylink_config.dev = &dev->dev;
5100 pp->phylink_config.type = PHYLINK_NETDEV;
5102 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
5115 pp->phylink = phylink;
5116 pp->comphy = comphy;
5117 pp->phy_interface = phy_mode;
5118 pp->dn = dn;
5120 pp->rxq_def = rxq_def;
5121 pp->indir[0] = rxq_def;
5125 pp->neta_armada3700 = true;
5127 pp->clk = devm_clk_get(&pdev->dev, "core");
5128 if (IS_ERR(pp->clk))
5129 pp->clk = devm_clk_get(&pdev->dev, NULL);
5130 if (IS_ERR(pp->clk)) {
5131 err = PTR_ERR(pp->clk);
5135 clk_prepare_enable(pp->clk);
5137 pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
5138 if (!IS_ERR(pp->clk_bus))
5139 clk_prepare_enable(pp->clk_bus);
5141 pp->base = devm_platform_ioremap_resource(pdev, 0);
5142 if (IS_ERR(pp->base)) {
5143 err = PTR_ERR(pp->base);
5148 pp->ports = alloc_percpu(struct mvneta_pcpu_port);
5149 if (!pp->ports) {
5155 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
5156 if (!pp->stats) {
5166 mvneta_get_mac_addr(pp, hw_mac_addr);
5190 pp->tx_csum_limit = tx_csum_limit;
5192 pp->dram_target_info = mv_mbus_dram_info();
5197 if (pp->dram_target_info || pp->neta_armada3700)
5198 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5200 pp->tx_ring_size = MVNETA_MAX_TXD;
5201 pp->rx_ring_size = MVNETA_MAX_RXD;
5203 pp->dev = dev;
5206 pp->id = global_port_id++;
5211 pp->bm_priv = mvneta_bm_get(bm_node);
5212 if (pp->bm_priv) {
5213 err = mvneta_bm_port_init(pdev, pp);
5217 mvneta_bm_put(pp->bm_priv);
5218 pp->bm_priv = NULL;
5225 pp->rx_offset_correction = max(0,
5232 if (!pp->bm_priv)
5233 pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5235 err = mvneta_init(&pdev->dev, pp);
5239 err = mvneta_port_power_up(pp, pp->phy_interface);
5248 if (pp->neta_armada3700) {
5249 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
5253 per_cpu_ptr(pp->ports, cpu);
5257 port->pp = pp;
5282 platform_set_drvdata(pdev, pp->dev);
5287 if (pp->bm_priv) {
5288 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5289 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5290 1 << pp->id);
5291 mvneta_bm_put(pp->bm_priv);
5293 free_percpu(pp->stats);
5295 free_percpu(pp->ports);
5297 clk_disable_unprepare(pp->clk_bus);
5298 clk_disable_unprepare(pp->clk);
5300 if (pp->phylink)
5301 phylink_destroy(pp->phylink);
5311 struct mvneta_port *pp = netdev_priv(dev);
5314 clk_disable_unprepare(pp->clk_bus);
5315 clk_disable_unprepare(pp->clk);
5316 free_percpu(pp->ports);
5317 free_percpu(pp->stats);
5319 phylink_destroy(pp->phylink);
5321 if (pp->bm_priv) {
5322 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5323 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5324 1 << pp->id);
5325 mvneta_bm_put(pp->bm_priv);
5336 struct mvneta_port *pp = netdev_priv(dev);
5341 if (!pp->neta_armada3700) {
5342 spin_lock(&pp->lock);
5343 pp->is_stopped = true;
5344 spin_unlock(&pp->lock);
5347 &pp->node_online);
5349 &pp->node_dead);
5353 mvneta_stop_dev(pp);
5357 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5359 mvneta_rxq_drop_pkts(pp, rxq);
5363 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5365 mvneta_txq_hw_deinit(pp, txq);
5370 clk_disable_unprepare(pp->clk_bus);
5371 clk_disable_unprepare(pp->clk);
5380 struct mvneta_port *pp = netdev_priv(dev);
5383 clk_prepare_enable(pp->clk);
5384 if (!IS_ERR(pp->clk_bus))
5385 clk_prepare_enable(pp->clk_bus);
5386 if (pp->dram_target_info || pp->neta_armada3700)
5387 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5388 if (pp->bm_priv) {
5389 err = mvneta_bm_port_init(pdev, pp);
5392 pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5393 pp->bm_priv = NULL;
5396 mvneta_defaults_set(pp);
5397 err = mvneta_port_power_up(pp, pp->phy_interface);
5409 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5412 mvneta_rxq_hw_init(pp, rxq);
5416 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5419 mvneta_txq_hw_init(pp, txq);
5422 if (!pp->neta_armada3700) {
5423 spin_lock(&pp->lock);
5424 pp->is_stopped = false;
5425 spin_unlock(&pp->lock);
5427 &pp->node_online);
5429 &pp->node_dead);
5433 mvneta_start_dev(pp);