// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <linux/clk.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = readl_relaxed(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,	\
				  u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->base + offset + off);			\
}									\

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}

/* L2-interrupt masking/unmasking helpers, which automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in
 * hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}									\

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the 64-bit check explicit here to save
 * one register write per packet on 32-bit platforms.
 */
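/* The upper address bits share a descriptor word with the length/status
 * field (DESC_ADDR_HI_STATUS_LEN), hence the DESC_ADDR_HI_MASK applied
 * before the write; 32-bit builds skip the high word entirely.
 */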
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}

/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	/* Clear L2 header checks, which would prevent BPDUs
	 * from being received.
	 */
	reg &= ~RXCHK_L2_HDR_DIS;
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);
}

static void bcm_sysport_set_tx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				    NETIF_F_HW_VLAN_CTAG_TX));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	/* Indicating that software inserts Broadcom tags is needed for the TX
	 * checksum to be computed correctly when using VLAN HW acceleration,
	 * else it has no effect, so it can always be turned on.
	 */
	if (netdev_uses_dsa(dev))
		reg |= tdma_control_bit(priv, SW_BRCM_TAG);
	else
		reg &= ~tdma_control_bit(priv, SW_BRCM_TAG);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Default TPID is ETH_P_8021AD, change to ETH_P_8021Q */
	if (wanted & NETIF_F_HW_VLAN_CTAG_TX)
		tdma_writel(priv, ETH_P_8021Q, TDMA_TPID);
}

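/* Changing features touches RXCHK and TDMA registers, so the block clock is
 * kept enabled for the duration. crc_fwd is refreshed first because the RX
 * checksum logic needs to know whether the FCS is still present.
 */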
static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
				  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

	bcm_sysport_set_rx_csum(dev, features);
	bcm_sysport_set_tx_csum(dev, features);

	clk_disable_unprepare(priv->clk);

	return 0;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}

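/* Latch the hardware counters into the software MIB copy. 'j' accumulates the
 * running offset into the UniMAC MIB register block; TX and RUNT counters sit
 * UMAC_MIB_STAT_OFFSET further in.
 */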
static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

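/* ethtool -S: depending on the entry type, stat_offset points into
 * dev->stats, the 64-bit stats64 copy, or the software MIB inside priv;
 * 64-bit netdev counters are sampled under the u64_stats sync.
 */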
static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite &&
		    !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[i] = *(u32 *)p;
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					u32 usecs, u32 pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= pkts;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}

static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
		RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

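/* Each hardware timeout tick is roughly 8.192 us (125 MHz reference divided
 * by 1024), so the readback below converts ticks back to microseconds.
 */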
static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* Base system clock is 125MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us; our maximum value
	 * has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

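/* Refill swaps a freshly allocated and DMA-mapped SKB into the ring slot
 * before handing the previous one to the caller, so a slot never sits
 * without a backing buffer while its packet is being processed.
 */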
static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT
	 * Lite groups the producer and consumer indexes into the same 32-bit
	 * register which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			 DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
		processed_bytes += len;

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

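/* ring->clean_index walks the descriptors being reclaimed, while
 * ring->c_index mirrors the hardware consumer index read back from the
 * ring's PROD_CONS_INDEX register.
 */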
/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct dim_sample dim_sample = {};
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is active
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
				  priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}

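/* Toggle MagicPacket detection in the UniMAC together with the matching
 * ACPI wake logic in the RBUF block (the RBUF enable bit sits at a
 * different position on SYSTEMPORT Lite).
 */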
static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
	u32 reg, bit;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (enable)
		reg |= MPD_EN;
	else
		reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->is_lite)
		bit = RBUF_ACPI_EN_LITE;
	else
		bit = RBUF_ACPI_EN;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	if (enable)
		reg |= bit;
	else
		reg &= ~bit;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	unsigned int index;
	u32 reg;

	/* Disable RXCHK, active filters and Broadcom tag matching */
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
	rxchk_writel(priv, reg, RXCHK_CONTROL);

	/* Make sure we restore correct CID index in case HW lost
	 * its context during deep idle state
	 */
	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		rxchk_writel(priv, priv->filters_loc[index] <<
			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
	}

	/* Clear the MagicPacket detection logic */
	mpd_enable_set(priv, false);

	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
	if (reg & INTRL2_0_MPD)
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
				  RXCHK_BRCM_TAG_MATCH_MASK;
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	}

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

static void bcm_sysport_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct bcm_sysport_net_dim *ndim =
			container_of(dim, struct bcm_sysport_net_dim, dim);
	struct bcm_sysport_priv *priv =
			container_of(ndim, struct bcm_sysport_priv, dim);
	struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
								    dim->profile_ix);

	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
	dim->state = DIM_START_MEASURE;
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		priv->dim.event_ctr++;
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
	}
}
#endif

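/* The transmit status block (TSB) is built in the SKB headroom; for
 * CHECKSUM_PARTIAL packets it carries the checksum start and destination
 * offsets so the hardware can insert the L4 checksum, and it also carries
 * the PCP/VID fields when HW VLAN tag insertion is enabled.
 */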
static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	__be16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev_kfree_skb_any(skb);
			priv->mib.tx_realloc_tsb_failed++;
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = nskb;
		priv->mib.tx_realloc_tsb++;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb_vlan_tag_present(skb)) {
		tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK;
		tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		/* Account for the HW inserted VLAN tag */
		if (skb_vlan_tag_present(skb))
			csum_start += VLAN_HLEN;
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP &&
			    ip_ver == htons(ETH_P_IP))
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}

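/* Transmit path: each packet uses a single descriptor whose two words (a
 * combined high-address/length/status word and the low address word) are
 * pushed through the per-ring TDMA WRITE_PORT registers.
 */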
static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	unsigned long flags, desc_flags;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 len_status, addr_lo;
	unsigned int skb_len;
	dma_addr_t mapping;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
	if (skb_vlan_tag_present(skb))
		len_status |= (TX_STATUS_VLAN_VID_TSB << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ports are latched, so write upper address first */
	spin_lock_irqsave(&priv->desc_lock, desc_flags);
	tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
	tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
	spin_unlock_irqrestore(&priv->desc_lock, desc_flags);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

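/* On SYSTEMPORT Lite the UniMAC command register is not reprogrammed here;
 * only the link change is reported through phy_print_status().
 */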
/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	if (changed)
		phy_print_status(phydev);
}

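/* Dynamic interrupt moderation (DIM): the net_dim library picks an RX
 * coalescing profile from the observed packet and byte rates, and the work
 * callback above (bcm_sysport_dim_work) programs it into the hardware.
 */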
static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
				 void (*cb)(struct work_struct *work))
{
	struct bcm_sysport_net_dim *dim = &priv->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}

static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_net_dim *dim = &priv->dim;
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	size_t size;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));

	/* Configure QID and port mapping */
	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	if (ring->inspect) {
		reg |= ring->switch_queue & RING_QID_MASK;
		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	} else {
		reg |= RING_IGNORE_STATUS;
	}
	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
	reg = 0;
	/* Adjust the packet size calculations if SYSTEMPORT is responsible
	 * for HW insertion of VLAN tags
	 */
	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
		reg = VLAN_HLEN << RING_PKT_SIZE_ADJ_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Enable ACB algorithm 2 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, ACB_ALGO);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, switch q=%d,port=%d\n",
		  ring->size, ring->switch_queue,
		  ring->switch_port);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

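/* The DMA engines acknowledge enable/disable through their DISABLED status
 * bit; the helpers below poll until that bit reflects the requested state,
 * for up to a second or two.
 */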
/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

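/* The RX descriptors live in the RDMA block's register space
 * (priv->base + SYS_PORT_RDMA_OFFSET); only the packet buffers themselves
 * are allocated from system memory.
 */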
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

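/* On SYSTEMPORT Lite the MAC enable bits are written to GIB_CONTROL instead
 * of UMAC_CMD; the same mask is applied to whichever block is present.
 */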
static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
	bcm_sysport_init_rx_coalesce(priv);
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}

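/* 12 byte-times is the standard Ethernet inter-packet gap; with a DSA switch
 * attached, the pad extension additionally accounts for the Broadcom tag
 * length (ENET_BRCM_TAG_LEN).
 */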
*priv) 1922{ 1923 intrl2_0_mask_set(priv, 0xffffffff); 1924 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 1925 if (!priv->is_lite) { 1926 intrl2_1_mask_set(priv, 0xffffffff); 1927 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 1928 } 1929} 1930 1931static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv) 1932{ 1933 u32 reg; 1934 1935 reg = gib_readl(priv, GIB_CONTROL); 1936 /* Include Broadcom tag in pad extension and fix up IPG_LENGTH */ 1937 if (netdev_uses_dsa(priv->netdev)) { 1938 reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT); 1939 reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT; 1940 } 1941 reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT); 1942 reg |= 12 << GIB_IPG_LEN_SHIFT; 1943 gib_writel(priv, reg, GIB_CONTROL); 1944} 1945 1946static int bcm_sysport_open(struct net_device *dev) 1947{ 1948 struct bcm_sysport_priv *priv = netdev_priv(dev); 1949 struct phy_device *phydev; 1950 unsigned int i; 1951 int ret; 1952 1953 clk_prepare_enable(priv->clk); 1954 1955 /* Reset UniMAC */ 1956 umac_reset(priv); 1957 1958 /* Flush TX and RX FIFOs at TOPCTRL level */ 1959 topctrl_flush(priv); 1960 1961 /* Disable the UniMAC RX/TX */ 1962 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0); 1963 1964 /* Enable RBUF 2bytes alignment and Receive Status Block */ 1965 rbuf_init(priv); 1966 1967 /* Set maximum frame length */ 1968 if (!priv->is_lite) 1969 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); 1970 else 1971 gib_set_pad_extension(priv); 1972 1973 /* Apply features again in case we changed them while interface was 1974 * down 1975 */ 1976 bcm_sysport_set_features(dev, dev->features); 1977 1978 /* Set MAC address */ 1979 umac_set_hw_addr(priv, dev->dev_addr); 1980 1981 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 1982 0, priv->phy_interface); 1983 if (!phydev) { 1984 netdev_err(dev, "could not attach to PHY\n"); 1985 ret = -ENODEV; 1986 goto out_clk_disable; 1987 } 1988 1989 /* Reset house keeping link status */ 1990 priv->old_duplex = -1; 1991 priv->old_link = -1; 1992 priv->old_pause = -1; 1993 1994 /* mask all interrupts and request them */ 1995 bcm_sysport_mask_all_intrs(priv); 1996 1997 ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev); 1998 if (ret) { 1999 netdev_err(dev, "failed to request RX interrupt\n"); 2000 goto out_phy_disconnect; 2001 } 2002 2003 if (!priv->is_lite) { 2004 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, 2005 dev->name, dev); 2006 if (ret) { 2007 netdev_err(dev, "failed to request TX interrupt\n"); 2008 goto out_free_irq0; 2009 } 2010 } 2011 2012 /* Initialize both hardware and software ring */ 2013 spin_lock_init(&priv->desc_lock); 2014 for (i = 0; i < dev->num_tx_queues; i++) { 2015 ret = bcm_sysport_init_tx_ring(priv, i); 2016 if (ret) { 2017 netdev_err(dev, "failed to initialize TX ring %d\n", 2018 i); 2019 goto out_free_tx_ring; 2020 } 2021 } 2022 2023 /* Initialize linked-list */ 2024 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS); 2025 2026 /* Initialize RX ring */ 2027 ret = bcm_sysport_init_rx_ring(priv); 2028 if (ret) { 2029 netdev_err(dev, "failed to initialize RX ring\n"); 2030 goto out_free_rx_ring; 2031 } 2032 2033 /* Turn on RDMA */ 2034 ret = rdma_enable_set(priv, 1); 2035 if (ret) 2036 goto out_free_rx_ring; 2037 2038 /* Turn on TDMA */ 2039 ret = tdma_enable_set(priv, 1); 2040 if (ret) 2041 goto out_clear_rx_int; 2042 2043 /* Turn on UniMAC TX/RX */ 2044 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1); 2045 2046 bcm_sysport_netif_start(dev); 2047 2048 
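	/* At this point RDMA, TDMA, the MAC and NAPI are all running; opening
	 * the TX queues to the stack is the last step so that no packets can
	 * be queued against a half-initialized datapath.
	 */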
	netif_tx_start_all_queues(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_disable(dev);
	napi_disable(&priv->napi);
	cancel_work_sync(&priv->dim.dim.work);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
				 u64 location)
{
	unsigned int index;
	u32 reg;

	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
		reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
		reg &= RXCHK_BRCM_TAG_CID_MASK;
		if (reg == location)
			return index;
	}

	return -EINVAL;
}

static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, nfc->fs.location);
	if (index < 0)
		return -EOPNOTSUPP;

	nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;

	return 0;
}

static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	unsigned int index;
	u32 reg;

	/* We cannot match locations greater than what the classification ID
	 * permits (256 entries)
	 */
	if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
		return -E2BIG;

	/* We cannot support flows that are not destined for a wake-up */
	if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
		return -EOPNOTSUPP;

	/* All filters are already in use, we cannot match more rules */
	if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
	    RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	index =
find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX); 2184 if (index >= RXCHK_BRCM_TAG_MAX) 2185 return -ENOSPC; 2186 2187 /* Location is the classification ID, and index is the position 2188 * within one of our 8 possible filters to be programmed 2189 */ 2190 reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index)); 2191 reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT); 2192 reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT; 2193 rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index)); 2194 rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index)); 2195 2196 priv->filters_loc[index] = nfc->fs.location; 2197 set_bit(index, priv->filters); 2198 2199 return 0; 2200} 2201 2202static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv, 2203 u64 location) 2204{ 2205 int index; 2206 2207 /* This is not a rule that we know about */ 2208 index = bcm_sysport_rule_find(priv, location); 2209 if (index < 0) 2210 return -EOPNOTSUPP; 2211 2212 /* No need to disable this filter if it was enabled, this will 2213 * be taken care of during suspend time by bcm_sysport_suspend_to_wol 2214 */ 2215 clear_bit(index, priv->filters); 2216 priv->filters_loc[index] = 0; 2217 2218 return 0; 2219} 2220 2221static int bcm_sysport_get_rxnfc(struct net_device *dev, 2222 struct ethtool_rxnfc *nfc, u32 *rule_locs) 2223{ 2224 struct bcm_sysport_priv *priv = netdev_priv(dev); 2225 int ret = -EOPNOTSUPP; 2226 2227 switch (nfc->cmd) { 2228 case ETHTOOL_GRXCLSRULE: 2229 ret = bcm_sysport_rule_get(priv, nfc); 2230 break; 2231 default: 2232 break; 2233 } 2234 2235 return ret; 2236} 2237 2238static int bcm_sysport_set_rxnfc(struct net_device *dev, 2239 struct ethtool_rxnfc *nfc) 2240{ 2241 struct bcm_sysport_priv *priv = netdev_priv(dev); 2242 int ret = -EOPNOTSUPP; 2243 2244 switch (nfc->cmd) { 2245 case ETHTOOL_SRXCLSRLINS: 2246 ret = bcm_sysport_rule_set(priv, nfc); 2247 break; 2248 case ETHTOOL_SRXCLSRLDEL: 2249 ret = bcm_sysport_rule_del(priv, nfc->fs.location); 2250 break; 2251 default: 2252 break; 2253 } 2254 2255 return ret; 2256} 2257 2258static const struct ethtool_ops bcm_sysport_ethtool_ops = { 2259 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 2260 ETHTOOL_COALESCE_MAX_FRAMES | 2261 ETHTOOL_COALESCE_USE_ADAPTIVE_RX, 2262 .get_drvinfo = bcm_sysport_get_drvinfo, 2263 .get_msglevel = bcm_sysport_get_msglvl, 2264 .set_msglevel = bcm_sysport_set_msglvl, 2265 .get_link = ethtool_op_get_link, 2266 .get_strings = bcm_sysport_get_strings, 2267 .get_ethtool_stats = bcm_sysport_get_stats, 2268 .get_sset_count = bcm_sysport_get_sset_count, 2269 .get_wol = bcm_sysport_get_wol, 2270 .set_wol = bcm_sysport_set_wol, 2271 .get_coalesce = bcm_sysport_get_coalesce, 2272 .set_coalesce = bcm_sysport_set_coalesce, 2273 .get_link_ksettings = phy_ethtool_get_link_ksettings, 2274 .set_link_ksettings = phy_ethtool_set_link_ksettings, 2275 .get_rxnfc = bcm_sysport_get_rxnfc, 2276 .set_rxnfc = bcm_sysport_set_rxnfc, 2277}; 2278 2279static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, 2280 struct net_device *sb_dev) 2281{ 2282 struct bcm_sysport_priv *priv = netdev_priv(dev); 2283 u16 queue = skb_get_queue_mapping(skb); 2284 struct bcm_sysport_tx_ring *tx_ring; 2285 unsigned int q, port; 2286 2287 if (!netdev_uses_dsa(dev)) 2288 return netdev_pick_tx(dev, skb, NULL); 2289 2290 /* DSA tagging layer will have configured the correct queue */ 2291 q = BRCM_TAG_GET_QUEUE(queue); 2292 port = BRCM_TAG_GET_PORT(queue); 2293 tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues]; 2294 2295 if 
(unlikely(!tx_ring))
		return netdev_pick_tx(dev, skb, NULL);

	return tx_ring->index;
}

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64	= bcm_sysport_get_stats64,
	.ndo_select_queue	= bcm_sysport_select_queue,
};

static int bcm_sysport_map_queues(struct notifier_block *nb,
				  struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	unsigned int q, qp, port;
	struct net_device *dev;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	/* We can't set up queue inspection for switches that are not directly
	 * attached
	 */
	if (info->switch_number)
		return 0;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	/* On SYSTEMPORT Lite we have half as many queues, so a 1:1 mapping is
	 * not possible; only a 2:1 mapping is. We achieve that by halving the
	 * number of per-port (slave_dev) network device TX queues. This needs
	 * to happen now, before any slave network device is used, so that it
	 * accurately reflects the real number of TX queues.
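	 *
	 * Illustrative arithmetic (hypothetical sizes, not mandated anywhere):
	 * with per_port_num_tx_queues = 4, the ring serving switch queue qp of
	 * switch port N is stored at ring_map[qp + N * 4], so queue 2 of
	 * port 1 ends up in ring_map[6].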
2349 */ 2350 if (priv->is_lite) 2351 netif_set_real_num_tx_queues(slave_dev, 2352 slave_dev->num_tx_queues / 2); 2353 2354 num_tx_queues = slave_dev->real_num_tx_queues; 2355 2356 if (priv->per_port_num_tx_queues && 2357 priv->per_port_num_tx_queues != num_tx_queues) 2358 netdev_warn(slave_dev, "asymmetric number of per-port queues\n"); 2359 2360 priv->per_port_num_tx_queues = num_tx_queues; 2361 2362 for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues; 2363 q++) { 2364 ring = &priv->tx_rings[q]; 2365 2366 if (ring->inspect) 2367 continue; 2368 2369 /* Just remember the mapping actual programming done 2370 * during bcm_sysport_init_tx_ring 2371 */ 2372 ring->switch_queue = qp; 2373 ring->switch_port = port; 2374 ring->inspect = true; 2375 priv->ring_map[qp + port * num_tx_queues] = ring; 2376 qp++; 2377 } 2378 2379 return 0; 2380} 2381 2382static int bcm_sysport_unmap_queues(struct notifier_block *nb, 2383 struct dsa_notifier_register_info *info) 2384{ 2385 struct bcm_sysport_tx_ring *ring; 2386 struct bcm_sysport_priv *priv; 2387 struct net_device *slave_dev; 2388 unsigned int num_tx_queues; 2389 struct net_device *dev; 2390 unsigned int q, qp, port; 2391 2392 priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); 2393 if (priv->netdev != info->master) 2394 return 0; 2395 2396 dev = info->master; 2397 2398 if (dev->netdev_ops != &bcm_sysport_netdev_ops) 2399 return 0; 2400 2401 port = info->port_number; 2402 slave_dev = info->info.dev; 2403 2404 num_tx_queues = slave_dev->real_num_tx_queues; 2405 2406 for (q = 0; q < dev->num_tx_queues; q++) { 2407 ring = &priv->tx_rings[q]; 2408 2409 if (ring->switch_port != port) 2410 continue; 2411 2412 if (!ring->inspect) 2413 continue; 2414 2415 ring->inspect = false; 2416 qp = ring->switch_queue; 2417 priv->ring_map[qp + port * num_tx_queues] = NULL; 2418 } 2419 2420 return 0; 2421} 2422 2423static int bcm_sysport_dsa_notifier(struct notifier_block *nb, 2424 unsigned long event, void *ptr) 2425{ 2426 int ret = NOTIFY_DONE; 2427 2428 switch (event) { 2429 case DSA_PORT_REGISTER: 2430 ret = bcm_sysport_map_queues(nb, ptr); 2431 break; 2432 case DSA_PORT_UNREGISTER: 2433 ret = bcm_sysport_unmap_queues(nb, ptr); 2434 break; 2435 } 2436 2437 return notifier_from_errno(ret); 2438} 2439 2440#define REV_FMT "v%2x.%02x" 2441 2442static const struct bcm_sysport_hw_params bcm_sysport_params[] = { 2443 [SYSTEMPORT] = { 2444 .is_lite = false, 2445 .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS, 2446 }, 2447 [SYSTEMPORT_LITE] = { 2448 .is_lite = true, 2449 .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS, 2450 }, 2451}; 2452 2453static const struct of_device_id bcm_sysport_of_match[] = { 2454 { .compatible = "brcm,systemportlite-v1.00", 2455 .data = &bcm_sysport_params[SYSTEMPORT_LITE] }, 2456 { .compatible = "brcm,systemport-v1.00", 2457 .data = &bcm_sysport_params[SYSTEMPORT] }, 2458 { .compatible = "brcm,systemport", 2459 .data = &bcm_sysport_params[SYSTEMPORT] }, 2460 { /* sentinel */ } 2461}; 2462MODULE_DEVICE_TABLE(of, bcm_sysport_of_match); 2463 2464static int bcm_sysport_probe(struct platform_device *pdev) 2465{ 2466 const struct bcm_sysport_hw_params *params; 2467 const struct of_device_id *of_id = NULL; 2468 struct bcm_sysport_priv *priv; 2469 struct device_node *dn; 2470 struct net_device *dev; 2471 const void *macaddr; 2472 u32 txq, rxq; 2473 int ret; 2474 2475 dn = pdev->dev.of_node; 2476 of_id = of_match_node(bcm_sysport_of_match, dn); 2477 if (!of_id || !of_id->data) 2478 return -EINVAL; 2479 2480 ret = 
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); 2481 if (ret) 2482 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 2483 if (ret) { 2484 dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret); 2485 return ret; 2486 } 2487 2488 /* Fairly quickly we need to know the type of adapter we have */ 2489 params = of_id->data; 2490 2491 /* Read the Transmit/Receive Queue properties */ 2492 if (of_property_read_u32(dn, "systemport,num-txq", &txq)) 2493 txq = TDMA_NUM_RINGS; 2494 if (of_property_read_u32(dn, "systemport,num-rxq", &rxq)) 2495 rxq = 1; 2496 2497 /* Sanity check the number of transmit queues */ 2498 if (!txq || txq > TDMA_NUM_RINGS) 2499 return -EINVAL; 2500 2501 dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq); 2502 if (!dev) 2503 return -ENOMEM; 2504 2505 /* Initialize private members */ 2506 priv = netdev_priv(dev); 2507 2508 priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport"); 2509 if (IS_ERR(priv->clk)) { 2510 ret = PTR_ERR(priv->clk); 2511 goto err_free_netdev; 2512 } 2513 2514 /* Allocate number of TX rings */ 2515 priv->tx_rings = devm_kcalloc(&pdev->dev, txq, 2516 sizeof(struct bcm_sysport_tx_ring), 2517 GFP_KERNEL); 2518 if (!priv->tx_rings) { 2519 ret = -ENOMEM; 2520 goto err_free_netdev; 2521 } 2522 2523 priv->is_lite = params->is_lite; 2524 priv->num_rx_desc_words = params->num_rx_desc_words; 2525 2526 priv->irq0 = platform_get_irq(pdev, 0); 2527 if (!priv->is_lite) { 2528 priv->irq1 = platform_get_irq(pdev, 1); 2529 priv->wol_irq = platform_get_irq(pdev, 2); 2530 } else { 2531 priv->wol_irq = platform_get_irq(pdev, 1); 2532 } 2533 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { 2534 ret = -EINVAL; 2535 goto err_free_netdev; 2536 } 2537 2538 priv->base = devm_platform_ioremap_resource(pdev, 0); 2539 if (IS_ERR(priv->base)) { 2540 ret = PTR_ERR(priv->base); 2541 goto err_free_netdev; 2542 } 2543 2544 priv->netdev = dev; 2545 priv->pdev = pdev; 2546 2547 ret = of_get_phy_mode(dn, &priv->phy_interface); 2548 /* Default to GMII interface mode */ 2549 if (ret) 2550 priv->phy_interface = PHY_INTERFACE_MODE_GMII; 2551 2552 /* In the case of a fixed PHY, the DT node associated 2553 * to the PHY is the Ethernet MAC DT node. 
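	 *
	 * A hypothetical DT sketch of that case (node name, unit address and
	 * link speed are made-up examples, not taken from a real board):
	 *
	 *	ethernet@f04a0000 {
	 *		compatible = "brcm,systemport";
	 *		fixed-link {
	 *			speed = <1000>;
	 *			full-duplex;
	 *		};
	 *	};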
2554 */ 2555 if (of_phy_is_fixed_link(dn)) { 2556 ret = of_phy_register_fixed_link(dn); 2557 if (ret) { 2558 dev_err(&pdev->dev, "failed to register fixed PHY\n"); 2559 goto err_free_netdev; 2560 } 2561 2562 priv->phy_dn = dn; 2563 } 2564 2565 /* Initialize netdevice members */ 2566 macaddr = of_get_mac_address(dn); 2567 if (IS_ERR(macaddr)) { 2568 dev_warn(&pdev->dev, "using random Ethernet MAC\n"); 2569 eth_hw_addr_random(dev); 2570 } else { 2571 ether_addr_copy(dev->dev_addr, macaddr); 2572 } 2573 2574 SET_NETDEV_DEV(dev, &pdev->dev); 2575 dev_set_drvdata(&pdev->dev, dev); 2576 dev->ethtool_ops = &bcm_sysport_ethtool_ops; 2577 dev->netdev_ops = &bcm_sysport_netdev_ops; 2578 netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64); 2579 2580 dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA | 2581 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2582 NETIF_F_HW_VLAN_CTAG_TX; 2583 dev->hw_features |= dev->features; 2584 dev->vlan_features |= dev->features; 2585 dev->max_mtu = UMAC_MAX_MTU_SIZE; 2586 2587 /* Request the WOL interrupt and advertise suspend if available */ 2588 priv->wol_irq_disabled = 1; 2589 ret = devm_request_irq(&pdev->dev, priv->wol_irq, 2590 bcm_sysport_wol_isr, 0, dev->name, priv); 2591 if (!ret) 2592 device_set_wakeup_capable(&pdev->dev, 1); 2593 2594 priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol"); 2595 if (IS_ERR(priv->wol_clk)) { 2596 ret = PTR_ERR(priv->wol_clk); 2597 goto err_deregister_fixed_link; 2598 } 2599 2600 /* Set the needed headroom once and for all */ 2601 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); 2602 dev->needed_headroom += sizeof(struct bcm_tsb); 2603 2604 /* libphy will adjust the link state accordingly */ 2605 netif_carrier_off(dev); 2606 2607 priv->rx_max_coalesced_frames = 1; 2608 u64_stats_init(&priv->syncp); 2609 2610 priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier; 2611 2612 ret = register_dsa_notifier(&priv->dsa_notifier); 2613 if (ret) { 2614 dev_err(&pdev->dev, "failed to register DSA notifier\n"); 2615 goto err_deregister_fixed_link; 2616 } 2617 2618 ret = register_netdev(dev); 2619 if (ret) { 2620 dev_err(&pdev->dev, "failed to register net_device\n"); 2621 goto err_deregister_notifier; 2622 } 2623 2624 clk_prepare_enable(priv->clk); 2625 2626 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; 2627 dev_info(&pdev->dev, 2628 "Broadcom SYSTEMPORT%s " REV_FMT 2629 " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", 2630 priv->is_lite ? 
" Lite" : "", 2631 (priv->rev >> 8) & 0xff, priv->rev & 0xff, 2632 priv->irq0, priv->irq1, txq, rxq); 2633 2634 clk_disable_unprepare(priv->clk); 2635 2636 return 0; 2637 2638err_deregister_notifier: 2639 unregister_dsa_notifier(&priv->dsa_notifier); 2640err_deregister_fixed_link: 2641 if (of_phy_is_fixed_link(dn)) 2642 of_phy_deregister_fixed_link(dn); 2643err_free_netdev: 2644 free_netdev(dev); 2645 return ret; 2646} 2647 2648static int bcm_sysport_remove(struct platform_device *pdev) 2649{ 2650 struct net_device *dev = dev_get_drvdata(&pdev->dev); 2651 struct bcm_sysport_priv *priv = netdev_priv(dev); 2652 struct device_node *dn = pdev->dev.of_node; 2653 2654 /* Not much to do, ndo_close has been called 2655 * and we use managed allocations 2656 */ 2657 unregister_dsa_notifier(&priv->dsa_notifier); 2658 unregister_netdev(dev); 2659 if (of_phy_is_fixed_link(dn)) 2660 of_phy_deregister_fixed_link(dn); 2661 free_netdev(dev); 2662 dev_set_drvdata(&pdev->dev, NULL); 2663 2664 return 0; 2665} 2666 2667static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) 2668{ 2669 struct net_device *ndev = priv->netdev; 2670 unsigned int timeout = 1000; 2671 unsigned int index, i = 0; 2672 u32 reg; 2673 2674 reg = umac_readl(priv, UMAC_MPD_CTRL); 2675 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) 2676 reg |= MPD_EN; 2677 reg &= ~PSW_EN; 2678 if (priv->wolopts & WAKE_MAGICSECURE) { 2679 /* Program the SecureOn password */ 2680 umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), 2681 UMAC_PSW_MS); 2682 umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), 2683 UMAC_PSW_LS); 2684 reg |= PSW_EN; 2685 } 2686 umac_writel(priv, reg, UMAC_MPD_CTRL); 2687 2688 if (priv->wolopts & WAKE_FILTER) { 2689 /* Turn on ACPI matching to steal packets from RBUF */ 2690 reg = rbuf_readl(priv, RBUF_CONTROL); 2691 if (priv->is_lite) 2692 reg |= RBUF_ACPI_EN_LITE; 2693 else 2694 reg |= RBUF_ACPI_EN; 2695 rbuf_writel(priv, reg, RBUF_CONTROL); 2696 2697 /* Enable RXCHK, active filters and Broadcom tag matching */ 2698 reg = rxchk_readl(priv, RXCHK_CONTROL); 2699 reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK << 2700 RXCHK_BRCM_TAG_MATCH_SHIFT); 2701 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { 2702 reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i); 2703 i++; 2704 } 2705 reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN; 2706 rxchk_writel(priv, reg, RXCHK_CONTROL); 2707 } 2708 2709 /* Make sure RBUF entered WoL mode as result */ 2710 do { 2711 reg = rbuf_readl(priv, RBUF_STATUS); 2712 if (reg & RBUF_WOL_MODE) 2713 break; 2714 2715 udelay(10); 2716 } while (timeout-- > 0); 2717 2718 /* Do not leave the UniMAC RBUF matching only MPD packets */ 2719 if (!timeout) { 2720 mpd_enable_set(priv, false); 2721 netif_err(priv, wol, ndev, "failed to enter WOL mode\n"); 2722 return -ETIMEDOUT; 2723 } 2724 2725 /* UniMAC receive needs to be turned on */ 2726 umac_enable_set(priv, CMD_RX_EN, 1); 2727 2728 netif_dbg(priv, wol, ndev, "entered WOL mode\n"); 2729 2730 return 0; 2731} 2732 2733static int __maybe_unused bcm_sysport_suspend(struct device *d) 2734{ 2735 struct net_device *dev = dev_get_drvdata(d); 2736 struct bcm_sysport_priv *priv = netdev_priv(dev); 2737 unsigned int i; 2738 int ret = 0; 2739 u32 reg; 2740 2741 if (!netif_running(dev)) 2742 return 0; 2743 2744 netif_device_detach(dev); 2745 2746 bcm_sysport_netif_stop(dev); 2747 2748 phy_suspend(dev->phydev); 2749 2750 /* Disable UniMAC RX */ 2751 umac_enable_set(priv, CMD_RX_EN, 0); 2752 2753 ret = rdma_enable_set(priv, 0); 2754 if (ret) { 2755 netdev_err(dev, "RDMA 
timeout!\n"); 2756 return ret; 2757 } 2758 2759 /* Disable RXCHK if enabled */ 2760 if (priv->rx_chk_en) { 2761 reg = rxchk_readl(priv, RXCHK_CONTROL); 2762 reg &= ~RXCHK_EN; 2763 rxchk_writel(priv, reg, RXCHK_CONTROL); 2764 } 2765 2766 /* Flush RX pipe */ 2767 if (!priv->wolopts) 2768 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL); 2769 2770 ret = tdma_enable_set(priv, 0); 2771 if (ret) { 2772 netdev_err(dev, "TDMA timeout!\n"); 2773 return ret; 2774 } 2775 2776 /* Wait for a packet boundary */ 2777 usleep_range(2000, 3000); 2778 2779 umac_enable_set(priv, CMD_TX_EN, 0); 2780 2781 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL); 2782 2783 /* Free RX/TX rings SW structures */ 2784 for (i = 0; i < dev->num_tx_queues; i++) 2785 bcm_sysport_fini_tx_ring(priv, i); 2786 bcm_sysport_fini_rx_ring(priv); 2787 2788 /* Get prepared for Wake-on-LAN */ 2789 if (device_may_wakeup(d) && priv->wolopts) { 2790 clk_prepare_enable(priv->wol_clk); 2791 ret = bcm_sysport_suspend_to_wol(priv); 2792 } 2793 2794 clk_disable_unprepare(priv->clk); 2795 2796 return ret; 2797} 2798 2799static int __maybe_unused bcm_sysport_resume(struct device *d) 2800{ 2801 struct net_device *dev = dev_get_drvdata(d); 2802 struct bcm_sysport_priv *priv = netdev_priv(dev); 2803 unsigned int i; 2804 int ret; 2805 2806 if (!netif_running(dev)) 2807 return 0; 2808 2809 clk_prepare_enable(priv->clk); 2810 if (priv->wolopts) 2811 clk_disable_unprepare(priv->wol_clk); 2812 2813 umac_reset(priv); 2814 2815 /* Disable the UniMAC RX/TX */ 2816 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0); 2817 2818 /* We may have been suspended and never received a WOL event that 2819 * would turn off MPD detection, take care of that now 2820 */ 2821 bcm_sysport_resume_from_wol(priv); 2822 2823 /* Initialize both hardware and software ring */ 2824 for (i = 0; i < dev->num_tx_queues; i++) { 2825 ret = bcm_sysport_init_tx_ring(priv, i); 2826 if (ret) { 2827 netdev_err(dev, "failed to initialize TX ring %d\n", 2828 i); 2829 goto out_free_tx_rings; 2830 } 2831 } 2832 2833 /* Initialize linked-list */ 2834 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS); 2835 2836 /* Initialize RX ring */ 2837 ret = bcm_sysport_init_rx_ring(priv); 2838 if (ret) { 2839 netdev_err(dev, "failed to initialize RX ring\n"); 2840 goto out_free_rx_ring; 2841 } 2842 2843 /* RX pipe enable */ 2844 topctrl_writel(priv, 0, RX_FLUSH_CNTL); 2845 2846 ret = rdma_enable_set(priv, 1); 2847 if (ret) { 2848 netdev_err(dev, "failed to enable RDMA\n"); 2849 goto out_free_rx_ring; 2850 } 2851 2852 /* Restore enabled features */ 2853 bcm_sysport_set_features(dev, dev->features); 2854 2855 rbuf_init(priv); 2856 2857 /* Set maximum frame length */ 2858 if (!priv->is_lite) 2859 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); 2860 else 2861 gib_set_pad_extension(priv); 2862 2863 /* Set MAC address */ 2864 umac_set_hw_addr(priv, dev->dev_addr); 2865 2866 umac_enable_set(priv, CMD_RX_EN, 1); 2867 2868 /* TX pipe enable */ 2869 topctrl_writel(priv, 0, TX_FLUSH_CNTL); 2870 2871 umac_enable_set(priv, CMD_TX_EN, 1); 2872 2873 ret = tdma_enable_set(priv, 1); 2874 if (ret) { 2875 netdev_err(dev, "TDMA timeout!\n"); 2876 goto out_free_rx_ring; 2877 } 2878 2879 phy_resume(dev->phydev); 2880 2881 bcm_sysport_netif_start(dev); 2882 2883 netif_device_attach(dev); 2884 2885 return 0; 2886 2887out_free_rx_ring: 2888 bcm_sysport_fini_rx_ring(priv); 2889out_free_tx_rings: 2890 for (i = 0; i < dev->num_tx_queues; i++) 2891 bcm_sysport_fini_tx_ring(priv, i); 2892 clk_disable_unprepare(priv->clk); 2893 return 
ret; 2894} 2895 2896static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, 2897 bcm_sysport_suspend, bcm_sysport_resume); 2898 2899static struct platform_driver bcm_sysport_driver = { 2900 .probe = bcm_sysport_probe, 2901 .remove = bcm_sysport_remove, 2902 .driver = { 2903 .name = "brcm-systemport", 2904 .of_match_table = bcm_sysport_of_match, 2905 .pm = &bcm_sysport_pm_ops, 2906 }, 2907}; 2908module_platform_driver(bcm_sysport_driver); 2909 2910MODULE_AUTHOR("Broadcom Corporation"); 2911MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver"); 2912MODULE_ALIAS("platform:brcm-systemport"); 2913MODULE_LICENSE("GPL"); 2914