1// SPDX-License-Identifier: GPL-2.0 2/* 3 * Lantiq / Intel GSWIP switch driver for VRX200 SoCs 4 * 5 * Copyright (C) 2010 Lantiq Deutschland 6 * Copyright (C) 2012 John Crispin <john@phrozen.org> 7 * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de> 8 * 9 * The VLAN and bridge model the GSWIP hardware uses does not directly 10 * matches the model DSA uses. 11 * 12 * The hardware has 64 possible table entries for bridges with one VLAN 13 * ID, one flow id and a list of ports for each bridge. All entries which 14 * match the same flow ID are combined in the mac learning table, they 15 * act as one global bridge. 16 * The hardware does not support VLAN filter on the port, but on the 17 * bridge, this driver converts the DSA model to the hardware. 18 * 19 * The CPU gets all the exception frames which do not match any forwarding 20 * rule and the CPU port is also added to all bridges. This makes it possible 21 * to handle all the special cases easily in software. 22 * At the initialization the driver allocates one bridge table entry for 23 * each switch port which is used when the port is used without an 24 * explicit bridge. This prevents the frames from being forwarded 25 * between all LAN ports by default. 
26 */ 27 28#include <linux/clk.h> 29#include <linux/delay.h> 30#include <linux/etherdevice.h> 31#include <linux/firmware.h> 32#include <linux/if_bridge.h> 33#include <linux/if_vlan.h> 34#include <linux/iopoll.h> 35#include <linux/mfd/syscon.h> 36#include <linux/module.h> 37#include <linux/of_mdio.h> 38#include <linux/of_net.h> 39#include <linux/of_platform.h> 40#include <linux/phy.h> 41#include <linux/phylink.h> 42#include <linux/platform_device.h> 43#include <linux/regmap.h> 44#include <linux/reset.h> 45#include <net/dsa.h> 46#include <dt-bindings/mips/lantiq_rcu_gphy.h> 47 48#include "lantiq_pce.h" 49 50/* GSWIP MDIO Registers */ 51#define GSWIP_MDIO_GLOB 0x00 52#define GSWIP_MDIO_GLOB_ENABLE BIT(15) 53#define GSWIP_MDIO_CTRL 0x08 54#define GSWIP_MDIO_CTRL_BUSY BIT(12) 55#define GSWIP_MDIO_CTRL_RD BIT(11) 56#define GSWIP_MDIO_CTRL_WR BIT(10) 57#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f 58#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5 59#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f 60#define GSWIP_MDIO_READ 0x09 61#define GSWIP_MDIO_WRITE 0x0A 62#define GSWIP_MDIO_MDC_CFG0 0x0B 63#define GSWIP_MDIO_MDC_CFG1 0x0C 64#define GSWIP_MDIO_PHYp(p) (0x15 - (p)) 65#define GSWIP_MDIO_PHY_LINK_MASK 0x6000 66#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000 67#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000 68#define GSWIP_MDIO_PHY_LINK_UP 0x2000 69#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800 70#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800 71#define GSWIP_MDIO_PHY_SPEED_M10 0x0000 72#define GSWIP_MDIO_PHY_SPEED_M100 0x0800 73#define GSWIP_MDIO_PHY_SPEED_G1 0x1000 74#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600 75#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000 76#define GSWIP_MDIO_PHY_FDUP_EN 0x0200 77#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600 78#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180 79#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000 80#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100 81#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180 82#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060 83#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000 84#define GSWIP_MDIO_PHY_FCONRX_EN 
0x0020 85#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060 86#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f 87#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \ 88 GSWIP_MDIO_PHY_FCONRX_MASK | \ 89 GSWIP_MDIO_PHY_FCONTX_MASK | \ 90 GSWIP_MDIO_PHY_LINK_MASK | \ 91 GSWIP_MDIO_PHY_SPEED_MASK | \ 92 GSWIP_MDIO_PHY_FDUP_MASK) 93 94/* GSWIP MII Registers */ 95#define GSWIP_MII_CFGp(p) (0x2 * (p)) 96#define GSWIP_MII_CFG_RESET BIT(15) 97#define GSWIP_MII_CFG_EN BIT(14) 98#define GSWIP_MII_CFG_ISOLATE BIT(13) 99#define GSWIP_MII_CFG_LDCLKDIS BIT(12) 100#define GSWIP_MII_CFG_RGMII_IBS BIT(8) 101#define GSWIP_MII_CFG_RMII_CLK BIT(7) 102#define GSWIP_MII_CFG_MODE_MIIP 0x0 103#define GSWIP_MII_CFG_MODE_MIIM 0x1 104#define GSWIP_MII_CFG_MODE_RMIIP 0x2 105#define GSWIP_MII_CFG_MODE_RMIIM 0x3 106#define GSWIP_MII_CFG_MODE_RGMII 0x4 107#define GSWIP_MII_CFG_MODE_MASK 0xf 108#define GSWIP_MII_CFG_RATE_M2P5 0x00 109#define GSWIP_MII_CFG_RATE_M25 0x10 110#define GSWIP_MII_CFG_RATE_M125 0x20 111#define GSWIP_MII_CFG_RATE_M50 0x30 112#define GSWIP_MII_CFG_RATE_AUTO 0x40 113#define GSWIP_MII_CFG_RATE_MASK 0x70 114#define GSWIP_MII_PCDU0 0x01 115#define GSWIP_MII_PCDU1 0x03 116#define GSWIP_MII_PCDU5 0x05 117#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0) 118#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7) 119 120/* GSWIP Core Registers */ 121#define GSWIP_SWRES 0x000 122#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */ 123#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */ 124#define GSWIP_VERSION 0x013 125#define GSWIP_VERSION_REV_SHIFT 0 126#define GSWIP_VERSION_REV_MASK GENMASK(7, 0) 127#define GSWIP_VERSION_MOD_SHIFT 8 128#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8) 129#define GSWIP_VERSION_2_0 0x100 130#define GSWIP_VERSION_2_1 0x021 131#define GSWIP_VERSION_2_2 0x122 132#define GSWIP_VERSION_2_2_ETC 0x022 133 134#define GSWIP_BM_RAM_VAL(x) (0x043 - (x)) 135#define GSWIP_BM_RAM_ADDR 0x044 136#define GSWIP_BM_RAM_CTRL 0x045 137#define GSWIP_BM_RAM_CTRL_BAS BIT(15) 138#define 
GSWIP_BM_RAM_CTRL_OPMOD BIT(5) 139#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0) 140#define GSWIP_BM_QUEUE_GCTRL 0x04A 141#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10) 142/* buffer management Port Configuration Register */ 143#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2)) 144#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */ 145#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingres Special Tag RMON count */ 146/* buffer management Port Control Register */ 147#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2)) 148#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */ 149#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */ 150 151/* PCE */ 152#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x)) 153#define GSWIP_PCE_TBL_MASK 0x448 154#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x)) 155#define GSWIP_PCE_TBL_ADDR 0x44E 156#define GSWIP_PCE_TBL_CTRL 0x44F 157#define GSWIP_PCE_TBL_CTRL_BAS BIT(15) 158#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13) 159#define GSWIP_PCE_TBL_CTRL_VLD BIT(12) 160#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11) 161#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7) 162#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5) 163#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00 164#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20 165#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40 166#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60 167#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0) 168#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */ 169#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */ 170#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */ 171#define GSWIP_PCE_GCTRL_0 0x456 172#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */ 173#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3) 174#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */ 175#define GSWIP_PCE_GCTRL_1 0x457 176#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */ 177#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac 
address table lock forwarding mode */ 178#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA)) 179#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */ 180#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */ 181#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */ 182#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0 183#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1 184#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2 185#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3 186#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7 187#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0) 188#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA)) 189#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */ 190#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */ 191#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */ 192#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */ 193#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */ 194#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA)) 195 196#define GSWIP_MAC_FLEN 0x8C5 197#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC)) 198#define GSWIP_MAC_CTRL_0_PADEN BIT(8) 199#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7) 200#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070 201#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000 202#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010 203#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020 204#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030 205#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040 206#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C 207#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000 208#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004 209#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C 210#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003 211#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000 212#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001 213#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002 214#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC)) 215#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Lnegth */ 216 217/* Ethernet Switch 
Fetch DMA Port Control Register */ 218#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6)) 219#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */ 220#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */ 221#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */ 222#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */ 223#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT) 224#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT) 225#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT) 226#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT) 227 228/* Ethernet Switch Store DMA Port Control Register */ 229#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6)) 230#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */ 231#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */ 232#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */ 233 234#define GSWIP_TABLE_ACTIVE_VLAN 0x01 235#define GSWIP_TABLE_VLAN_MAPPING 0x02 236#define GSWIP_TABLE_MAC_BRIDGE 0x0b 237#define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static not, aging entry */ 238 239#define XRX200_GPHY_FW_ALIGN (16 * 1024) 240 241struct gswip_hw_info { 242 int max_ports; 243 int cpu_port; 244}; 245 246struct xway_gphy_match_data { 247 char *fe_firmware_name; 248 char *ge_firmware_name; 249}; 250 251struct gswip_gphy_fw { 252 struct clk *clk_gate; 253 struct reset_control *reset; 254 u32 fw_addr_offset; 255 char *fw_name; 256}; 257 258struct gswip_vlan { 259 struct net_device *bridge; 260 u16 vid; 261 u8 fid; 262}; 263 264struct gswip_priv { 265 __iomem void *gswip; 266 __iomem void *mdio; 267 __iomem void *mii; 268 const struct gswip_hw_info *hw_info; 269 const struct xway_gphy_match_data *gphy_fw_name_cfg; 270 struct dsa_switch *ds; 271 struct device *dev; 272 struct regmap *rcu_regmap; 273 struct gswip_vlan vlans[64]; 274 int 
num_gphy_fw; 275 struct gswip_gphy_fw *gphy_fw; 276 u32 port_vlan_filter; 277}; 278 279struct gswip_pce_table_entry { 280 u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index 281 u16 table; // PCE_TBL_CTRL.ADDR = pData->table 282 u16 key[8]; 283 u16 val[5]; 284 u16 mask; 285 u8 gmap; 286 bool type; 287 bool valid; 288 bool key_mode; 289}; 290 291struct gswip_rmon_cnt_desc { 292 unsigned int size; 293 unsigned int offset; 294 const char *name; 295}; 296 297#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name} 298 299static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = { 300 /** Receive Packet Count (only packets that are accepted and not discarded). */ 301 MIB_DESC(1, 0x1F, "RxGoodPkts"), 302 MIB_DESC(1, 0x23, "RxUnicastPkts"), 303 MIB_DESC(1, 0x22, "RxMulticastPkts"), 304 MIB_DESC(1, 0x21, "RxFCSErrorPkts"), 305 MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"), 306 MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"), 307 MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"), 308 MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"), 309 MIB_DESC(1, 0x20, "RxGoodPausePkts"), 310 MIB_DESC(1, 0x1A, "RxAlignErrorPkts"), 311 MIB_DESC(1, 0x12, "Rx64BytePkts"), 312 MIB_DESC(1, 0x13, "Rx127BytePkts"), 313 MIB_DESC(1, 0x14, "Rx255BytePkts"), 314 MIB_DESC(1, 0x15, "Rx511BytePkts"), 315 MIB_DESC(1, 0x16, "Rx1023BytePkts"), 316 /** Receive Size 1024-1522 (or more, if configured) Packet Count. 
*/ 317 MIB_DESC(1, 0x17, "RxMaxBytePkts"), 318 MIB_DESC(1, 0x18, "RxDroppedPkts"), 319 MIB_DESC(1, 0x19, "RxFilteredPkts"), 320 MIB_DESC(2, 0x24, "RxGoodBytes"), 321 MIB_DESC(2, 0x26, "RxBadBytes"), 322 MIB_DESC(1, 0x11, "TxAcmDroppedPkts"), 323 MIB_DESC(1, 0x0C, "TxGoodPkts"), 324 MIB_DESC(1, 0x06, "TxUnicastPkts"), 325 MIB_DESC(1, 0x07, "TxMulticastPkts"), 326 MIB_DESC(1, 0x00, "Tx64BytePkts"), 327 MIB_DESC(1, 0x01, "Tx127BytePkts"), 328 MIB_DESC(1, 0x02, "Tx255BytePkts"), 329 MIB_DESC(1, 0x03, "Tx511BytePkts"), 330 MIB_DESC(1, 0x04, "Tx1023BytePkts"), 331 /** Transmit Size 1024-1522 (or more, if configured) Packet Count. */ 332 MIB_DESC(1, 0x05, "TxMaxBytePkts"), 333 MIB_DESC(1, 0x08, "TxSingleCollCount"), 334 MIB_DESC(1, 0x09, "TxMultCollCount"), 335 MIB_DESC(1, 0x0A, "TxLateCollCount"), 336 MIB_DESC(1, 0x0B, "TxExcessCollCount"), 337 MIB_DESC(1, 0x0D, "TxPauseCount"), 338 MIB_DESC(1, 0x10, "TxDroppedPkts"), 339 MIB_DESC(2, 0x0E, "TxGoodBytes"), 340}; 341 342static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset) 343{ 344 return __raw_readl(priv->gswip + (offset * 4)); 345} 346 347static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset) 348{ 349 __raw_writel(val, priv->gswip + (offset * 4)); 350} 351 352static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set, 353 u32 offset) 354{ 355 u32 val = gswip_switch_r(priv, offset); 356 357 val &= ~(clear); 358 val |= set; 359 gswip_switch_w(priv, val, offset); 360} 361 362static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset, 363 u32 cleared) 364{ 365 u32 val; 366 367 return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val, 368 (val & cleared) == 0, 20, 50000); 369} 370 371static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset) 372{ 373 return __raw_readl(priv->mdio + (offset * 4)); 374} 375 376static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset) 377{ 378 __raw_writel(val, priv->mdio + (offset * 4)); 379} 380 381static void 
gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set, 382 u32 offset) 383{ 384 u32 val = gswip_mdio_r(priv, offset); 385 386 val &= ~(clear); 387 val |= set; 388 gswip_mdio_w(priv, val, offset); 389} 390 391static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset) 392{ 393 return __raw_readl(priv->mii + (offset * 4)); 394} 395 396static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset) 397{ 398 __raw_writel(val, priv->mii + (offset * 4)); 399} 400 401static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set, 402 u32 offset) 403{ 404 u32 val = gswip_mii_r(priv, offset); 405 406 val &= ~(clear); 407 val |= set; 408 gswip_mii_w(priv, val, offset); 409} 410 411static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set, 412 int port) 413{ 414 /* There's no MII_CFG register for the CPU port */ 415 if (!dsa_is_cpu_port(priv->ds, port)) 416 gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port)); 417} 418 419static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set, 420 int port) 421{ 422 switch (port) { 423 case 0: 424 gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0); 425 break; 426 case 1: 427 gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1); 428 break; 429 case 5: 430 gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5); 431 break; 432 } 433} 434 435static int gswip_mdio_poll(struct gswip_priv *priv) 436{ 437 int cnt = 100; 438 439 while (likely(cnt--)) { 440 u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL); 441 442 if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0) 443 return 0; 444 usleep_range(20, 40); 445 } 446 447 return -ETIMEDOUT; 448} 449 450static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val) 451{ 452 struct gswip_priv *priv = bus->priv; 453 int err; 454 455 err = gswip_mdio_poll(priv); 456 if (err) { 457 dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); 458 return err; 459 } 460 461 gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE); 462 gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | 
GSWIP_MDIO_CTRL_WR | 463 ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | 464 (reg & GSWIP_MDIO_CTRL_REGAD_MASK), 465 GSWIP_MDIO_CTRL); 466 467 return 0; 468} 469 470static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg) 471{ 472 struct gswip_priv *priv = bus->priv; 473 int err; 474 475 err = gswip_mdio_poll(priv); 476 if (err) { 477 dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); 478 return err; 479 } 480 481 gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD | 482 ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | 483 (reg & GSWIP_MDIO_CTRL_REGAD_MASK), 484 GSWIP_MDIO_CTRL); 485 486 err = gswip_mdio_poll(priv); 487 if (err) { 488 dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); 489 return err; 490 } 491 492 return gswip_mdio_r(priv, GSWIP_MDIO_READ); 493} 494 495static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np) 496{ 497 struct dsa_switch *ds = priv->ds; 498 499 ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev); 500 if (!ds->slave_mii_bus) 501 return -ENOMEM; 502 503 ds->slave_mii_bus->priv = priv; 504 ds->slave_mii_bus->read = gswip_mdio_rd; 505 ds->slave_mii_bus->write = gswip_mdio_wr; 506 ds->slave_mii_bus->name = "lantiq,xrx200-mdio"; 507 snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", 508 dev_name(priv->dev)); 509 ds->slave_mii_bus->parent = priv->dev; 510 ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask; 511 512 return of_mdiobus_register(ds->slave_mii_bus, mdio_np); 513} 514 515static int gswip_pce_table_entry_read(struct gswip_priv *priv, 516 struct gswip_pce_table_entry *tbl) 517{ 518 int i; 519 int err; 520 u16 crtl; 521 u16 addr_mode = tbl->key_mode ? 
GSWIP_PCE_TBL_CTRL_OPMOD_KSRD : 522 GSWIP_PCE_TBL_CTRL_OPMOD_ADRD; 523 524 err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, 525 GSWIP_PCE_TBL_CTRL_BAS); 526 if (err) 527 return err; 528 529 gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR); 530 gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | 531 GSWIP_PCE_TBL_CTRL_OPMOD_MASK, 532 tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS, 533 GSWIP_PCE_TBL_CTRL); 534 535 err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, 536 GSWIP_PCE_TBL_CTRL_BAS); 537 if (err) 538 return err; 539 540 for (i = 0; i < ARRAY_SIZE(tbl->key); i++) 541 tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i)); 542 543 for (i = 0; i < ARRAY_SIZE(tbl->val); i++) 544 tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i)); 545 546 tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK); 547 548 crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL); 549 550 tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE); 551 tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD); 552 tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7; 553 554 return 0; 555} 556 557static int gswip_pce_table_entry_write(struct gswip_priv *priv, 558 struct gswip_pce_table_entry *tbl) 559{ 560 int i; 561 int err; 562 u16 crtl; 563 u16 addr_mode = tbl->key_mode ? 
GSWIP_PCE_TBL_CTRL_OPMOD_KSWR : 564 GSWIP_PCE_TBL_CTRL_OPMOD_ADWR; 565 566 err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, 567 GSWIP_PCE_TBL_CTRL_BAS); 568 if (err) 569 return err; 570 571 gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR); 572 gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | 573 GSWIP_PCE_TBL_CTRL_OPMOD_MASK, 574 tbl->table | addr_mode, 575 GSWIP_PCE_TBL_CTRL); 576 577 for (i = 0; i < ARRAY_SIZE(tbl->key); i++) 578 gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i)); 579 580 for (i = 0; i < ARRAY_SIZE(tbl->val); i++) 581 gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i)); 582 583 gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | 584 GSWIP_PCE_TBL_CTRL_OPMOD_MASK, 585 tbl->table | addr_mode, 586 GSWIP_PCE_TBL_CTRL); 587 588 gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK); 589 590 crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL); 591 crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD | 592 GSWIP_PCE_TBL_CTRL_GMAP_MASK); 593 if (tbl->type) 594 crtl |= GSWIP_PCE_TBL_CTRL_TYPE; 595 if (tbl->valid) 596 crtl |= GSWIP_PCE_TBL_CTRL_VLD; 597 crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK; 598 crtl |= GSWIP_PCE_TBL_CTRL_BAS; 599 gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL); 600 601 return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, 602 GSWIP_PCE_TBL_CTRL_BAS); 603} 604 605/* Add the LAN port into a bridge with the CPU port by 606 * default. This prevents automatic forwarding of 607 * packages between the LAN ports when no explicit 608 * bridge is configured. 
609 */ 610static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add) 611{ 612 struct gswip_pce_table_entry vlan_active = {0,}; 613 struct gswip_pce_table_entry vlan_mapping = {0,}; 614 unsigned int cpu_port = priv->hw_info->cpu_port; 615 unsigned int max_ports = priv->hw_info->max_ports; 616 int err; 617 618 if (port >= max_ports) { 619 dev_err(priv->dev, "single port for %i supported\n", port); 620 return -EIO; 621 } 622 623 vlan_active.index = port + 1; 624 vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; 625 vlan_active.key[0] = 0; /* vid */ 626 vlan_active.val[0] = port + 1 /* fid */; 627 vlan_active.valid = add; 628 err = gswip_pce_table_entry_write(priv, &vlan_active); 629 if (err) { 630 dev_err(priv->dev, "failed to write active VLAN: %d\n", err); 631 return err; 632 } 633 634 if (!add) 635 return 0; 636 637 vlan_mapping.index = port + 1; 638 vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; 639 vlan_mapping.val[0] = 0 /* vid */; 640 vlan_mapping.val[1] = BIT(port) | BIT(cpu_port); 641 vlan_mapping.val[2] = 0; 642 err = gswip_pce_table_entry_write(priv, &vlan_mapping); 643 if (err) { 644 dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); 645 return err; 646 } 647 648 return 0; 649} 650 651static int gswip_port_enable(struct dsa_switch *ds, int port, 652 struct phy_device *phydev) 653{ 654 struct gswip_priv *priv = ds->priv; 655 int err; 656 657 if (!dsa_is_user_port(ds, port)) 658 return 0; 659 660 if (!dsa_is_cpu_port(ds, port)) { 661 err = gswip_add_single_port_br(priv, port, true); 662 if (err) 663 return err; 664 } 665 666 /* RMON Counter Enable for port */ 667 gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port)); 668 669 /* enable port fetch/store dma & VLAN Modification */ 670 gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN | 671 GSWIP_FDMA_PCTRL_VLANMOD_BOTH, 672 GSWIP_FDMA_PCTRLp(port)); 673 gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN, 674 GSWIP_SDMA_PCTRLp(port)); 675 676 if (!dsa_is_cpu_port(ds, port)) { 677 
u32 mdio_phy = 0; 678 679 if (phydev) 680 mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK; 681 682 gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy, 683 GSWIP_MDIO_PHYp(port)); 684 } 685 686 return 0; 687} 688 689static void gswip_port_disable(struct dsa_switch *ds, int port) 690{ 691 struct gswip_priv *priv = ds->priv; 692 693 if (!dsa_is_user_port(ds, port)) 694 return; 695 696 gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0, 697 GSWIP_FDMA_PCTRLp(port)); 698 gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0, 699 GSWIP_SDMA_PCTRLp(port)); 700} 701 702static int gswip_pce_load_microcode(struct gswip_priv *priv) 703{ 704 int i; 705 int err; 706 707 gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | 708 GSWIP_PCE_TBL_CTRL_OPMOD_MASK, 709 GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL); 710 gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK); 711 712 for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) { 713 gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR); 714 gswip_switch_w(priv, gswip_pce_microcode[i].val_0, 715 GSWIP_PCE_TBL_VAL(0)); 716 gswip_switch_w(priv, gswip_pce_microcode[i].val_1, 717 GSWIP_PCE_TBL_VAL(1)); 718 gswip_switch_w(priv, gswip_pce_microcode[i].val_2, 719 GSWIP_PCE_TBL_VAL(2)); 720 gswip_switch_w(priv, gswip_pce_microcode[i].val_3, 721 GSWIP_PCE_TBL_VAL(3)); 722 723 /* start the table access: */ 724 gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS, 725 GSWIP_PCE_TBL_CTRL); 726 err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, 727 GSWIP_PCE_TBL_CTRL_BAS); 728 if (err) 729 return err; 730 } 731 732 /* tell the switch that the microcode is loaded */ 733 gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID, 734 GSWIP_PCE_GCTRL_0); 735 736 return 0; 737} 738 739static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port, 740 bool vlan_filtering, 741 struct switchdev_trans *trans) 742{ 743 struct gswip_priv *priv = ds->priv; 744 745 /* Do not allow changing the VLAN filtering options while in bridge */ 746 if 
(switchdev_trans_ph_prepare(trans)) { 747 struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; 748 749 if (!bridge) 750 return 0; 751 752 if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) 753 return -EIO; 754 755 return 0; 756 } 757 758 if (vlan_filtering) { 759 /* Use port based VLAN tag */ 760 gswip_switch_mask(priv, 761 GSWIP_PCE_VCTRL_VSR, 762 GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR | 763 GSWIP_PCE_VCTRL_VEMR, 764 GSWIP_PCE_VCTRL(port)); 765 gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0, 766 GSWIP_PCE_PCTRL_0p(port)); 767 } else { 768 /* Use port based VLAN tag */ 769 gswip_switch_mask(priv, 770 GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR | 771 GSWIP_PCE_VCTRL_VEMR, 772 GSWIP_PCE_VCTRL_VSR, 773 GSWIP_PCE_VCTRL(port)); 774 gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM, 775 GSWIP_PCE_PCTRL_0p(port)); 776 } 777 778 return 0; 779} 780 781static int gswip_setup(struct dsa_switch *ds) 782{ 783 struct gswip_priv *priv = ds->priv; 784 unsigned int cpu_port = priv->hw_info->cpu_port; 785 int i; 786 int err; 787 788 gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES); 789 usleep_range(5000, 10000); 790 gswip_switch_w(priv, 0, GSWIP_SWRES); 791 792 /* disable port fetch/store dma on all ports */ 793 for (i = 0; i < priv->hw_info->max_ports; i++) { 794 struct switchdev_trans trans; 795 796 /* Skip the prepare phase, this shouldn't return an error 797 * during setup. 
798 */ 799 trans.ph_prepare = false; 800 801 gswip_port_disable(ds, i); 802 gswip_port_vlan_filtering(ds, i, false, &trans); 803 } 804 805 /* enable Switch */ 806 gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB); 807 808 err = gswip_pce_load_microcode(priv); 809 if (err) { 810 dev_err(priv->dev, "writing PCE microcode failed, %i", err); 811 return err; 812 } 813 814 /* Default unknown Broadcast/Multicast/Unicast port maps */ 815 gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1); 816 gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2); 817 gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3); 818 819 /* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an 820 * interoperability problem with this auto polling mechanism because 821 * their status registers think that the link is in a different state 822 * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set 823 * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the 824 * auto polling state machine consider the link being negotiated with 825 * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads 826 * to the switch port being completely dead (RX and TX are both not 827 * working). 828 * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F 829 * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes 830 * it would work fine for a few minutes to hours and then stop, on 831 * other device it would no traffic could be sent or received at all. 832 * Testing shows that when PHY auto polling is disabled these problems 833 * go away. 
834 */ 835 gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0); 836 837 /* Configure the MDIO Clock 2.5 MHz */ 838 gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1); 839 840 /* Disable the xMII interface and clear it's isolation bit */ 841 for (i = 0; i < priv->hw_info->max_ports; i++) 842 gswip_mii_mask_cfg(priv, 843 GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE, 844 0, i); 845 846 /* enable special tag insertion on cpu port */ 847 gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN, 848 GSWIP_FDMA_PCTRLp(cpu_port)); 849 850 /* accept special tag in ingress direction */ 851 gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS, 852 GSWIP_PCE_PCTRL_0p(cpu_port)); 853 854 gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN, 855 GSWIP_MAC_CTRL_2p(cpu_port)); 856 gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN, 857 GSWIP_MAC_FLEN); 858 gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD, 859 GSWIP_BM_QUEUE_GCTRL); 860 861 /* VLAN aware Switching */ 862 gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0); 863 864 /* Flush MAC Table */ 865 gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0); 866 867 err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0, 868 GSWIP_PCE_GCTRL_0_MTFL); 869 if (err) { 870 dev_err(priv->dev, "MAC flushing didn't finish\n"); 871 return err; 872 } 873 874 gswip_port_enable(ds, cpu_port, NULL); 875 return 0; 876} 877 878static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds, 879 int port, 880 enum dsa_tag_protocol mp) 881{ 882 return DSA_TAG_PROTO_GSWIP; 883} 884 885static int gswip_vlan_active_create(struct gswip_priv *priv, 886 struct net_device *bridge, 887 int fid, u16 vid) 888{ 889 struct gswip_pce_table_entry vlan_active = {0,}; 890 unsigned int max_ports = priv->hw_info->max_ports; 891 int idx = -1; 892 int err; 893 int i; 894 895 /* Look for a free slot */ 896 for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { 897 if (!priv->vlans[i].bridge) { 898 idx = i; 899 break; 900 } 901 } 
902 903 if (idx == -1) 904 return -ENOSPC; 905 906 if (fid == -1) 907 fid = idx; 908 909 vlan_active.index = idx; 910 vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; 911 vlan_active.key[0] = vid; 912 vlan_active.val[0] = fid; 913 vlan_active.valid = true; 914 915 err = gswip_pce_table_entry_write(priv, &vlan_active); 916 if (err) { 917 dev_err(priv->dev, "failed to write active VLAN: %d\n", err); 918 return err; 919 } 920 921 priv->vlans[idx].bridge = bridge; 922 priv->vlans[idx].vid = vid; 923 priv->vlans[idx].fid = fid; 924 925 return idx; 926} 927 928static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx) 929{ 930 struct gswip_pce_table_entry vlan_active = {0,}; 931 int err; 932 933 vlan_active.index = idx; 934 vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; 935 vlan_active.valid = false; 936 err = gswip_pce_table_entry_write(priv, &vlan_active); 937 if (err) 938 dev_err(priv->dev, "failed to delete active VLAN: %d\n", err); 939 priv->vlans[idx].bridge = NULL; 940 941 return err; 942} 943 944static int gswip_vlan_add_unaware(struct gswip_priv *priv, 945 struct net_device *bridge, int port) 946{ 947 struct gswip_pce_table_entry vlan_mapping = {0,}; 948 unsigned int max_ports = priv->hw_info->max_ports; 949 unsigned int cpu_port = priv->hw_info->cpu_port; 950 bool active_vlan_created = false; 951 int idx = -1; 952 int i; 953 int err; 954 955 /* Check if there is already a page for this bridge */ 956 for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { 957 if (priv->vlans[i].bridge == bridge) { 958 idx = i; 959 break; 960 } 961 } 962 963 /* If this bridge is not programmed yet, add a Active VLAN table 964 * entry in a free slot and prepare the VLAN mapping table entry. 
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, -1, 0);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
		vlan_mapping.val[0] = 0;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	/* Update the VLAN mapping entry and write it to the switch.
	 * val[1] is the port member map; the CPU port is always a member.
	 */
	vlan_mapping.val[1] |= BIT(cpu_port);
	vlan_mapping.val[1] |= BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
	return 0;
}

/* Add @port to the VLAN-aware bridge @bridge for @vid.  All VLANs of one
 * bridge share a single flow ID (FID), which is what actually isolates
 * the bridges from each other in the MAC bridge table.
 */
static int gswip_vlan_add_aware(struct gswip_priv *priv,
				struct net_device *bridge, int port,
				u16 vid, bool untagged,
				bool pvid)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	bool active_vlan_created = false;
	int idx = -1;
	int fid = -1;
	int i;
	int err;

	/* Check if there is already a page for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			/* all VLANs of one bridge must share one FID */
			if (fid != -1 && fid != priv->vlans[i].fid)
				dev_err(priv->dev, "one bridge with multiple flow ids\n");
			fid = priv->vlans[i].fid;
			if (priv->vlans[i].vid == vid) {
				idx = i;
				break;
			}
		}
	}

	/* If this bridge is not programmed yet, add an Active VLAN table
	 * entry in a free slot and prepare the VLAN mapping table entry.
	 */
	if (idx == -1) {
		idx = gswip_vlan_active_create(priv, bridge, fid, vid);
		if (idx < 0)
			return idx;
		active_vlan_created = true;

		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		/* VLAN ID byte, maps to the VLAN ID of vlan active table */
		vlan_mapping.val[0] = vid;
	} else {
		/* Read the existing VLAN mapping entry from the switch */
		vlan_mapping.index = idx;
		vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
		err = gswip_pce_table_entry_read(priv, &vlan_mapping);
		if (err) {
			dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
				err);
			return err;
		}
	}

	vlan_mapping.val[0] = vid;
	/* Update the VLAN mapping entry and write it to the switch.
	 * val[1] is the port member map, val[2] the tagged-egress map.
	 */
	vlan_mapping.val[1] |= BIT(cpu_port);
	vlan_mapping.val[2] |= BIT(cpu_port);
	vlan_mapping.val[1] |= BIT(port);
	if (untagged)
		vlan_mapping.val[2] &= ~BIT(port);
	else
		vlan_mapping.val[2] |= BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		/* In case an Active VLAN was created delete it again */
		if (active_vlan_created)
			gswip_vlan_active_remove(priv, idx);
		return err;
	}

	if (pvid)
		gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));

	return 0;
}

/* Remove @port from the bridge/VLAN entry selected by @bridge (and @vid
 * when @vlan_aware).  Tears down the Active VLAN entry once the last
 * non-CPU port left it.
 */
static int gswip_vlan_remove(struct gswip_priv *priv,
			     struct net_device *bridge, int port,
			     u16 vid, bool pvid, bool vlan_aware)
{
	struct gswip_pce_table_entry vlan_mapping = {0,};
	unsigned int max_ports = priv->hw_info->max_ports;
	unsigned int cpu_port = priv->hw_info->cpu_port;
	int idx = -1;
	int i;
	int err;

	/* Check if there is already a page
 for this bridge */
	for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge &&
		    (!vlan_aware || priv->vlans[i].vid == vid)) {
			idx = i;
			break;
		}
	}

	if (idx == -1) {
		dev_err(priv->dev, "bridge to leave does not exists\n");
		return -ENOENT;
	}

	vlan_mapping.index = idx;
	vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
	err = gswip_pce_table_entry_read(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
		return err;
	}

	/* drop the port from both the member map and the tag map */
	vlan_mapping.val[1] &= ~BIT(port);
	vlan_mapping.val[2] &= ~BIT(port);
	err = gswip_pce_table_entry_write(priv, &vlan_mapping);
	if (err) {
		dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
		return err;
	}

	/* In case all ports are removed from the bridge, remove the VLAN */
	if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
		err = gswip_vlan_active_remove(priv, idx);
		if (err) {
			dev_err(priv->dev, "failed to write active VLAN: %d\n",
				err);
			return err;
		}
	}

	/* GSWIP 2.2 (GRX300) and later program here the VID directly. */
	if (pvid)
		gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));

	return 0;
}

/* DSA .port_bridge_join: program a VLAN-unaware bridge entry right away;
 * for VLAN filtering bridges only mark the port, the per-VID entries are
 * created later through the port_vlan_* callbacks.
 */
static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
				  struct net_device *bridge)
{
	struct gswip_priv *priv = ds->priv;
	int err;

	/* When the bridge uses VLAN filtering we have to configure VLAN
	 * specific bridges. No bridge is configured here.
	 */
	if (!br_vlan_enabled(bridge)) {
		err = gswip_vlan_add_unaware(priv, bridge, port);
		if (err)
			return err;
		priv->port_vlan_filter &= ~BIT(port);
	} else {
		priv->port_vlan_filter |= BIT(port);
	}
	/* leave the single-port bridge the port was isolated in */
	return gswip_add_single_port_br(priv, port, false);
}

/* DSA .port_bridge_leave: put the port back into its own single-port
 * bridge and, for VLAN-unaware bridges, remove it from the shared entry.
 */
static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
				    struct net_device *bridge)
{
	struct gswip_priv *priv = ds->priv;

	gswip_add_single_port_br(priv, port, true);

	/* When the bridge uses VLAN filtering we have to configure VLAN
	 * specific bridges. No bridge is configured here.
	 */
	if (!br_vlan_enabled(bridge))
		gswip_vlan_remove(priv, bridge, port, 0, true, false);
}

/* DSA .port_vlan_prepare: verify that a free Active VLAN slot exists for
 * every VID in the range before .port_vlan_add commits anything.
 */
static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	unsigned int max_ports = priv->hw_info->max_ports;
	u16 vid;
	int i;
	int pos = max_ports;

	/* We only support VLAN filtering on bridges */
	if (!dsa_is_cpu_port(ds, port) && !bridge)
		return -EOPNOTSUPP;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		int idx = -1;

		/* Check if there is already a page for this VLAN */
		for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
			if (priv->vlans[i].bridge == bridge &&
			    priv->vlans[i].vid == vid) {
				idx = i;
				break;
			}
		}

		/* If this VLAN is not programmed yet, we have to reserve
		 * one entry in the VLAN table. Make sure we start at the
		 * next position round.
		 */
		if (idx == -1) {
			/* Look for a free slot */
			for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
				if (!priv->vlans[pos].bridge) {
					idx = pos;
					pos++;
					break;
				}
			}

			if (idx == -1)
				return -ENOSPC;
		}
	}

	return 0;
}

/* DSA .port_vlan_add: program one entry per VID in the requested range */
static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	u16 vid;

	/* We have to receive all packets on the CPU port and should not
	 * do any VLAN filtering here. This is also called with bridge
	 * NULL and then we do not know for which bridge to configure
	 * this.
	 */
	if (dsa_is_cpu_port(ds, port))
		return;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
		gswip_vlan_add_aware(priv, bridge, port, vid, untagged, pvid);
}

/* DSA .port_vlan_del: remove the port from each VID in the range */
static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct gswip_priv *priv = ds->priv;
	struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	u16 vid;
	int err;

	/* We have to receive all packets on the CPU port and should not
	 * do any VLAN filtering here. This is also called with bridge
	 * NULL and then we do not know for which bridge to configure
	 * this.
	 */
	if (dsa_is_cpu_port(ds, port))
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		err = gswip_vlan_remove(priv, bridge, port, vid, pvid, true);
		if (err)
			return err;
	}

	return 0;
}

/* DSA .port_fast_age: walk all 2048 MAC bridge table entries and
 * invalidate the dynamically learned ones belonging to @port (bits 7:4
 * of val[0] hold the source port for learned entries; static entries are
 * skipped).
 */
static void gswip_port_fast_age(struct dsa_switch *ds, int port)
{
	struct gswip_priv *priv = ds->priv;
	struct gswip_pce_table_entry mac_bridge = {0,};
	int i;
	int err;

	for (i = 0; i < 2048; i++) {
		mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
		mac_bridge.index = i;

		err = gswip_pce_table_entry_read(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to read mac bridge: %d\n",
				err);
			return;
		}

		if (!mac_bridge.valid)
			continue;

		if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
			continue;

		if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
			continue;

		mac_bridge.valid = false;
		err = gswip_pce_table_entry_write(priv, &mac_bridge);
		if (err) {
			dev_err(priv->dev, "failed to write mac bridge: %d\n",
				err);
			return;
		}
	}
}

/* DSA .port_stp_state_set: map the bridge STP state onto the PCE port
 * state.  BR_STATE_DISABLED is implemented by stopping the port's SDMA
 * instead of programming a PCE state.
 */
static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct gswip_priv *priv = ds->priv;
	u32 stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
				  GSWIP_SDMA_PCTRLp(port));
		return;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
		break;
	case BR_STATE_LEARNING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
		stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
		break;
	default:
		dev_err(priv->dev, "invalid STP state: %d\n", state);
		return;
	}

	gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
			  GSWIP_SDMA_PCTRLp(port));
	gswip_switch_mask(priv,
GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state, 1332 GSWIP_PCE_PCTRL_0p(port)); 1333} 1334 1335static int gswip_port_fdb(struct dsa_switch *ds, int port, 1336 const unsigned char *addr, u16 vid, bool add) 1337{ 1338 struct gswip_priv *priv = ds->priv; 1339 struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev; 1340 struct gswip_pce_table_entry mac_bridge = {0,}; 1341 unsigned int cpu_port = priv->hw_info->cpu_port; 1342 int fid = -1; 1343 int i; 1344 int err; 1345 1346 if (!bridge) 1347 return -EINVAL; 1348 1349 for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) { 1350 if (priv->vlans[i].bridge == bridge) { 1351 fid = priv->vlans[i].fid; 1352 break; 1353 } 1354 } 1355 1356 if (fid == -1) { 1357 dev_err(priv->dev, "Port not part of a bridge\n"); 1358 return -EINVAL; 1359 } 1360 1361 mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; 1362 mac_bridge.key_mode = true; 1363 mac_bridge.key[0] = addr[5] | (addr[4] << 8); 1364 mac_bridge.key[1] = addr[3] | (addr[2] << 8); 1365 mac_bridge.key[2] = addr[1] | (addr[0] << 8); 1366 mac_bridge.key[3] = fid; 1367 mac_bridge.val[0] = add ? 
BIT(port) : 0; /* port map */ 1368 mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC; 1369 mac_bridge.valid = add; 1370 1371 err = gswip_pce_table_entry_write(priv, &mac_bridge); 1372 if (err) 1373 dev_err(priv->dev, "failed to write mac bridge: %d\n", err); 1374 1375 return err; 1376} 1377 1378static int gswip_port_fdb_add(struct dsa_switch *ds, int port, 1379 const unsigned char *addr, u16 vid) 1380{ 1381 return gswip_port_fdb(ds, port, addr, vid, true); 1382} 1383 1384static int gswip_port_fdb_del(struct dsa_switch *ds, int port, 1385 const unsigned char *addr, u16 vid) 1386{ 1387 return gswip_port_fdb(ds, port, addr, vid, false); 1388} 1389 1390static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, 1391 dsa_fdb_dump_cb_t *cb, void *data) 1392{ 1393 struct gswip_priv *priv = ds->priv; 1394 struct gswip_pce_table_entry mac_bridge = {0,}; 1395 unsigned char addr[6]; 1396 int i; 1397 int err; 1398 1399 for (i = 0; i < 2048; i++) { 1400 mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; 1401 mac_bridge.index = i; 1402 1403 err = gswip_pce_table_entry_read(priv, &mac_bridge); 1404 if (err) { 1405 dev_err(priv->dev, "failed to write mac bridge: %d\n", 1406 err); 1407 return err; 1408 } 1409 1410 if (!mac_bridge.valid) 1411 continue; 1412 1413 addr[5] = mac_bridge.key[0] & 0xff; 1414 addr[4] = (mac_bridge.key[0] >> 8) & 0xff; 1415 addr[3] = mac_bridge.key[1] & 0xff; 1416 addr[2] = (mac_bridge.key[1] >> 8) & 0xff; 1417 addr[1] = mac_bridge.key[2] & 0xff; 1418 addr[0] = (mac_bridge.key[2] >> 8) & 0xff; 1419 if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) { 1420 if (mac_bridge.val[0] & BIT(port)) { 1421 err = cb(addr, 0, true, data); 1422 if (err) 1423 return err; 1424 } 1425 } else { 1426 if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) { 1427 err = cb(addr, 0, false, data); 1428 if (err) 1429 return err; 1430 } 1431 } 1432 } 1433 return 0; 1434} 1435 1436static void gswip_phylink_validate(struct dsa_switch *ds, int port, 1437 unsigned long *supported, 
				   struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	switch (port) {
	case 0:
	case 1:
		/* ports 0/1: only the xMII variants are accepted */
		if (!phy_interface_mode_is_rgmii(state->interface) &&
		    state->interface != PHY_INTERFACE_MODE_MII &&
		    state->interface != PHY_INTERFACE_MODE_REVMII &&
		    state->interface != PHY_INTERFACE_MODE_RMII)
			goto unsupported;
		break;
	case 2:
	case 3:
	case 4:
		/* ports 2-4: internal interface only (SoC-internal GPHYs,
		 * see the GPHY firmware handling below)
		 */
		if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
			goto unsupported;
		break;
	case 5:
		if (!phy_interface_mode_is_rgmii(state->interface) &&
		    state->interface != PHY_INTERFACE_MODE_INTERNAL)
			goto unsupported;
		break;
	default:
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		dev_err(ds->dev, "Unsupported port: %i\n", port);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	/* With the exclusion of MII, Reverse MII and Reduced MII, we
	 * support Gigabit, including Half duplex
	 */
	if (state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_REVMII &&
	    state->interface != PHY_INTERFACE_MODE_RMII) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseT_Half);
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	return;

unsupported:
	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
	dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
		phy_modes(state->interface), port);
	return;
}

/* Force the per-port MDIO link status for @port up or down */
static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
{
	u32 mdio_phy;

	if (link)
		mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
	else
		mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;

	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
			GSWIP_MDIO_PHYp(port));
}

/* Program the resolved link speed into the per-port MDIO PHY register,
 * the xMII clock rate and the MAC GMII/RGMII mode for @port.  RMII always
 * uses a 50 MHz reference clock regardless of 10/100 speed.
 */
static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
				 phy_interface_t interface)
{
	u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;

	switch (speed) {
	case SPEED_10:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;

		if (interface == PHY_INTERFACE_MODE_RMII)
			mii_cfg = GSWIP_MII_CFG_RATE_M50;
		else
			mii_cfg = GSWIP_MII_CFG_RATE_M2P5;

		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
		break;

	case SPEED_100:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;

		if (interface == PHY_INTERFACE_MODE_RMII)
			mii_cfg = GSWIP_MII_CFG_RATE_M50;
		else
			mii_cfg = GSWIP_MII_CFG_RATE_M25;

		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
		break;

	case SPEED_1000:
		mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;

		mii_cfg = GSWIP_MII_CFG_RATE_M125;

		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
		break;
	}

	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
			GSWIP_MDIO_PHYp(port));
	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
			  GSWIP_MAC_CTRL_0p(port));
}

/* Apply the resolved duplex mode to both the MAC and the MDIO register */
static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
{
	u32 mac_ctrl_0, mdio_phy;

	if (duplex == DUPLEX_FULL) {
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
		mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
	} else {
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
		mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
	}

	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
			  GSWIP_MAC_CTRL_0p(port));
	gswip_mdio_mask(priv,
			GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
			GSWIP_MDIO_PHYp(port));
}

/* Apply the resolved pause settings to the MAC flow control mode and the
 * per-port MDIO PHY register.
 */
static void gswip_port_set_pause(struct gswip_priv *priv, int port,
				 bool tx_pause, bool rx_pause)
{
	u32 mac_ctrl_0, mdio_phy;

	if (tx_pause && rx_pause) {
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
			   GSWIP_MDIO_PHY_FCONRX_EN;
	} else if (tx_pause) {
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
			   GSWIP_MDIO_PHY_FCONRX_DIS;
	} else if (rx_pause) {
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
			   GSWIP_MDIO_PHY_FCONRX_EN;
	} else {
		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
			   GSWIP_MDIO_PHY_FCONRX_DIS;
	}

	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
			  mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
	gswip_mdio_mask(priv,
			GSWIP_MDIO_PHY_FCONTX_MASK |
			GSWIP_MDIO_PHY_FCONRX_MASK,
			mdio_phy, GSWIP_MDIO_PHYp(port));
}

/* phylink .mac_config: select the xMII mode for the port and clear the
 * RGMII clock delays the switch would add when the PHY/interface mode
 * already provides them.
 */
static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
				     unsigned int mode,
				     const struct phylink_link_state *state)
{
	struct gswip_priv *priv = ds->priv;
	u32 miicfg = 0;

	miicfg |= GSWIP_MII_CFG_LDCLKDIS;

	switch (state->interface) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_INTERNAL:
		miicfg |= GSWIP_MII_CFG_MODE_MIIM;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		miicfg |= GSWIP_MII_CFG_MODE_MIIP;
		break;
	case PHY_INTERFACE_MODE_RMII:
		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		miicfg |= GSWIP_MII_CFG_MODE_RGMII;
		break;
	default:
		dev_err(ds->dev,
			"Unsupported interface: %d\n", state->interface);
		return;
	}

	gswip_mii_mask_cfg(priv,
			   GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
			   GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
			   miicfg, port);

	/* zero the TX/RX delays the delay is supplied elsewhere */
	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK |
					  GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_RXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port);
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port);
		break;
	default:
		break;
	}
}

/* phylink .mac_link_down: gate the xMII interface; the forced MDIO link
 * state is only touched for user ports.
 */
static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int mode,
					phy_interface_t interface)
{
	struct gswip_priv *priv = ds->priv;

	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);

	if (!dsa_is_cpu_port(ds, port))
		gswip_port_set_link(priv, port, false);
}

/* phylink .mac_link_up: program the resolved link parameters (user ports
 * only) and re-enable the xMII interface.
 */
static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int mode,
				      phy_interface_t interface,
				      struct phy_device *phydev,
				      int speed, int duplex,
				      bool tx_pause, bool rx_pause)
{
	struct gswip_priv *priv = ds->priv;

	if (!dsa_is_cpu_port(ds, port)) {
		gswip_port_set_link(priv, port, true);
		gswip_port_set_speed(priv, port, speed, interface);
		gswip_port_set_duplex(priv, port, duplex);
		gswip_port_set_pause(priv, port, tx_pause, rx_pause);
	}

	gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}

/* ethtool: copy the RMON counter names into the stats string table */
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
			      uint8_t *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
		strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name,
			ETH_GSTRING_LEN);
}

static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
				    u32
index) 1707{ 1708 u32 result; 1709 int err; 1710 1711 gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR); 1712 gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK | 1713 GSWIP_BM_RAM_CTRL_OPMOD, 1714 table | GSWIP_BM_RAM_CTRL_BAS, 1715 GSWIP_BM_RAM_CTRL); 1716 1717 err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL, 1718 GSWIP_BM_RAM_CTRL_BAS); 1719 if (err) { 1720 dev_err(priv->dev, "timeout while reading table: %u, index: %u", 1721 table, index); 1722 return 0; 1723 } 1724 1725 result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0)); 1726 result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16; 1727 1728 return result; 1729} 1730 1731static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port, 1732 uint64_t *data) 1733{ 1734 struct gswip_priv *priv = ds->priv; 1735 const struct gswip_rmon_cnt_desc *rmon_cnt; 1736 int i; 1737 u64 high; 1738 1739 for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) { 1740 rmon_cnt = &gswip_rmon_cnt[i]; 1741 1742 data[i] = gswip_bcm_ram_entry_read(priv, port, 1743 rmon_cnt->offset); 1744 if (rmon_cnt->size == 2) { 1745 high = gswip_bcm_ram_entry_read(priv, port, 1746 rmon_cnt->offset + 1); 1747 data[i] |= high << 32; 1748 } 1749 } 1750} 1751 1752static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset) 1753{ 1754 if (sset != ETH_SS_STATS) 1755 return 0; 1756 1757 return ARRAY_SIZE(gswip_rmon_cnt); 1758} 1759 1760static const struct dsa_switch_ops gswip_switch_ops = { 1761 .get_tag_protocol = gswip_get_tag_protocol, 1762 .setup = gswip_setup, 1763 .port_enable = gswip_port_enable, 1764 .port_disable = gswip_port_disable, 1765 .port_bridge_join = gswip_port_bridge_join, 1766 .port_bridge_leave = gswip_port_bridge_leave, 1767 .port_fast_age = gswip_port_fast_age, 1768 .port_vlan_filtering = gswip_port_vlan_filtering, 1769 .port_vlan_prepare = gswip_port_vlan_prepare, 1770 .port_vlan_add = gswip_port_vlan_add, 1771 .port_vlan_del = gswip_port_vlan_del, 1772 .port_stp_state_set = gswip_port_stp_state_set, 1773 
.port_fdb_add = gswip_port_fdb_add, 1774 .port_fdb_del = gswip_port_fdb_del, 1775 .port_fdb_dump = gswip_port_fdb_dump, 1776 .phylink_validate = gswip_phylink_validate, 1777 .phylink_mac_config = gswip_phylink_mac_config, 1778 .phylink_mac_link_down = gswip_phylink_mac_link_down, 1779 .phylink_mac_link_up = gswip_phylink_mac_link_up, 1780 .get_strings = gswip_get_strings, 1781 .get_ethtool_stats = gswip_get_ethtool_stats, 1782 .get_sset_count = gswip_get_sset_count, 1783}; 1784 1785static const struct xway_gphy_match_data xrx200a1x_gphy_data = { 1786 .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin", 1787 .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin", 1788}; 1789 1790static const struct xway_gphy_match_data xrx200a2x_gphy_data = { 1791 .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin", 1792 .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin", 1793}; 1794 1795static const struct xway_gphy_match_data xrx300_gphy_data = { 1796 .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin", 1797 .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin", 1798}; 1799 1800static const struct of_device_id xway_gphy_match[] = { 1801 { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL }, 1802 { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data }, 1803 { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data }, 1804 { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data }, 1805 { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data }, 1806 {}, 1807}; 1808 1809static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw) 1810{ 1811 struct device *dev = priv->dev; 1812 const struct firmware *fw; 1813 void *fw_addr; 1814 dma_addr_t dma_addr; 1815 dma_addr_t dev_addr; 1816 size_t size; 1817 int ret; 1818 1819 ret = clk_prepare_enable(gphy_fw->clk_gate); 1820 if (ret) 1821 return ret; 1822 1823 reset_control_assert(gphy_fw->reset); 1824 1825 ret = request_firmware(&fw, gphy_fw->fw_name, dev); 1826 if (ret) { 
		dev_err(dev, "failed to load firmware: %s, error: %i\n",
			gphy_fw->fw_name, ret);
		return ret;
	}

	/* GPHY cores need the firmware code in a persistent and contiguous
	 * memory area with a 16 kB boundary aligned start address.
	 */
	size = fw->size + XRX200_GPHY_FW_ALIGN;

	fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
	if (fw_addr) {
		/* over-allocate and align both the CPU and device address */
		fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
		dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
		memcpy(fw_addr, fw->data, fw->size);
	} else {
		dev_err(dev, "failed to alloc firmware memory\n");
		release_firmware(fw);
		return -ENOMEM;
	}

	release_firmware(fw);

	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr);
	if (ret)
		return ret;

	reset_control_deassert(gphy_fw->reset);

	return ret;
}

/* Parse one "gphy-fw" child node: gate clock, RCU address offset, GPHY
 * mode (FE/GE firmware selection) and reset line, then load the firmware.
 */
static int gswip_gphy_fw_probe(struct gswip_priv *priv,
			       struct gswip_gphy_fw *gphy_fw,
			       struct device_node *gphy_fw_np, int i)
{
	struct device *dev = priv->dev;
	u32 gphy_mode;
	int ret;
	char gphyname[10];

	snprintf(gphyname, sizeof(gphyname), "gphy%d", i);

	gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
	if (IS_ERR(gphy_fw->clk_gate)) {
		dev_err(dev, "Failed to lookup gate clock\n");
		return PTR_ERR(gphy_fw->clk_gate);
	}

	ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
	if (ret)
		return ret;

	ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode);
	/* Default to GE mode */
	if (ret)
		gphy_mode = GPHY_MODE_GE;

	switch (gphy_mode) {
	case GPHY_MODE_FE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name;
		break;
	case GPHY_MODE_GE:
		gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
		break;
	default:
		dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
		return -EINVAL;
	}

	gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
	if (IS_ERR(gphy_fw->reset)) {
		if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
			dev_err(dev, "Failed to lookup gphy reset\n");
		return PTR_ERR(gphy_fw->reset);
	}

	return gswip_gphy_fw_load(priv, gphy_fw);
}

/* Undo gswip_gphy_fw_probe(): clear the RCU firmware pointer, gate the
 * clock and release the reset line.
 */
static void gswip_gphy_fw_remove(struct gswip_priv *priv,
				 struct gswip_gphy_fw *gphy_fw)
{
	int ret;

	/* check if the device was fully probed */
	if (!gphy_fw->fw_name)
		return;

	ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
	if (ret)
		dev_err(priv->dev, "can not reset GPHY FW pointer");

	clk_disable_unprepare(gphy_fw->clk_gate);

	reset_control_put(gphy_fw->reset);
}

/* Probe all GPHY firmware child nodes of @gphy_fw_list_np, selecting the
 * firmware set from the compatible string and/or the GSWIP @version.
 */
static int gswip_gphy_fw_list(struct gswip_priv *priv,
			      struct device_node *gphy_fw_list_np, u32 version)
{
	struct device *dev = priv->dev;
	struct device_node *gphy_fw_np;
	const struct of_device_id *match;
	int err;
	int i = 0;

	/* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older
	 * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also
	 * needs a different GPHY firmware.
	 */
	if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) {
		switch (version) {
		case GSWIP_VERSION_2_0:
			priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data;
			break;
		case GSWIP_VERSION_2_1:
			priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
			break;
		default:
			dev_err(dev, "unknown GSWIP version: 0x%x", version);
			return -ENOENT;
		}
	}

	/* a revision-specific compatible overrides the generic fallback */
	match = of_match_node(xway_gphy_match, gphy_fw_list_np);
	if (match && match->data)
		priv->gphy_fw_name_cfg = match->data;

	if (!priv->gphy_fw_name_cfg) {
		dev_err(dev, "GPHY compatible type not supported");
		return -ENOENT;
	}

	priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
	if (!priv->num_gphy_fw)
		return -ENOENT;

	priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np,
							   "lantiq,rcu");
	if (IS_ERR(priv->rcu_regmap))
		return PTR_ERR(priv->rcu_regmap);

	priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw,
					   sizeof(*priv->gphy_fw),
					   GFP_KERNEL | __GFP_ZERO);
	if (!priv->gphy_fw)
		return -ENOMEM;

	for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
		err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
					  gphy_fw_np, i);
		if (err) {
			of_node_put(gphy_fw_np);
			goto remove_gphy;
		}
		i++;
	}

	/* The standalone PHY11G requires 300ms to be fully
	 * initialized and ready for any MDIO communication after being
	 * taken out of reset. For the SoC-internal GPHY variant there
	 * is no (known) documentation for the minimum time after a
	 * reset. Use the same value as for the standalone variant as
	 * some users have reported internal PHYs not being detected
	 * without any delay.
	 */
	msleep(300);

	return 0;

remove_gphy:
	/* gswip_gphy_fw_remove() skips entries that never finished probing */
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
	return err;
}

static int gswip_probe(struct platform_device *pdev)
{
	struct gswip_priv *priv;
	struct device_node *mdio_np, *gphy_fw_np;
	struct device *dev = &pdev->dev;
	int err;
	int i;
	u32 version;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* resources 0/1/2: switch core, MDIO and xMII register blocks */
	priv->gswip = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->gswip))
		return PTR_ERR(priv->gswip);

	priv->mdio = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(priv->mdio))
		return PTR_ERR(priv->mdio);

	priv->mii = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(priv->mii))
		return PTR_ERR(priv->mii);

	priv->hw_info = of_device_get_match_data(dev);
	if (!priv->hw_info)
		return -EINVAL;

	priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
	if (!priv->ds)
		return -ENOMEM;

	priv->ds->dev = dev;
	priv->ds->num_ports = priv->hw_info->max_ports;
	priv->ds->priv = priv;
	priv->ds->ops = &gswip_switch_ops;
	priv->dev = dev;
	version = gswip_switch_r(priv, GSWIP_VERSION);

	/* load the GPHY firmware, if described in the device tree */
	gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
	if (gphy_fw_np) {
		err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
		of_node_put(gphy_fw_np);
		if (err) {
			dev_err(dev, "gphy fw probe failed\n");
			return err;
		}
	}

	/* bring up the mdio bus */
	mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
	if (mdio_np) {
		err = gswip_mdio(priv, mdio_np);
		if (err) {
			dev_err(dev, "mdio probe failed\n");
			goto put_mdio_node;
		}
	}

	err = dsa_register_switch(priv->ds);
	if (err) {
		dev_err(dev,
			"dsa switch register failed: %i\n", err);
		goto mdio_bus;
	}
	/* the hardware wires the CPU path to one fixed port only */
	if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
		dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
			priv->hw_info->cpu_port);
		err = -EINVAL;
		goto disable_switch;
	}

	platform_set_drvdata(pdev, priv);

	dev_info(dev, "probed GSWIP version %lx mod %lx\n",
		 (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
		 (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
	return 0;

disable_switch:
	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
	dsa_unregister_switch(priv->ds);
mdio_bus:
	if (mdio_np)
		mdiobus_unregister(priv->ds->slave_mii_bus);
put_mdio_node:
	/* of_node_put(NULL) is a no-op when no mdio node was found */
	of_node_put(mdio_np);
	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
	return err;
}

static int gswip_remove(struct platform_device *pdev)
{
	struct gswip_priv *priv = platform_get_drvdata(pdev);
	int i;

	/* disable the switch */
	gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);

	dsa_unregister_switch(priv->ds);

	if (priv->ds->slave_mii_bus) {
		mdiobus_unregister(priv->ds->slave_mii_bus);
		of_node_put(priv->ds->slave_mii_bus->dev.of_node);
		mdiobus_free(priv->ds->slave_mii_bus);
	}

	for (i = 0; i < priv->num_gphy_fw; i++)
		gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);

	return 0;
}

/* VRX200: 7 switch ports, port 6 hard-wired as the CPU port */
static const struct gswip_hw_info gswip_xrx200 = {
	.max_ports = 7,
	.cpu_port = 6,
};

static const struct of_device_id gswip_of_match[] = {
	{ .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 },
	{},
};
MODULE_DEVICE_TABLE(of, gswip_of_match);

static struct platform_driver gswip_driver = {
	.probe = gswip_probe,
	.remove = gswip_remove,
	.driver = {
		.name = "gswip",
		.of_match_table = gswip_of_match,
	},
};

module_platform_driver(gswip_driver);

MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
MODULE_LICENSE("GPL v2");