// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 * @value: Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	iowrite32(value, lp->dma_regs + reg);
}

static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
				 dma_addr_t addr)
{
	axienet_dma_out32(lp, reg, lower_32_bits(addr));

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(ndev->dev.parent,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(ndev->dev.parent, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(ndev->dev.parent,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(ndev->dev.parent, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
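 * If more addresses are requested than the four-entry table can hold, or if
 * IFF_PROMISC or IFF_ALLMULTI is set, the core is put into promiscuous mode
 * instead of programming the address filters.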
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
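	 * Both the DMA reset below and the PhyRstCmplt wait that follows are
	 * polled with a 50 ms timeout; on timeout the error is returned to
	 * the caller.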
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		return ret;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		return ret;
	}

	return 0;
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 * Returns 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	ret = __axienet_device_reset(lp);
	if (ret)
		return ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
				   XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	ret = axienet_dma_bd_init(ndev);
	if (ret) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
		return ret;
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @ndev: Pointer to the net_device structure
 * @first_bd: Index of first descriptor to clean up
 * @nr_bds: Number of descriptors to clean up, can be -1 if unknown.
 * @sizep: Pointer to a u32 filled with the total sum of all bytes
 *	   in all cleaned-up descriptors. Ignored if NULL.
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 * Returns the number of descriptors handled.
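 * Passing -1 as nr_bds frees every descriptor the hardware has marked
 * complete; a positive count is used to unwind a partially set up chain
 * after a DMA mapping error in axienet_start_xmit.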
 */
static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
				 int nr_bds, u32 *sizep)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	int max_bds = nr_bds;
	unsigned int status;
	dma_addr_t phys;
	int i;

	if (max_bds == -1)
		max_bds = lp->tx_bd_num;

	for (i = 0; i < max_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If no number is given, clean up *all* descriptors that have
		 * been completed by the MAC.
		 */
		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(ndev->dev.parent, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
			dev_consume_skb_irq(cur_p->skb);

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	return i;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX IRQ path */
	rmb();
	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 *			     Axi DMA Tx channel.
 * @ndev: Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	u32 packets = 0;
	u32 size = 0;

	packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);

	lp->tx_bd_ci += packets;
	if (lp->tx_bd_ci >= lp->tx_bd_num)
		lp->tx_bd_ci -= lp->tx_bd_num;

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	/* Matches barrier in axienet_start_xmit */
	smp_mb();

	if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
		netif_wake_queue(ndev);
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	u32 orig_tail_ptr = lp->tx_bd_tail;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(ndev->dev.parent, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(ndev->dev.parent,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
					      NULL);
			lp->tx_bd_tail = orig_tail_ptr;

			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	if (++lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 *		  BD processing.
 * @ndev: Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
				   skb->protocol == htons(ETH_P_IP) &&
				   skb->len > 64) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			netif_rx(skb);

			size += length;
			packets++;
		}

		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(ndev->dev.parent, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
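 * On a DMA error it disables the coalesce, delay and error interrupts on
 * both channels and schedules the dma_err_task worker to reset and
 * reinitialise the DMA engine.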
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		schedule_work(&lp->dma_err_task);
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		schedule_work(&lp->dma_err_task);
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
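 * Receive FIFO overruns are accounted as rx_missed_errors and rejected
 * frames as rx_frame_errors; the pending bits are then acknowledged by
 * writing them back to the interrupt status register.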
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_frame_errors++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting
	 * and re-enabled afterwards.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	mutex_lock(&lp->mii_bus->mdio_lock);
	axienet_mdio_disable(lp);
	ret = axienet_device_reset(ndev);
	if (ret == 0)
		ret = axienet_mdio_enable(lp);
	mutex_unlock(&lp->mii_bus->mdio_lock);
	if (ret < 0)
		return ret;

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	/* Enable worker thread for Axi DMA error handling */
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
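 * Each DMA channel is polled for up to five 20 ms intervals to let it halt
 * gracefully before the DMA core is reset to guarantee that it has stopped.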
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr, sr;
	int count;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	mutex_lock(&lp->mii_bus->mdio_lock);
	axienet_mdio_disable(lp);
	__axienet_device_reset(lp);
	axienet_mdio_enable(lp);
	mutex_unlock(&lp->mii_bus->mdio_lock);

	cancel_work_sync(&lp->dma_err_task);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * Return: 0 on success, -EBUSY if the interface is up, or -EINVAL if the
 *	   requested MTU does not fit in the receive memory of the core.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
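 * Entries 23 to 26 of the dump are not populated below and therefore read
 * back as zero from the initial memset.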
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}

static void axienet_ethtools_get_ringparam(struct net_device *ndev,
					   struct ethtool_ringparam *ering)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int axienet_ethtools_set_ringparam(struct net_device *ndev,
					  struct ethtool_ringparam *ering)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending < TX_BD_NUM_MIN ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, or a negative error code returned by phylink.
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
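 * The new counts are only stored in the driver private data here; they are
 * programmed into the DMA channel control registers the next time the
 * interface is opened, when axienet_dma_bd_init() runs.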
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_ringparam = axienet_ethtools_get_ringparam,
	.set_ringparam = axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
};

static void axienet_validate(struct phylink_config *config,
			     unsigned long *supported,
			     struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Only support the mode we are configured for */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != lp->phy_mode) {
		netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
			    phy_modes(state->interface),
			    phy_modes(lp->phy_mode));
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);

	phylink_set(mask, Asym_Pause);
	phylink_set(mask, Pause);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_NA:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 1000baseT_Full);
		if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
			break;
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 10baseT_Full);
	default:
		break;
	}

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static void
axienet_mac_pcs_get_state(struct phylink_config *config,
			  struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		phylink_mii_c22_pcs_get_state(lp->pcs_phy, state);
		break;
	default:
		break;
	}
}

static void axienet_mac_an_restart(struct phylink_config *config)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_mii_c22_pcs_an_restart(lp->pcs_phy);
}

static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	switch (state->interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode,
						 state->interface,
						 state->advertising);
		if (ret < 0)
			netdev_warn(ndev, "Failed to configure PCS: %d\n",
				    ret);
		break;

	default:
		break;
	}
}

static void axienet_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

	switch (speed) {
	case SPEED_1000:
		emmc_reg |= XAE_EMMC_LINKSPD_1000;
		break;
	case SPEED_100:
		emmc_reg |= XAE_EMMC_LINKSPD_100;
		break;
	case SPEED_10:
		emmc_reg |= XAE_EMMC_LINKSPD_10;
		break;
	default:
		dev_err(&ndev->dev,
			"Speed other than 10, 100 or 1Gbps is not supported\n");
		break;
	}

	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	if (tx_pause)
		fcc_reg |= XAE_FCC_FCTX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCTX_MASK;
	if (rx_pause)
		fcc_reg |= XAE_FCC_FCRX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}

static const struct phylink_mac_ops axienet_phylink_ops = {
	.validate = axienet_validate,
	.mac_pcs_get_state = axienet_mac_pcs_get_state,
	.mac_an_restart = axienet_mac_an_restart,
	.mac_config = axienet_mac_config,
	.mac_link_down = axienet_mac_link_down,
	.mac_link_up = axienet_mac_link_up,
};

/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * @work: pointer to work_struct
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
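 * This runs from the dma_err_task work queue, which is scheduled by the
 * Tx/Rx interrupt handlers when they see an error status on a DMA channel.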
 */
static void axienet_dma_err_handler(struct work_struct *work)
{
	u32 axienet_status;
	u32 cr, i;
	struct axienet_local *lp = container_of(work, struct axienet_local,
						dma_err_task);
	struct net_device *ndev = lp->ndev;
	struct axidma_bd *cur_p;

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting
	 * and re-enabled afterwards.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	mutex_lock(&lp->mii_bus->mdio_lock);
	axienet_mdio_disable(lp);
	__axienet_device_reset(lp);
	axienet_mdio_enable(lp);
	mutex_unlock(&lp->mii_bus->mdio_lock);

	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->cntrl) {
			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

			dma_unmap_single(ndev->dev.parent, addr,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		}
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->phys_msb = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->cntrl) {
			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

			dma_unmap_single(ndev->dev.parent, addr,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		}
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->phys_msb = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register will the Tx channel start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
}

/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev: Pointer to platform device structure.
 *
 * Return: 0 on success, or a non-zero error value on failure.
 *
 * This is the probe routine for the Axi Ethernet driver. It is called before
 * any other driver routines are invoked. It allocates and sets up the
 * Ethernet device, parses the device tree to populate the fields of
 * axienet_local, and registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	const void *mac_addr;
	struct resource *ethres;
	int addr_width = 32;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;

	lp->clk = devm_clk_get_optional(&pdev->dev, NULL);
	if (IS_ERR(lp->clk)) {
		ret = PTR_ERR(lp->clk);
		goto free_netdev;
	}
	ret = clk_prepare_enable(lp->clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable clock: %d\n", ret);
		goto free_netdev;
	}

	/* Map device registers */
	ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
	if (IS_ERR(lp->regs)) {
		dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
		ret = PTR_ERR(lp->regs);
		goto cleanup_clk;
	}
	lp->regs_start = ethres->start;

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

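	/* The optional "xlnx,txcsum" and "xlnx,rxcsum" properties describe the
	 * checksum capability built into the core: 1 = partial (TCP/UDP over
	 * IPv4), 2 = full, anything else means no offload.
	 */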
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* Supporting jumbo frames requires the Axi Ethernet core to be
	 * configured with sufficiently large Rx/Tx memory. Record the Rx
	 * memory size from the device tree so the jumbo option is only
	 * enabled when the hardware can actually buffer such frames.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	/* Start with the proprietary, and broken phy_type */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			ret = -EINVAL;
			goto cleanup_clk;
		}
	} else {
		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
		if (ret)
			goto cleanup_clk;
	}

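	/* The connected DMA engine is described either by a dedicated node
	 * referenced through the "axistream-connected" phandle, or by extra
	 * memory and interrupt resources placed directly on the Ethernet node.
	 */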
	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (np) {
		struct resource dmares;

		ret = of_address_to_resource(np, 0, &dmares);
		if (ret) {
			dev_err(&pdev->dev,
				"unable to get DMA resource\n");
			of_node_put(np);
			goto cleanup_clk;
		}
		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
						     &dmares);
		lp->rx_irq = irq_of_parse_and_map(np, 1);
		lp->tx_irq = irq_of_parse_and_map(np, 0);
		of_node_put(np);
		lp->eth_irq = platform_get_irq_optional(pdev, 0);
	} else {
		/* Check for these resources directly on the Ethernet node. */
		struct resource *res = platform_get_resource(pdev,
							     IORESOURCE_MEM, 1);
		lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
		lp->rx_irq = platform_get_irq(pdev, 1);
		lp->tx_irq = platform_get_irq(pdev, 0);
		lp->eth_irq = platform_get_irq_optional(pdev, 2);
	}
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		goto cleanup_clk;
	}
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto cleanup_clk;
	}

	/* Reset core now that clocks are enabled, prior to accessing MDIO */
	ret = __axienet_device_reset(lp);
	if (ret)
		goto cleanup_clk;

	/* Autodetect the need for 64-bit DMA pointers.
	 * When the IP is configured for a bus width bigger than 32 bits,
	 * writing the MSB registers is mandatory, even if they are all 0.
	 * We can detect this case by writing all 1's to one such register
	 * and see if that sticks: when the IP is configured for 32 bits
	 * only, those registers are RES0.
	 * Those MSB registers were introduced in IP v7.1, which we check first.
	 */
	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

		iowrite32(0x0, desc);
		if (ioread32(desc) == 0) {	/* sanity check */
			iowrite32(0xffffffff, desc);
			if (ioread32(desc) > 0) {
				lp->features |= XAE_FEATURE_DMA_64BIT;
				addr_width = 64;
				dev_info(&pdev->dev,
					 "autodetected 64-bit DMA range\n");
			}
			iowrite32(0x0, desc);
		}
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
	if (ret) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto cleanup_clk;
	}

	/* Check for Ethernet core IRQ (optional) */
	if (lp->eth_irq <= 0)
		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

	/* Retrieve the MAC address */
	mac_addr = of_get_mac_address(pdev->dev.of_node);
	if (IS_ERR(mac_addr)) {
		dev_warn(&pdev->dev, "could not find MAC address property: %ld\n",
			 PTR_ERR(mac_addr));
		mac_addr = NULL;
	}
	axienet_set_mac_address(ndev, mac_addr);

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	ret = axienet_mdio_setup(lp);
	if (ret)
		dev_warn(&pdev->dev,
			 "error registering MDIO bus: %d\n", ret);

	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
		lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		if (!lp->phy_node) {
			dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
			ret = -EINVAL;
			goto cleanup_mdio;
		}
		lp->pcs_phy = of_mdio_find_device(lp->phy_node);
		if (!lp->pcs_phy) {
			ret = -EPROBE_DEFER;
			goto cleanup_mdio;
		}
		lp->phylink_config.pcs_poll = true;
	}

	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     &axienet_phylink_ops);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto cleanup_mdio;
	}

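	/* All resources are in place; registering the netdev makes the
	 * interface visible to the networking stack.
	 */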
	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto cleanup_phylink;
	}

	return 0;

cleanup_phylink:
	phylink_destroy(lp->phylink);

cleanup_mdio:
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);
	if (lp->mii_bus)
		axienet_mdio_teardown(lp);
	of_node_put(lp->phy_node);

cleanup_clk:
	clk_disable_unprepare(lp->clk);

free_netdev:
	free_netdev(ndev);

	return ret;
}

static int axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);

	axienet_mdio_teardown(lp);

	clk_disable_unprepare(lp->clk);

	of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	free_netdev(ndev);

	return 0;
}

static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(ndev);

	if (netif_running(ndev))
		dev_close(ndev);

	rtnl_unlock();
}

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.shutdown = axienet_shutdown,
	.driver = {
		 .name = "xilinx_axienet",
		 .of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");