// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
 *
 * Mostly rewritten, based on driver from Sigma Designs. Original
 * copyright notice below.
 *
 * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
 *
 * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/dma-mapping.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <asm/barrier.h>

#include "nb8800.h"

static void nb8800_tx_done(struct net_device *dev);
static int nb8800_dma_stop(struct net_device *dev);

static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg)
{
	return readb_relaxed(priv->base + reg);
}

static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg)
{
	return readl_relaxed(priv->base + reg);
}

static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val)
{
	writeb_relaxed(val, priv->base + reg);
}

static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val)
{
	writew_relaxed(val, priv->base + reg);
}

static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val)
{
	writel_relaxed(val, priv->base + reg);
}

static inline void nb8800_maskb(struct nb8800_priv *priv, int reg,
				u32 mask, u32 val)
{
	u32 old = nb8800_readb(priv, reg);
	u32 new = (old & ~mask) | (val & mask);

	if (new != old)
		nb8800_writeb(priv, reg, new);
}

static inline void nb8800_maskl(struct nb8800_priv *priv, int reg,
				u32 mask, u32 val)
{
	u32 old = nb8800_readl(priv, reg);
	u32 new = (old & ~mask) | (val & mask);

	if (new != old)
		nb8800_writel(priv, reg, new);
}

static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits,
			       bool set)
{
	nb8800_maskb(priv, reg, bits, set ? bits : 0);
}

static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits)
{
	nb8800_maskb(priv, reg, bits, bits);
}

static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits)
{
	nb8800_maskb(priv, reg, bits, 0);
}

static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits,
			       bool set)
{
	nb8800_maskl(priv, reg, bits, set ? bits : 0);
}

static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits)
{
	nb8800_maskl(priv, reg, bits, bits);
}

static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits)
{
	nb8800_maskl(priv, reg, bits, 0);
}

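/*
 * MDIO bus access. A command is written to NB8800_MDIO_CMD and kicked
 * off by setting MDIO_CMD_GO; the hardware clears GO again once the
 * transfer has completed.
 */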
static int nb8800_mdio_wait(struct mii_bus *bus)
{
	struct nb8800_priv *priv = bus->priv;
	u32 val;

	return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD,
					 val, !(val & MDIO_CMD_GO), 1, 1000);
}

static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd)
{
	struct nb8800_priv *priv = bus->priv;
	int err;

	err = nb8800_mdio_wait(bus);
	if (err)
		return err;

	nb8800_writel(priv, NB8800_MDIO_CMD, cmd);
	udelay(10);
	nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO);

	return nb8800_mdio_wait(bus);
}

static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nb8800_priv *priv = bus->priv;
	u32 val;
	int err;

	err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg));
	if (err)
		return err;

	val = nb8800_readl(priv, NB8800_MDIO_STS);
	if (val & MDIO_STS_ERR)
		return 0xffff;

	return val & 0xffff;
}

static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) |
		MDIO_CMD_DATA(val) | MDIO_CMD_WR;

	return nb8800_mdio_cmd(bus, cmd);
}

static void nb8800_mac_tx(struct net_device *dev, bool enable)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN)
		cpu_relax();

	nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable);
}

static void nb8800_mac_rx(struct net_device *dev, bool enable)
{
	nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable);
}

static void nb8800_mac_af(struct net_device *dev, bool enable)
{
	nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable);
}

static void nb8800_start_rx(struct net_device *dev)
{
	nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN);
}

static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
	struct nb8800_rx_buf *rxb = &priv->rx_bufs[i];
	int size = L1_CACHE_ALIGN(RX_BUF_SIZE);
	dma_addr_t dma_addr;
	struct page *page;
	unsigned long offset;
	void *data;

	data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size);
	if (!data)
		return -ENOMEM;

	page = virt_to_head_page(data);
	offset = data - page_address(page);

	dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
				DMA_FROM_DEVICE);

	if (dma_mapping_error(&dev->dev, dma_addr)) {
		skb_free_frag(data);
		return -ENOMEM;
	}

	rxb->page = page;
	rxb->offset = offset;
	rxd->desc.s_addr = dma_addr;

	return 0;
}

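/*
 * Pass a received frame up the stack. Frames up to RX_COPYBREAK bytes
 * are copied into a fresh skb so the DMA buffer can be reused as-is.
 * For larger frames, only the first RX_COPYHDR bytes are copied into
 * the skb head; the rest of the buffer is attached as a page fragment
 * and a new buffer is allocated to take its place in the ring.
 */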
static void nb8800_receive(struct net_device *dev, unsigned int i,
			   unsigned int len)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
	struct page *page = priv->rx_bufs[i].page;
	int offset = priv->rx_bufs[i].offset;
	void *data = page_address(page) + offset;
	dma_addr_t dma = rxd->desc.s_addr;
	struct sk_buff *skb;
	unsigned int size;
	int err;

	size = len <= RX_COPYBREAK ? len : RX_COPYHDR;

	skb = napi_alloc_skb(&priv->napi, size);
	if (!skb) {
		netdev_err(dev, "rx skb allocation failed\n");
		dev->stats.rx_dropped++;
		return;
	}

	if (len <= RX_COPYBREAK) {
		dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
		skb_put_data(skb, data, len);
		dma_sync_single_for_device(&dev->dev, dma, len,
					   DMA_FROM_DEVICE);
	} else {
		err = nb8800_alloc_rx(dev, i, true);
		if (err) {
			netdev_err(dev, "rx buffer allocation failed\n");
			dev->stats.rx_dropped++;
			dev_kfree_skb(skb);
			return;
		}

		dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
		skb_put_data(skb, data, RX_COPYHDR);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset + RX_COPYHDR, len - RX_COPYHDR,
				RX_BUF_SIZE);
	}

	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(&priv->napi, skb);
}

static void nb8800_rx_error(struct net_device *dev, u32 report)
{
	if (report & RX_LENGTH_ERR)
		dev->stats.rx_length_errors++;

	if (report & RX_FCS_ERR)
		dev->stats.rx_crc_errors++;

	if (report & RX_FIFO_OVERRUN)
		dev->stats.rx_fifo_errors++;

	if (report & RX_ALIGNMENT_ERROR)
		dev->stats.rx_frame_errors++;

	dev->stats.rx_errors++;
}

static int nb8800_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd;
	unsigned int last = priv->rx_eoc;
	unsigned int next;
	int work = 0;

	nb8800_tx_done(dev);

again:
	do {
		unsigned int len;

		next = (last + 1) % RX_DESC_COUNT;

		rxd = &priv->rx_descs[next];

		if (!rxd->report)
			break;

		len = RX_BYTES_TRANSFERRED(rxd->report);

		if (IS_RX_ERROR(rxd->report))
			nb8800_rx_error(dev, rxd->report);
		else
			nb8800_receive(dev, next, len);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;

		if (rxd->report & RX_MULTICAST_PKT)
			dev->stats.multicast++;

		rxd->report = 0;
		last = next;
		work++;
	} while (work < budget);

	if (work) {
		priv->rx_descs[last].desc.config |= DESC_EOC;
		wmb();	/* ensure new EOC is written before clearing old */
		priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC;
		priv->rx_eoc = last;
		nb8800_start_rx(dev);
	}

	if (work < budget) {
		nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);

		/* If a packet arrived after we last checked but
		 * before writing RX_ITR, the interrupt will be
		 * delayed, so we retrieve it now.
		 */
		if (priv->rx_descs[next].report)
			goto again;

		napi_complete_done(napi, work);
	}

	return work;
}

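/*
 * Hand the next ready descriptor chain to the tx DMA engine, unless it
 * is still busy with a previous chain. Callers must hold tx_lock; the
 * _irq variant below is for contexts that already run with interrupts
 * disabled.
 */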
static void __nb8800_tx_dma_start(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_buf *txb;
	u32 txc_cr;

	txb = &priv->tx_bufs[priv->tx_queue];
	if (!txb->ready)
		return;

	txc_cr = nb8800_readl(priv, NB8800_TXC_CR);
	if (txc_cr & TCR_EN)
		return;

	nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
	wmb();	/* ensure desc addr is written before starting DMA */
	nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN);

	priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT;
}

static void nb8800_tx_dma_start(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	spin_lock_irq(&priv->tx_lock);
	__nb8800_tx_dma_start(dev);
	spin_unlock_irq(&priv->tx_lock);
}

static void nb8800_tx_dma_start_irq(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	spin_lock(&priv->tx_lock);
	__nb8800_tx_dma_start(dev);
	spin_unlock(&priv->tx_lock);
}

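/*
 * Queue a frame for transmission. If skb->data is not 8-byte aligned,
 * the first few bytes are staged in the bounce buffer embedded in the
 * descriptor and sent through an extra chained buffer descriptor so
 * that the main payload transfer starts on an aligned address.
 */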
static netdev_tx_t nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_desc *txd;
	struct nb8800_tx_buf *txb;
	struct nb8800_dma_desc *desc;
	dma_addr_t dma_addr;
	unsigned int dma_len;
	unsigned int align;
	unsigned int next;
	bool xmit_more;

	if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	align = (8 - (uintptr_t)skb->data) & 7;

	dma_len = skb->len - align;
	dma_addr = dma_map_single(&dev->dev, skb->data + align,
				  dma_len, DMA_TO_DEVICE);

	if (dma_mapping_error(&dev->dev, dma_addr)) {
		netdev_err(dev, "tx dma mapping error\n");
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	xmit_more = netdev_xmit_more();
	if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
		netif_stop_queue(dev);
		xmit_more = false;
	}

	next = priv->tx_next;
	txb = &priv->tx_bufs[next];
	txd = &priv->tx_descs[next];
	desc = &txd->desc[0];

	next = (next + 1) % TX_DESC_COUNT;

	if (align) {
		memcpy(txd->buf, skb->data, align);

		desc->s_addr =
			txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
		desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]);
		desc->config = DESC_BTS(2) | DESC_DS | align;

		desc++;
	}

	desc->s_addr = dma_addr;
	desc->n_addr = priv->tx_bufs[next].dma_desc;
	desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;

	if (!xmit_more)
		desc->config |= DESC_EOC;

	txb->skb = skb;
	txb->dma_addr = dma_addr;
	txb->dma_len = dma_len;

	if (!priv->tx_chain) {
		txb->chain_len = 1;
		priv->tx_chain = txb;
	} else {
		priv->tx_chain->chain_len++;
	}

	netdev_sent_queue(dev, skb->len);

	priv->tx_next = next;

	if (!xmit_more) {
		smp_wmb();
		priv->tx_chain->ready = true;
		priv->tx_chain = NULL;
		nb8800_tx_dma_start(dev);
	}

	return NETDEV_TX_OK;
}

static void nb8800_tx_error(struct net_device *dev, u32 report)
{
	if (report & TX_LATE_COLLISION)
		dev->stats.collisions++;

	if (report & TX_PACKET_DROPPED)
		dev->stats.tx_dropped++;

	if (report & TX_FIFO_UNDERRUN)
		dev->stats.tx_fifo_errors++;

	dev->stats.tx_errors++;
}

static void nb8800_tx_done(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int limit = priv->tx_next;
	unsigned int done = priv->tx_done;
	unsigned int packets = 0;
	unsigned int len = 0;

	while (done != limit) {
		struct nb8800_tx_desc *txd = &priv->tx_descs[done];
		struct nb8800_tx_buf *txb = &priv->tx_bufs[done];
		struct sk_buff *skb;

		if (!txd->report)
			break;

		skb = txb->skb;
		len += skb->len;

		dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len,
				 DMA_TO_DEVICE);

		if (IS_TX_ERROR(txd->report)) {
			nb8800_tx_error(dev, txd->report);
			kfree_skb(skb);
		} else {
			consume_skb(skb);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report);
		dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report);

		txb->skb = NULL;
		txb->ready = false;
		txd->report = 0;

		done = (done + 1) % TX_DESC_COUNT;
		packets++;
	}

	if (packets) {
		smp_mb__before_atomic();
		atomic_add(packets, &priv->tx_free);
		netdev_completed_queue(dev, packets, len);
		netif_wake_queue(dev);
		priv->tx_done = done;
	}
}

static irqreturn_t nb8800_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct nb8800_priv *priv = netdev_priv(dev);
	irqreturn_t ret = IRQ_NONE;
	u32 val;

	/* tx interrupt */
	val = nb8800_readl(priv, NB8800_TXC_SR);
	if (val) {
		nb8800_writel(priv, NB8800_TXC_SR, val);

		if (val & TSR_DI)
			nb8800_tx_dma_start_irq(dev);

		if (val & TSR_TI)
			napi_schedule_irqoff(&priv->napi);

		if (unlikely(val & TSR_DE))
			netdev_err(dev, "TX DMA error\n");

		/* should never happen with automatic status retrieval */
		if (unlikely(val & TSR_TO))
			netdev_err(dev, "TX Status FIFO overflow\n");

		ret = IRQ_HANDLED;
	}

	/* rx interrupt */
	val = nb8800_readl(priv, NB8800_RXC_SR);
	if (val) {
		nb8800_writel(priv, NB8800_RXC_SR, val);

		if (likely(val & (RSR_RI | RSR_DI))) {
			nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll);
			napi_schedule_irqoff(&priv->napi);
		}

		if (unlikely(val & RSR_DE))
			netdev_err(dev, "RX DMA error\n");

		/* should never happen with automatic status retrieval */
		if (unlikely(val & RSR_RO))
			netdev_err(dev, "RX Status FIFO overflow\n");

		ret = IRQ_HANDLED;
	}

	return ret;
}

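/*
 * Apply speed/duplex dependent MAC settings. Gigabit selects GMAC
 * mode (plus RGMII signalling when the PHY interface is RGMII) with a
 * 125 MHz PHY clock; 10/100 uses a 25 MHz PHY clock. Slot time and
 * the IC threshold are derived from those rates.
 */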
static void nb8800_mac_config(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	bool gigabit = priv->speed == SPEED_1000;
	u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE;
	u32 mac_mode = 0;
	u32 slot_time;
	u32 phy_clk;
	u32 ict;

	if (!priv->duplex)
		mac_mode |= HALF_DUPLEX;

	if (gigabit) {
		if (phy_interface_is_rgmii(dev->phydev))
			mac_mode |= RGMII_MODE;

		mac_mode |= GMAC_MODE;
		phy_clk = 125000000;

		/* Should be 512 but register is only 8 bits */
		slot_time = 255;
	} else {
		phy_clk = 25000000;
		slot_time = 128;
	}

	ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk));

	nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict);
	nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time);
	nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode);
}

static void nb8800_pause_config(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 rxcr;

	if (priv->pause_aneg) {
		if (!phydev || !phydev->link)
			return;

		priv->pause_rx = phydev->pause;
		priv->pause_tx = phydev->pause ^ phydev->asym_pause;
	}

	nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx);

	rxcr = nb8800_readl(priv, NB8800_RXC_CR);
	if (!!(rxcr & RCR_FL) == priv->pause_tx)
		return;

	if (netif_running(dev)) {
		napi_disable(&priv->napi);
		netif_tx_lock_bh(dev);
		nb8800_dma_stop(dev);
		nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
		nb8800_start_rx(dev);
		netif_tx_unlock_bh(dev);
		napi_enable(&priv->napi);
	} else {
		nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
	}
}

static void nb8800_link_reconfigure(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int change = 0;

	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			priv->speed = phydev->speed;
			change = 1;
		}

		if (phydev->duplex != priv->duplex) {
			priv->duplex = phydev->duplex;
			change = 1;
		}

		if (change)
			nb8800_mac_config(dev);

		nb8800_pause_config(dev);
	}

	if (phydev->link != priv->link) {
		priv->link = phydev->link;
		change = 1;
	}

	if (change)
		phy_print_status(phydev);
}

static void nb8800_update_mac_addr(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]);

	for (i = 0; i < ETH_ALEN; i++)
		nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]);
}

static int nb8800_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sock = addr;

	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, sock->sa_data);
	nb8800_update_mac_addr(dev);

	return 0;
}

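/*
 * Run an address filter command and wait for the hardware to clear it.
 * As used below, writing 0 resets the filter table and 0xff latches in
 * the address previously loaded into the MC_ADDR registers.
 */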
static void nb8800_mc_init(struct net_device *dev, int val)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	nb8800_writeb(priv, NB8800_MC_INIT, val);
	readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val,
				  1, 1000);
}

static void nb8800_set_rx_mode(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		nb8800_mac_af(dev, false);
		return;
	}

	nb8800_mac_af(dev, true);
	nb8800_mc_init(dev, 0);

	netdev_for_each_mc_addr(ha, dev) {
		for (i = 0; i < ETH_ALEN; i++)
			nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]);

		nb8800_mc_init(dev, 0xff);
	}
}

#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))

static void nb8800_dma_free(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int i;

	if (priv->rx_bufs) {
		for (i = 0; i < RX_DESC_COUNT; i++)
			if (priv->rx_bufs[i].page)
				put_page(priv->rx_bufs[i].page);

		kfree(priv->rx_bufs);
		priv->rx_bufs = NULL;
	}

	if (priv->tx_bufs) {
		for (i = 0; i < TX_DESC_COUNT; i++)
			kfree_skb(priv->tx_bufs[i].skb);

		kfree(priv->tx_bufs);
		priv->tx_bufs = NULL;
	}

	if (priv->rx_descs) {
		dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs,
				  priv->rx_desc_dma);
		priv->rx_descs = NULL;
	}

	if (priv->tx_descs) {
		dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs,
				  priv->tx_desc_dma);
		priv->tx_descs = NULL;
	}
}

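/*
 * (Re)initialise the descriptor rings. The rx descriptors form a
 * circular chain with the EOC flag set on the last entry, while tx
 * descriptors are chained on demand in nb8800_xmit().
 */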
static void nb8800_dma_reset(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd;
	struct nb8800_tx_desc *txd;
	unsigned int i;

	for (i = 0; i < RX_DESC_COUNT; i++) {
		dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);

		rxd = &priv->rx_descs[i];
		rxd->desc.n_addr = rx_dma + sizeof(*rxd);
		rxd->desc.r_addr =
			rx_dma + offsetof(struct nb8800_rx_desc, report);
		rxd->desc.config = priv->rx_dma_config;
		rxd->report = 0;
	}

	rxd->desc.n_addr = priv->rx_desc_dma;
	rxd->desc.config |= DESC_EOC;

	priv->rx_eoc = RX_DESC_COUNT - 1;

	for (i = 0; i < TX_DESC_COUNT; i++) {
		struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
		dma_addr_t r_dma = txb->dma_desc +
			offsetof(struct nb8800_tx_desc, report);

		txd = &priv->tx_descs[i];
		txd->desc[0].r_addr = r_dma;
		txd->desc[1].r_addr = r_dma;
		txd->report = 0;
	}

	priv->tx_next = 0;
	priv->tx_queue = 0;
	priv->tx_done = 0;
	atomic_set(&priv->tx_free, TX_DESC_COUNT);

	nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);

	wmb();	/* ensure all setup is written before starting */
}

static int nb8800_dma_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int n_rx = RX_DESC_COUNT;
	unsigned int n_tx = TX_DESC_COUNT;
	unsigned int i;
	int err;

	priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE,
					    &priv->rx_desc_dma, GFP_KERNEL);
	if (!priv->rx_descs)
		goto err_out;

	priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL);
	if (!priv->rx_bufs)
		goto err_out;

	for (i = 0; i < n_rx; i++) {
		err = nb8800_alloc_rx(dev, i, false);
		if (err)
			goto err_out;
	}

	priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE,
					    &priv->tx_desc_dma, GFP_KERNEL);
	if (!priv->tx_descs)
		goto err_out;

	priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL);
	if (!priv->tx_bufs)
		goto err_out;

	for (i = 0; i < n_tx; i++)
		priv->tx_bufs[i].dma_desc =
			priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc);

	nb8800_dma_reset(dev);

	return 0;

err_out:
	nb8800_dma_free(dev);

	return -ENOMEM;
}

static int nb8800_dma_stop(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
	struct nb8800_tx_desc *txd = &priv->tx_descs[0];
	int retry = 5;
	u32 txcr;
	u32 rxcr;
	int err;
	unsigned int i;

	/* wait for tx to finish */
	err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
					!(txcr & TCR_EN) &&
					priv->tx_done == priv->tx_next,
					1000, 1000000);
	if (err)
		return err;

	/* The rx DMA only stops if it reaches the end of chain.
	 * To make this happen, we set the EOC flag on all rx
	 * descriptors, put the device in loopback mode, and send
	 * a few dummy frames. The interrupt handler will ignore
	 * these since NAPI is disabled and no real frames are in
	 * the tx queue.
	 */

	for (i = 0; i < RX_DESC_COUNT; i++)
		priv->rx_descs[i].desc.config |= DESC_EOC;

	txd->desc[0].s_addr =
		txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
	txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
	memset(txd->buf, 0, sizeof(txd->buf));

	nb8800_mac_af(dev, false);
	nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);

	do {
		nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
		wmb();
		nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);

		err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
						rxcr, !(rxcr & RCR_EN),
						1000, 100000);
	} while (err && --retry);

	nb8800_mac_af(dev, true);
	nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
	nb8800_dma_reset(dev);

	return retry ? 0 : -ETIMEDOUT;
}

static void nb8800_pause_adv(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, priv->pause_rx, priv->pause_tx);
}

static int nb8800_open(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	int err;

	/* clear any pending interrupts */
	nb8800_writel(priv, NB8800_RXC_SR, 0xf);
	nb8800_writel(priv, NB8800_TXC_SR, 0xf);

	err = nb8800_dma_init(dev);
	if (err)
		return err;

	err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev);
	if (err)
		goto err_free_dma;

	nb8800_mac_rx(dev, true);
	nb8800_mac_tx(dev, true);

	phydev = of_phy_connect(dev, priv->phy_node,
				nb8800_link_reconfigure, 0,
				priv->phy_mode);
	if (!phydev) {
		err = -ENODEV;
		goto err_free_irq;
	}

	nb8800_pause_adv(dev);

	netdev_reset_queue(dev);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	nb8800_start_rx(dev);
	phy_start(phydev);

	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
err_free_dma:
	nb8800_dma_free(dev);

	return err;
}

static int nb8800_stop(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	phy_stop(phydev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	nb8800_dma_stop(dev);
	nb8800_mac_rx(dev, false);
	nb8800_mac_tx(dev, false);

	phy_disconnect(phydev);

	free_irq(dev->irq, dev);

	nb8800_dma_free(dev);

	return 0;
}

static const struct net_device_ops nb8800_netdev_ops = {
	.ndo_open		= nb8800_open,
	.ndo_stop		= nb8800_stop,
	.ndo_start_xmit		= nb8800_xmit,
	.ndo_set_mac_address	= nb8800_set_mac_address,
	.ndo_set_rx_mode	= nb8800_set_rx_mode,
	.ndo_do_ioctl		= phy_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
};

static void nb8800_get_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pp)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	pp->autoneg = priv->pause_aneg;
	pp->rx_pause = priv->pause_rx;
	pp->tx_pause = priv->pause_tx;
}

static int nb8800_set_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pp)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	priv->pause_aneg = pp->autoneg;
	priv->pause_rx = pp->rx_pause;
	priv->pause_tx = pp->tx_pause;

	nb8800_pause_adv(dev);

	if (!priv->pause_aneg)
		nb8800_pause_config(dev);
	else if (phydev)
		phy_start_aneg(phydev);

	return 0;
}

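/*
 * Hardware statistics, read through the STAT_INDEX/STAT_DATA window.
 * Each rx counter lives at index i with its tx counterpart at i | 0x80,
 * so the string table lists all rx names followed by all tx names in
 * matching order.
 */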
static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
	"rx_bytes_ok",
	"rx_frames_ok",
	"rx_undersize_frames",
	"rx_fragment_frames",
	"rx_64_byte_frames",
	"rx_127_byte_frames",
	"rx_255_byte_frames",
	"rx_511_byte_frames",
	"rx_1023_byte_frames",
	"rx_max_size_frames",
	"rx_oversize_frames",
	"rx_bad_fcs_frames",
	"rx_broadcast_frames",
	"rx_multicast_frames",
	"rx_control_frames",
	"rx_pause_frames",
	"rx_unsup_control_frames",
	"rx_align_error_frames",
	"rx_overrun_frames",
	"rx_jabber_frames",
	"rx_bytes",
	"rx_frames",

	"tx_bytes_ok",
	"tx_frames_ok",
	"tx_64_byte_frames",
	"tx_127_byte_frames",
	"tx_255_byte_frames",
	"tx_511_byte_frames",
	"tx_1023_byte_frames",
	"tx_max_size_frames",
	"tx_oversize_frames",
	"tx_broadcast_frames",
	"tx_multicast_frames",
	"tx_control_frames",
	"tx_pause_frames",
	"tx_underrun_frames",
	"tx_single_collision_frames",
	"tx_multi_collision_frames",
	"tx_deferred_collision_frames",
	"tx_late_collision_frames",
	"tx_excessive_collision_frames",
	"tx_bytes",
	"tx_frames",
	"tx_collisions",
};

#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)

static int nb8800_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return NB8800_NUM_STATS;

	return -EOPNOTSUPP;
}

static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf)
{
	if (sset == ETH_SS_STATS)
		memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names));
}

static u32 nb8800_read_stat(struct net_device *dev, int index)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	nb8800_writeb(priv, NB8800_STAT_INDEX, index);

	return nb8800_readl(priv, NB8800_STAT_DATA);
}

static void nb8800_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *estats, u64 *st)
{
	unsigned int i;
	u32 rx, tx;

	for (i = 0; i < NB8800_NUM_STATS / 2; i++) {
		rx = nb8800_read_stat(dev, i);
		tx = nb8800_read_stat(dev, i | 0x80);
		st[i] = rx;
		st[i + NB8800_NUM_STATS / 2] = tx;
	}
}

static const struct ethtool_ops nb8800_ethtool_ops = {
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= nb8800_get_pauseparam,
	.set_pauseparam		= nb8800_set_pauseparam,
	.get_sset_count		= nb8800_get_sset_count,
	.get_strings		= nb8800_get_strings,
	.get_ethtool_stats	= nb8800_get_ethtool_stats,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static int nb8800_hw_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	u32 val;

	val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
	nb8800_writeb(priv, NB8800_TX_CTL1, val);

	/* Collision retry count */
	nb8800_writeb(priv, NB8800_TX_CTL2, 5);

	val = RX_PAD_STRIP | RX_AF_EN;
	nb8800_writeb(priv, NB8800_RX_CTL, val);

	/* Chosen by fair dice roll */
	nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);

	/* TX cycles per deferral period */
	nb8800_writeb(priv, NB8800_TX_SDP, 12);

	/* The following three threshold values have been
	 * experimentally determined for good results.
	 */

	/* RX/TX FIFO threshold for partial empty (64-bit entries) */
	nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);

	/* RX/TX FIFO threshold for partial full (64-bit entries) */
	nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);

	/* Buffer size for transmit (64-bit entries) */
	nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);

	/* Configure tx DMA */

	val = nb8800_readl(priv, NB8800_TXC_CR);
	val &= TCR_LE;		/* keep endian setting */
	val |= TCR_DM;		/* DMA descriptor mode */
	val |= TCR_RS;		/* automatically store tx status */
	val |= TCR_DIE;		/* interrupt on DMA chain completion */
	val |= TCR_TFI(7);	/* interrupt after 7 frames transmitted */
	val |= TCR_BTS(2);	/* 32-byte bus transaction size */
	nb8800_writel(priv, NB8800_TXC_CR, val);

	/* TX complete interrupt after 10 ms or 7 frames (see above) */
	val = clk_get_rate(priv->clk) / 100;
	nb8800_writel(priv, NB8800_TX_ITR, val);

	/* Configure rx DMA */

	val = nb8800_readl(priv, NB8800_RXC_CR);
	val &= RCR_LE;		/* keep endian setting */
	val |= RCR_DM;		/* DMA descriptor mode */
	val |= RCR_RS;		/* automatically store rx status */
	val |= RCR_DIE;		/* interrupt at end of DMA chain */
	val |= RCR_RFI(7);	/* interrupt after 7 frames received */
	val |= RCR_BTS(2);	/* 32-byte bus transaction size */
	nb8800_writel(priv, NB8800_RXC_CR, val);

	/* The rx interrupt can fire before the DMA has completed
	 * unless a small delay is added. 50 us is hopefully enough.
	 */
	priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;

	/* In NAPI poll mode we want to disable interrupts, but the
	 * hardware does not permit this. Delay 10 ms instead.
	 */
	priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;

	nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);

	priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;

	/* Flow control settings */

	/* Pause time of 0.1 ms */
	val = 100000 / 512;
	nb8800_writeb(priv, NB8800_PQ1, val >> 8);
	nb8800_writeb(priv, NB8800_PQ2, val & 0xff);

	/* Auto-negotiate by default */
	priv->pause_aneg = true;
	priv->pause_rx = true;
	priv->pause_tx = true;

	nb8800_mc_init(dev, 0);

	return 0;
}

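/*
 * SoC-specific glue for the Sigma Designs tangox and tango4 parts:
 * pad mode selection for MII vs RGMII, block reset, the MDIO clock
 * divider, and (on tango4) per-descriptor completion interrupts.
 */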
static int nb8800_tangox_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	u32 pad_mode = PAD_MODE_MII;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		pad_mode = PAD_MODE_MII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		pad_mode = PAD_MODE_RGMII;
		break;

	default:
		dev_err(dev->dev.parent, "unsupported phy mode %s\n",
			phy_modes(priv->phy_mode));
		return -EINVAL;
	}

	nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode);

	return 0;
}

static int nb8800_tangox_reset(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int clk_div;

	nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
	usleep_range(1000, 10000);
	nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);

	wmb();	/* ensure reset is cleared before proceeding */

	clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
	nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);

	return 0;
}

static const struct nb8800_ops nb8800_tangox_ops = {
	.init	= nb8800_tangox_init,
	.reset	= nb8800_tangox_reset,
};

static int nb8800_tango4_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int err;

	err = nb8800_tangox_init(dev);
	if (err)
		return err;

	/* On tango4 interrupt on DMA completion per frame works and gives
	 * better performance despite generating more rx interrupts.
	 */

	/* Disable unnecessary interrupt on rx completion */
	nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));

	/* Request interrupt on descriptor DMA completion */
	priv->rx_dma_config |= DESC_ID;

	return 0;
}

static const struct nb8800_ops nb8800_tango4_ops = {
	.init	= nb8800_tango4_init,
	.reset	= nb8800_tangox_reset,
};

static const struct of_device_id nb8800_dt_ids[] = {
	{
		.compatible = "aurora,nb8800",
	},
	{
		.compatible = "sigma,smp8642-ethernet",
		.data = &nb8800_tangox_ops,
	},
	{
		.compatible = "sigma,smp8734-ethernet",
		.data = &nb8800_tango4_ops,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, nb8800_dt_ids);

static int nb8800_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct nb8800_ops *ops = NULL;
	struct nb8800_priv *priv;
	struct resource *res;
	struct net_device *dev;
	struct mii_bus *bus;
	const unsigned char *mac;
	void __iomem *base;
	int irq;
	int ret;

	match = of_match_device(nb8800_dt_ids, &pdev->dev);
	if (match)
		ops = match->data;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start);

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	priv = netdev_priv(dev);
	priv->base = base;

	ret = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
	if (ret)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(priv->clk);
		goto err_free_dev;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		goto err_free_dev;

	spin_lock_init(&priv->tx_lock);

	if (ops && ops->reset) {
		ret = ops->reset(dev);
		if (ret)
			goto err_disable_clk;
	}

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}

	bus->name = "nb8800-mii";
	bus->read = nb8800_mdio_read;
	bus->write = nb8800_mdio_write;
	bus->parent = &pdev->dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii",
		 (unsigned long)res->start);
	bus->priv = priv;

	ret = of_mdiobus_register(bus, pdev->dev.of_node);
	if (ret) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_disable_clk;
	}

	if (of_phy_is_fixed_link(pdev->dev.of_node)) {
		ret = of_phy_register_fixed_link(pdev->dev.of_node);
		if (ret < 0) {
			dev_err(&pdev->dev, "bad fixed-link spec\n");
			goto err_free_bus;
		}
		priv->phy_node = of_node_get(pdev->dev.of_node);
	}

	if (!priv->phy_node)
		priv->phy_node = of_parse_phandle(pdev->dev.of_node,
						  "phy-handle", 0);

	if (!priv->phy_node) {
		dev_err(&pdev->dev, "no PHY specified\n");
		ret = -ENODEV;
		goto err_free_bus;
	}

	priv->mii_bus = bus;

	ret = nb8800_hw_init(dev);
	if (ret)
		goto err_deregister_fixed_link;

	if (ops && ops->init) {
		ret = ops->init(dev);
		if (ret)
			goto err_deregister_fixed_link;
	}

	dev->netdev_ops = &nb8800_netdev_ops;
	dev->ethtool_ops = &nb8800_ethtool_ops;
	dev->flags |= IFF_MULTICAST;
	dev->irq = irq;

	mac = of_get_mac_address(pdev->dev.of_node);
	if (!IS_ERR(mac))
		ether_addr_copy(dev->dev_addr, mac);

	if (!is_valid_ether_addr(dev->dev_addr))
		eth_hw_addr_random(dev);

	nb8800_update_mac_addr(dev);

	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		netdev_err(dev, "failed to register netdev\n");
		goto err_free_dma;
	}

	netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT);

	netdev_info(dev, "MAC address %pM\n", dev->dev_addr);

	return 0;

err_free_dma:
	nb8800_dma_free(dev);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(pdev->dev.of_node))
		of_phy_deregister_fixed_link(pdev->dev.of_node);
err_free_bus:
	of_node_put(priv->phy_node);
	mdiobus_unregister(bus);
err_disable_clk:
	clk_disable_unprepare(priv->clk);
err_free_dev:
	free_netdev(dev);

	return ret;
}

static int nb8800_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nb8800_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);
	if (of_phy_is_fixed_link(pdev->dev.of_node))
		of_phy_deregister_fixed_link(pdev->dev.of_node);
	of_node_put(priv->phy_node);

	mdiobus_unregister(priv->mii_bus);

	clk_disable_unprepare(priv->clk);

	nb8800_dma_free(ndev);
	free_netdev(ndev);

	return 0;
}

static struct platform_driver nb8800_driver = {
	.driver = {
		.name		= "nb8800",
		.of_match_table	= nb8800_dt_ids,
	},
	.probe	= nb8800_probe,
	.remove	= nb8800_remove,
};

module_platform_driver(nb8800_driver);

MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
MODULE_LICENSE("GPL");