Lines matching refs: enet (numbers are line numbers in the driver source)
93 static u32 enet_read(struct bcm4908_enet *enet, u16 offset)
95 return readl(enet->base + offset);
98 static void enet_write(struct bcm4908_enet *enet, u16 offset, u32 value)
100 writel(value, enet->base + offset);
103 static void enet_maskset(struct bcm4908_enet *enet, u16 offset, u32 mask, u32 set)
109 val = enet_read(enet, offset);
111 enet_write(enet, offset, val);
114 static void enet_set(struct bcm4908_enet *enet, u16 offset, u32 set)
116 enet_maskset(enet, offset, set, set);
119 static u32 enet_umac_read(struct bcm4908_enet *enet, u16 offset)
121 return enet_read(enet, ENET_UNIMAC + offset);
124 static void enet_umac_write(struct bcm4908_enet *enet, u16 offset, u32 value)
126 enet_write(enet, ENET_UNIMAC + offset, value);
129 static void enet_umac_set(struct bcm4908_enet *enet, u16 offset, u32 set)
131 enet_set(enet, ENET_UNIMAC + offset, set);
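Only the lines that mention enet are shown, so the masking step of enet_maskset() between the read at 109 and the write at 111 is hidden. A minimal sketch of the read-modify-write it most likely performs; the exact expression is an assumption, inferred from enet_set() passing the same value as both mask and set:

static void enet_maskset(struct bcm4908_enet *enet, u16 offset, u32 mask, u32 set)
{
        u32 val;

        /* Read-modify-write: clear the masked bits, then merge the new value */
        val = enet_read(enet, offset);
        val = (val & ~mask) | (set & mask);
        enet_write(enet, offset, val);
}
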
138 static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
140 enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
147 static void bcm4908_enet_dma_ring_intrs_on(struct bcm4908_enet *enet,
150 enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
153 static void bcm4908_enet_dma_ring_intrs_off(struct bcm4908_enet *enet,
156 enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
159 static void bcm4908_enet_dma_ring_intrs_ack(struct bcm4908_enet *enet,
162 enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
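These helpers only touch the per-channel INT_MASK and INT_STAT registers. Judging from their use later in the listing (the open path and the NAPI poll paths), the usual pattern is to acknowledge stale events before unmasking; a hedged usage sketch:

        /* Re-arm a ring's interrupts: clear anything pending, then unmask */
        bcm4908_enet_dma_ring_intrs_ack(enet, ring);
        bcm4908_enet_dma_ring_intrs_on(enet, ring);
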
169 static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
173 struct device *dev = enet->dev;
196 static void bcm4908_enet_dma_free(struct bcm4908_enet *enet)
198 struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
199 struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
200 struct device *dev = enet->dev;
214 static int bcm4908_enet_dma_alloc(struct bcm4908_enet *enet)
216 struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
217 struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
218 struct device *dev = enet->dev;
225 err = bcm4908_dma_alloc_buf_descs(enet, tx_ring);
235 err = bcm4908_dma_alloc_buf_descs(enet, rx_ring);
238 bcm4908_enet_dma_free(enet);
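bcm4908_dma_alloc_buf_descs() itself appears only by its signature. A hedged sketch of what it plausibly does: allocate the descriptor array in coherent DMA memory plus a CPU-side slots array. The dma_addr field name and the exact error handling are assumptions:

static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
                                       struct bcm4908_enet_dma_ring *ring)
{
        int size = ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
        struct device *dev = enet->dev;

        /* Descriptor ring shared with the DMA engine */
        ring->buf_desc = dma_alloc_coherent(dev, size, &ring->dma_addr, GFP_KERNEL);
        if (!ring->buf_desc)
                return -ENOMEM;

        /* CPU-side bookkeeping: one slot (skb, mapping, length) per descriptor */
        ring->slots = kcalloc(ring->length, sizeof(*ring->slots), GFP_KERNEL);
        if (!ring->slots) {
                dma_free_coherent(dev, size, ring->buf_desc, ring->dma_addr);
                ring->buf_desc = NULL;
                return -ENOMEM;
        }

        return 0;
}
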
245 static void bcm4908_enet_dma_reset(struct bcm4908_enet *enet)
247 struct bcm4908_enet_dma_ring *rings[] = { &enet->rx_ring, &enet->tx_ring };
252 enet_write(enet, rings[i]->cfg_block + ENET_DMA_CH_CFG, 0);
253 enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN, 0);
259 enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR, 0);
260 enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_STATE_DATA, 0);
261 enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS, 0);
262 enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR, 0);
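The reset path (lines 245-262) disables both channels and the controller, then zeroes each channel's state RAM. The loop structure below is an assumption; the register writes themselves match the listing:

static void bcm4908_enet_dma_reset(struct bcm4908_enet *enet)
{
        struct bcm4908_enet_dma_ring *rings[] = { &enet->rx_ring, &enet->tx_ring };
        int i;

        /* Disable each channel, then the DMA controller itself */
        for (i = 0; i < ARRAY_SIZE(rings); i++)
                enet_write(enet, rings[i]->cfg_block + ENET_DMA_CH_CFG, 0);
        enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN, 0);

        /* Drop any stale descriptor pointers held in per-channel state RAM */
        for (i = 0; i < ARRAY_SIZE(rings); i++) {
                struct bcm4908_enet_dma_ring *ring = rings[i];

                enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR, 0);
                enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_STATE_DATA, 0);
                enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS, 0);
                enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR, 0);
        }
}
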
266 static int bcm4908_enet_dma_alloc_rx_buf(struct bcm4908_enet *enet, unsigned int idx)
268 struct bcm4908_enet_dma_ring_bd *buf_desc = &enet->rx_ring.buf_desc[idx];
269 struct bcm4908_enet_dma_ring_slot *slot = &enet->rx_ring.slots[idx];
270 struct device *dev = enet->dev;
290 if (idx == enet->rx_ring.length - 1)
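Most of bcm4908_enet_dma_alloc_rx_buf() is hidden because its middle lines don't contain enet. A hedged sketch assuming the usual allocate-map-publish flow; the slot fields, the DMA_CTL_* flag names, and the buffer-length handling are assumptions:

static int bcm4908_enet_dma_alloc_rx_buf(struct bcm4908_enet *enet, unsigned int idx)
{
        struct bcm4908_enet_dma_ring_bd *buf_desc = &enet->rx_ring.buf_desc[idx];
        struct bcm4908_enet_dma_ring_slot *slot = &enet->rx_ring.slots[idx];
        struct device *dev = enet->dev;
        u32 tmp;

        /* slot->len is assumed to hold the RX buffer size set up elsewhere */
        slot->skb = netdev_alloc_skb(enet->netdev, slot->len);
        if (!slot->skb)
                return -ENOMEM;

        slot->dma_addr = dma_map_single(dev, slot->skb->data, slot->len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, slot->dma_addr)) {
                dev_kfree_skb(slot->skb);
                return -ENOMEM;
        }

        tmp = slot->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
        tmp |= DMA_CTL_STATUS_OWN;                      /* hand the descriptor to hardware */
        if (idx == enet->rx_ring.length - 1)
                tmp |= DMA_CTL_STATUS_WRAP;             /* last descriptor closes the ring */

        buf_desc->addr = cpu_to_le32((u32)slot->dma_addr);
        buf_desc->ctl = cpu_to_le32(tmp);

        return 0;
}
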
298 static void bcm4908_enet_dma_ring_init(struct bcm4908_enet *enet,
305 enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, BIT(reset_channel * 2 + reset_subch));
306 enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, 0);
308 enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
309 enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_MAX_BURST, ENET_DMA_MAX_BURST_LEN);
310 enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
312 enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
319 static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
321 struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
323 struct device *dev = enet->dev;
336 static int bcm4908_enet_dma_init(struct bcm4908_enet *enet)
338 struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
339 struct device *dev = enet->dev;
344 err = bcm4908_enet_dma_alloc_rx_buf(enet, i);
347 bcm4908_enet_dma_uninit(enet);
352 bcm4908_enet_dma_ring_init(enet, &enet->tx_ring);
353 bcm4908_enet_dma_ring_init(enet, &enet->rx_ring);
358 static void bcm4908_enet_dma_tx_ring_enable(struct bcm4908_enet *enet,
361 enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
364 static void bcm4908_enet_dma_tx_ring_disable(struct bcm4908_enet *enet,
367 enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
370 static void bcm4908_enet_dma_rx_ring_enable(struct bcm4908_enet *enet,
373 enet_set(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
376 static void bcm4908_enet_dma_rx_ring_disable(struct bcm4908_enet *enet,
382 enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
386 tmp = enet_read(enet, ring->cfg_block + ENET_DMA_CH_CFG);
389 enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
393 dev_warn(enet->dev, "Timeout waiting for DMA TX stop\n");
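The RX-disable path (lines 376-393) clears the enable bit and then polls until the channel reports it has stopped, warning on timeout; the warning text is the one shown at line 393. The timeout value and sleep interval below are assumptions:

static void bcm4908_enet_dma_rx_ring_disable(struct bcm4908_enet *enet,
                                             struct bcm4908_enet_dma_ring *ring)
{
        unsigned long deadline;
        u32 tmp;

        enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);

        deadline = jiffies + usecs_to_jiffies(2000);
        do {
                tmp = enet_read(enet, ring->cfg_block + ENET_DMA_CH_CFG);
                if (!(tmp & ENET_DMA_CH_CFG_ENABLE))
                        return;
                /* Keep clearing the enable bit until hardware confirms the stop */
                enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
                usleep_range(10, 30);
        } while (!time_after_eq(jiffies, deadline));

        dev_warn(enet->dev, "Timeout waiting for DMA TX stop\n");
}
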
400 static void bcm4908_enet_gmac_init(struct bcm4908_enet *enet)
404 bcm4908_enet_set_mtu(enet, enet->netdev->mtu);
406 cmd = enet_umac_read(enet, UMAC_CMD);
407 enet_umac_write(enet, UMAC_CMD, cmd | CMD_SW_RESET);
408 enet_umac_write(enet, UMAC_CMD, cmd & ~CMD_SW_RESET);
410 enet_set(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH);
411 enet_maskset(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH, 0);
413 enet_set(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB);
414 enet_maskset(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB, 0);
416 cmd = enet_umac_read(enet, UMAC_CMD);
421 enet_umac_write(enet, UMAC_CMD, cmd);
423 enet_maskset(enet, ENET_GMAC_STATUS,
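Between reading UMAC_CMD at line 416 and writing it back at line 421 the non-matching lines are omitted; a common UniMAC setup at that point is forcing the speed field. A hedged sketch; the CMD_SPEED_* names come from the shared UniMAC register definitions and their use here is an assumption:

        cmd = enet_umac_read(enet, UMAC_CMD);
        cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT);    /* clear the current speed field */
        cmd |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;       /* run the GMAC at 1 Gbit/s */
        enet_umac_write(enet, UMAC_CMD, cmd);
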
435 struct bcm4908_enet *enet = dev_id;
438 ring = (irq == enet->irq_tx) ? &enet->tx_ring : &enet->rx_ring;
440 bcm4908_enet_dma_ring_intrs_off(enet, ring);
441 bcm4908_enet_dma_ring_intrs_ack(enet, ring);
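The interrupt handler fragments end at the acknowledge; the remaining lines are almost certainly the NAPI hand-off. A minimal sketch of the full handler, assuming that structure:

static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
{
        struct bcm4908_enet *enet = dev_id;
        struct bcm4908_enet_dma_ring *ring;

        /* One handler serves both IRQs; pick the ring by IRQ number */
        ring = (irq == enet->irq_tx) ? &enet->tx_ring : &enet->rx_ring;

        /* Mask and acknowledge, then let NAPI do the actual work */
        bcm4908_enet_dma_ring_intrs_off(enet, ring);
        bcm4908_enet_dma_ring_intrs_ack(enet, ring);

        napi_schedule(&ring->napi);

        return IRQ_HANDLED;
}
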
450 struct bcm4908_enet *enet = netdev_priv(netdev);
451 struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
452 struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
453 struct device *dev = enet->dev;
456 err = request_irq(netdev->irq, bcm4908_enet_irq_handler, 0, "enet", enet);
462 if (enet->irq_tx > 0) {
463 err = request_irq(enet->irq_tx, bcm4908_enet_irq_handler, 0,
464 "tx", enet);
467 enet->irq_tx, err);
468 free_irq(netdev->irq, enet);
473 bcm4908_enet_gmac_init(enet);
474 bcm4908_enet_dma_reset(enet);
475 bcm4908_enet_dma_init(enet);
477 enet_umac_set(enet, UMAC_CMD, CMD_TX_EN | CMD_RX_EN);
479 enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
480 enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);
482 if (enet->irq_tx > 0) {
484 bcm4908_enet_dma_ring_intrs_ack(enet, tx_ring);
485 bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
488 bcm4908_enet_dma_rx_ring_enable(enet, rx_ring);
492 bcm4908_enet_dma_ring_intrs_ack(enet, rx_ring);
493 bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);
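The open path shown here requests the IRQs, initializes the GMAC and DMA, enables TX/RX in the UniMAC, and arms the ring interrupts. The lines that don't mention enet are most likely the NAPI and queue bring-up; a hedged sketch of how the function plausibly ends (the ordering is an assumption):

        if (enet->irq_tx > 0)
                napi_enable(&tx_ring->napi);    /* presumably inside the irq_tx branch above */

        napi_enable(&rx_ring->napi);
        netif_carrier_on(netdev);
        netif_start_queue(netdev);

        return 0;
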
500 struct bcm4908_enet *enet = netdev_priv(netdev);
501 struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
502 struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
510 bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
511 bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);
513 bcm4908_enet_dma_uninit(enet);
515 free_irq(enet->irq_tx, enet);
516 free_irq(enet->netdev->irq, enet);
523 struct bcm4908_enet *enet = netdev_priv(netdev);
524 struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
526 struct device *dev = enet->dev;
532 if (enet->irq_tx < 0 &&
534 napi_schedule(&enet->tx_ring.napi);
568 netdev_sent_queue(enet->netdev, skb->len);
573 bcm4908_enet_dma_tx_ring_enable(enet, &enet->tx_ring);
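Most of the transmit path is hidden: between the early completion check (lines 532-534) and netdev_sent_queue() at line 568 the driver has to publish one descriptor to the hardware before kicking the ring at line 573. A hedged sketch of that step; all field and flag names (write_idx, dma_addr, DMA_CTL_*) are assumptions:

        /* Map the skb and hand one TX descriptor to the hardware */
        slot = &ring->slots[ring->write_idx];
        slot->skb = skb;
        slot->len = skb->len;
        slot->dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, slot->dma_addr)) {
                dev_kfree_skb(skb);             /* drop on mapping failure (sketch only) */
                return NETDEV_TX_OK;
        }

        tmp = skb->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
        tmp |= DMA_CTL_STATUS_OWN | DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP;
        if (ring->write_idx == ring->length - 1)
                tmp |= DMA_CTL_STATUS_WRAP;

        buf_desc = &ring->buf_desc[ring->write_idx];
        buf_desc->addr = cpu_to_le32((u32)slot->dma_addr);
        buf_desc->ctl = cpu_to_le32(tmp);
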
584 struct bcm4908_enet *enet = container_of(rx_ring, struct bcm4908_enet, rx_ring);
585 struct device *dev = enet->dev;
596 buf_desc = &enet->rx_ring.buf_desc[enet->rx_ring.read_idx];
601 slot = enet->rx_ring.slots[enet->rx_ring.read_idx];
604 err = bcm4908_enet_dma_alloc_rx_buf(enet, enet->rx_ring.read_idx);
608 if (++enet->rx_ring.read_idx == enet->rx_ring.length)
609 enet->rx_ring.read_idx = 0;
616 enet->netdev->stats.rx_dropped++;
625 enet->netdev->stats.rx_dropped++;
630 skb->protocol = eth_type_trans(skb, enet->netdev);
634 enet->netdev->stats.rx_packets++;
635 enet->netdev->stats.rx_bytes += len;
642 bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);
646 bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring);
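The RX poll fragments show the per-packet bookkeeping but not how the poll completes. A hedged sketch of the likely NAPI completion around lines 642-646, assuming the local names napi, weight and handled:

        /* Under budget: leave polling mode and unmask the channel interrupts */
        if (handled < weight) {
                napi_complete_done(napi, handled);
                bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);
        }

        /* Re-enable the ring in case hardware stopped it on descriptor exhaustion */
        bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring);

        return handled;
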
654 struct bcm4908_enet *enet = container_of(tx_ring, struct bcm4908_enet, tx_ring);
657 struct device *dev = enet->dev;
677 netdev_completed_queue(enet->netdev, handled, bytes);
678 enet->netdev->stats.tx_packets += handled;
679 enet->netdev->stats.tx_bytes += bytes;
683 bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
686 if (netif_queue_stopped(enet->netdev))
687 netif_wake_queue(enet->netdev);
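The body of the TX completion loop (roughly lines 658-676) is hidden; it has to walk the ring from read_idx, unmap and free each transmitted skb, and accumulate the handled/bytes counters reported at lines 677-679. A hedged sketch, with the ring fields and flag names as assumptions:

        while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) {
                buf_desc = &tx_ring->buf_desc[tx_ring->read_idx];
                if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
                        break;                  /* hardware has not sent this one yet */
                slot = &tx_ring->slots[tx_ring->read_idx];

                dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
                dev_kfree_skb(slot->skb);

                handled++;
                bytes += slot->len;

                if (++tx_ring->read_idx == tx_ring->length)
                        tx_ring->read_idx = 0;
        }
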
694 struct bcm4908_enet *enet = netdev_priv(netdev);
696 bcm4908_enet_set_mtu(enet, new_mtu);
713 struct bcm4908_enet *enet;
716 netdev = devm_alloc_etherdev(dev, sizeof(*enet));
720 enet = netdev_priv(netdev);
721 enet->dev = dev;
722 enet->netdev = netdev;
724 enet->base = devm_platform_ioremap_resource(pdev, 0);
725 if (IS_ERR(enet->base)) {
726 dev_err(dev, "Failed to map registers: %ld\n", PTR_ERR(enet->base));
727 return PTR_ERR(enet->base);
734 enet->irq_tx = platform_get_irq_byname(pdev, "tx");
740 err = bcm4908_enet_dma_alloc(enet);
754 netif_napi_add_tx(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx);
755 netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx);
761 platform_set_drvdata(pdev, enet);
766 bcm4908_enet_dma_free(enet);
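The probe fragments end with platform_set_drvdata() at line 761 and a DMA-free call at line 766 that is presumably the error path. A hedged sketch of how the function likely finishes; the label name and exact placement are assumptions:

        platform_set_drvdata(pdev, enet);

        err = register_netdev(netdev);
        if (err)
                goto err_dma_free;

        return 0;

err_dma_free:
        bcm4908_enet_dma_free(enet);

        return err;
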
773 struct bcm4908_enet *enet = platform_get_drvdata(pdev);
775 unregister_netdev(enet->netdev);
776 netif_napi_del(&enet->rx_ring.napi);
777 netif_napi_del(&enet->tx_ring.napi);
778 bcm4908_enet_dma_free(enet);
784 { .compatible = "brcm,bcm4908-enet"},