Lines matching references to priv (numbers are the driver's own source line numbers)
36 static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
38 return bcm_readl(priv->base + off);
41 static inline void enet_writel(struct bcm_enet_priv *priv,
44 bcm_writel(val, priv->base + off);
50 static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
52 return bcm_readl(priv->base + off);
55 static inline void enetsw_writel(struct bcm_enet_priv *priv,
58 bcm_writel(val, priv->base + off);
61 static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
63 return bcm_readw(priv->base + off);
66 static inline void enetsw_writew(struct bcm_enet_priv *priv,
69 bcm_writew(val, priv->base + off);
72 static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
74 return bcm_readb(priv->base + off);
77 static inline void enetsw_writeb(struct bcm_enet_priv *priv,
80 bcm_writeb(val, priv->base + off);
85 static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
90 static inline void enet_dma_writel(struct bcm_enet_priv *priv,
96 static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
99 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
102 static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
106 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
109 static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
111 return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
114 static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
117 bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
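The lines above are the thin register accessor helpers: each one just adds an offset to a base pointer, and the per-channel DMA helpers additionally scale by dma_chan_width so the same register block serves both the rx and tx channel. A hedged sketch of the per-channel variant (the shared_base[1] base pointer is an assumption consistent with the shared_base[2] use visible above):

/* sketch: per-channel DMA register write, assuming the usual bcm63xx
 * layout where bcm_enet_shared_base[1] maps the DMA channel block */
static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[1] +
		   bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}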
124 static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
129 enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
131 enet_writel(priv, data, ENET_MIIDATA_REG);
137 if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
148 static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
158 if (do_mdio_op(priv, tmp))
161 val = enet_readl(priv, ENET_MIIDATA_REG);
169 static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
180 (void)do_mdio_op(priv, tmp);
190 return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
199 return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
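do_mdio_op is a bounded busy-wait: it clears the MII interrupt flag, writes the command word, then polls ENET_IR_REG until ENET_IR_MII comes back. A hedged sketch of that loop, filling in the lines the listing elides (the limit value, barrier and delay are assumptions consistent with the fragments):

static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* start operation, wait for it to finish */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}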
225 struct bcm_enet_priv *priv;
227 priv = netdev_priv(dev);
229 while (priv->rx_desc_count < priv->rx_ring_size) {
236 desc_idx = priv->rx_dirty_desc;
237 desc = &priv->rx_desc_cpu[desc_idx];
239 if (!priv->rx_skb[desc_idx]) {
240 skb = netdev_alloc_skb(dev, priv->rx_skb_size);
243 priv->rx_skb[desc_idx] = skb;
244 p = dma_map_single(&priv->pdev->dev, skb->data,
245 priv->rx_skb_size,
250 len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
252 if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
253 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
254 priv->rx_dirty_desc = 0;
256 priv->rx_dirty_desc++;
261 priv->rx_desc_count++;
264 if (priv->dma_has_sram)
265 enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
267 enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
272 if (priv->rx_desc_count == 0 && netif_running(dev)) {
273 dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
274 priv->rx_timeout.expires = jiffies + HZ;
275 add_timer(&priv->rx_timeout);
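The refill loop walks rx_dirty_desc, allocates and DMA-maps an skb for any empty slot, then hands the descriptor back to hardware, setting the WRAP bit on the last ring entry so the DMA engine loops back to slot 0; if allocation fails it re-arms the one-second retry timer seen above. A condensed, hedged sketch of the loop (OWNER handling, barrier placement and the elided mapping-error check are assumptions):

	while (priv->rx_desc_count < priv->rx_ring_size) {
		int desc_idx = priv->rx_dirty_desc;
		struct bcm_enet_desc *desc = &priv->rx_desc_cpu[desc_idx];
		u32 len_stat;

		if (!priv->rx_skb[desc_idx]) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
			if (!skb)
				break;
			priv->rx_skb[desc_idx] = skb;
			/* mapping-error handling elided in this sketch */
			desc->address = dma_map_single(&priv->pdev->dev,
						       skb->data,
						       priv->rx_skb_size,
						       DMA_FROM_DEVICE);
		}

		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
		wmb();
		desc->len_stat = len_stat;
		priv->rx_desc_count++;

		/* kick the per-channel buffer allocator */
		if (priv->dma_has_sram)
			enet_dma_writel(priv, 1,
					ENETDMA_BUFALLOC_REG(priv->rx_chan));
		else
			enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC,
					 priv->rx_chan);
	}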
286 struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
287 struct net_device *dev = priv->net_dev;
289 spin_lock(&priv->rx_lock);
291 spin_unlock(&priv->rx_lock);
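The retry-timer callback above is nearly complete in the fragments; the elided middle line presumably just refills the ring while holding the rx lock. Sketch:

static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
	struct net_device *dev = priv->net_dev;

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev);	/* assumed single-argument form */
	spin_unlock(&priv->rx_lock);
}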
299 struct bcm_enet_priv *priv;
303 priv = netdev_priv(dev);
304 kdev = &priv->pdev->dev;
309 if (budget > priv->rx_desc_count)
310 budget = priv->rx_desc_count;
319 desc_idx = priv->rx_curr_desc;
320 desc = &priv->rx_desc_cpu[desc_idx];
333 priv->rx_curr_desc++;
334 if (priv->rx_curr_desc == priv->rx_ring_size)
335 priv->rx_curr_desc = 0;
336 priv->rx_desc_count--;
340 if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
341 (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
347 if (!priv->enet_is_sw &&
363 skb = priv->rx_skb[desc_idx];
371 nskb = napi_alloc_skb(&priv->napi, len);
385 dma_unmap_single(&priv->pdev->dev, desc->address,
386 priv->rx_skb_size, DMA_FROM_DEVICE);
387 priv->rx_skb[desc_idx] = NULL;
398 if (processed || !priv->rx_desc_count) {
402 enet_dmac_writel(priv, priv->dma_chan_en_mask,
403 ENETDMAC_CHANCFG, priv->rx_chan);
415 struct bcm_enet_priv *priv;
418 priv = netdev_priv(dev);
421 while (priv->tx_desc_count < priv->tx_ring_size) {
427 spin_lock(&priv->tx_lock);
429 desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
432 spin_unlock(&priv->tx_lock);
440 skb = priv->tx_skb[priv->tx_dirty_desc];
441 priv->tx_skb[priv->tx_dirty_desc] = NULL;
442 dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
445 priv->tx_dirty_desc++;
446 if (priv->tx_dirty_desc == priv->tx_ring_size)
447 priv->tx_dirty_desc = 0;
448 priv->tx_desc_count++;
450 spin_unlock(&priv->tx_lock);
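Transmit reclaim checks descriptor ownership under tx_lock (it races against start_xmit), unmaps and frees the skb, then advances the dirty index with wrap. A hedged sketch of the loop (the OWNER check, rmb() and error accounting are assumptions consistent with the fragments):

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		spin_lock(&priv->tx_lock);
		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		/* still owned by hardware? stop reclaiming */
		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);
		dev_kfree_skb(skb);
	}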
470 struct bcm_enet_priv *priv;
474 priv = container_of(napi, struct bcm_enet_priv, napi);
475 dev = priv->net_dev;
478 enet_dmac_writel(priv, priv->dma_chan_int_mask,
479 ENETDMAC_IR, priv->rx_chan);
480 enet_dmac_writel(priv, priv->dma_chan_int_mask,
481 ENETDMAC_IR, priv->tx_chan);
486 spin_lock(&priv->rx_lock);
488 spin_unlock(&priv->rx_lock);
500 enet_dmac_writel(priv, priv->dma_chan_int_mask,
501 ENETDMAC_IRMASK, priv->rx_chan);
502 enet_dmac_writel(priv, priv->dma_chan_int_mask,
503 ENETDMAC_IRMASK, priv->tx_chan);
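The NAPI handler acknowledges the per-channel interrupts, reclaims tx, receives up to budget packets under rx_lock, and only unmasks the DMA interrupts again once it completes below budget. A hedged outline of the whole function (the bcm_enet_tx_reclaim and bcm_enet_receive_queue helper names are assumptions, not shown in the listing):

static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack rx/tx dma interrupts */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* reclaim sent skbs, then receive under the rx lock */
	bcm_enet_tx_reclaim(dev, 0);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget)
		return rx_work_done;	/* rx queue not yet drained */

	napi_complete_done(napi, rx_work_done);

	/* restore rx/tx interrupts */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	return rx_work_done;
}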
514 struct bcm_enet_priv *priv;
518 priv = netdev_priv(dev);
520 stat = enet_readl(priv, ENET_IR_REG);
525 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
526 enet_writel(priv, 0, ENET_IRMASK_REG);
529 schedule_work(&priv->mib_update_task);
540 struct bcm_enet_priv *priv;
543 priv = netdev_priv(dev);
546 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
547 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
549 napi_schedule(&priv->napi);
560 struct bcm_enet_priv *priv;
565 priv = netdev_priv(dev);
568 spin_lock(&priv->tx_lock);
572 if (unlikely(!priv->tx_desc_count)) {
574 dev_err(&priv->pdev->dev, "xmit called with no tx desc "
581 if (priv->enet_is_sw && skb->len < 64) {
600 desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
601 priv->tx_skb[priv->tx_curr_desc] = skb;
604 desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
608 len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
612 priv->tx_curr_desc++;
613 if (priv->tx_curr_desc == priv->tx_ring_size) {
614 priv->tx_curr_desc = 0;
615 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
617 priv->tx_desc_count--;
626 enet_dmac_writel(priv, priv->dma_chan_en_mask,
627 ENETDMAC_CHANCFG, priv->tx_chan);
630 if (!priv->tx_desc_count)
638 spin_unlock(&priv->tx_lock);
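In the xmit path, the descriptor at tx_curr_desc records the skb and its DMA mapping, the length/status word gets ESOP plus OWNER (and WRAP on the last ring slot), the current index advances, and the channel is kicked; the queue is stopped when no descriptors remain. A hedged sketch of that middle section (the LENGTH/APPEND_CRC masks and the barriers are assumptions):

	/* point to the next available descriptor */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
		    DMADESC_APPEND_CRC |
		    DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
	}
	priv->tx_desc_count--;

	/* dma may already be polling; publish fields in order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	enet_dmac_writel(priv, priv->dma_chan_en_mask,
			 ENETDMAC_CHANCFG, priv->tx_chan);

	/* stop queue if no more descriptors are available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);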
647 struct bcm_enet_priv *priv;
651 priv = netdev_priv(dev);
657 enet_writel(priv, val, ENET_PML_REG(0));
661 enet_writel(priv, val, ENET_PMH_REG(0));
671 struct bcm_enet_priv *priv;
676 priv = netdev_priv(dev);
678 val = enet_readl(priv, ENET_RXCFG_REG);
695 enet_writel(priv, val, ENET_RXCFG_REG);
710 enet_writel(priv, tmp, ENET_PML_REG(i + 1));
714 enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
718 enet_writel(priv, 0, ENET_PML_REG(i + 1));
719 enet_writel(priv, 0, ENET_PMH_REG(i + 1));
722 enet_writel(priv, val, ENET_RXCFG_REG);
728 static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
732 val = enet_readl(priv, ENET_TXCTL_REG);
737 enet_writel(priv, val, ENET_TXCTL_REG);
743 static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
748 val = enet_readl(priv, ENET_RXCFG_REG);
753 enet_writel(priv, val, ENET_RXCFG_REG);
755 if (!priv->dma_has_sram)
759 val = enet_dma_readl(priv, ENETDMA_CFG_REG);
761 val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
763 val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
764 enet_dma_writel(priv, val, ENETDMA_CFG_REG);
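Both helpers are simple read-modify-write operations on a single MAC register (plus the DMA flow-control bit when there is no SRAM). Hedged sketch of the duplex one (the FD mask name is an assumption):

static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}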
772 struct bcm_enet_priv *priv;
776 priv = netdev_priv(dev);
780 if (priv->old_link != phydev->link) {
782 priv->old_link = phydev->link;
786 if (phydev->link && phydev->duplex != priv->old_duplex) {
787 bcm_enet_set_duplex(priv,
790 priv->old_duplex = phydev->duplex;
795 if (phydev->link && phydev->pause != priv->old_pause) {
802 } else if (!priv->pause_auto) {
804 rx_pause_en = priv->pause_rx;
805 tx_pause_en = priv->pause_tx;
811 bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
813 priv->old_pause = phydev->pause;
833 struct bcm_enet_priv *priv;
835 priv = netdev_priv(dev);
836 bcm_enet_set_duplex(priv, priv->force_duplex_full);
837 bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
842 priv->force_speed_100 ? 100 : 10,
843 priv->force_duplex_full ? "full" : "half",
844 priv->pause_rx ? "rx" : "off",
845 priv->pause_tx ? "tx" : "off");
853 struct bcm_enet_priv *priv;
863 priv = netdev_priv(dev);
864 kdev = &priv->pdev->dev;
866 if (priv->has_phy) {
869 priv->mii_bus->id, priv->phy_id);
882 phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
883 priv->pause_auto);
887 priv->old_link = 0;
888 priv->old_duplex = -1;
889 priv->old_pause = -1;
895 enet_writel(priv, 0, ENET_IRMASK_REG);
896 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
897 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
903 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
908 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
915 enet_writel(priv, 0, ENET_PML_REG(i));
916 enet_writel(priv, 0, ENET_PMH_REG(i));
924 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
925 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
931 priv->rx_desc_alloc_size = size;
932 priv->rx_desc_cpu = p;
935 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
936 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
942 priv->tx_desc_alloc_size = size;
943 priv->tx_desc_cpu = p;
945 priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
947 if (!priv->tx_skb) {
952 priv->tx_desc_count = priv->tx_ring_size;
953 priv->tx_dirty_desc = 0;
954 priv->tx_curr_desc = 0;
955 spin_lock_init(&priv->tx_lock);
958 priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
960 if (!priv->rx_skb) {
965 priv->rx_desc_count = 0;
966 priv->rx_dirty_desc = 0;
967 priv->rx_curr_desc = 0;
970 if (priv->dma_has_sram)
971 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
972 ENETDMA_BUFALLOC_REG(priv->rx_chan));
974 enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
975 ENETDMAC_BUFALLOC, priv->rx_chan);
984 if (priv->dma_has_sram) {
985 enet_dmas_writel(priv, priv->rx_desc_dma,
986 ENETDMAS_RSTART_REG, priv->rx_chan);
987 enet_dmas_writel(priv, priv->tx_desc_dma,
988 ENETDMAS_RSTART_REG, priv->tx_chan);
990 enet_dmac_writel(priv, priv->rx_desc_dma,
991 ENETDMAC_RSTART, priv->rx_chan);
992 enet_dmac_writel(priv, priv->tx_desc_dma,
993 ENETDMAC_RSTART, priv->tx_chan);
997 if (priv->dma_has_sram) {
998 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
999 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
1000 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
1001 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
1002 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
1003 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
1005 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
1006 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
1010 enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
1011 enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
1014 enet_dmac_writel(priv, priv->dma_maxburst,
1015 ENETDMAC_MAXBURST, priv->rx_chan);
1016 enet_dmac_writel(priv, priv->dma_maxburst,
1017 ENETDMAC_MAXBURST, priv->tx_chan);
1020 enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
1023 if (priv->dma_has_sram) {
1024 val = priv->rx_ring_size / 3;
1025 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
1026 val = (priv->rx_ring_size * 2) / 3;
1027 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
1029 enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
1030 enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
1031 enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
1037 val = enet_readl(priv, ENET_CTL_REG);
1039 enet_writel(priv, val, ENET_CTL_REG);
1040 if (priv->dma_has_sram)
1041 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1042 enet_dmac_writel(priv, priv->dma_chan_en_mask,
1043 ENETDMAC_CHANCFG, priv->rx_chan);
1046 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
1047 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1050 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1051 ENETDMAC_IR, priv->rx_chan);
1052 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1053 ENETDMAC_IR, priv->tx_chan);
1056 napi_enable(&priv->napi);
1058 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1059 ENETDMAC_IRMASK, priv->rx_chan);
1060 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1061 ENETDMAC_IRMASK, priv->tx_chan);
1072 for (i = 0; i < priv->rx_ring_size; i++) {
1075 if (!priv->rx_skb[i])
1078 desc = &priv->rx_desc_cpu[i];
1079 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1081 kfree_skb(priv->rx_skb[i]);
1083 kfree(priv->rx_skb);
1086 kfree(priv->tx_skb);
1089 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1090 priv->tx_desc_cpu, priv->tx_desc_dma);
1093 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1094 priv->rx_desc_cpu, priv->rx_desc_dma);
1097 free_irq(priv->irq_tx, dev);
1100 free_irq(priv->irq_rx, dev);
1115 static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
1120 val = enet_readl(priv, ENET_CTL_REG);
1122 enet_writel(priv, val, ENET_CTL_REG);
1128 val = enet_readl(priv, ENET_CTL_REG);
1138 static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1142 enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);
1148 val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
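Disabling a DMA channel clears CHANCFG and then polls it until the enable bit drops, with a bounded busy-wait. Hedged sketch (the limit and the CHANCFG_EN mask name are assumptions):

static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
	int limit;

	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);

	limit = 1000;
	do {
		u32 val;

		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
			break;
		udelay(1);
	} while (limit--);
}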
1160 struct bcm_enet_priv *priv;
1164 priv = netdev_priv(dev);
1165 kdev = &priv->pdev->dev;
1168 napi_disable(&priv->napi);
1169 if (priv->has_phy)
1171 del_timer_sync(&priv->rx_timeout);
1174 enet_writel(priv, 0, ENET_IRMASK_REG);
1175 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
1176 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
1179 cancel_work_sync(&priv->mib_update_task);
1182 bcm_enet_disable_dma(priv, priv->tx_chan);
1183 bcm_enet_disable_dma(priv, priv->rx_chan);
1184 bcm_enet_disable_mac(priv);
1190 for (i = 0; i < priv->rx_ring_size; i++) {
1193 if (!priv->rx_skb[i])
1196 desc = &priv->rx_desc_cpu[i];
1197 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
1199 kfree_skb(priv->rx_skb[i]);
1203 kfree(priv->rx_skb);
1204 kfree(priv->tx_skb);
1205 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1206 priv->rx_desc_cpu, priv->rx_desc_dma);
1207 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1208 priv->tx_desc_cpu, priv->tx_desc_dma);
1209 free_irq(priv->irq_tx, dev);
1210 free_irq(priv->irq_rx, dev);
1214 if (priv->has_phy)
1336 static void update_mib_counters(struct bcm_enet_priv *priv)
1349 val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
1350 p = (char *)priv + s->stat_offset;
1361 (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
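Each MIB counter is read from hardware and accumulated into a field of priv located via a byte offset held in a stats descriptor table; the unused registers are then read once to keep the self-clearing counters in sync. Hedged sketch of the accumulation loop (the bcm_enet_gstrings_stats table and its field names are assumptions):

	for (i = 0; i < ARRAY_SIZE(bcm_enet_gstrings_stats); i++) {
		const struct bcm_enet_stats *s;
		char *p;
		u32 val;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}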
1366 struct bcm_enet_priv *priv;
1368 priv = container_of(t, struct bcm_enet_priv, mib_update_task);
1369 mutex_lock(&priv->mib_update_lock);
1370 update_mib_counters(priv);
1371 mutex_unlock(&priv->mib_update_lock);
1374 if (netif_running(priv->net_dev))
1375 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1382 struct bcm_enet_priv *priv;
1385 priv = netdev_priv(netdev);
1387 mutex_lock(&priv->mib_update_lock);
1388 update_mib_counters(priv);
1398 p = (char *)priv;
1403 mutex_unlock(&priv->mib_update_lock);
1408 struct bcm_enet_priv *priv;
1410 priv = netdev_priv(dev);
1411 if (priv->has_phy)
1420 struct bcm_enet_priv *priv;
1423 priv = netdev_priv(dev);
1425 if (priv->has_phy) {
1434 cmd->base.speed = (priv->force_speed_100) ?
1436 cmd->base.duplex = (priv->force_duplex_full) ?
1455 struct bcm_enet_priv *priv;
1457 priv = netdev_priv(dev);
1458 if (priv->has_phy) {
1470 priv->force_speed_100 =
1472 priv->force_duplex_full =
1484 struct bcm_enet_priv *priv;
1486 priv = netdev_priv(dev);
1491 ering->rx_pending = priv->rx_ring_size;
1492 ering->tx_pending = priv->tx_ring_size;
1498 struct bcm_enet_priv *priv;
1501 priv = netdev_priv(dev);
1509 priv->rx_ring_size = ering->rx_pending;
1510 priv->tx_ring_size = ering->tx_pending;
1527 struct bcm_enet_priv *priv;
1529 priv = netdev_priv(dev);
1530 ecmd->autoneg = priv->pause_auto;
1531 ecmd->rx_pause = priv->pause_rx;
1532 ecmd->tx_pause = priv->pause_tx;
1538 struct bcm_enet_priv *priv;
1540 priv = netdev_priv(dev);
1542 if (priv->has_phy) {
1555 priv->pause_auto = ecmd->autoneg;
1556 priv->pause_rx = ecmd->rx_pause;
1557 priv->pause_tx = ecmd->tx_pause;
1579 struct bcm_enet_priv *priv;
1581 priv = netdev_priv(dev);
1582 if (priv->has_phy) {
1604 struct bcm_enet_priv *priv = netdev_priv(dev);
1619 priv->hw_mtu = actual_mtu;
1625 priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
1626 priv->dma_maxburst * 4);
1635 static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1641 bcm_enet_disable_mac(priv);
1645 enet_writel(priv, val, ENET_CTL_REG);
1650 val = enet_readl(priv, ENET_CTL_REG);
1657 val = enet_readl(priv, ENET_CTL_REG);
1658 if (priv->use_external_mii)
1662 enet_writel(priv, val, ENET_CTL_REG);
1665 enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1669 val = enet_readl(priv, ENET_MIBCTL_REG);
1671 enet_writel(priv, val, ENET_MIBCTL_REG);
1689 struct bcm_enet_priv *priv;
1705 dev = alloc_etherdev(sizeof(*priv));
1708 priv = netdev_priv(dev);
1710 priv->enet_is_sw = false;
1711 priv->dma_maxburst = BCMENET_DMA_MAXBURST;
1717 priv->base = devm_platform_ioremap_resource(pdev, 0);
1718 if (IS_ERR(priv->base)) {
1719 ret = PTR_ERR(priv->base);
1723 dev->irq = priv->irq = res_irq->start;
1724 priv->irq_rx = res_irq_rx->start;
1725 priv->irq_tx = res_irq_tx->start;
1727 priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
1728 if (IS_ERR(priv->mac_clk)) {
1729 ret = PTR_ERR(priv->mac_clk);
1732 ret = clk_prepare_enable(priv->mac_clk);
1737 priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1738 priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1743 priv->has_phy = pd->has_phy;
1744 priv->phy_id = pd->phy_id;
1745 priv->has_phy_interrupt = pd->has_phy_interrupt;
1746 priv->phy_interrupt = pd->phy_interrupt;
1747 priv->use_external_mii = !pd->use_internal_phy;
1748 priv->pause_auto = pd->pause_auto;
1749 priv->pause_rx = pd->pause_rx;
1750 priv->pause_tx = pd->pause_tx;
1751 priv->force_duplex_full = pd->force_duplex_full;
1752 priv->force_speed_100 = pd->force_speed_100;
1753 priv->dma_chan_en_mask = pd->dma_chan_en_mask;
1754 priv->dma_chan_int_mask = pd->dma_chan_int_mask;
1755 priv->dma_chan_width = pd->dma_chan_width;
1756 priv->dma_has_sram = pd->dma_has_sram;
1757 priv->dma_desc_shift = pd->dma_desc_shift;
1758 priv->rx_chan = pd->rx_chan;
1759 priv->tx_chan = pd->tx_chan;
1762 if (priv->has_phy && !priv->use_external_mii) {
1764 priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
1765 if (IS_ERR(priv->phy_clk)) {
1766 ret = PTR_ERR(priv->phy_clk);
1767 priv->phy_clk = NULL;
1770 ret = clk_prepare_enable(priv->phy_clk);
1776 bcm_enet_hw_preinit(priv);
1779 if (priv->has_phy) {
1781 priv->mii_bus = mdiobus_alloc();
1782 if (!priv->mii_bus) {
1787 bus = priv->mii_bus;
1790 bus->priv = priv;
1798 bus->phy_mask = ~(1 << priv->phy_id);
1800 if (priv->has_phy_interrupt)
1801 bus->irq[priv->phy_id] = priv->phy_interrupt;
1819 spin_lock_init(&priv->rx_lock);
1822 timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
1825 mutex_init(&priv->mib_update_lock);
1826 INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1830 enet_writel(priv, 0, ENET_MIB_REG(i));
1834 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
1848 priv->pdev = pdev;
1849 priv->net_dev = dev;
1854 if (priv->mii_bus)
1855 mdiobus_unregister(priv->mii_bus);
1858 if (priv->mii_bus)
1859 mdiobus_free(priv->mii_bus);
1863 enet_writel(priv, 0, ENET_MIISC_REG);
1864 clk_disable_unprepare(priv->phy_clk);
1867 clk_disable_unprepare(priv->mac_clk);
1879 struct bcm_enet_priv *priv;
1884 priv = netdev_priv(dev);
1888 enet_writel(priv, 0, ENET_MIISC_REG);
1890 if (priv->has_phy) {
1891 mdiobus_unregister(priv->mii_bus);
1892 mdiobus_free(priv->mii_bus);
1903 clk_disable_unprepare(priv->phy_clk);
1904 clk_disable_unprepare(priv->mac_clk);
1922 static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
1928 spin_lock_bh(&priv->enetsw_mdio_lock);
1929 enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1938 enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1940 ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
1941 spin_unlock_bh(&priv->enetsw_mdio_lock);
1945 static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
1951 spin_lock_bh(&priv->enetsw_mdio_lock);
1952 enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1963 enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1965 spin_unlock_bh(&priv->enetsw_mdio_lock);
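The switch MDIO helpers serialize on enetsw_mdio_lock, clear MDIOC, compose a command word from the PHY id, register location and an external-PHY flag, then pick the result up from MDIOD after a short delay. Hedged sketch of the read side (the shift/mask names and the delay are assumptions):

static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
				int ext, int phy_id, int location)
{
	u32 reg;
	int ret;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_RD_MASK |
	      (phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
	      (location << ENETSW_MDIOC_REG_SHIFT);
	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
	return ret;
}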
1978 struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
1981 for (i = 0; i < priv->num_ports; i++) {
1987 port = &priv->used_ports[i];
1996 val = bcmenet_sw_mdio_read(priv, external_phy,
2003 if (!(up ^ priv->sw_port_link[i]))
2006 priv->sw_port_link[i] = up;
2010 dev_info(&priv->pdev->dev, "link DOWN on %s\n",
2012 enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2014 enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2020 advertise = bcmenet_sw_mdio_read(priv, external_phy,
2023 lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2036 advertise = bcmenet_sw_mdio_read(priv, external_phy,
2039 lpa = bcmenet_sw_mdio_read(priv, external_phy,
2049 dev_info(&priv->pdev->dev,
2063 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2064 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2067 priv->swphy_poll.expires = jiffies + HZ;
2068 add_timer(&priv->swphy_poll);
2076 struct bcm_enet_priv *priv;
2083 priv = netdev_priv(dev);
2084 kdev = &priv->pdev->dev;
2087 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2088 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2090 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2095 if (priv->irq_tx != -1) {
2096 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2103 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2104 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2111 priv->rx_desc_alloc_size = size;
2112 priv->rx_desc_cpu = p;
2115 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2116 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2123 priv->tx_desc_alloc_size = size;
2124 priv->tx_desc_cpu = p;
2126 priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
2128 if (!priv->tx_skb) {
2134 priv->tx_desc_count = priv->tx_ring_size;
2135 priv->tx_dirty_desc = 0;
2136 priv->tx_curr_desc = 0;
2137 spin_lock_init(&priv->tx_lock);
2140 priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
2142 if (!priv->rx_skb) {
2148 priv->rx_desc_count = 0;
2149 priv->rx_dirty_desc = 0;
2150 priv->rx_curr_desc = 0;
2153 for (i = 0; i < priv->num_ports; i++) {
2154 enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2156 enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2160 priv->sw_port_link[i] = 0;
2164 val = enetsw_readb(priv, ENETSW_GMCR_REG);
2166 enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2169 enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2173 val = enetsw_readb(priv, ENETSW_IMPOV_REG);
2175 enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
2178 val = enetsw_readb(priv, ENETSW_SWMODE_REG);
2180 enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
2183 enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
2184 enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
2187 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
2188 ENETDMA_BUFALLOC_REG(priv->rx_chan));
2197 enet_dmas_writel(priv, priv->rx_desc_dma,
2198 ENETDMAS_RSTART_REG, priv->rx_chan);
2199 enet_dmas_writel(priv, priv->tx_desc_dma,
2200 ENETDMAS_RSTART_REG, priv->tx_chan);
2203 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
2204 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
2205 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
2206 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
2207 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
2208 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
2211 enet_dmac_writel(priv, priv->dma_maxburst,
2212 ENETDMAC_MAXBURST, priv->rx_chan);
2213 enet_dmac_writel(priv, priv->dma_maxburst,
2214 ENETDMAC_MAXBURST, priv->tx_chan);
2217 val = priv->rx_ring_size / 3;
2218 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
2219 val = (priv->rx_ring_size * 2) / 3;
2220 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
2226 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2227 enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2228 ENETDMAC_CHANCFG, priv->rx_chan);
2231 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2232 ENETDMAC_IR, priv->rx_chan);
2233 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2234 ENETDMAC_IR, priv->tx_chan);
2237 napi_enable(&priv->napi);
2239 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2240 ENETDMAC_IRMASK, priv->rx_chan);
2241 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2242 ENETDMAC_IRMASK, priv->tx_chan);
2248 for (i = 0; i < priv->num_ports; i++) {
2251 port = &priv->used_ports[i];
2280 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2281 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2285 timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
2286 mod_timer(&priv->swphy_poll, jiffies);
2290 for (i = 0; i < priv->rx_ring_size; i++) {
2293 if (!priv->rx_skb[i])
2296 desc = &priv->rx_desc_cpu[i];
2297 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2299 kfree_skb(priv->rx_skb[i]);
2301 kfree(priv->rx_skb);
2304 kfree(priv->tx_skb);
2307 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2308 priv->tx_desc_cpu, priv->tx_desc_dma);
2311 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2312 priv->rx_desc_cpu, priv->rx_desc_dma);
2315 if (priv->irq_tx != -1)
2316 free_irq(priv->irq_tx, dev);
2319 free_irq(priv->irq_rx, dev);
2328 struct bcm_enet_priv *priv;
2332 priv = netdev_priv(dev);
2333 kdev = &priv->pdev->dev;
2335 del_timer_sync(&priv->swphy_poll);
2337 napi_disable(&priv->napi);
2338 del_timer_sync(&priv->rx_timeout);
2341 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2342 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2345 bcm_enet_disable_dma(priv, priv->tx_chan);
2346 bcm_enet_disable_dma(priv, priv->rx_chan);
2352 for (i = 0; i < priv->rx_ring_size; i++) {
2355 if (!priv->rx_skb[i])
2358 desc = &priv->rx_desc_cpu[i];
2359 dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
2361 kfree_skb(priv->rx_skb[i]);
2365 kfree(priv->rx_skb);
2366 kfree(priv->tx_skb);
2367 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2368 priv->rx_desc_cpu, priv->rx_desc_dma);
2369 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2370 priv->tx_desc_cpu, priv->tx_desc_dma);
2371 if (priv->irq_tx != -1)
2372 free_irq(priv->irq_tx, dev);
2373 free_irq(priv->irq_rx, dev);
2383 static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
2387 for (i = 0; i < priv->num_ports; ++i) {
2388 if (!priv->used_ports[i].used)
2390 if (priv->used_ports[i].phy_id == phy_id)
2405 struct bcm_enet_priv *priv;
2407 priv = netdev_priv(dev);
2408 return bcmenet_sw_mdio_read(priv,
2409 bcm_enetsw_phy_is_external(priv, phy_id),
2420 struct bcm_enet_priv *priv;
2422 priv = netdev_priv(dev);
2423 bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
2535 struct bcm_enet_priv *priv;
2538 priv = netdev_priv(netdev);
2552 lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
2553 p = (char *)priv + s->stat_offset;
2556 hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
2572 p = (char *)priv + s->stat_offset;
2582 struct bcm_enet_priv *priv;
2584 priv = netdev_priv(dev);
2591 ering->rx_pending = priv->rx_ring_size;
2592 ering->tx_pending = priv->tx_ring_size;
2598 struct bcm_enet_priv *priv;
2601 priv = netdev_priv(dev);
2609 priv->rx_ring_size = ering->rx_pending;
2610 priv->tx_ring_size = ering->tx_pending;
2634 struct bcm_enet_priv *priv;
2650 dev = alloc_etherdev(sizeof(*priv));
2653 priv = netdev_priv(dev);
2656 priv->enet_is_sw = true;
2657 priv->irq_rx = irq_rx;
2658 priv->irq_tx = irq_tx;
2659 priv->rx_ring_size = BCMENET_DEF_RX_DESC;
2660 priv->tx_ring_size = BCMENET_DEF_TX_DESC;
2661 priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
2666 memcpy(priv->used_ports, pd->used_ports,
2668 priv->num_ports = pd->num_ports;
2669 priv->dma_has_sram = pd->dma_has_sram;
2670 priv->dma_chan_en_mask = pd->dma_chan_en_mask;
2671 priv->dma_chan_int_mask = pd->dma_chan_int_mask;
2672 priv->dma_chan_width = pd->dma_chan_width;
2679 priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
2680 if (IS_ERR(priv->base)) {
2681 ret = PTR_ERR(priv->base);
2685 priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
2686 if (IS_ERR(priv->mac_clk)) {
2687 ret = PTR_ERR(priv->mac_clk);
2690 ret = clk_prepare_enable(priv->mac_clk);
2694 priv->rx_chan = 0;
2695 priv->tx_chan = 1;
2696 spin_lock_init(&priv->rx_lock);
2699 timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
2703 netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
2707 spin_lock_init(&priv->enetsw_mdio_lock);
2715 priv->pdev = pdev;
2716 priv->net_dev = dev;
2721 clk_disable_unprepare(priv->mac_clk);
2731 struct bcm_enet_priv *priv;
2736 priv = netdev_priv(dev);
2739 clk_disable_unprepare(priv->mac_clk);