Lines matching references to priv (BCM63xx ENET/ENETSW Ethernet driver). The leading number on each entry is the line number in the driver source.
36 static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
38 return bcm_readl(priv->base + off);
41 static inline void enet_writel(struct bcm_enet_priv *priv,
44 bcm_writel(val, priv->base + off);
50 static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
52 return bcm_readl(priv->base + off);
55 static inline void enetsw_writel(struct bcm_enet_priv *priv,
58 bcm_writel(val, priv->base + off);
61 static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
63 return bcm_readw(priv->base + off);
66 static inline void enetsw_writew(struct bcm_enet_priv *priv,
69 bcm_writew(val, priv->base + off);
72 static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
74 return bcm_readb(priv->base + off);
77 static inline void enetsw_writeb(struct bcm_enet_priv *priv,
80 bcm_writeb(val, priv->base + off);
85 static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
90 static inline void enet_dma_writel(struct bcm_enet_priv *priv,
96 static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
99 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
102 static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
106 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
109 static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
111 return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
114 static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
117 bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
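The accessor helpers above are thin static inline wrappers: plain MAC and switch registers are read and written at priv->base + off, while the per-channel DMA helpers (enet_dmac_*, enet_dmas_*) add chan * priv->dma_chan_width on top of the register offset to reach a given channel's copy of the shared DMA block. A minimal user-space sketch of that addressing follows; every sketch_* name and the 0x10 stride are made up for illustration, and a plain array stands in for the ioremap'd register window.

#include <stdint.h>
#include <stdio.h>

/* Simplified model (not the driver's code) of the register addressing used
 * by enet_readl()/enet_writel() and the per-channel DMA helpers above. */
struct sketch_priv {
        volatile uint32_t *base;        /* stands in for the ioremap'd window */
        uint32_t dma_chan_width;        /* per-channel register stride */
};

static inline uint32_t sketch_readl(struct sketch_priv *p, uint32_t off)
{
        return p->base[off / 4];        /* driver: bcm_readl(priv->base + off) */
}

static inline void sketch_writel(struct sketch_priv *p, uint32_t val, uint32_t off)
{
        p->base[off / 4] = val;         /* driver: bcm_writel(val, priv->base + off) */
}

static inline uint32_t sketch_dmac_off(struct sketch_priv *p, uint32_t off, int chan)
{
        return off + (uint32_t)chan * p->dma_chan_width;
}

int main(void)
{
        static uint32_t regs[64];
        struct sketch_priv p = { .base = regs, .dma_chan_width = 0x10 };

        sketch_writel(&p, 0xabcd, sketch_dmac_off(&p, 0x4, 1));
        printf("chan 1, reg 0x4 -> offset 0x%x, value 0x%x\n",
               (unsigned)sketch_dmac_off(&p, 0x4, 1),
               (unsigned)sketch_readl(&p, sketch_dmac_off(&p, 0x4, 1)));
        return 0;
}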
124 static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
129 enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
131 enet_writel(priv, data, ENET_MIIDATA_REG);
137 if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
148 static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
158 if (do_mdio_op(priv, tmp))
161 val = enet_readl(priv, ENET_MIIDATA_REG);
169 static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
180 (void)do_mdio_op(priv, tmp);
190 return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
199 return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
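do_mdio_op() above follows the usual kick-then-poll MII shape: acknowledge any stale completion by writing ENET_IR_MII to ENET_IR_REG, write the command word to ENET_MIIDATA_REG, then poll ENET_IR_REG until the ENET_IR_MII bit comes back; the read path then fetches the result from ENET_MIIDATA_REG. The sketch below shows only that pattern: the registers are simulated so it runs anywhere, and the MDIO_DONE_BIT value and poll limit are hypothetical, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Generic "kick the hardware, then poll a done bit" pattern, as used by
 * do_mdio_op(). Registers are simulated; names and values are made up. */
#define MDIO_DONE_BIT   0x1u

static uint32_t sim_ir_reg;             /* models ENET_IR_REG */
static uint32_t sim_miidata_reg;        /* models ENET_MIIDATA_REG */

static void sim_start_transaction(uint32_t cmd)
{
        sim_miidata_reg = cmd;
        sim_ir_reg |= MDIO_DONE_BIT;    /* pretend the PHY answers instantly */
}

static int sketch_do_mdio_op(uint32_t cmd, unsigned int max_polls)
{
        sim_ir_reg &= ~MDIO_DONE_BIT;   /* ack a stale completion, as the driver does first */
        sim_start_transaction(cmd);     /* models the write to ENET_MIIDATA_REG */

        while (max_polls--) {
                if (sim_ir_reg & MDIO_DONE_BIT) /* models enet_readl(ENET_IR_REG) & ENET_IR_MII */
                        return 0;
                /* the driver bounds this loop and waits between polls; elided here */
        }
        return -1;                      /* timed out */
}

int main(void)
{
        printf("mdio op: %s\n", sketch_do_mdio_op(0x1234, 1000) == 0 ? "done" : "timeout");
        return 0;
}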
225 struct bcm_enet_priv *priv;
227 priv = netdev_priv(dev);
229 while (priv->rx_desc_count < priv->rx_ring_size) {
234 desc_idx = priv->rx_dirty_desc;
235 desc = &priv->rx_desc_cpu[desc_idx];
237 if (!priv->rx_buf[desc_idx]) {
241 buf = napi_alloc_frag(priv->rx_frag_size);
243 buf = netdev_alloc_frag(priv->rx_frag_size);
246 priv->rx_buf[desc_idx] = buf;
247 desc->address = dma_map_single(&priv->pdev->dev,
248 buf + priv->rx_buf_offset,
249 priv->rx_buf_size,
253 len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
255 if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
256 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
257 priv->rx_dirty_desc = 0;
259 priv->rx_dirty_desc++;
264 priv->rx_desc_count++;
267 if (priv->dma_has_sram)
268 enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
270 enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
275 if (priv->rx_desc_count == 0 && netif_running(dev)) {
276 dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
277 priv->rx_timeout.expires = jiffies + HZ;
278 add_timer(&priv->rx_timeout);
289 struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
290 struct net_device *dev = priv->net_dev;
292 spin_lock(&priv->rx_lock);
294 spin_unlock(&priv->rx_lock);
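The refill path above walks the ring from rx_dirty_desc until rx_desc_count reaches rx_ring_size, allocating a fragment for each empty slot, mapping it, programming the descriptor (with the WRAP flag on the last slot) and reporting each new buffer through the BUFALLOC register; if nothing could be refilled while the interface is running, it arms rx_timeout to retry a second later, and the timer handler retries under rx_lock. The sketch below keeps only the index/count bookkeeping, with allocation stubbed; all sketch_* names and the ring size are illustrative.

#include <stddef.h>
#include <stdio.h>

/* Index/count bookkeeping of the RX refill loop; buffer allocation and
 * descriptor programming are reduced to stubs. */
#define RING_SIZE       8

struct sketch_ring {
        void *buf[RING_SIZE];   /* models priv->rx_buf[] */
        int dirty;              /* models priv->rx_dirty_desc */
        int count;              /* models priv->rx_desc_count */
};

static void *sketch_alloc_buf(void)
{
        static char pool[RING_SIZE][64];
        static int n;
        return n < RING_SIZE ? pool[n++] : NULL;        /* NULL models napi_alloc_frag() failing */
}

static int sketch_refill(struct sketch_ring *r)
{
        int refilled = 0;

        while (r->count < RING_SIZE) {
                int idx = r->dirty;

                if (!r->buf[idx]) {
                        void *buf = sketch_alloc_buf();
                        if (!buf)
                                break;          /* the driver bails out and may arm rx_timeout */
                        r->buf[idx] = buf;      /* the driver also dma_map_single()s it */
                }

                /* the driver fills len_stat here, OR-ing in the WRAP flag on the last slot */
                if (r->dirty == RING_SIZE - 1)
                        r->dirty = 0;
                else
                        r->dirty++;

                r->count++;
                refilled++;
        }
        return refilled;
}

int main(void)
{
        struct sketch_ring r = { .dirty = 0, .count = 0 };
        printf("refilled %d descriptors\n", sketch_refill(&r));
        return 0;
}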
302 struct bcm_enet_priv *priv;
307 priv = netdev_priv(dev);
309 kdev = &priv->pdev->dev;
314 if (budget > priv->rx_desc_count)
315 budget = priv->rx_desc_count;
325 desc_idx = priv->rx_curr_desc;
326 desc = &priv->rx_desc_cpu[desc_idx];
339 priv->rx_curr_desc++;
340 if (priv->rx_curr_desc == priv->rx_ring_size)
341 priv->rx_curr_desc = 0;
345 if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
346 (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
352 if (!priv->enet_is_sw &&
368 buf = priv->rx_buf[desc_idx];
374 skb = napi_alloc_skb(&priv->napi, len);
383 memcpy(skb->data, buf + priv->rx_buf_offset, len);
388 priv->rx_buf_size, DMA_FROM_DEVICE);
389 priv->rx_buf[desc_idx] = NULL;
391 skb = napi_build_skb(buf, priv->rx_frag_size);
397 skb_reserve(skb, priv->rx_buf_offset);
409 priv->rx_desc_count -= processed;
411 if (processed || !priv->rx_desc_count) {
415 enet_dmac_writel(priv, priv->dma_chan_en_mask,
416 ENETDMAC_CHANCFG, priv->rx_chan);
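The receive path above first clamps the NAPI budget to rx_desc_count, then walks rx_curr_desc with wrap-around and, per frame, either copies a small frame into a fresh napi_alloc_skb() (the buffer stays in the ring) or unmaps the buffer, clears rx_buf[idx] and wraps it with napi_build_skb(); afterwards it subtracts the processed count and re-kicks the RX channel. The copy threshold itself is not among the matched lines, so the value below is only a placeholder; everything named sketch_* is illustrative.

#include <stdio.h>

/* Control-flow skeleton of the RX poll loop: clamp the budget to the number
 * of filled descriptors, walk rx_curr_desc with wrap-around, choose
 * copy-vs-hand-off per frame. */
#define RING_SIZE               8
#define SKETCH_COPYBREAK        128     /* hypothetical; not the driver's constant */

struct sketch_rx {
        int curr;       /* models priv->rx_curr_desc */
        int count;      /* models priv->rx_desc_count */
};

static int sketch_rx_poll(struct sketch_rx *rx, const unsigned int *frame_len, int budget)
{
        int processed = 0, copied = 0, handed_off = 0;

        if (budget > rx->count)
                budget = rx->count;     /* never poll more than the DMA has filled */

        while (processed < budget) {
                unsigned int len = frame_len[rx->curr];

                if (++rx->curr == RING_SIZE)    /* the driver wraps rx_curr_desc the same way */
                        rx->curr = 0;

                if (len < SKETCH_COPYBREAK)
                        copied++;       /* copy path: napi_alloc_skb() + memcpy, ring buffer reused */
                else
                        handed_off++;   /* hand-off path: unmap, napi_build_skb() around the buffer */

                processed++;
        }

        rx->count -= processed;
        printf("copied=%d handed_off=%d\n", copied, handed_off);
        return processed;       /* the driver then updates rx_desc_count and re-kicks the RX channel */
}

int main(void)
{
        unsigned int lens[RING_SIZE] = { 64, 1500, 60, 900 };
        struct sketch_rx rx = { .curr = 0, .count = 4 };

        printf("processed %d frames\n", sketch_rx_poll(&rx, lens, 16));
        return 0;
}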
428 struct bcm_enet_priv *priv;
432 priv = netdev_priv(dev);
436 while (priv->tx_desc_count < priv->tx_ring_size) {
442 spin_lock(&priv->tx_lock);
444 desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
447 spin_unlock(&priv->tx_lock);
455 skb = priv->tx_skb[priv->tx_dirty_desc];
456 priv->tx_skb[priv->tx_dirty_desc] = NULL;
457 dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
460 priv->tx_dirty_desc++;
461 if (priv->tx_dirty_desc == priv->tx_ring_size)
462 priv->tx_dirty_desc = 0;
463 priv->tx_desc_count++;
465 spin_unlock(&priv->tx_lock);
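The reclaim loop above runs while tx_desc_count is below tx_ring_size, checks the descriptor at tx_dirty_desc under tx_lock, and for each completed slot clears tx_skb[], unmaps and frees the skb, advances the dirty index with wrap-around and bumps the free-descriptor count; the driver can then wake a stopped transmit queue. A bookkeeping-only sketch, with completion modelled by a flag and all names illustrative:

#include <stddef.h>
#include <stdio.h>

/* Bookkeeping of the TX reclaim loop; the real code checks the descriptor's
 * status word under tx_lock and dma_unmap_single()s the skb. */
#define RING_SIZE       8

struct sketch_tx {
        int completed[RING_SIZE];       /* 1 if hardware finished this slot */
        void *skb[RING_SIZE];           /* models priv->tx_skb[] */
        int dirty;                      /* models priv->tx_dirty_desc */
        int count;                      /* models priv->tx_desc_count (free slots) */
};

static int sketch_tx_reclaim(struct sketch_tx *tx)
{
        int released = 0;

        while (tx->count < RING_SIZE) {
                int idx = tx->dirty;

                if (!tx->completed[idx])
                        break;                  /* still owned by the DMA engine */

                tx->skb[idx] = NULL;            /* the driver unmaps and frees the skb here */

                if (++tx->dirty == RING_SIZE)
                        tx->dirty = 0;
                tx->count++;
                released++;
        }
        return released;                        /* used to decide whether to wake the queue */
}

int main(void)
{
        struct sketch_tx tx = { .completed = { 1, 1, 0 }, .dirty = 0, .count = 5 };
        printf("released %d tx descriptors\n", sketch_tx_reclaim(&tx));
        return 0;
}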
488 struct bcm_enet_priv *priv;
492 priv = container_of(napi, struct bcm_enet_priv, napi);
493 dev = priv->net_dev;
496 enet_dmac_writel(priv, priv->dma_chan_int_mask,
497 ENETDMAC_IR, priv->rx_chan);
498 enet_dmac_writel(priv, priv->dma_chan_int_mask,
499 ENETDMAC_IR, priv->tx_chan);
504 spin_lock(&priv->rx_lock);
506 spin_unlock(&priv->rx_lock);
518 enet_dmac_writel(priv, priv->dma_chan_int_mask,
519 ENETDMAC_IRMASK, priv->rx_chan);
520 enet_dmac_writel(priv, priv->dma_chan_int_mask,
521 ENETDMAC_IRMASK, priv->tx_chan);
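bcm_enet_poll() above shows the standard NAPI ordering: acknowledge the RX and TX events in ENETDMAC_IR, reclaim completed transmit descriptors (the reclaim call itself does not reference priv, so it is not among these matches), receive up to the budget while holding rx_lock, and only when less than the budget was consumed complete NAPI and re-open ENETDMAC_IRMASK for both channels. A schematic of that ordering with every hardware touch stubbed out; names are illustrative.

#include <stdio.h>

/* Schematic of the NAPI poll ordering; register writes and the real
 * receive/reclaim work are stubbed. */
static void ack_dma_irqs(void) { }              /* models the two ENETDMAC_IR writes */
static void unmask_dma_irqs(void) { }           /* models the two ENETDMAC_IRMASK writes */
static void reclaim_tx(void) { }                /* models bcm_enet_tx_reclaim() */
static int receive_rx(int budget) { return budget / 2; } /* models bcm_enet_receive_queue() */

static int sketch_poll(int budget)
{
        int done;

        ack_dma_irqs();                 /* clear the events that scheduled this poll */
        reclaim_tx();                   /* free completed TX descriptors */
        done = receive_rx(budget);      /* the driver holds rx_lock around this */

        if (done < budget) {
                /* no more work: leave polling mode and let interrupts fire again;
                 * the driver calls napi_complete_done() before unmasking */
                unmask_dma_irqs();
        }
        return done;
}

int main(void)
{
        printf("poll consumed %d of 16\n", sketch_poll(16));
        return 0;
}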
532 struct bcm_enet_priv *priv;
536 priv = netdev_priv(dev);
538 stat = enet_readl(priv, ENET_IR_REG);
543 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
544 enet_writel(priv, 0, ENET_IRMASK_REG);
547 schedule_work(&priv->mib_update_task);
558 struct bcm_enet_priv *priv;
561 priv = netdev_priv(dev);
564 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
565 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
567 napi_schedule(&priv->napi);
578 struct bcm_enet_priv *priv;
583 priv = netdev_priv(dev);
586 spin_lock(&priv->tx_lock);
590 if (unlikely(!priv->tx_desc_count)) {
592 dev_err(&priv->pdev->dev, "xmit called with no tx desc "
599 if (priv->enet_is_sw && skb->len < 64) {
618 desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
619 priv->tx_skb[priv->tx_curr_desc] = skb;
622 desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
626 len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
630 priv->tx_curr_desc++;
631 if (priv->tx_curr_desc == priv->tx_ring_size) {
632 priv->tx_curr_desc = 0;
633 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
635 priv->tx_desc_count--;
646 if (!netdev_xmit_more() || !priv->tx_desc_count)
647 enet_dmac_writel(priv, priv->dma_chan_en_mask,
648 ENETDMAC_CHANCFG, priv->tx_chan);
651 if (!priv->tx_desc_count)
659 spin_unlock(&priv->tx_lock);
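The transmit path above errors out if no descriptor is free, otherwise claims the slot at tx_curr_desc under tx_lock: it stores the skb in tx_skb[], maps it with dma_map_single(), builds len_stat with the start/end-of-packet bits (plus the WRAP bit when the index rolls over), decrements tx_desc_count, and kicks ENETDMAC_CHANCFG unless netdev_xmit_more() promises another frame; with no free descriptors left the queue is stopped. A sketch of only the index and queue-state handling, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* Descriptor/queue bookkeeping of the transmit path; mapping, len_stat
 * flags and locking are reduced to comments. */
#define RING_SIZE       8

struct sketch_txq {
        int curr;       /* models priv->tx_curr_desc */
        int free;       /* models priv->tx_desc_count */
        bool stopped;   /* models the netif queue state */
};

static void kick_dma(void)
{
        /* models enet_dmac_writel(priv, ..., ENETDMAC_CHANCFG, priv->tx_chan) */
}

static int sketch_xmit(struct sketch_txq *q, bool more_coming)
{
        if (!q->free)
                return -1;      /* the driver logs "xmit called with no tx desc" here */

        /* claim the slot: store the skb, dma_map_single(), fill len_stat (SOP|EOP) */
        if (++q->curr == RING_SIZE)
                q->curr = 0;    /* the driver also ORs the WRAP bit into len_stat */
        q->free--;

        if (!more_coming || !q->free)
                kick_dma();     /* doorbell, skipped while more frames are queued */

        if (!q->free)
                q->stopped = true;      /* ring exhausted: the driver stops the queue */

        return 0;
}

int main(void)
{
        struct sketch_txq q = { .curr = 0, .free = 2, .stopped = false };

        sketch_xmit(&q, true);
        sketch_xmit(&q, true);
        printf("free=%d stopped=%d\n", q.free, (int)q.stopped);
        return 0;
}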
668 struct bcm_enet_priv *priv;
672 priv = netdev_priv(dev);
678 enet_writel(priv, val, ENET_PML_REG(0));
682 enet_writel(priv, val, ENET_PMH_REG(0));
692 struct bcm_enet_priv *priv;
697 priv = netdev_priv(dev);
699 val = enet_readl(priv, ENET_RXCFG_REG);
716 enet_writel(priv, val, ENET_RXCFG_REG);
731 enet_writel(priv, tmp, ENET_PML_REG(i + 1));
735 enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
739 enet_writel(priv, 0, ENET_PML_REG(i + 1));
740 enet_writel(priv, 0, ENET_PMH_REG(i + 1));
743 enet_writel(priv, val, ENET_RXCFG_REG);
749 static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
753 val = enet_readl(priv, ENET_TXCTL_REG);
758 enet_writel(priv, val, ENET_TXCTL_REG);
764 static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
769 val = enet_readl(priv, ENET_RXCFG_REG);
774 enet_writel(priv, val, ENET_RXCFG_REG);
776 if (!priv->dma_has_sram)
780 val = enet_dma_readl(priv, ENETDMA_CFG_REG);
782 val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
784 val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
785 enet_dma_writel(priv, val, ENETDMA_CFG_REG);
793 struct bcm_enet_priv *priv;
797 priv = netdev_priv(dev);
801 if (priv->old_link != phydev->link) {
803 priv->old_link = phydev->link;
807 if (phydev->link && phydev->duplex != priv->old_duplex) {
808 bcm_enet_set_duplex(priv,
811 priv->old_duplex = phydev->duplex;
816 if (phydev->link && phydev->pause != priv->old_pause) {
823 } else if (!priv->pause_auto) {
825 rx_pause_en = priv->pause_rx;
826 tx_pause_en = priv->pause_tx;
832 bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
834 priv->old_pause = phydev->pause;
854 struct bcm_enet_priv *priv;
856 priv = netdev_priv(dev);
857 bcm_enet_set_duplex(priv, priv->force_duplex_full);
858 bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
863 priv->force_speed_100 ? 100 : 10,
864 priv->force_duplex_full ? "full" : "half",
865 priv->pause_rx ? "rx" : "off",
866 priv->pause_tx ? "tx" : "off");
869 static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
873 for (i = 0; i < priv->rx_ring_size; i++) {
876 if (!priv->rx_buf[i])
879 desc = &priv->rx_desc_cpu[i];
880 dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
882 skb_free_frag(priv->rx_buf[i]);
884 kfree(priv->rx_buf);
892 struct bcm_enet_priv *priv;
902 priv = netdev_priv(dev);
903 kdev = &priv->pdev->dev;
905 if (priv->has_phy) {
908 priv->mii_bus->id, priv->phy_id);
921 phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
922 priv->pause_auto);
926 priv->old_link = 0;
927 priv->old_duplex = -1;
928 priv->old_pause = -1;
934 enet_writel(priv, 0, ENET_IRMASK_REG);
935 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
936 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
942 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
947 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
954 enet_writel(priv, 0, ENET_PML_REG(i));
955 enet_writel(priv, 0, ENET_PMH_REG(i));
963 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
964 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
970 priv->rx_desc_alloc_size = size;
971 priv->rx_desc_cpu = p;
974 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
975 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
981 priv->tx_desc_alloc_size = size;
982 priv->tx_desc_cpu = p;
984 priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
986 if (!priv->tx_skb) {
991 priv->tx_desc_count = priv->tx_ring_size;
992 priv->tx_dirty_desc = 0;
993 priv->tx_curr_desc = 0;
994 spin_lock_init(&priv->tx_lock);
997 priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
999 if (!priv->rx_buf) {
1004 priv->rx_desc_count = 0;
1005 priv->rx_dirty_desc = 0;
1006 priv->rx_curr_desc = 0;
1009 if (priv->dma_has_sram)
1010 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
1011 ENETDMA_BUFALLOC_REG(priv->rx_chan));
1013 enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
1014 ENETDMAC_BUFALLOC, priv->rx_chan);
1023 if (priv->dma_has_sram) {
1024 enet_dmas_writel(priv, priv->rx_desc_dma,
1025 ENETDMAS_RSTART_REG, priv->rx_chan);
1026 enet_dmas_writel(priv, priv->tx_desc_dma,
1027 ENETDMAS_RSTART_REG, priv->tx_chan);
1029 enet_dmac_writel(priv, priv->rx_desc_dma,
1030 ENETDMAC_RSTART, priv->rx_chan);
1031 enet_dmac_writel(priv, priv->tx_desc_dma,
1032 ENETDMAC_RSTART, priv->tx_chan);
1036 if (priv->dma_has_sram) {
1037 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
1038 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
1039 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
1040 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
1041 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
1042 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
1044 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
1045 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
1049 enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
1050 enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
1053 enet_dmac_writel(priv, priv->dma_maxburst,
1054 ENETDMAC_MAXBURST, priv->rx_chan);
1055 enet_dmac_writel(priv, priv->dma_maxburst,
1056 ENETDMAC_MAXBURST, priv->tx_chan);
1059 enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
1062 if (priv->dma_has_sram) {
1063 val = priv->rx_ring_size / 3;
1064 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
1065 val = (priv->rx_ring_size * 2) / 3;
1066 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
1068 enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
1069 enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
1070 enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
1076 val = enet_readl(priv, ENET_CTL_REG);
1078 enet_writel(priv, val, ENET_CTL_REG);
1079 if (priv->dma_has_sram)
1080 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1081 enet_dmac_writel(priv, priv->dma_chan_en_mask,
1082 ENETDMAC_CHANCFG, priv->rx_chan);
1085 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
1086 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1089 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1090 ENETDMAC_IR, priv->rx_chan);
1091 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1092 ENETDMAC_IR, priv->tx_chan);
1095 napi_enable(&priv->napi);
1097 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1098 ENETDMAC_IRMASK, priv->rx_chan);
1099 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1100 ENETDMAC_IRMASK, priv->tx_chan);
1111 bcm_enet_free_rx_buf_ring(kdev, priv);
1114 kfree(priv->tx_skb);
1117 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1118 priv->tx_desc_cpu, priv->tx_desc_dma);
1121 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1122 priv->rx_desc_cpu, priv->rx_desc_dma);
1125 free_irq(priv->irq_tx, dev);
1128 free_irq(priv->irq_rx, dev);
1143 static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
1148 val = enet_readl(priv, ENET_CTL_REG);
1150 enet_writel(priv, val, ENET_CTL_REG);
1156 val = enet_readl(priv, ENET_CTL_REG);
1166 static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1170 enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);
1176 val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
1188 struct bcm_enet_priv *priv;
1191 priv = netdev_priv(dev);
1192 kdev = &priv->pdev->dev;
1195 napi_disable(&priv->napi);
1196 if (priv->has_phy)
1198 del_timer_sync(&priv->rx_timeout);
1201 enet_writel(priv, 0, ENET_IRMASK_REG);
1202 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
1203 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
1206 cancel_work_sync(&priv->mib_update_task);
1209 bcm_enet_disable_dma(priv, priv->tx_chan);
1210 bcm_enet_disable_dma(priv, priv->rx_chan);
1211 bcm_enet_disable_mac(priv);
1217 bcm_enet_free_rx_buf_ring(kdev, priv);
1220 kfree(priv->tx_skb);
1221 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
1222 priv->rx_desc_cpu, priv->rx_desc_dma);
1223 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
1224 priv->tx_desc_cpu, priv->tx_desc_dma);
1225 free_irq(priv->irq_tx, dev);
1226 free_irq(priv->irq_rx, dev);
1230 if (priv->has_phy)
1355 static void update_mib_counters(struct bcm_enet_priv *priv)
1368 val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
1369 p = (char *)priv + s->stat_offset;
1380 (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
1385 struct bcm_enet_priv *priv;
1387 priv = container_of(t, struct bcm_enet_priv, mib_update_task);
1388 mutex_lock(&priv->mib_update_lock);
1389 update_mib_counters(priv);
1390 mutex_unlock(&priv->mib_update_lock);
1393 if (netif_running(priv->net_dev))
1394 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1401 struct bcm_enet_priv *priv;
1404 priv = netdev_priv(netdev);
1406 mutex_lock(&priv->mib_update_lock);
1407 update_mib_counters(priv);
1417 p = (char *)priv;
1422 mutex_unlock(&priv->mib_update_lock);
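update_mib_counters() and the ethtool statistics path above share one table-driven idiom: each statistics entry carries a hardware MIB register index and a byte offset into struct bcm_enet_priv, so the software accumulator is found with (char *)priv + s->stat_offset. A self-contained sketch of that offsetof-based table follows; the struct layout, counter types and register values are invented for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Table-driven statistics in the style used above: each entry names a
 * hardware counter (faked here) and the offset of its software accumulator
 * inside the private struct. All names are illustrative. */
struct sketch_counters {
        uint64_t rx_packets;
        uint64_t tx_packets;
};

struct sketch_stat {
        const char *name;
        int mib_reg;            /* which hardware MIB register to read */
        size_t stat_offset;     /* where to accumulate inside struct sketch_counters */
};

static const struct sketch_stat sketch_stats[] = {
        { "rx_packets", 0, offsetof(struct sketch_counters, rx_packets) },
        { "tx_packets", 1, offsetof(struct sketch_counters, tx_packets) },
};

static uint32_t sketch_read_mib(int reg)
{
        return reg == 0 ? 10 : 3;       /* stands in for enet_readl(priv, ENET_MIB_REG(reg)) */
}

static void sketch_update_mib(struct sketch_counters *c)
{
        for (size_t i = 0; i < sizeof(sketch_stats) / sizeof(sketch_stats[0]); i++) {
                const struct sketch_stat *s = &sketch_stats[i];
                uint64_t *p = (uint64_t *)((char *)c + s->stat_offset);

                *p += sketch_read_mib(s->mib_reg);      /* same trick as "p = (char *)priv + s->stat_offset" */
        }
}

int main(void)
{
        struct sketch_counters c = { 0 };

        sketch_update_mib(&c);
        sketch_update_mib(&c);
        printf("rx=%llu tx=%llu\n",
               (unsigned long long)c.rx_packets,
               (unsigned long long)c.tx_packets);
        return 0;
}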
1427 struct bcm_enet_priv *priv;
1429 priv = netdev_priv(dev);
1430 if (priv->has_phy)
1439 struct bcm_enet_priv *priv;
1442 priv = netdev_priv(dev);
1444 if (priv->has_phy) {
1453 cmd->base.speed = (priv->force_speed_100) ?
1455 cmd->base.duplex = (priv->force_duplex_full) ?
1474 struct bcm_enet_priv *priv;
1476 priv = netdev_priv(dev);
1477 if (priv->has_phy) {
1489 priv->force_speed_100 =
1491 priv->force_duplex_full =
1506 struct bcm_enet_priv *priv;
1508 priv = netdev_priv(dev);
1513 ering->rx_pending = priv->rx_ring_size;
1514 ering->tx_pending = priv->tx_ring_size;
1522 struct bcm_enet_priv *priv;
1525 priv = netdev_priv(dev);
1533 priv->rx_ring_size = ering->rx_pending;
1534 priv->tx_ring_size = ering->tx_pending;
1551 struct bcm_enet_priv *priv;
1553 priv = netdev_priv(dev);
1554 ecmd->autoneg = priv->pause_auto;
1555 ecmd->rx_pause = priv->pause_rx;
1556 ecmd->tx_pause = priv->pause_tx;
1562 struct bcm_enet_priv *priv;
1564 priv = netdev_priv(dev);
1566 if (priv->has_phy) {
1579 priv->pause_auto = ecmd->autoneg;
1580 priv->pause_rx = ecmd->rx_pause;
1581 priv->pause_tx = ecmd->tx_pause;
1603 struct bcm_enet_priv *priv;
1605 priv = netdev_priv(dev);
1606 if (priv->has_phy) {
1628 struct bcm_enet_priv *priv = netdev_priv(dev);
1643 priv->hw_mtu = actual_mtu;
1649 priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
1650 priv->dma_maxburst * 4);
1652 priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
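The MTU path above derives two sizes: rx_buf_size rounds the frame length plus FCS up to a multiple of four DMA bursts (ALIGN(actual_mtu + ETH_FCS_LEN, priv->dma_maxburst * 4)), and rx_frag_size adds the receive headroom and the skb_shared_info tail that napi_build_skb() needs, both rounded by SKB_DATA_ALIGN. The same arithmetic, worked through with example numbers; the burst size, headroom, cache-line rounding and shared-info size below are illustrative stand-ins, not the driver's platform values.

#include <stdio.h>

/* The buffer-size arithmetic from the MTU path, using the kernel's ALIGN()
 * style power-of-two rounding. All numeric inputs are examples. */
#define SKETCH_ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int actual_mtu = 1518; /* example on-wire length for a 1500-byte MTU */
        unsigned int fcs_len = 4;       /* ETH_FCS_LEN */
        unsigned int dma_maxburst = 8;  /* example; the driver takes it from platform data */
        unsigned int headroom = 64;     /* stands in for priv->rx_buf_offset */
        unsigned int shinfo = 320;      /* stands in for SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

        unsigned int rx_buf_size = SKETCH_ALIGN(actual_mtu + fcs_len, dma_maxburst * 4);
        unsigned int rx_frag_size = SKETCH_ALIGN(headroom + rx_buf_size, 64) + shinfo;

        printf("rx_buf_size=%u rx_frag_size=%u\n", rx_buf_size, rx_frag_size);
        return 0;
}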
1662 static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
1668 bcm_enet_disable_mac(priv);
1672 enet_writel(priv, val, ENET_CTL_REG);
1677 val = enet_readl(priv, ENET_CTL_REG);
1684 val = enet_readl(priv, ENET_CTL_REG);
1685 if (priv->use_external_mii)
1689 enet_writel(priv, val, ENET_CTL_REG);
1692 enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
1696 val = enet_readl(priv, ENET_MIBCTL_REG);
1698 enet_writel(priv, val, ENET_MIBCTL_REG);
1716 struct bcm_enet_priv *priv;
1732 dev = alloc_etherdev(sizeof(*priv));
1735 priv = netdev_priv(dev);
1737 priv->enet_is_sw = false;
1738 priv->dma_maxburst = BCMENET_DMA_MAXBURST;
1739 priv->rx_buf_offset = NET_SKB_PAD;
1745 priv->base = devm_platform_ioremap_resource(pdev, 0);
1746 if (IS_ERR(priv->base)) {
1747 ret = PTR_ERR(priv->base);
1751 dev->irq = priv->irq = irq;
1752 priv->irq_rx = irq_rx;
1753 priv->irq_tx = irq_tx;
1755 priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
1756 if (IS_ERR(priv->mac_clk)) {
1757 ret = PTR_ERR(priv->mac_clk);
1760 ret = clk_prepare_enable(priv->mac_clk);
1765 priv->rx_ring_size = BCMENET_DEF_RX_DESC;
1766 priv->tx_ring_size = BCMENET_DEF_TX_DESC;
1771 priv->has_phy = pd->has_phy;
1772 priv->phy_id = pd->phy_id;
1773 priv->has_phy_interrupt = pd->has_phy_interrupt;
1774 priv->phy_interrupt = pd->phy_interrupt;
1775 priv->use_external_mii = !pd->use_internal_phy;
1776 priv->pause_auto = pd->pause_auto;
1777 priv->pause_rx = pd->pause_rx;
1778 priv->pause_tx = pd->pause_tx;
1779 priv->force_duplex_full = pd->force_duplex_full;
1780 priv->force_speed_100 = pd->force_speed_100;
1781 priv->dma_chan_en_mask = pd->dma_chan_en_mask;
1782 priv->dma_chan_int_mask = pd->dma_chan_int_mask;
1783 priv->dma_chan_width = pd->dma_chan_width;
1784 priv->dma_has_sram = pd->dma_has_sram;
1785 priv->dma_desc_shift = pd->dma_desc_shift;
1786 priv->rx_chan = pd->rx_chan;
1787 priv->tx_chan = pd->tx_chan;
1790 if (priv->has_phy && !priv->use_external_mii) {
1792 priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
1793 if (IS_ERR(priv->phy_clk)) {
1794 ret = PTR_ERR(priv->phy_clk);
1795 priv->phy_clk = NULL;
1798 ret = clk_prepare_enable(priv->phy_clk);
1804 bcm_enet_hw_preinit(priv);
1807 if (priv->has_phy) {
1809 priv->mii_bus = mdiobus_alloc();
1810 if (!priv->mii_bus) {
1815 bus = priv->mii_bus;
1818 bus->priv = priv;
1826 bus->phy_mask = ~(1 << priv->phy_id);
1828 if (priv->has_phy_interrupt)
1829 bus->irq[priv->phy_id] = priv->phy_interrupt;
1847 spin_lock_init(&priv->rx_lock);
1850 timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
1853 mutex_init(&priv->mib_update_lock);
1854 INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
1858 enet_writel(priv, 0, ENET_MIB_REG(i));
1862 netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
1876 priv->pdev = pdev;
1877 priv->net_dev = dev;
1882 if (priv->mii_bus)
1883 mdiobus_unregister(priv->mii_bus);
1886 if (priv->mii_bus)
1887 mdiobus_free(priv->mii_bus);
1891 enet_writel(priv, 0, ENET_MIISC_REG);
1892 clk_disable_unprepare(priv->phy_clk);
1895 clk_disable_unprepare(priv->mac_clk);
1907 struct bcm_enet_priv *priv;
1912 priv = netdev_priv(dev);
1916 enet_writel(priv, 0, ENET_MIISC_REG);
1918 if (priv->has_phy) {
1919 mdiobus_unregister(priv->mii_bus);
1920 mdiobus_free(priv->mii_bus);
1931 clk_disable_unprepare(priv->phy_clk);
1932 clk_disable_unprepare(priv->mac_clk);
1949 static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
1955 spin_lock_bh(&priv->enetsw_mdio_lock);
1956 enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1965 enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1967 ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
1968 spin_unlock_bh(&priv->enetsw_mdio_lock);
1972 static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
1978 spin_lock_bh(&priv->enetsw_mdio_lock);
1979 enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
1990 enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
1992 spin_unlock_bh(&priv->enetsw_mdio_lock);
2005 struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
2008 for (i = 0; i < priv->num_ports; i++) {
2014 port = &priv->used_ports[i];
2023 val = bcmenet_sw_mdio_read(priv, external_phy,
2030 if (!(up ^ priv->sw_port_link[i]))
2033 priv->sw_port_link[i] = up;
2037 dev_info(&priv->pdev->dev, "link DOWN on %s\n",
2039 enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2041 enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2047 advertise = bcmenet_sw_mdio_read(priv, external_phy,
2050 lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
2063 advertise = bcmenet_sw_mdio_read(priv, external_phy,
2066 lpa = bcmenet_sw_mdio_read(priv, external_phy,
2076 dev_info(&priv->pdev->dev,
2090 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2091 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2094 priv->swphy_poll.expires = jiffies + HZ;
2095 add_timer(&priv->swphy_poll);
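The switch-PHY poll timer above rereads each used port's link state over MDIO once per second and acts only on transitions: the test !(up ^ priv->sw_port_link[i]) skips ports whose fresh link bit matches the cached one. On a transition it caches the new state and either forces the port down via the override register and disables RX/TX (link lost) or resolves speed and duplex from the ADVERTISE/LPA registers and programs the port override (link up), then rearms itself a second later. Only the edge detection is sketched below, with a simulated MDIO read and illustrative names.

#include <stdbool.h>
#include <stdio.h>

/* Edge detection used by the poll timer: compare the freshly read link bit
 * with the cached one and act only on transitions. MDIO is simulated. */
#define NUM_PORTS       4

static bool sketch_read_link(int port)
{
        return port == 2;       /* pretend only port 2 has link */
}

static void sketch_poll_once(bool cached_link[NUM_PORTS])
{
        for (int i = 0; i < NUM_PORTS; i++) {
                bool up = sketch_read_link(i);  /* models the status read via bcmenet_sw_mdio_read() */

                if (!(up ^ cached_link[i]))
                        continue;               /* no change since last poll: nothing to do */

                cached_link[i] = up;
                if (up)
                        printf("port %d: link UP, would resolve speed/duplex and program the override\n", i);
                else
                        printf("port %d: link DOWN, would force the port down and disable RX/TX\n", i);
        }
}

int main(void)
{
        bool cached[NUM_PORTS] = { false, false, false, false };

        sketch_poll_once(cached);       /* first pass: port 2 transitions to UP */
        sketch_poll_once(cached);       /* second pass: nothing changed, no output */
        return 0;
}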
2103 struct bcm_enet_priv *priv;
2110 priv = netdev_priv(dev);
2111 kdev = &priv->pdev->dev;
2114 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2115 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2117 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2122 if (priv->irq_tx != -1) {
2123 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
2130 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2131 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2138 priv->rx_desc_alloc_size = size;
2139 priv->rx_desc_cpu = p;
2142 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2143 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2150 priv->tx_desc_alloc_size = size;
2151 priv->tx_desc_cpu = p;
2153 priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
2155 if (!priv->tx_skb) {
2161 priv->tx_desc_count = priv->tx_ring_size;
2162 priv->tx_dirty_desc = 0;
2163 priv->tx_curr_desc = 0;
2164 spin_lock_init(&priv->tx_lock);
2167 priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
2169 if (!priv->rx_buf) {
2175 priv->rx_desc_count = 0;
2176 priv->rx_dirty_desc = 0;
2177 priv->rx_curr_desc = 0;
2180 for (i = 0; i < priv->num_ports; i++) {
2181 enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
2183 enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
2187 priv->sw_port_link[i] = 0;
2191 val = enetsw_readb(priv, ENETSW_GMCR_REG);
2193 enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2196 enetsw_writeb(priv, val, ENETSW_GMCR_REG);
2200 val = enetsw_readb(priv, ENETSW_IMPOV_REG);
2202 enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
2205 val = enetsw_readb(priv, ENETSW_SWMODE_REG);
2207 enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
2210 enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
2211 enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
2214 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
2215 ENETDMA_BUFALLOC_REG(priv->rx_chan));
2224 enet_dmas_writel(priv, priv->rx_desc_dma,
2225 ENETDMAS_RSTART_REG, priv->rx_chan);
2226 enet_dmas_writel(priv, priv->tx_desc_dma,
2227 ENETDMAS_RSTART_REG, priv->tx_chan);
2230 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
2231 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
2232 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
2233 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
2234 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
2235 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
2238 enet_dmac_writel(priv, priv->dma_maxburst,
2239 ENETDMAC_MAXBURST, priv->rx_chan);
2240 enet_dmac_writel(priv, priv->dma_maxburst,
2241 ENETDMAC_MAXBURST, priv->tx_chan);
2244 val = priv->rx_ring_size / 3;
2245 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
2246 val = (priv->rx_ring_size * 2) / 3;
2247 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
2253 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2254 enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2255 ENETDMAC_CHANCFG, priv->rx_chan);
2258 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2259 ENETDMAC_IR, priv->rx_chan);
2260 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2261 ENETDMAC_IR, priv->tx_chan);
2264 napi_enable(&priv->napi);
2266 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2267 ENETDMAC_IRMASK, priv->rx_chan);
2268 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2269 ENETDMAC_IRMASK, priv->tx_chan);
2275 for (i = 0; i < priv->num_ports; i++) {
2278 port = &priv->used_ports[i];
2307 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
2308 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
2312 timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
2313 mod_timer(&priv->swphy_poll, jiffies);
2317 bcm_enet_free_rx_buf_ring(kdev, priv);
2320 kfree(priv->tx_skb);
2323 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2324 priv->tx_desc_cpu, priv->tx_desc_dma);
2327 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2328 priv->rx_desc_cpu, priv->rx_desc_dma);
2331 if (priv->irq_tx != -1)
2332 free_irq(priv->irq_tx, dev);
2335 free_irq(priv->irq_rx, dev);
2344 struct bcm_enet_priv *priv;
2347 priv = netdev_priv(dev);
2348 kdev = &priv->pdev->dev;
2350 del_timer_sync(&priv->swphy_poll);
2352 napi_disable(&priv->napi);
2353 del_timer_sync(&priv->rx_timeout);
2356 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2357 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2360 bcm_enet_disable_dma(priv, priv->tx_chan);
2361 bcm_enet_disable_dma(priv, priv->rx_chan);
2367 bcm_enet_free_rx_buf_ring(kdev, priv);
2370 kfree(priv->tx_skb);
2371 dma_free_coherent(kdev, priv->rx_desc_alloc_size,
2372 priv->rx_desc_cpu, priv->rx_desc_dma);
2373 dma_free_coherent(kdev, priv->tx_desc_alloc_size,
2374 priv->tx_desc_cpu, priv->tx_desc_dma);
2375 if (priv->irq_tx != -1)
2376 free_irq(priv->irq_tx, dev);
2377 free_irq(priv->irq_rx, dev);
2390 static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
2394 for (i = 0; i < priv->num_ports; ++i) {
2395 if (!priv->used_ports[i].used)
2397 if (priv->used_ports[i].phy_id == phy_id)
2412 struct bcm_enet_priv *priv;
2414 priv = netdev_priv(dev);
2415 return bcmenet_sw_mdio_read(priv,
2416 bcm_enetsw_phy_is_external(priv, phy_id),
2427 struct bcm_enet_priv *priv;
2429 priv = netdev_priv(dev);
2430 bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
2542 struct bcm_enet_priv *priv;
2545 priv = netdev_priv(netdev);
2559 lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
2560 p = (char *)priv + s->stat_offset;
2563 hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
2579 p = (char *)priv + s->stat_offset;
2592 struct bcm_enet_priv *priv;
2594 priv = netdev_priv(dev);
2601 ering->rx_pending = priv->rx_ring_size;
2602 ering->tx_pending = priv->tx_ring_size;
2611 struct bcm_enet_priv *priv;
2614 priv = netdev_priv(dev);
2622 priv->rx_ring_size = ering->rx_pending;
2623 priv->tx_ring_size = ering->tx_pending;
2647 struct bcm_enet_priv *priv;
2662 dev = alloc_etherdev(sizeof(*priv));
2665 priv = netdev_priv(dev);
2668 priv->enet_is_sw = true;
2669 priv->irq_rx = irq_rx;
2670 priv->irq_tx = irq_tx;
2671 priv->rx_ring_size = BCMENET_DEF_RX_DESC;
2672 priv->tx_ring_size = BCMENET_DEF_TX_DESC;
2673 priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
2674 priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;
2679 memcpy(priv->used_ports, pd->used_ports,
2681 priv->num_ports = pd->num_ports;
2682 priv->dma_has_sram = pd->dma_has_sram;
2683 priv->dma_chan_en_mask = pd->dma_chan_en_mask;
2684 priv->dma_chan_int_mask = pd->dma_chan_int_mask;
2685 priv->dma_chan_width = pd->dma_chan_width;
2692 priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
2693 if (IS_ERR(priv->base)) {
2694 ret = PTR_ERR(priv->base);
2698 priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
2699 if (IS_ERR(priv->mac_clk)) {
2700 ret = PTR_ERR(priv->mac_clk);
2703 ret = clk_prepare_enable(priv->mac_clk);
2707 priv->rx_chan = 0;
2708 priv->tx_chan = 1;
2709 spin_lock_init(&priv->rx_lock);
2712 timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
2716 netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
2720 spin_lock_init(&priv->enetsw_mdio_lock);
2728 priv->pdev = pdev;
2729 priv->net_dev = dev;
2734 clk_disable_unprepare(priv->mac_clk);
2744 struct bcm_enet_priv *priv;
2749 priv = netdev_priv(dev);
2752 clk_disable_unprepare(priv->mac_clk);