Lines matching refs: greth

43 #include "greth.h"
117 static inline void greth_enable_tx(struct greth_private *greth)
120 GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
123 static inline void greth_enable_tx_and_irq(struct greth_private *greth)
126 GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
129 static inline void greth_disable_tx(struct greth_private *greth)
131 GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
134 static inline void greth_enable_rx(struct greth_private *greth)
137 GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
140 static inline void greth_disable_rx(struct greth_private *greth)
142 GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
145 static inline void greth_enable_irqs(struct greth_private *greth)
147 GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
150 static inline void greth_disable_irqs(struct greth_private *greth)
152 GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
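
The inline helpers at lines 117-152 all drive the same control register. The sketch below shows the read-modify-write pattern that GRETH_REGORIN/GRETH_REGANDIN appear to implement; the bit values and macro expansions are assumptions (the real definitions live in greth.h and go through the kernel's I/O accessors), and the example runs in user space against a plain variable.

#include <stdint.h>
#include <stdio.h>

/* Assumed control-register bits, mirroring the names in the listing. */
#define GRETH_TXEN 0x1u
#define GRETH_RXEN 0x2u
#define GRETH_TXI  0x4u
#define GRETH_RXI  0x8u

/* Assumed expansion of the register helpers: a plain read-modify-write.
 * The real macros in greth.h additionally handle I/O access/endianness. */
#define GRETH_REGLOAD(reg)      (*(volatile uint32_t *)&(reg))
#define GRETH_REGSAVE(reg, v)   (*(volatile uint32_t *)&(reg) = (v))
#define GRETH_REGORIN(reg, v)   GRETH_REGSAVE(reg, GRETH_REGLOAD(reg) | (v))
#define GRETH_REGANDIN(reg, v)  GRETH_REGSAVE(reg, GRETH_REGLOAD(reg) & (v))

int main(void)
{
	uint32_t control = 0;

	GRETH_REGORIN(control, GRETH_TXEN | GRETH_TXI);  /* enable TX + TX irq */
	GRETH_REGANDIN(control, ~GRETH_TXI);             /* mask TX irq again  */
	printf("control = 0x%08x\n", (unsigned)control); /* prints 0x00000001  */
	return 0;
}
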
165 static void greth_clean_rings(struct greth_private *greth)
168 struct greth_bd *rx_bdp = greth->rx_bd_base;
169 struct greth_bd *tx_bdp = greth->tx_bd_base;
171 if (greth->gbit_mac) {
175 if (greth->rx_skbuff[i] != NULL) {
176 dev_kfree_skb(greth->rx_skbuff[i]);
177 dma_unmap_single(greth->dev,
185 while (greth->tx_free < GRETH_TXBD_NUM) {
187 struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
189 tx_bdp = greth->tx_bd_base + greth->tx_last;
190 greth->tx_last = NEXT_TX(greth->tx_last);
192 dma_unmap_single(greth->dev,
199 tx_bdp = greth->tx_bd_base + greth->tx_last;
201 dma_unmap_page(greth->dev,
206 greth->tx_last = NEXT_TX(greth->tx_last);
208 greth->tx_free += nr_frags+1;
216 kfree(greth->rx_bufs[i]);
217 dma_unmap_single(greth->dev,
223 kfree(greth->tx_bufs[i]);
224 dma_unmap_single(greth->dev,
232 static int greth_init_rings(struct greth_private *greth)
239 rx_bd = greth->rx_bd_base;
240 tx_bd = greth->tx_bd_base;
243 if (greth->gbit_mac) {
246 skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
248 if (netif_msg_ifup(greth))
249 dev_err(greth->dev, "Error allocating DMA ring.\n");
253 dma_addr = dma_map_single(greth->dev,
258 if (dma_mapping_error(greth->dev, dma_addr)) {
259 if (netif_msg_ifup(greth))
260 dev_err(greth->dev, "Could not create initial DMA mapping\n");
264 greth->rx_skbuff[i] = skb;
274 greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
276 if (greth->rx_bufs[i] == NULL) {
277 if (netif_msg_ifup(greth))
278 dev_err(greth->dev, "Error allocating DMA ring.\n");
282 dma_addr = dma_map_single(greth->dev,
283 greth->rx_bufs[i],
287 if (dma_mapping_error(greth->dev, dma_addr)) {
288 if (netif_msg_ifup(greth))
289 dev_err(greth->dev, "Could not create initial DMA mapping\n");
297 greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
299 if (greth->tx_bufs[i] == NULL) {
300 if (netif_msg_ifup(greth))
301 dev_err(greth->dev, "Error allocating DMA ring.\n");
305 dma_addr = dma_map_single(greth->dev,
306 greth->tx_bufs[i],
310 if (dma_mapping_error(greth->dev, dma_addr)) {
311 if (netif_msg_ifup(greth))
312 dev_err(greth->dev, "Could not create initial DMA mapping\n");
323 greth->rx_cur = 0;
324 greth->tx_next = 0;
325 greth->tx_last = 0;
326 greth->tx_free = GRETH_TXBD_NUM;
329 GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
330 GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);
335 greth_clean_rings(greth);
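
Lines 323-330 reset the ring cursors (rx_cur, tx_next, tx_last, tx_free) and hand the descriptor bases to the hardware. The sketch below models the power-of-two ring bookkeeping that NEXT_TX (lines 190, 440, 515) and greth_num_free_bds (lines 476, 732) appear to rely on; the descriptor count, the NEXT_TX expansion, and the free-count convention are assumptions for illustration, not the driver's actual definitions.

#include <stdio.h>

/* Assumed ring geometry: a power-of-two number of TX descriptors. */
#define GRETH_TXBD_NUM       128
#define GRETH_TXBD_NUM_MASK  (GRETH_TXBD_NUM - 1)

/* Assumed helper: advance an index with wrap-around. */
#define NEXT_TX(i)  (((i) + 1) & GRETH_TXBD_NUM_MASK)

/* Sketch of ring-distance bookkeeping along the lines of
 * greth_num_free_bds(); the real helper's exact convention may differ. */
static int num_free_bds(unsigned int tx_last, unsigned int tx_next)
{
	if (tx_next >= tx_last)
		return GRETH_TXBD_NUM - (tx_next - tx_last);
	return tx_last - tx_next;
}

int main(void)
{
	unsigned int tx_next = 0, tx_last = 0;

	/* Queue three frames, then reclaim one. */
	for (int i = 0; i < 3; i++)
		tx_next = NEXT_TX(tx_next);
	tx_last = NEXT_TX(tx_last);

	printf("free descriptors: %d\n", num_free_bds(tx_last, tx_next));
	return 0;
}
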
341 struct greth_private *greth = netdev_priv(dev);
344 err = greth_init_rings(greth);
346 if (netif_msg_ifup(greth))
351 err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
353 if (netif_msg_ifup(greth))
355 greth_clean_rings(greth);
359 if (netif_msg_ifup(greth))
363 GRETH_REGSAVE(greth->regs->status, 0xFF);
365 napi_enable(&greth->napi);
367 greth_enable_irqs(greth);
368 greth_enable_tx(greth);
369 greth_enable_rx(greth);
376 struct greth_private *greth = netdev_priv(dev);
378 napi_disable(&greth->napi);
380 greth_disable_irqs(greth);
381 greth_disable_tx(greth);
382 greth_disable_rx(greth);
386 free_irq(greth->irq, (void *) dev);
388 greth_clean_rings(greth);
396 struct greth_private *greth = netdev_priv(dev);
403 greth_clean_tx(greth->netdev);
405 if (unlikely(greth->tx_free <= 0)) {
406 spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
407 ctrl = GRETH_REGLOAD(greth->regs->control);
410 GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
412 spin_unlock_irqrestore(&greth->devlock, flags);
416 if (netif_msg_pktdata(greth))
425 bdp = greth->tx_bd_base + greth->tx_next;
430 dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
433 greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
436 if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
440 greth->tx_next = NEXT_TX(greth->tx_next);
441 greth->tx_free--;
445 spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
446 greth_enable_tx(greth);
447 spin_unlock_irqrestore(&greth->devlock, flags);
465 struct greth_private *greth = netdev_priv(dev);
473 tx_last = greth->tx_last;
476 if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
482 if (netif_msg_pktdata(greth))
491 greth->tx_skbuff[greth->tx_next] = skb;
502 if (greth->tx_next == GRETH_TXBD_NUM_MASK)
506 bdp = greth->tx_bd_base + greth->tx_next;
508 dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
510 if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
515 curr_tx = NEXT_TX(greth->tx_next);
520 greth->tx_skbuff[curr_tx] = NULL;
521 bdp = greth->tx_bd_base + curr_tx;
540 dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag),
543 if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
554 bdp = greth->tx_bd_base + greth->tx_next;
558 spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
559 greth->tx_next = curr_tx;
560 greth_enable_tx_and_irq(greth);
561 spin_unlock_irqrestore(&greth->devlock, flags);
567 for (i = 0; greth->tx_next + i != curr_tx; i++) {
568 bdp = greth->tx_bd_base + greth->tx_next + i;
569 dma_unmap_single(greth->dev,
577 dev_warn(greth->dev, "Could not create TX DMA mapping\n");
586 struct greth_private *greth;
590 greth = netdev_priv(dev);
592 spin_lock(&greth->devlock);
595 status = GRETH_REGLOAD(greth->regs->status);
601 ctrl = GRETH_REGLOAD(greth->regs->control);
609 greth_disable_irqs(greth);
610 napi_schedule(&greth->napi);
613 spin_unlock(&greth->devlock);
620 struct greth_private *greth;
624 greth = netdev_priv(dev);
627 bdp = greth->tx_bd_base + greth->tx_last;
628 GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
635 if (greth->tx_free == GRETH_TXBD_NUM)
647 dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
648 greth->tx_last = NEXT_TX(greth->tx_last);
649 greth->tx_free++;
652 if (greth->tx_free > 0) {
674 struct greth_private *greth;
681 greth = netdev_priv(dev);
682 tx_last = greth->tx_last;
684 while (tx_last != greth->tx_next) {
686 skb = greth->tx_skbuff[tx_last];
691 bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);
693 GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
700 greth->tx_skbuff[tx_last] = NULL;
705 bdp = greth->tx_bd_base + tx_last;
709 dma_unmap_single(greth->dev,
716 bdp = greth->tx_bd_base + tx_last;
718 dma_unmap_page(greth->dev,
729 greth->tx_last = tx_last;
732 (greth_num_free_bds(tx_last, greth->tx_next) >
740 struct greth_private *greth;
748 greth = netdev_priv(dev);
752 bdp = greth->rx_bd_base + greth->rx_cur;
753 GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
798 dma_sync_single_for_cpu(greth->dev,
803 if (netif_msg_pktdata(greth))
817 if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
824 dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
826 spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
827 greth_enable_rx(greth);
828 spin_unlock_irqrestore(&greth->devlock, flags);
830 greth->rx_cur = NEXT_RX(greth->rx_cur);
856 struct greth_private *greth;
864 greth = netdev_priv(dev);
868 bdp = greth->rx_bd_base + greth->rx_cur;
869 skb = greth->rx_skbuff[greth->rx_cur];
870 GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
899 dma_addr = dma_map_single(greth->dev,
904 if (!dma_mapping_error(greth->dev, dma_addr)) {
908 dma_unmap_single(greth->dev,
913 if (netif_msg_pktdata(greth))
928 greth->rx_skbuff[greth->rx_cur] = newskb;
932 dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
948 dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
954 if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
960 spin_lock_irqsave(&greth->devlock, flags);
961 greth_enable_rx(greth);
962 spin_unlock_irqrestore(&greth->devlock, flags);
963 greth->rx_cur = NEXT_RX(greth->rx_cur);
972 struct greth_private *greth;
976 greth = container_of(napi, struct greth_private, napi);
979 if (greth->gbit_mac) {
980 greth_clean_tx_gbit(greth->netdev);
981 work_done += greth_rx_gbit(greth->netdev, budget - work_done);
983 if (netif_queue_stopped(greth->netdev))
984 greth_clean_tx(greth->netdev);
985 work_done += greth_rx(greth->netdev, budget - work_done);
990 spin_lock_irqsave(&greth->devlock, flags);
992 ctrl = GRETH_REGLOAD(greth->regs->control);
993 if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
994 (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
995 GRETH_REGSAVE(greth->regs->control,
1000 GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
1004 if (GRETH_REGLOAD(greth->regs->status) & mask) {
1005 GRETH_REGSAVE(greth->regs->control, ctrl);
1006 spin_unlock_irqrestore(&greth->devlock, flags);
1010 spin_unlock_irqrestore(&greth->devlock, flags);
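
Lines 990-1010 show the tail of greth_poll(): interrupts are re-enabled under devlock and the status register is re-checked, so events that arrived while interrupts were masked restart polling instead of being lost. A simplified user-space model of that re-arm-then-recheck decision, with made-up register layout and bit names:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define IRQ_RX 0x1u
#define IRQ_TX 0x2u

struct fake_mac {
	uint32_t control;  /* interrupt-enable bits */
	uint32_t status;   /* pending-event bits    */
};

/* Returns true if polling may stop (NAPI complete), false to keep polling. */
static bool rearm_or_restart(struct fake_mac *mac, uint32_t mask)
{
	uint32_t ctrl = mac->control;

	mac->control = ctrl | mask;   /* re-enable interrupts            */
	if (mac->status & mask) {     /* events raced in: mask again and */
		mac->control = ctrl;  /* go round the poll loop once more */
		return false;
	}
	return true;                  /* safe to complete NAPI */
}

int main(void)
{
	struct fake_mac mac = { .control = 0, .status = IRQ_RX };

	printf("%s\n", rearm_or_restart(&mac, IRQ_RX | IRQ_TX) ?
	       "complete" : "restart poll");  /* prints "restart poll" */
	mac.status = 0;
	printf("%s\n", rearm_or_restart(&mac, IRQ_RX | IRQ_TX) ?
	       "complete" : "restart poll");  /* prints "complete"     */
	return 0;
}
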
1020 struct greth_private *greth;
1023 greth = netdev_priv(dev);
1024 regs = greth->regs;
1045 struct greth_private *greth = netdev_priv(dev);
1046 struct greth_regs *regs = greth->regs;
1064 struct greth_private *greth = netdev_priv(dev);
1065 struct greth_regs *regs = greth->regs;
1073 if (greth->multicast) {
1097 struct greth_private *greth = netdev_priv(dev);
1098 return greth->msg_enable;
1103 struct greth_private *greth = netdev_priv(dev);
1104 greth->msg_enable = value;
1114 struct greth_private *greth = netdev_priv(dev);
1116 strlcpy(info->driver, dev_driver_string(greth->dev),
1118 strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
1124 struct greth_private *greth = netdev_priv(dev);
1125 u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
1151 static inline int wait_for_mdio(struct greth_private *greth)
1154 while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
1163 struct greth_private *greth = bus->priv;
1166 if (!wait_for_mdio(greth))
1169 GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);
1171 if (!wait_for_mdio(greth))
1174 if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
1175 data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
1185 struct greth_private *greth = bus->priv;
1187 if (!wait_for_mdio(greth))
1190 GRETH_REGSAVE(greth->regs->mdio,
1193 if (!wait_for_mdio(greth))
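
Line 1169 packs the MDIO read command (PHY address in bits 15:11, register number in bits 10:6, bit 1 as the read trigger) and line 1175 pulls the result out of bits 31:16. A small stand-alone sketch of that field packing; the write-command encoding at line 1190 is cut off in the listing and is not guessed at here.

#include <stdint.h>
#include <stdio.h>

/* Pack an MDIO read command the way line 1169 does. */
static uint32_t greth_mdio_read_cmd(unsigned int phy, unsigned int reg)
{
	return ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2;
}

/* Read data comes back in bits 31:16 of the mdio register (line 1175). */
static uint16_t greth_mdio_read_data(uint32_t mdio)
{
	return (mdio >> 16) & 0xFFFF;
}

int main(void)
{
	printf("cmd  = 0x%08x\n", (unsigned)greth_mdio_read_cmd(1, 2)); /* 0x00000882 */
	printf("data = 0x%04x\n", greth_mdio_read_data(0xABCD0000u));   /* 0xabcd     */
	return 0;
}
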
1201 struct greth_private *greth = netdev_priv(dev);
1207 spin_lock_irqsave(&greth->devlock, flags);
1211 if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
1212 ctrl = GRETH_REGLOAD(greth->regs->control) &
1223 GRETH_REGSAVE(greth->regs->control, ctrl);
1224 greth->speed = phydev->speed;
1225 greth->duplex = phydev->duplex;
1230 if (phydev->link != greth->link) {
1232 greth->speed = 0;
1233 greth->duplex = -1;
1235 greth->link = phydev->link;
1240 spin_unlock_irqrestore(&greth->devlock, flags);
1254 struct greth_private *greth = netdev_priv(dev);
1259 phy = phy_find_first(greth->mdio);
1262 if (netif_msg_probe(greth))
1268 greth->gbit_mac ? PHY_INTERFACE_MODE_GMII : PHY_INTERFACE_MODE_MII);
1270 if (netif_msg_ifup(greth))
1275 if (greth->gbit_mac)
1282 greth->link = 0;
1283 greth->speed = 0;
1284 greth->duplex = -1;
1289 static int greth_mdio_init(struct greth_private *greth)
1293 struct net_device *ndev = greth->netdev;
1295 greth->mdio = mdiobus_alloc();
1296 if (!greth->mdio) {
1300 greth->mdio->name = "greth-mdio";
1301 snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
1302 greth->mdio->read = greth_mdio_read;
1303 greth->mdio->write = greth_mdio_write;
1304 greth->mdio->priv = greth;
1306 ret = mdiobus_register(greth->mdio);
1311 ret = greth_mdio_probe(greth->netdev);
1313 if (netif_msg_probe(greth))
1314 dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
1321 if (greth->edcl && greth_edcl == 1) {
1328 greth_link_change(greth->netdev);
1334 mdiobus_unregister(greth->mdio);
1336 mdiobus_free(greth->mdio);
1344 struct greth_private *greth;
1357 greth = netdev_priv(dev);
1358 greth->netdev = dev;
1359 greth->dev = &ofdev->dev;
1362 greth->msg_enable = greth_debug;
1364 greth->msg_enable = GRETH_DEF_MSG_ENABLE;
1366 spin_lock_init(&greth->devlock);
1368 greth->regs = of_ioremap(&ofdev->resource[0], 0,
1370 "grlib-greth regs");
1372 if (greth->regs == NULL) {
1373 if (netif_msg_probe(greth))
1374 dev_err(greth->dev, "ioremap failure.\n");
1379 regs = greth->regs;
1380 greth->irq = ofdev->archdata.irqs[0];
1382 dev_set_drvdata(greth->dev, dev);
1383 SET_NETDEV_DEV(dev, greth->dev);
1385 if (netif_msg_probe(greth))
1386 dev_dbg(greth->dev, "resetting controller.\n");
1396 if (netif_msg_probe(greth))
1397 dev_err(greth->dev, "timeout when waiting for reset.\n");
1403 greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;
1407 greth->gbit_mac = (tmp >> 27) & 1;
1410 greth->multicast = (tmp >> 25) & 1;
1412 greth->edcl = (tmp >> 31) & 1;
1416 if (greth->edcl != 0)
1420 greth->mdio_int_en = (tmp >> 26) & 1;
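
Lines 1407-1420 probe the core's feature bits out of a capability word: gbit_mac (bit 27), multicast (bit 25), edcl (bit 31) and mdio_int_en (bit 26). A stand-alone decode of those bit positions, using a made-up register value as input:

#include <stdint.h>
#include <stdio.h>

/* Feature flags decoded during probe; bit positions taken from
 * lines 1407-1420 of the listing. */
struct greth_features {
	unsigned int gbit_mac:1;     /* bit 27: gigabit-capable core     */
	unsigned int mdio_int_en:1;  /* bit 26: MDIO interrupt support   */
	unsigned int multicast:1;    /* bit 25: multicast filter present */
	unsigned int edcl:1;         /* bit 31: EDCL debug link present  */
};

static struct greth_features decode_features(uint32_t tmp)
{
	struct greth_features f = {
		.gbit_mac    = (tmp >> 27) & 1,
		.mdio_int_en = (tmp >> 26) & 1,
		.multicast   = (tmp >> 25) & 1,
		.edcl        = (tmp >> 31) & 1,
	};
	return f;
}

int main(void)
{
	/* Example value only: bits 31, 27 and 25 set. */
	struct greth_features f = decode_features(0x8A000000u);

	printf("gbit=%d mcast=%d edcl=%d mdio_irq=%d\n",
	       f.gbit_mac, f.multicast, f.edcl, f.mdio_int_en);
	return 0;
}
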
1422 err = greth_mdio_init(greth);
1424 if (netif_msg_probe(greth))
1425 dev_err(greth->dev, "failed to register MDIO bus\n");
1430 greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
1431 &greth->tx_bd_base_phys,
1433 if (!greth->tx_bd_base) {
1439 greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
1440 &greth->rx_bd_base_phys,
1442 if (!greth->rx_bd_base) {
1473 if (netif_msg_probe(greth))
1474 dev_err(greth->dev, "no valid ethernet address, aborting.\n");
1486 if (greth->gbit_mac) {
1493 if (greth->multicast) {
1505 if (netif_msg_probe(greth))
1506 dev_err(greth->dev, "netdevice registration failed.\n");
1511 netif_napi_add(dev, &greth->napi, greth_poll, 64);
1516 dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
1518 dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
1520 mdiobus_unregister(greth->mdio);
1522 of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
1531 struct greth_private *greth = netdev_priv(ndev);
1534 dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
1536 dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
1540 mdiobus_unregister(greth->mdio);
1544 of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
1565 .name = "grlib-greth",