Lines matching references to mp, the struct mace_data * private-data pointer used throughout the MACE Ethernet driver; the number at the start of each entry is the line number in the driver source.
91 static inline void mace_clean_rings(struct mace_data *mp);
112 struct mace_data *mp;
155 mp = netdev_priv(dev);
156 mp->mdev = mdev;
160 mp->mace = ioremap(dev->base_addr, 0x1000);
161 if (mp->mace == NULL) {
173 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
174 in_8(&mp->mace->chipid_lo);
177 mp = netdev_priv(dev);
178 mp->maccc = ENXMT | ENRCV;
180 mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
181 if (mp->tx_dma == NULL) {
186 mp->tx_dma_intr = macio_irq(mdev, 1);
188 mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
189 if (mp->rx_dma == NULL) {
194 mp->rx_dma_intr = macio_irq(mdev, 2);
196 mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
197 mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;
199 memset((char *) mp->tx_cmds, 0,
201 timer_setup(&mp->tx_timeout, mace_tx_timeout, 0);
202 spin_lock_init(&mp->lock);
203 mp->timeout_active = 0;
206 mp->port_aaui = port_aaui;
210 mp->port_aaui = 1;
213 mp->port_aaui = 1;
215 mp->port_aaui = 0;
232 rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
234 printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
237 rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
239 printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
251 mp->chipid >> 8, mp->chipid & 0xff);
262 iounmap(mp->rx_dma);
264 iounmap(mp->tx_dma);
266 iounmap(mp->mace);
278 struct mace_data *mp;
284 mp = netdev_priv(dev);
289 free_irq(mp->tx_dma_intr, dev);
290 free_irq(mp->rx_dma_intr, dev);
292 iounmap(mp->rx_dma);
293 iounmap(mp->tx_dma);
294 iounmap(mp->mace);
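The probe and remove excerpts above follow the usual acquire-in-order, release-in-reverse shape: ioremap the register and DMA windows, request the DMA interrupts, unwind whatever was already claimed when a later step fails, and tear down in reverse order on remove. A minimal sketch of that shape only; the mace_like_priv struct, the example_* names, and the 0x1000 window size are illustrative assumptions, not the driver's real layout.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/types.h>

struct mace_like_priv {			/* stand-in for struct mace_data */
	void __iomem *regs;		/* chip registers, cf. mp->mace */
	void __iomem *tx_dma;		/* TX DMA channel, cf. mp->tx_dma */
	int tx_dma_intr;
};

static irqreturn_t example_txdma_intr(int irq, void *dev_id)
{
	return IRQ_HANDLED;		/* placeholder handler */
}

/* Map resources in order; on failure, release what was already taken. */
static int example_probe_map(struct net_device *dev, phys_addr_t reg_base,
			     phys_addr_t txdma_base, int tx_irq)
{
	struct mace_like_priv *mp = netdev_priv(dev);
	int rc;

	mp->regs = ioremap(reg_base, 0x1000);
	if (mp->regs == NULL)
		return -ENOMEM;

	mp->tx_dma = ioremap(txdma_base, 0x1000);
	if (mp->tx_dma == NULL) {
		rc = -ENOMEM;
		goto err_unmap_regs;
	}

	mp->tx_dma_intr = tx_irq;
	rc = request_irq(mp->tx_dma_intr, example_txdma_intr, 0,
			 "example-txdma", dev);
	if (rc)
		goto err_unmap_txdma;
	return 0;

err_unmap_txdma:
	iounmap(mp->tx_dma);
err_unmap_regs:
	iounmap(mp->regs);
	return rc;
}

/* Teardown mirrors probe in reverse: IRQ first, then the mappings. */
static void example_remove_unmap(struct net_device *dev)
{
	struct mace_like_priv *mp = netdev_priv(dev);

	free_irq(mp->tx_dma_intr, dev);
	iounmap(mp->tx_dma);
	iounmap(mp->regs);
}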
320 struct mace_data *mp = netdev_priv(dev);
321 volatile struct mace __iomem *mb = mp->mace;
353 if (mp->chipid == BROKEN_ADDRCHG_REV)
364 if (mp->chipid != BROKEN_ADDRCHG_REV)
367 if (mp->port_aaui)
375 struct mace_data *mp = netdev_priv(dev);
376 volatile struct mace __iomem *mb = mp->mace;
382 if (mp->chipid == BROKEN_ADDRCHG_REV)
394 if (mp->chipid != BROKEN_ADDRCHG_REV)
400 struct mace_data *mp = netdev_priv(dev);
401 volatile struct mace __iomem *mb = mp->mace;
404 spin_lock_irqsave(&mp->lock, flags);
409 out_8(&mb->maccc, mp->maccc);
411 spin_unlock_irqrestore(&mp->lock, flags);
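mace_set_address() is a thin locked wrapper: it takes mp->lock with interrupts disabled, does the address programming via the unlocked __mace_set_address() helper, restores the cached maccc value to the chip, and unlocks. A sketch of that wrapper-plus-helper split; the example_* names and the empty helper body are assumptions standing in for the real register writes.

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct example_priv {
	spinlock_t lock;
	u8 maccc;			/* cached MAC control value, cf. mp->maccc */
};

/* Does the actual programming; caller must already hold priv->lock. */
static void __example_set_address(struct net_device *dev, const void *addr)
{
	/* station-address register writes would go here */
}

static int example_set_address(struct net_device *dev, void *addr)
{
	struct example_priv *mp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	__example_set_address(dev, addr);
	/* then re-enable RX/TX, as the driver does with out_8(&mb->maccc, mp->maccc) */
	spin_unlock_irqrestore(&mp->lock, flags);
	return 0;
}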
415 static inline void mace_clean_rings(struct mace_data *mp)
421 if (mp->rx_bufs[i] != NULL) {
422 dev_kfree_skb(mp->rx_bufs[i]);
423 mp->rx_bufs[i] = NULL;
426 for (i = mp->tx_empty; i != mp->tx_fill; ) {
427 dev_kfree_skb(mp->tx_bufs[i]);
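mace_clean_rings() drops any sk_buffs still attached to the rings: every RX slot unconditionally, and only the TX slots between tx_empty and tx_fill, the range still owned by pending transmits. A sketch of that loop structure; the EX_* ring sizes and struct layout are illustrative, not the driver's.

#include <linux/skbuff.h>

#define EX_N_RX_RING 8			/* illustrative sizes */
#define EX_N_TX_RING 6

struct example_rings {
	struct sk_buff *rx_bufs[EX_N_RX_RING];
	struct sk_buff *tx_bufs[EX_N_TX_RING];
	int tx_empty, tx_fill;		/* consumer / producer indices */
};

static void example_clean_rings(struct example_rings *mp)
{
	int i;

	/* free every receive buffer handed to the ring */
	for (i = 0; i < EX_N_RX_RING; ++i) {
		if (mp->rx_bufs[i] != NULL) {
			dev_kfree_skb(mp->rx_bufs[i]);
			mp->rx_bufs[i] = NULL;
		}
	}
	/* free only the transmit buffers still queued between empty and fill */
	for (i = mp->tx_empty; i != mp->tx_fill; ) {
		dev_kfree_skb(mp->tx_bufs[i]);
		if (++i >= EX_N_TX_RING)
			i = 0;
	}
}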
435 struct mace_data *mp = netdev_priv(dev);
436 volatile struct mace __iomem *mb = mp->mace;
437 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
438 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
448 mace_clean_rings(mp);
449 memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
450 cp = mp->rx_cmds;
459 mp->rx_bufs[i] = skb;
466 mp->rx_bufs[i] = NULL;
468 mp->rx_fill = i;
469 mp->rx_empty = 0;
474 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds));
478 out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
482 cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
484 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds));
488 out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
489 mp->tx_fill = 0;
490 mp->tx_empty = 0;
491 mp->tx_fullup = 0;
492 mp->tx_active = 0;
493 mp->tx_bad_runt = 0;
496 out_8(&mb->maccc, mp->maccc);
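mace_open() clears the rings, pre-allocates a receive sk_buff for each RX slot it will use (stopping early if allocation fails), records the rx_fill/rx_empty indices, and then restarts the chip and the DBDMA channels. A sketch of just the buffer pre-allocation step under the assumption of a plain array-of-skb ring; the DBDMA command programming is omitted and the EX_* names and sizes are illustrative.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define EX_N_RX_RING 8			/* illustrative ring size */
#define EX_RX_BUFLEN 1536		/* illustrative buffer length */

struct example_rx_ring {
	struct sk_buff *rx_bufs[EX_N_RX_RING];
	int rx_fill, rx_empty;
};

static void example_open_fill_rx(struct net_device *dev,
				 struct example_rx_ring *mp)
{
	int i;

	/* leave the last slot unused so fill never catches up with empty */
	for (i = 0; i < EX_N_RX_RING - 1; ++i) {
		struct sk_buff *skb = netdev_alloc_skb(dev, EX_RX_BUFLEN);

		if (skb == NULL)
			break;		/* run with a partly filled ring */
		mp->rx_bufs[i] = skb;
		/* the real driver also points a DBDMA input command at skb->data here */
	}
	mp->rx_bufs[i] = NULL;
	mp->rx_fill = i;
	mp->rx_empty = 0;
}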
505 struct mace_data *mp = netdev_priv(dev);
506 volatile struct mace __iomem *mb = mp->mace;
507 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
508 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
518 mace_clean_rings(mp);
525 struct mace_data *mp = netdev_priv(dev);
527 if (mp->timeout_active)
528 del_timer(&mp->tx_timeout);
529 mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
530 add_timer(&mp->tx_timeout);
531 mp->timeout_active = 1;
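mace_set_timeout() keeps a single software watchdog armed per device: if a previous timer is still pending it is deleted, the expiry is pushed TX_TIMEOUT jiffies into the future, the timer is re-added, and timeout_active records that it is armed. The timer itself was initialised earlier with timer_setup(&mp->tx_timeout, mace_tx_timeout, 0), and the handler recovers mp with from_timer(). A sketch of that arm/handler pairing; the example_* names and the 1-second EX_TX_TIMEOUT are assumptions.

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

#define EX_TX_TIMEOUT (HZ)		/* assumed timeout of one second */

struct example_wd {
	spinlock_t lock;
	struct timer_list tx_timeout;
	int timeout_active;
};

static void example_tx_timeout(struct timer_list *t)
{
	struct example_wd *mp = from_timer(mp, t, tx_timeout);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	mp->timeout_active = 0;
	/* the real handler resets the DMA channels and requeues work here */
	spin_unlock_irqrestore(&mp->lock, flags);
}

static void example_init_wd(struct example_wd *mp)
{
	spin_lock_init(&mp->lock);
	timer_setup(&mp->tx_timeout, example_tx_timeout, 0);
	mp->timeout_active = 0;
}

/* Re-arm the watchdog; called with mp->lock held, as in the driver. */
static void example_set_timeout(struct example_wd *mp)
{
	if (mp->timeout_active)
		del_timer(&mp->tx_timeout);
	mp->tx_timeout.expires = jiffies + EX_TX_TIMEOUT;
	add_timer(&mp->tx_timeout);
	mp->timeout_active = 1;
}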
536 struct mace_data *mp = netdev_priv(dev);
537 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
543 spin_lock_irqsave(&mp->lock, flags);
544 fill = mp->tx_fill;
548 if (next == mp->tx_empty) {
550 mp->tx_fullup = 1;
551 spin_unlock_irqrestore(&mp->lock, flags);
554 spin_unlock_irqrestore(&mp->lock, flags);
562 mp->tx_bufs[fill] = skb;
563 cp = mp->tx_cmds + NCMDS_TX * fill;
567 np = mp->tx_cmds + NCMDS_TX * next;
571 spin_lock_irqsave(&mp->lock, flags);
572 mp->tx_fill = next;
573 if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
577 ++mp->tx_active;
582 if (next == mp->tx_empty)
584 spin_unlock_irqrestore(&mp->lock, flags);
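The transmit path above checks, under the lock, whether advancing tx_fill would collide with tx_empty; if so it marks tx_fullup, stops the queue, and returns NETDEV_TX_BUSY, otherwise it stores the skb, advances tx_fill, and stops the queue early if no free slot remains for the next packet. A reduced sketch of that ring-full bookkeeping; descriptor programming and the tx_active/MAX_TX_ACTIVE kick are omitted, and the example_* names and EX_N_TX_RING are assumptions.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#define EX_N_TX_RING 6			/* illustrative ring size */

struct example_tx_ring {
	spinlock_t lock;
	struct sk_buff *tx_bufs[EX_N_TX_RING];
	int tx_fill, tx_empty;
	int tx_fullup;
};

static netdev_tx_t example_xmit_start(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct example_tx_ring *mp = netdev_priv(dev);
	unsigned long flags;
	int fill, next;

	spin_lock_irqsave(&mp->lock, flags);
	fill = mp->tx_fill;
	next = fill + 1;
	if (next >= EX_N_TX_RING)
		next = 0;
	if (next == mp->tx_empty) {
		/* ring full: remember it, stop the queue, ask for a retry */
		netif_stop_queue(dev);
		mp->tx_fullup = 1;
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;
	}

	mp->tx_bufs[fill] = skb;	/* descriptor setup would go here */
	mp->tx_fill = next;

	/* stop the queue now if this used up the last free slot */
	if (++next >= EX_N_TX_RING)
		next = 0;
	if (next == mp->tx_empty)
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&mp->lock, flags);
	return NETDEV_TX_OK;
}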
591 struct mace_data *mp = netdev_priv(dev);
592 volatile struct mace __iomem *mb = mp->mace;
597 spin_lock_irqsave(&mp->lock, flags);
598 mp->maccc &= ~PROM;
600 mp->maccc |= PROM;
624 if (mp->chipid == BROKEN_ADDRCHG_REV)
633 if (mp->chipid != BROKEN_ADDRCHG_REV)
637 out_8(&mb->maccc, mp->maccc);
638 spin_unlock_irqrestore(&mp->lock, flags);
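mace_set_multicast() rebuilds the receive filter under the lock: in promiscuous mode it sets PROM in the cached maccc value, otherwise it clears PROM and reloads the multicast hash (with the extra register dance for BROKEN_ADDRCHG_REV chips), then writes maccc back to the chip. A sketch of the flag handling only; the EX_PROM bit value and the example_write_maccc() helper are assumptions, and the hash computation is left as a comment.

#include <linux/netdevice.h>
#include <linux/spinlock.h>

#define EX_PROM 0x80			/* assumed bit position, for illustration */

struct example_mcast_priv {
	spinlock_t lock;
	u8 maccc;			/* cached MAC control register value */
};

/* assumed hardware write helper for the sketch */
static void example_write_maccc(struct net_device *dev, u8 val) { }

static void example_set_multicast(struct net_device *dev)
{
	struct example_mcast_priv *mp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	mp->maccc &= ~EX_PROM;
	if (dev->flags & IFF_PROMISC) {
		mp->maccc |= EX_PROM;
	} else {
		/* the real driver computes and loads the multicast hash
		 * filter here, with the BROKEN_ADDRCHG_REV special case */
	}
	example_write_maccc(dev, mp->maccc);
	spin_unlock_irqrestore(&mp->lock, flags);
}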
641 static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
643 volatile struct mace __iomem *mb = mp->mace;
665 struct mace_data *mp = netdev_priv(dev);
666 volatile struct mace __iomem *mb = mp->mace;
667 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
674 spin_lock_irqsave(&mp->lock, flags);
677 mace_handle_misc_intrs(mp, intr, dev);
679 i = mp->tx_empty;
681 del_timer(&mp->tx_timeout);
682 mp->timeout_active = 0;
690 mace_handle_misc_intrs(mp, intr, dev);
691 if (mp->tx_bad_runt) {
693 mp->tx_bad_runt = 0;
729 cp = mp->tx_cmds + NCMDS_TX * i;
740 mp->tx_bad_runt = 1;
757 if (i == mp->tx_fill) {
770 dev->stats.tx_bytes += mp->tx_bufs[i]->len;
773 dev_consume_skb_irq(mp->tx_bufs[i]);
774 --mp->tx_active;
783 if (i != mp->tx_empty) {
784 mp->tx_fullup = 0;
787 mp->tx_empty = i;
788 i += mp->tx_active;
791 if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
794 cp = mp->tx_cmds + NCMDS_TX * i;
797 ++mp->tx_active;
800 } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
804 spin_unlock_irqrestore(&mp->lock, flags);
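The interrupt handler's TX side advances tx_empty from the consumer end: for each completed descriptor it accounts the byte count, frees the skb with dev_consume_skb_irq() (safe in hard-irq context), decrements tx_active, clears tx_fullup and lets the queue run again once at least one slot was reclaimed, then re-kicks any waiting descriptors up to MAX_TX_ACTIVE. A trimmed sketch of the completion accounting; the MACE retry/runt handling and descriptor status reads are left out, the number of completed slots is passed in as 'done', and the example_* names are assumptions.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#define EX_N_TX_RING 6

struct example_tx_priv {
	spinlock_t lock;
	struct sk_buff *tx_bufs[EX_N_TX_RING];
	int tx_fill, tx_empty;
	int tx_fullup;
	int tx_active;
};

/* Reap 'done' completed transmit slots, as the ISR does after checking
 * each descriptor's DMA status. */
static void example_tx_complete(struct net_device *dev, int done)
{
	struct example_tx_priv *mp = netdev_priv(dev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mp->lock, flags);
	i = mp->tx_empty;
	while (done-- > 0 && i != mp->tx_fill) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += mp->tx_bufs[i]->len;
		dev_consume_skb_irq(mp->tx_bufs[i]);
		--mp->tx_active;
		if (++i >= EX_N_TX_RING)
			i = 0;
	}
	if (i != mp->tx_empty) {
		/* reclaimed at least one slot: let the stack transmit again */
		mp->tx_fullup = 0;
		netif_wake_queue(dev);
	}
	mp->tx_empty = i;
	spin_unlock_irqrestore(&mp->lock, flags);
}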
810 struct mace_data *mp = from_timer(mp, t, tx_timeout);
811 struct net_device *dev = macio_get_drvdata(mp->mdev);
812 volatile struct mace __iomem *mb = mp->mace;
813 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
814 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
819 spin_lock_irqsave(&mp->lock, flags);
820 mp->timeout_active = 0;
821 if (mp->tx_active == 0 && !mp->tx_bad_runt)
825 mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);
827 cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;
843 i = mp->tx_empty;
844 mp->tx_active = 0;
846 if (mp->tx_bad_runt) {
847 mp->tx_bad_runt = 0;
848 } else if (i != mp->tx_fill) {
849 dev_kfree_skb_irq(mp->tx_bufs[i]);
852 mp->tx_empty = i;
854 mp->tx_fullup = 0;
856 if (i != mp->tx_fill) {
857 cp = mp->tx_cmds + NCMDS_TX * i;
862 ++mp->tx_active;
868 out_8(&mb->maccc, mp->maccc);
871 spin_unlock_irqrestore(&mp->lock, flags);
882 struct mace_data *mp = netdev_priv(dev);
883 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
892 spin_lock_irqsave(&mp->lock, flags);
893 for (i = mp->rx_empty; i != mp->rx_fill; ) {
894 cp = mp->rx_cmds + i;
900 np = mp->rx_cmds + next;
901 if (next != mp->rx_fill &&
911 skb = mp->rx_bufs[i];
938 mp->rx_bufs[i] = NULL;
950 mp->rx_empty = i;
952 i = mp->rx_fill;
957 if (next == mp->rx_empty)
959 cp = mp->rx_cmds + i;
960 skb = mp->rx_bufs[i];
965 mp->rx_bufs[i] = skb;
982 if (i != mp->rx_fill) {
984 mp->rx_fill = i;
986 spin_unlock_irqrestore(&mp->lock, flags);
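The receive interrupt path makes two passes under the lock: first it walks from rx_empty to rx_fill, handing completed buffers up the stack and detaching them from the ring, then it refills the freed slots with fresh sk_buffs and advances rx_fill, restarting the DMA channel if anything was added. A compact sketch of that consume-then-refill shape; descriptor status checks and the DBDMA command rewrites are left out, the completed-frame count is passed in as 'done', and the example_* names and EX_* sizes are assumptions.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#define EX_N_RX_RING 8			/* illustrative ring size */
#define EX_RX_BUFLEN 1536

struct example_rx_priv {
	spinlock_t lock;
	struct sk_buff *rx_bufs[EX_N_RX_RING];
	int rx_fill, rx_empty;
};

/* Reap 'done' received frames, then put new buffers in the freed slots. */
static void example_rx_complete(struct net_device *dev, int done)
{
	struct example_rx_priv *mp = netdev_priv(dev);
	unsigned long flags;
	int i, next;

	spin_lock_irqsave(&mp->lock, flags);

	/* pass 1: hand completed buffers to the stack */
	for (i = mp->rx_empty; done-- > 0 && i != mp->rx_fill; ) {
		struct sk_buff *skb = mp->rx_bufs[i];

		if (skb != NULL) {
			/* the real driver first skb_put()s the received length */
			skb->protocol = eth_type_trans(skb, dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += skb->len;
			netif_rx(skb);
			mp->rx_bufs[i] = NULL;	/* slot now needs a refill */
		}
		if (++i >= EX_N_RX_RING)
			i = 0;
	}
	mp->rx_empty = i;

	/* pass 2: refill emptied slots, keeping one slot unused */
	for (i = mp->rx_fill; ; i = next) {
		next = i + 1;
		if (next >= EX_N_RX_RING)
			next = 0;
		if (next == mp->rx_empty)
			break;
		if (mp->rx_bufs[i] == NULL)
			mp->rx_bufs[i] = netdev_alloc_skb(dev, EX_RX_BUFLEN);
		/* the real driver re-points the slot's DMA command at skb->data here */
	}
	mp->rx_fill = i;

	spin_unlock_irqrestore(&mp->lock, flags);
}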