Lines Matching refs:mp (references to the struct mace_data pointer mp in the MACE Ethernet driver)
92 static inline void mace_clean_rings(struct mace_data *mp);
113 struct mace_data *mp;
155 mp = netdev_priv(dev);
156 mp->mdev = mdev;
160 mp->mace = ioremap(dev->base_addr, 0x1000);
161 if (mp->mace == NULL) {
172 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
173 in_8(&mp->mace->chipid_lo);
176 mp = netdev_priv(dev);
177 mp->maccc = ENXMT | ENRCV;
179 mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
180 if (mp->tx_dma == NULL) {
185 mp->tx_dma_intr = macio_irq(mdev, 1);
187 mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
188 if (mp->rx_dma == NULL) {
193 mp->rx_dma_intr = macio_irq(mdev, 2);
195 mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
196 mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;
198 memset((char *) mp->tx_cmds, 0,
200 timer_setup(&mp->tx_timeout, mace_tx_timeout, 0);
201 spin_lock_init(&mp->lock);
202 mp->timeout_active = 0;
205 mp->port_aaui = port_aaui;
209 mp->port_aaui = 1;
212 mp->port_aaui = 1;
214 mp->port_aaui = 0;
231 rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
233 printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
236 rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
238 printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
250 mp->chipid >> 8, mp->chipid & 0xff);
261 iounmap(mp->rx_dma);
263 iounmap(mp->tx_dma);
265 iounmap(mp->mace);
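Every resource acquired in mace_probe() (lines 155-236) has a matching failure path: if a later ioremap() or request_irq() fails, control jumps past it and unwinds only what was already mapped, and mace_remove() (lines 277-293) repeats the same free_irq()/iounmap() calls at teardown. A minimal sketch of that unwind ladder, with hypothetical names (my_probe, struct my_priv, my_tx_intr) standing in for the driver's own:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

struct my_priv {
	void __iomem *regs;	/* chip registers, cf. mp->mace */
	void __iomem *txd;	/* tx DMA channel, cf. mp->tx_dma */
	int tx_irq;
};

static irqreturn_t my_tx_intr(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* placeholder handler */
}

static int my_probe(struct net_device *dev, unsigned long base,
		    unsigned long txd_base, int tx_irq)
{
	struct my_priv *mp = netdev_priv(dev);
	int rc;

	mp->regs = ioremap(base, 0x1000);
	if (mp->regs == NULL)
		return -ENOMEM;

	mp->txd = ioremap(txd_base, 0x1000);
	if (mp->txd == NULL) {
		rc = -ENOMEM;
		goto err_unmap_regs;
	}

	rc = request_irq(tx_irq, my_tx_intr, 0, "my-txdma", dev);
	if (rc) {
		printk(KERN_ERR "my_probe: can't get irq %d\n", tx_irq);
		goto err_unmap_txd;
	}
	mp->tx_irq = tx_irq;
	return 0;

 err_unmap_txd:
	iounmap(mp->txd);
 err_unmap_regs:
	iounmap(mp->regs);
	return rc;
}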
277 struct mace_data *mp;
283 mp = netdev_priv(dev);
288 free_irq(mp->tx_dma_intr, dev);
289 free_irq(mp->rx_dma_intr, dev);
291 iounmap(mp->rx_dma);
292 iounmap(mp->tx_dma);
293 iounmap(mp->mace);
319 struct mace_data *mp = netdev_priv(dev);
320 volatile struct mace __iomem *mb = mp->mace;
352 if (mp->chipid == BROKEN_ADDRCHG_REV)
363 if (mp->chipid != BROKEN_ADDRCHG_REV)
366 if (mp->port_aaui)
374 struct mace_data *mp = netdev_priv(dev);
375 volatile struct mace __iomem *mb = mp->mace;
380 if (mp->chipid == BROKEN_ADDRCHG_REV)
389 if (mp->chipid != BROKEN_ADDRCHG_REV)
395 struct mace_data *mp = netdev_priv(dev);
396 volatile struct mace __iomem *mb = mp->mace;
399 spin_lock_irqsave(&mp->lock, flags);
404 out_8(&mb->maccc, mp->maccc);
406 spin_unlock_irqrestore(&mp->lock, flags);
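mace_set_address() (lines 395-406) wraps the register update in spin_lock_irqsave() because the same mp->lock is also taken from hard-IRQ context (line 669), so local interrupts must stay off while it is held. A sketch of that pattern, assuming an illustrative struct priv and control word rather than the driver's real maccc register:

#include <linux/spinlock.h>
#include <linux/types.h>

struct priv {
	spinlock_t lock;	/* spin_lock_init(&p->lock) at probe time */
	u8 ctrl;		/* software copy of a control register */
};

static void my_set_promisc(struct priv *p, bool on)
{
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);	/* also blocks the IRQ path */
	if (on)
		p->ctrl |= 0x80;	/* e.g. a PROM-style bit */
	else
		p->ctrl &= ~0x80;
	/* ...write p->ctrl back to the chip here, cf. out_8(&mb->maccc, ...) */
	spin_unlock_irqrestore(&p->lock, flags);
}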
410 static inline void mace_clean_rings(struct mace_data *mp)
416 if (mp->rx_bufs[i] != NULL) {
417 dev_kfree_skb(mp->rx_bufs[i]);
418 mp->rx_bufs[i] = NULL;
421 for (i = mp->tx_empty; i != mp->tx_fill; ) {
422 dev_kfree_skb(mp->tx_bufs[i]);
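mace_clean_rings() (lines 410-422) frees every posted receive buffer and then walks the transmit ring from the consumer index (tx_empty) to the producer index (tx_fill), wrapping at the end of the ring. A self-contained sketch of that walk, with assumed ring sizes (the real N_RX_RING/N_TX_RING values are not shown in this listing):

#include <linux/skbuff.h>

#define MY_N_RX_RING 8
#define MY_N_TX_RING 6

struct my_rings {
	struct sk_buff *rx_bufs[MY_N_RX_RING];
	struct sk_buff *tx_bufs[MY_N_TX_RING];
	int tx_empty, tx_fill;
};

static void my_clean_rings(struct my_rings *r)
{
	int i;

	for (i = 0; i < MY_N_RX_RING; ++i) {
		if (r->rx_bufs[i] != NULL) {
			dev_kfree_skb(r->rx_bufs[i]);
			r->rx_bufs[i] = NULL;
		}
	}
	for (i = r->tx_empty; i != r->tx_fill; ) {
		dev_kfree_skb(r->tx_bufs[i]);
		if (++i >= MY_N_TX_RING)
			i = 0;		/* wrap around the ring */
	}
}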
430 struct mace_data *mp = netdev_priv(dev);
431 volatile struct mace __iomem *mb = mp->mace;
432 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
433 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
443 mace_clean_rings(mp);
444 memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
445 cp = mp->rx_cmds;
454 mp->rx_bufs[i] = skb;
461 mp->rx_bufs[i] = NULL;
463 mp->rx_fill = i;
464 mp->rx_empty = 0;
469 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds));
473 out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
477 cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
479 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds));
483 out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
484 mp->tx_fill = 0;
485 mp->tx_empty = 0;
486 mp->tx_fullup = 0;
487 mp->tx_active = 0;
488 mp->tx_bad_runt = 0;
491 out_8(&mb->maccc, mp->maccc);
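mace_open() (lines 430-491) lays the DBDMA command rings out so that the entry after the last real slot branches back to the first one (cmd_dep at lines 469 and 479 points at the start of the ring), then aims the channel's cmdptr at the ring and clears the software indices. A simplified sketch of that circular layout; struct my_desc is a stand-in, not the real dbdma_cmd field layout:

#include <linux/string.h>
#include <linux/types.h>

#define MY_N_RX 8

struct my_desc {
	u16 command;	/* opcode: a transfer, or a NOP/branch */
	u32 phy_addr;	/* bus address of the data buffer */
	u32 branch;	/* where a branch command jumps, cf. cmd_dep */
};

static void my_ring_init(struct my_desc ring[MY_N_RX + 1], u32 ring_bus_addr)
{
	memset(ring, 0, (MY_N_RX + 1) * sizeof(struct my_desc));
	/* entries 0..MY_N_RX-1 would each describe one receive buffer */
	ring[MY_N_RX].command = 0;		/* NOP + branch-always opcode */
	ring[MY_N_RX].branch = ring_bus_addr;	/* loop back to entry 0 */
	/* the channel's command pointer is then written with ring_bus_addr,
	   cf. out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds)) above */
}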
500 struct mace_data *mp = netdev_priv(dev);
501 volatile struct mace __iomem *mb = mp->mace;
502 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
503 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
513 mace_clean_rings(mp);
520 struct mace_data *mp = netdev_priv(dev);
522 if (mp->timeout_active)
523 del_timer(&mp->tx_timeout);
524 mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
525 add_timer(&mp->tx_timeout);
526 mp->timeout_active = 1;
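The transmit watchdog is re-armed at lines 520-526 with a timeout_active flag plus del_timer()/add_timer(), and the callback recovers its mace_data with from_timer() at line 805. The sketch below shows the same idea with the single-call mod_timer() rearm, which is an equivalent idiom rather than what the listing does verbatim; the names and timeout value are illustrative:

#include <linux/jiffies.h>
#include <linux/timer.h>

#define MY_TX_TIMEOUT (HZ / 4)	/* illustrative timeout */

struct my_wd_priv {
	struct timer_list tx_timeout;
};

static void my_tx_watchdog(struct timer_list *t)
{
	/* from_timer() maps the timer_list back to its container */
	struct my_wd_priv *p = from_timer(p, t, tx_timeout);

	/* ...reset the transmit DMA channel here, as mace_tx_timeout() does... */
	(void)p;
}

static void my_arm_watchdog(struct my_wd_priv *p)
{
	/* mod_timer() restarts a pending timer or starts an inactive one */
	mod_timer(&p->tx_timeout, jiffies + MY_TX_TIMEOUT);
}

/* at init time: timer_setup(&p->tx_timeout, my_tx_watchdog, 0); */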
531 struct mace_data *mp = netdev_priv(dev);
532 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
538 spin_lock_irqsave(&mp->lock, flags);
539 fill = mp->tx_fill;
543 if (next == mp->tx_empty) {
545 mp->tx_fullup = 1;
546 spin_unlock_irqrestore(&mp->lock, flags);
549 spin_unlock_irqrestore(&mp->lock, flags);
557 mp->tx_bufs[fill] = skb;
558 cp = mp->tx_cmds + NCMDS_TX * fill;
562 np = mp->tx_cmds + NCMDS_TX * next;
566 spin_lock_irqsave(&mp->lock, flags);
567 mp->tx_fill = next;
568 if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
572 ++mp->tx_active;
577 if (next == mp->tx_empty)
579 spin_unlock_irqrestore(&mp->lock, flags);
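mace_xmit_start() (lines 538-579) treats the ring as full when advancing the producer index would make it equal to the consumer index (line 543), so one slot always stays unused to tell "full" apart from "empty"; on that condition it sets tx_fullup and returns busy. A minimal sketch of that test, ring size assumed:

#define MY_N_TX 6

struct my_tx_ring {
	int tx_fill;	/* producer: next slot to fill */
	int tx_empty;	/* consumer: oldest slot not yet completed */
};

static int my_tx_ring_full(const struct my_tx_ring *r)
{
	int next = r->tx_fill + 1;

	if (next >= MY_N_TX)
		next = 0;		/* wrap */
	return next == r->tx_empty;	/* full: would catch the consumer */
}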
586 struct mace_data *mp = netdev_priv(dev);
587 volatile struct mace __iomem *mb = mp->mace;
592 spin_lock_irqsave(&mp->lock, flags);
593 mp->maccc &= ~PROM;
595 mp->maccc |= PROM;
619 if (mp->chipid == BROKEN_ADDRCHG_REV)
628 if (mp->chipid != BROKEN_ADDRCHG_REV)
632 out_8(&mb->maccc, mp->maccc);
633 spin_unlock_irqrestore(&mp->lock, flags);
636 static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
638 volatile struct mace __iomem *mb = mp->mace;
660 struct mace_data *mp = netdev_priv(dev);
661 volatile struct mace __iomem *mb = mp->mace;
662 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
669 spin_lock_irqsave(&mp->lock, flags);
672 mace_handle_misc_intrs(mp, intr, dev);
674 i = mp->tx_empty;
676 del_timer(&mp->tx_timeout);
677 mp->timeout_active = 0;
685 mace_handle_misc_intrs(mp, intr, dev);
686 if (mp->tx_bad_runt) {
688 mp->tx_bad_runt = 0;
724 cp = mp->tx_cmds + NCMDS_TX * i;
735 mp->tx_bad_runt = 1;
752 if (i == mp->tx_fill) {
765 dev->stats.tx_bytes += mp->tx_bufs[i]->len;
768 dev_consume_skb_irq(mp->tx_bufs[i]);
769 --mp->tx_active;
778 if (i != mp->tx_empty) {
779 mp->tx_fullup = 0;
782 mp->tx_empty = i;
783 i += mp->tx_active;
786 if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
789 cp = mp->tx_cmds + NCMDS_TX * i;
792 ++mp->tx_active;
795 } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
799 spin_unlock_irqrestore(&mp->lock, flags);
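On a transmit interrupt (lines 674-795) the handler walks from tx_empty toward tx_fill, accounts the bytes, frees each completed skb from IRQ context with dev_consume_skb_irq() (line 768), drops tx_active, and clears tx_fullup so transmission can resume. A sketch of that consumer walk; hw_done() is a hypothetical "this descriptor has completed" test standing in for the DBDMA status checks:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_N_TX 6

struct my_tx_state {
	struct sk_buff *tx_bufs[MY_N_TX];
	int tx_empty, tx_fill, tx_fullup;
};

static void my_tx_complete(struct net_device *dev, struct my_tx_state *s,
			   int (*hw_done)(int slot))
{
	int i = s->tx_empty;

	while (i != s->tx_fill && hw_done(i)) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += s->tx_bufs[i]->len;
		dev_consume_skb_irq(s->tx_bufs[i]);	/* free from IRQ context */
		if (++i >= MY_N_TX)
			i = 0;
	}
	if (i != s->tx_empty) {
		s->tx_fullup = 0;
		netif_wake_queue(dev);	/* room for mace_xmit_start() again */
	}
	s->tx_empty = i;
}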
805 struct mace_data *mp = from_timer(mp, t, tx_timeout);
806 struct net_device *dev = macio_get_drvdata(mp->mdev);
807 volatile struct mace __iomem *mb = mp->mace;
808 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
809 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
814 spin_lock_irqsave(&mp->lock, flags);
815 mp->timeout_active = 0;
816 if (mp->tx_active == 0 && !mp->tx_bad_runt)
820 mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);
822 cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;
838 i = mp->tx_empty;
839 mp->tx_active = 0;
841 if (mp->tx_bad_runt) {
842 mp->tx_bad_runt = 0;
843 } else if (i != mp->tx_fill) {
844 dev_kfree_skb_irq(mp->tx_bufs[i]);
847 mp->tx_empty = i;
849 mp->tx_fullup = 0;
851 if (i != mp->tx_fill) {
852 cp = mp->tx_cmds + NCMDS_TX * i;
857 ++mp->tx_active;
863 out_8(&mb->maccc, mp->maccc);
866 spin_unlock_irqrestore(&mp->lock, flags);
877 struct mace_data *mp = netdev_priv(dev);
878 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
887 spin_lock_irqsave(&mp->lock, flags);
888 for (i = mp->rx_empty; i != mp->rx_fill; ) {
889 cp = mp->rx_cmds + i;
895 np = mp->rx_cmds + next;
896 if (next != mp->rx_fill &&
906 skb = mp->rx_bufs[i];
933 mp->rx_bufs[i] = NULL;
945 mp->rx_empty = i;
947 i = mp->rx_fill;
952 if (next == mp->rx_empty)
954 cp = mp->rx_cmds + i;
955 skb = mp->rx_bufs[i];
960 mp->rx_bufs[i] = skb;
977 if (i != mp->rx_fill) {
979 mp->rx_fill = i;
981 spin_unlock_irqrestore(&mp->lock, flags);
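mace_rxdma_intr() (lines 887-981) first consumes received frames between rx_empty and rx_fill, NULLing each handed-up buffer (line 933), and then refills: starting at rx_fill it gives every emptied slot a fresh skb until the slot just before rx_empty, and finally advances rx_fill. A sketch of that refill loop; the buffer length and ring size are assumptions, and the real code also rewrites the slot's DBDMA command:

#include <linux/skbuff.h>

#define MY_N_RX      8
#define MY_RX_BUFLEN 1536

struct my_rx_state {
	struct sk_buff *rx_bufs[MY_N_RX];
	int rx_empty, rx_fill;
};

static void my_rx_refill(struct my_rx_state *s)
{
	int i = s->rx_fill;

	for (;;) {
		int next = i + 1;

		if (next >= MY_N_RX)
			next = 0;
		if (next == s->rx_empty)
			break;		/* keep one slot between fill and empty */
		if (s->rx_bufs[i] == NULL) {
			struct sk_buff *skb = dev_alloc_skb(MY_RX_BUFLEN);

			if (skb == NULL)
				break;	/* retry on a later interrupt */
			s->rx_bufs[i] = skb;
			/* ...point this slot's DMA command at skb->data... */
		}
		i = next;
	}
	s->rx_fill = i;
}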