Lines matching refs:qep: every reference to the per-channel state pointer qep (a struct sunqe *) in the Sun QuadEthernet driver, drivers/net/ethernet/sun/sunqe.c. The left-hand numbers are source line numbers; only matching lines are shown.

85 static inline int qe_stop(struct sunqe *qep)
87 void __iomem *cregs = qep->qcregs;
88 void __iomem *mregs = qep->mregs;
124 static void qe_init_rings(struct sunqe *qep)
126 struct qe_init_block *qb = qep->qe_block;
127 struct sunqe_buffers *qbufs = qep->buffers;
128 __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
131 qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
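qe_init_rings (line 124) zeroes all four ring cursors at once (line 131): rx_new/tx_new are the producer cursors, rx_old/tx_old the reclaim cursors. A minimal runnable model of that bookkeeping, with an illustrative ring size and a simplified availability formula (the real constants and the exact TX_BUFFS_AVAIL macro live in sunqe.h):

#include <assert.h>
#include <stdio.h>

#define TX_RING_SIZE 16                       /* illustrative; see sunqe.h */
#define NEXT_TX(n)   (((n) + 1) & (TX_RING_SIZE - 1))

struct ring { int tx_new, tx_old; };          /* producer / reclaim cursor */

/* Free slots; one slot stays empty so tx_new == tx_old means "empty". */
static int tx_buffs_avail(const struct ring *r)
{
	return (r->tx_old <= r->tx_new)
		? r->tx_old + (TX_RING_SIZE - 1) - r->tx_new
		: r->tx_old - r->tx_new - 1;
}

int main(void)
{
	struct ring r = { 0, 0 };             /* as after qe_init_rings() */

	assert(tx_buffs_avail(&r) == TX_RING_SIZE - 1);
	r.tx_new = NEXT_TX(r.tx_new);         /* qe_start_xmit queues one */
	assert(tx_buffs_avail(&r) == TX_RING_SIZE - 2);
	r.tx_old = NEXT_TX(r.tx_old);         /* qe_tx_reclaim frees it */
	assert(tx_buffs_avail(&r) == TX_RING_SIZE - 1);
	printf("ring bookkeeping OK\n");
	return 0;
}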
141 static int qe_init(struct sunqe *qep, int from_irq)
143 struct sunqec *qecp = qep->parent;
144 void __iomem *cregs = qep->qcregs;
145 void __iomem *mregs = qep->mregs;
147 const unsigned char *e = &qep->dev->dev_addr[0];
148 __u32 qblk_dvma = (__u32)qep->qblock_dvma;
153 if (qe_stop(qep))
168 tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
224 qe_init_rings(qep);
241 printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
250 qe_set_multicast(qep->dev);
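Line 168 in qe_init carves this channel's slice out of the QEC's local buffer memory: each of the four channels gets channel * GLOB_MSIZE as its base, with the receive region first and the transmit region after it. A worked sketch of that arithmetic with made-up register values (on real hardware both sizes come from sbus_readl of the global registers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values: 64 KB of local memory per channel, split
	 * as 48 KB RX + 16 KB TX; the chip supplies the real numbers. */
	uint32_t msize = 64 * 1024;   /* sbus_readl(gregs + GLOB_MSIZE) */
	uint32_t rsize = 48 * 1024;   /* sbus_readl(gregs + GLOB_RSIZE) */

	for (int channel = 0; channel < 4; channel++) {
		uint32_t rxbase = channel * msize;   /* line 168 */
		uint32_t txbase = rxbase + rsize;    /* TX region follows RX */

		printf("channel %d: rx @ 0x%05x, tx @ 0x%05x\n",
		       channel, (unsigned)rxbase, (unsigned)txbase);
	}
	return 0;
}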
259 static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
261 struct net_device *dev = qep->dev;
405 qe_init(qep, 1);
412 static void qe_rx(struct sunqe *qep)
414 struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
415 struct net_device *dev = qep->dev;
417 struct sunqe_buffers *qbufs = qep->buffers;
418 __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
419 int elem = qep->rx_new;
447 skb->protocol = eth_type_trans(skb, qep->dev);
459 qep->rx_new = elem;
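qe_rx (lines 412-459) is a classic descriptor-ring consumer: start at qep->rx_new, process entries until one is still owned by the chip, hand each slot back, and store the stopping point for the next interrupt (line 459). A compilable model; the ownership flag and descriptor layout here are assumptions, the real ones being struct qe_rxd in sunqe.h:

#include <stdint.h>
#include <stdio.h>

#define RX_RING_SIZE 16
#define NEXT_RX(n)   (((n) + 1) & (RX_RING_SIZE - 1))
#define RXD_OWN      0x80000000u              /* assumed "chip owns it" bit */

struct qe_rxd { uint32_t rx_flags; uint32_t rx_addr; };
struct qe     { struct qe_rxd rxd[RX_RING_SIZE]; int rx_new; };

static void rx_walk(struct qe *q)
{
	int elem = q->rx_new;

	/* Host owns the descriptor => a frame has landed in its buffer. */
	while (!(q->rxd[elem].rx_flags & RXD_OWN)) {
		/* real driver: copy out, eth_type_trans() (447), netif_rx() */
		printf("frame in slot %d\n", elem);
		q->rxd[elem].rx_flags = RXD_OWN;  /* return slot to the chip */
		elem = NEXT_RX(elem);
	}
	q->rx_new = elem;                         /* resume point (line 459) */
}

int main(void)
{
	struct qe q = { 0 };

	for (int i = 0; i < RX_RING_SIZE; i++)
		q.rxd[i].rx_flags = RXD_OWN;      /* all owned by the chip */
	q.rxd[0].rx_flags = 0;                    /* chip completed slot 0 */
	rx_walk(&q);
	return 0;
}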
462 static void qe_tx_reclaim(struct sunqe *qep);
478 struct sunqe *qep = qecp->qes[channel];
481 qe_status = sbus_readl(qep->qcregs + CREG_STAT);
483 if (qe_is_bolixed(qep, qe_status))
487 qe_rx(qep);
488 if (netif_queue_stopped(qep->dev) &&
490 spin_lock(&qep->lock);
491 qe_tx_reclaim(qep);
492 if (TX_BUFFS_AVAIL(qep) > 0) {
496 netif_wake_queue(qep->dev);
497 sbus_writel(1, qep->qcregs + CREG_TIMASK);
499 spin_unlock(&qep->lock);
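Lines 488-499 are the interesting half of the interrupt handler. TX-complete interrupts are normally masked ("lazy reclaim"), so this branch only fires after qe_start_xmit has filled the ring, stopped the queue, and unmasked them; once reclaim frees a slot the handler wakes the queue and writes 1 to CREG_TIMASK (line 497) to go lazy again. The handshake, modeled with stubs in place of the netif_* and sbus_writel calls:

#include <stdbool.h>
#include <stdio.h>

static bool queue_stopped;      /* netif_{stop,wake}_queue() stand-in */
static int  free_slots;         /* TX_BUFFS_AVAIL(qep) stand-in */
static int  tx_irq_masked = 1;  /* CREG_TIMASK: 1 = lazy, 0 = interrupt us */

static void xmit_when_full(void)
{
	queue_stopped = true;       /* netif_stop_queue(dev) */
	tx_irq_masked = 0;          /* sbus_writel(0, ...TIMASK), line 615 */
}

static void tx_irq(void)            /* the CREG_STAT_TXIRQ branch, 488-499 */
{
	if (!queue_stopped)
		return;             /* nobody is waiting on reclaim */
	/* spin_lock(&qep->lock); qe_tx_reclaim(qep) advances tx_old */
	free_slots += 1;            /* pretend reclaim freed one slot */
	if (free_slots > 0) {
		queue_stopped = false;  /* netif_wake_queue(qep->dev), 496 */
		tx_irq_masked = 1;      /* sbus_writel(1, ...TIMASK), 497 */
	}
	/* spin_unlock(&qep->lock); */
}

int main(void)
{
	xmit_when_full();
	tx_irq();
	printf("stopped=%d masked=%d\n", queue_stopped, tx_irq_masked);
	return 0;
}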
513 struct sunqe *qep = netdev_priv(dev);
515 qep->mconfig = (MREGS_MCONFIG_TXENAB |
518 return qe_init(qep, 0);
523 struct sunqe *qep = netdev_priv(dev);
525 qe_stop(qep);
530 * the IRQ protected qep->lock.
532 static void qe_tx_reclaim(struct sunqe *qep)
534 struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
535 int elem = qep->tx_old;
537 while (elem != qep->tx_new) {
544 qep->tx_old = elem;
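The fragments at 532-544 give nearly the whole of qe_tx_reclaim; only the ownership test inside the loop is elided. A reconstruction as a standalone sketch (TXD_OWN and the descriptor layout are assumptions from sunqe.h); per the comment at line 530, in the driver this must always run under the IRQ-protected qep->lock:

#include <stdint.h>

#define TX_RING_SIZE 16
#define NEXT_TX(n)   (((n) + 1) & (TX_RING_SIZE - 1))
#define TXD_OWN      0x80000000u       /* chip still owns the descriptor */

struct qe_txd { uint32_t tx_flags; uint32_t tx_addr; };
struct qe     { struct qe_txd txd[TX_RING_SIZE]; int tx_old, tx_new; };

/* Walk tx_old toward tx_new; stop at the first unsent descriptor. */
static void tx_reclaim(struct qe *q)
{
	int elem = q->tx_old;

	while (elem != q->tx_new) {
		if (q->txd[elem].tx_flags & TXD_OWN)
			break;             /* chip hasn't transmitted it yet */
		elem = NEXT_TX(elem);
	}
	q->tx_old = elem;                  /* slots before tx_old are free */
}

int main(void)
{
	struct qe q = { .tx_old = 0, .tx_new = 2 };  /* two frames queued */

	q.txd[1].tx_flags = TXD_OWN;       /* second one still in flight */
	tx_reclaim(&q);                    /* reclaims slot 0, stops at 1 */
	return q.tx_old == 1 ? 0 : 1;
}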
549 struct sunqe *qep = netdev_priv(dev);
552 spin_lock_irq(&qep->lock);
557 qe_tx_reclaim(qep);
558 tx_full = TX_BUFFS_AVAIL(qep) <= 0;
560 spin_unlock_irq(&qep->lock);
566 qe_init(qep, 1);
575 struct sunqe *qep = netdev_priv(dev);
576 struct sunqe_buffers *qbufs = qep->buffers;
577 __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
581 spin_lock_irq(&qep->lock);
583 qe_tx_reclaim(qep);
586 entry = qep->tx_new;
593 qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
597 qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
598 qep->qe_block->qe_txd[entry].tx_flags =
600 qep->tx_new = NEXT_TX(entry);
603 sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
608 if (TX_BUFFS_AVAIL(qep) <= 0) {
615 sbus_writel(0, qep->qcregs + CREG_TIMASK);
617 spin_unlock_irq(&qep->lock);
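qe_start_xmit (575-617) shows the QE's unusual TX path: every frame is copied into a fixed per-slot bounce buffer inside qep->buffers (the chip does not scatter/gather from arbitrary skb memory), the descriptor is first marked TXD_UPDATE (593), then given the buffer's DVMA address and final OWN'd flags (597-598), and CREG_CTRL_TWAKEUP (603) kicks the chip. A self-contained sketch; the flag values, slot size, and single-descriptor-per-frame layout are assumptions:

#include <stdint.h>
#include <string.h>

#define TX_RING_SIZE 16
#define NEXT_TX(n)   (((n) + 1) & (TX_RING_SIZE - 1))
#define TXD_OWN      0x80000000u       /* hand the slot to the chip */
#define TXD_UPDATE   0x10000000u       /* "being updated" guard (line 593) */
#define SLOT_SZ      1664              /* illustrative bounce-buffer size */

struct qe_txd { uint32_t tx_flags; uint32_t tx_addr; };
struct qe {
	struct qe_txd txd[TX_RING_SIZE];
	uint8_t  buf[TX_RING_SIZE][SLOT_SZ];  /* qep->buffers */
	uint32_t buf_dvma;                    /* qep->buffers_dvma */
	int tx_new, tx_old;
};

static void twakeup(void) { /* sbus_writel(CREG_CTRL_TWAKEUP, ...), 603 */ }

static int start_xmit(struct qe *q, const void *frame, uint32_t len)
{
	int entry = q->tx_new;

	q->txd[entry].tx_flags = TXD_UPDATE;        /* mark slot in flux */
	memcpy(q->buf[entry], frame, len);          /* bounce-buffer copy */
	q->txd[entry].tx_addr  = q->buf_dvma + entry * SLOT_SZ;
	q->txd[entry].tx_flags = TXD_OWN | len;     /* publish to the chip */
	q->tx_new = NEXT_TX(entry);
	twakeup();                                  /* scan the ring, chip */
	return 0;
}

int main(void)
{
	static struct qe q;
	const char pkt[60] = "hello";

	return start_xmit(&q, pkt, sizeof(pkt));
}

The full function also stops the queue and unmasks TX interrupts (608, 615) when TX_BUFFS_AVAIL drops to zero, completing the handshake sketched after line 499 above.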
626 struct sunqe *qep = netdev_priv(dev);
628 u8 new_mconfig = qep->mconfig;
637 qep->mregs + MREGS_IACONFIG);
638 while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
641 sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
642 sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
657 qep->mregs + MREGS_IACONFIG);
658 while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
662 sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
664 sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
673 qep->mconfig = new_mconfig;
674 sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);
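qe_set_multicast programs a MACE-style 64-bit logical address filter: write the address-change command to MREGS_IACONFIG, spin until the ACHNGE bit clears (638, 658), then stream 8 bytes into MREGS_FILTER, either all 0xff to accept every multicast frame (641) or a hash table in which each multicast address sets bit (crc >> 26) of a little-endian CRC-32 over its 6 bytes. The hash computation as a runnable sketch; this CRC is the same algorithm as the kernel's ether_crc_le, and the bit placement mirrors the usual hash_table[crc >> 4] |= 1 << (crc & 0xf) idiom in these drivers:

#include <stdint.h>
#include <stdio.h>

/* Little-endian CRC-32 (polynomial 0xedb88320), as ether_crc_le() does:
 * seed 0xffffffff, no final inversion. */
static uint32_t ether_crc_le(int len, const uint8_t *p)
{
	uint32_t crc = 0xffffffff;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint16_t hash_table[4] = { 0, 0, 0, 0 };   /* the 64 filter bits */
	uint32_t bin = ether_crc_le(6, mac) >> 26; /* top 6 bits pick a bin */

	hash_table[bin >> 4] |= (uint16_t)(1 << (bin & 0xf));
	printf("bin %u -> word %u, bit %u\n", bin, bin >> 4, bin & 0xf);
	/* the driver then feeds these 8 bytes into MREGS_FILTER (662) */
	return 0;
}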
684 struct sunqe *qep = netdev_priv(dev);
690 op = qep->op;
700 struct sunqe *qep = netdev_priv(dev);
701 void __iomem *mregs = qep->mregs;
704 spin_lock_irq(&qep->lock);
706 spin_unlock_irq(&qep->lock);