Lines Matching refs: tp (tulip network driver, interrupt.c)
61 struct tulip_private *tp = netdev_priv(dev);
66 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
67 entry = tp->dirty_rx % RX_RING_SIZE;
68 if (tp->rx_buffers[entry].skb == NULL) {
72 skb = tp->rx_buffers[entry].skb =
77 mapping = dma_map_single(&tp->pdev->dev, skb->data,
79 if (dma_mapping_error(&tp->pdev->dev, mapping)) {
81 tp->rx_buffers[entry].skb = NULL;
85 tp->rx_buffers[entry].mapping = mapping;
87 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
90 tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
92 if (tp->chip_id == LC82C168) {
93 if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
97 iowrite32(0x01, tp->base_addr + CSR2);
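The block above (lines 61-97) is the receive-ring refill path: every dirty slot that has lost its skb gets a fresh buffer, the buffer is DMA-mapped with the mapping checked for errors, the mapping is written into the descriptor, and only then is the descriptor handed back to the hardware by setting DescOwned; on the LC82C168 the receiver is additionally kicked through CSR2 (lines 92-97). A rough reconstruction of that loop follows; the helper name, the netdev_alloc_skb() call and PKT_BUF_SZ are assumptions based on the usual shape of this driver rather than lines in the listing.

/* Sketch of the refill loop suggested by lines 61-92; RX_RING_SIZE,
 * PKT_BUF_SZ and DescOwned are the driver's own constants. */
static void refill_rx_sketch(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry;

        for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
                entry = tp->dirty_rx % RX_RING_SIZE;
                if (tp->rx_buffers[entry].skb == NULL) {
                        struct sk_buff *skb;
                        dma_addr_t mapping;

                        skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
                        if (skb == NULL)
                                break;  /* retried later via the oom timer */

                        mapping = dma_map_single(&tp->pdev->dev, skb->data,
                                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                        if (dma_mapping_error(&tp->pdev->dev, mapping)) {
                                dev_kfree_skb(skb);
                                break;
                        }
                        tp->rx_buffers[entry].skb = skb;
                        tp->rx_buffers[entry].mapping = mapping;
                        tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
                }
                /* hand the descriptor to the NIC only after buffer1 is valid */
                tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
        }
}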
107 struct tulip_private *tp = from_timer(tp, t, oom_timer);
109 napi_schedule(&tp->napi);
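Lines 107-109 are the out-of-memory timer callback. When the refill above cannot allocate buffers, the driver arms oom_timer (see line 355 below) and the handler simply reschedules NAPI so the poll routine retries the refill. A minimal sketch, assuming the timer was registered with timer_setup() against the oom_timer field:

/* oom-timer pattern from lines 107-109: recover the private struct with
 * from_timer() and let the NAPI poll retry the refill. */
static void oom_timer_sketch(struct timer_list *t)
{
        struct tulip_private *tp = from_timer(tp, t, oom_timer);

        napi_schedule(&tp->napi);
}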
114 struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
115 struct net_device *dev = tp->dev;
116 int entry = tp->cur_rx % RX_RING_SIZE;
132 entry, tp->rx_ring[entry].status);
135 if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
140 iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
144 while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
145 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
148 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
213 dma_sync_single_for_cpu(&tp->pdev->dev,
214 tp->rx_buffers[entry].mapping,
218 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
223 tp->rx_buffers[entry].skb->data,
226 dma_sync_single_for_device(&tp->pdev->dev,
227 tp->rx_buffers[entry].mapping,
231 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
235 if (tp->rx_buffers[entry].mapping !=
236 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
239 le32_to_cpu(tp->rx_ring[entry].buffer1),
240 (unsigned long long)tp->rx_buffers[entry].mapping,
245 dma_unmap_single(&tp->pdev->dev,
246 tp->rx_buffers[entry].mapping,
250 tp->rx_buffers[entry].skb = NULL;
251 tp->rx_buffers[entry].mapping = 0;
264 entry = (++tp->cur_rx) % RX_RING_SIZE;
265 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
280 } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
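Lines 144-280 are the core of the NAPI receive loop: descriptors are walked until one is found that the chip still owns, and each completed frame is delivered either by copying it into a fresh skb (short frames, so the original DMA buffer stays in the ring) or by unmapping the buffer and handing the original skb up the stack. A condensed sketch of that copy-vs-unmap decision; pkt_len, rx_copybreak, the allocation call and the final delivery calls are assumptions taken from the usual shape of this driver rather than lines visible in the listing.

/* Copybreak pattern suggested by lines 213-251: small frames are copied out
 * of the still-mapped receive buffer; large frames are unmapped and the
 * original skb is handed up. */
static void rx_deliver_sketch(struct net_device *dev, int entry, int pkt_len)
{
        struct tulip_private *tp = netdev_priv(dev);
        struct sk_buff *skb;

        if (pkt_len < rx_copybreak &&
            (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                skb_reserve(skb, 2);    /* align the IP header */
                dma_sync_single_for_cpu(&tp->pdev->dev,
                                        tp->rx_buffers[entry].mapping,
                                        pkt_len, DMA_FROM_DEVICE);
                skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                        pkt_len);
                skb_put(skb, pkt_len);
                dma_sync_single_for_device(&tp->pdev->dev,
                                           tp->rx_buffers[entry].mapping,
                                           pkt_len, DMA_FROM_DEVICE);
                /* original buffer stays mapped in the ring for reuse */
        } else {
                dma_unmap_single(&tp->pdev->dev, tp->rx_buffers[entry].mapping,
                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                skb = tp->rx_buffers[entry].skb;
                skb_put(skb, pkt_len);
                tp->rx_buffers[entry].skb = NULL;
                tp->rx_buffers[entry].mapping = 0;
        }
        skb->protocol = eth_type_trans(skb, dev);
        netif_receive_skb(skb);
}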
301 if (tp->flags & HAS_INTR_MITIGATION) {
303 if (!tp->mit_on) {
304 tp->mit_on = 1;
305 iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
309 if (tp->mit_on) {
310 tp->mit_on = 0;
311 iowrite32(0, tp->base_addr + CSR11);
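Lines 301-311 toggle the chip's hardware interrupt mitigation through CSR11 on chips that advertise HAS_INTR_MITIGATION: mitigation is switched on while the poll loop keeps finding work and switched off again once the ring runs dry, so an idle link keeps its low interrupt latency. The same logic folded into a small helper for clarity; the helper and its busy flag are illustrative, mit_table[MIT_TABLE] is the driver's own mitigation setting.

/* Mitigation toggle from lines 301-311: program CSR11 only on state changes
 * so the register is not rewritten on every poll. */
static void set_mitigation_sketch(struct tulip_private *tp, bool busy)
{
        if (!(tp->flags & HAS_INTR_MITIGATION))
                return;

        if (busy && !tp->mit_on) {
                tp->mit_on = 1;
                iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
        } else if (!busy && tp->mit_on) {
                tp->mit_on = 0;
                iowrite32(0, tp->base_addr + CSR11);
        }
}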
321 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
327 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
343 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
344 tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
347 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
355 mod_timer(&tp->oom_timer, jiffies+1);
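Lines 321-355 form the exit path of the poll routine: the ring is refilled, and if every dirty slot got a buffer back the driver completes NAPI and re-enables the chip's interrupt sources through CSR7; if allocation still failed it arms the one-jiffy oom_timer instead, so receive interrupts are not unmasked over an empty ring. A sketch of that tail, using the current napi_complete_done() API; the tulip_refill_rx() call and the poll routine's napi/work_done locals are assumptions.

/* Poll-exit pattern from lines 321-355 (tail of the poll function). */
tulip_refill_rx(dev);

if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) {
        /* still out of buffers: retry shortly instead of unmasking rx irqs */
        mod_timer(&tp->oom_timer, jiffies + 1);
} else if (napi_complete_done(napi, work_done)) {
        iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);
}
return work_done;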
371 struct tulip_private *tp = netdev_priv(dev);
372 int entry = tp->cur_rx % RX_RING_SIZE;
373 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
378 entry, tp->rx_ring[entry].status);
380 while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
381 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
443 dma_sync_single_for_cpu(&tp->pdev->dev,
444 tp->rx_buffers[entry].mapping,
448 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
453 tp->rx_buffers[entry].skb->data,
456 dma_sync_single_for_device(&tp->pdev->dev,
457 tp->rx_buffers[entry].mapping,
461 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
465 if (tp->rx_buffers[entry].mapping !=
466 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
469 le32_to_cpu(tp->rx_ring[entry].buffer1),
470 (long long)tp->rx_buffers[entry].mapping,
475 dma_unmap_single(&tp->pdev->dev,
476 tp->rx_buffers[entry].mapping,
479 tp->rx_buffers[entry].skb = NULL;
480 tp->rx_buffers[entry].mapping = 0;
490 entry = (++tp->cur_rx) % RX_RING_SIZE;
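Lines 371-490 are the non-NAPI receive path and mirror the poll loop above almost line for line. One detail worth spelling out is the index bookkeeping visible at lines 372-373 and 490: cur_rx and dirty_rx are free-running counters that are wrapped only when used as an array index, so their difference gives the number of in-flight descriptors even across wraparound. A tiny illustration; the constant value and the helper functions are chosen for the example.

#define RX_RING_SIZE 128        /* example value */

/* With cur_rx == 130 and dirty_rx == 125: 5 descriptors are outstanding,
 * the current slot is 130 % 128 == 2, and up to 123 refills may still be
 * queued. */
static unsigned int rx_in_flight(unsigned int cur_rx, unsigned int dirty_rx)
{
        return cur_rx - dirty_rx;                  /* wrap-safe for unsigned */
}

static unsigned int rx_work_limit(unsigned int cur_rx, unsigned int dirty_rx)
{
        return dirty_rx + RX_RING_SIZE - cur_rx;   /* free ring slots left */
}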
499 struct tulip_private *tp = netdev_priv(dev);
500 int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;
502 if (csr12 != tp->csr12_shadow) {
504 iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
505 tp->csr12_shadow = csr12;
507 spin_lock(&tp->lock);
509 spin_unlock(&tp->lock);
511 iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);
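Lines 499-511 are the PHY/link interrupt handler for chips that report link state through CSR12: the handler compares CSR12 against a shadow copy to see whether anything changed, acknowledges the change, and processes the link event under tp->lock. A sketch of that shadow-register pattern; the irqreturn plumbing and the elided link handler are assumptions.

static irqreturn_t phy_interrupt_sketch(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tulip_private *tp = netdev_priv(dev);
        int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

        if (csr12 == tp->csr12_shadow)
                return IRQ_NONE;                /* no link change, not ours */

        /* acknowledge by toggling bit 1, then remember the new state */
        iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
        tp->csr12_shadow = csr12;

        spin_lock(&tp->lock);
        /* re-check link / duplex here (handler elided) */
        spin_unlock(&tp->lock);

        iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);
        return IRQ_HANDLED;
}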
525 struct tulip_private *tp = netdev_priv(dev);
526 void __iomem *ioaddr = tp->base_addr;
546 if (tp->flags & HAS_PHY_IRQ)
552 tp->nir++;
561 iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
562 napi_schedule(&tp->napi);
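Lines 546-562 are the receive side of the main interrupt handler: instead of draining the ring in hard-irq context, the handler masks the receive-related interrupt sources in CSR7 and schedules NAPI, which is why the poll routine above unmasks them again when it finishes. Roughly as follows, where csr5, ioaddr and tp are the handler's locals and the csr5 test is inferred from the usual structure of this handler rather than shown in the listing.

/* irq-to-NAPI handoff around lines 561-562 */
if (csr5 & RxPollInt) {
        iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt,
                  ioaddr + CSR7);
        napi_schedule(&tp->napi);
}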
593 spin_lock(&tp->lock);
595 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
598 int status = le32_to_cpu(tp->tx_ring[entry].status);
604 if (tp->tx_buffers[entry].skb == NULL) {
606 if (tp->tx_buffers[entry].mapping)
607 dma_unmap_single(&tp->pdev->dev,
608 tp->tx_buffers[entry].mapping,
609 sizeof(tp->setup_frame),
630 if ((status & 0x0080) && tp->full_duplex == 0)
634 tp->tx_buffers[entry].skb->len;
639 dma_unmap_single(&tp->pdev->dev,
640 tp->tx_buffers[entry].mapping,
641 tp->tx_buffers[entry].skb->len,
645 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
646 tp->tx_buffers[entry].skb = NULL;
647 tp->tx_buffers[entry].mapping = 0;
652 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
655 dirty_tx, tp->cur_tx);
660 if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
663 tp->dirty_tx = dirty_tx;
669 tp->csr6);
670 tulip_restart_rxtx(tp);
672 spin_unlock(&tp->lock);
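Lines 593-672 are the transmit-completion side of the interrupt handler: under tp->lock it walks the ring from dirty_tx toward cur_tx, stops at the first descriptor the chip still owns, releases the DMA mapping of each completed entry (setup frames carry no skb and only need their mapping undone), frees the skb, and wakes the transmit queue once enough slots are free again. A trimmed sketch of that reclaim loop; dev and tp are the handler's locals, and the statistics and error accounting of the original are left out.

/* Condensed tx-reclaim pattern from lines 593-663. */
int dirty_tx;

spin_lock(&tp->lock);
for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0; dirty_tx++) {
        int entry = dirty_tx % TX_RING_SIZE;
        int status = le32_to_cpu(tp->tx_ring[entry].status);

        if (status < 0)
                break;          /* DescOwned (bit 31) set: chip still owns it */

        if (tp->tx_buffers[entry].skb == NULL) {
                /* setup frame: only a mapping to release */
                if (tp->tx_buffers[entry].mapping)
                        dma_unmap_single(&tp->pdev->dev,
                                         tp->tx_buffers[entry].mapping,
                                         sizeof(tp->setup_frame),
                                         DMA_TO_DEVICE);
                continue;
        }

        dma_unmap_single(&tp->pdev->dev, tp->tx_buffers[entry].mapping,
                         tp->tx_buffers[entry].skb->len, DMA_TO_DEVICE);
        dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
        tp->tx_buffers[entry].skb = NULL;
        tp->tx_buffers[entry].mapping = 0;
}
if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
        netif_wake_queue(dev);  /* room again: let the stack transmit */
tp->dirty_tx = dirty_tx;
spin_unlock(&tp->lock);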
682 if ((tp->csr6 & 0xC000) != 0xC000)
683 tp->csr6 += 0x4000; /* Bump up the Tx threshold */
685 tp->csr6 |= 0x00200000; /* Store-n-forward. */
687 tulip_restart_rxtx(tp);
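Lines 682-687 are the transmit-underrun recovery: each underrun bumps the transmit-threshold field in CSR6 one step (0x4000) until it saturates at 0xC000, after which the driver gives up on cut-through transmission, sets the store-and-forward bit, and restarts the transmitter. In short (the else pairing of the two branches is inferred, since the intervening source line contains no tp and so does not appear in this listing):

if ((tp->csr6 & 0xC000) != 0xC000)
        tp->csr6 += 0x4000;             /* next Tx threshold step */
else
        tp->csr6 |= 0x00200000;         /* store-and-forward: no more underruns */
tulip_restart_rxtx(tp);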
691 if (tp->flags & COMET_MAC_ADDR) {
692 iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
693 iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
699 tulip_start_rxtx(tp);
706 if (tp->link_change)
707 (tp->link_change)(dev, csr5);
723 tp->nir, error);
735 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
736 tp->ttimer = 0;
742 csr5, tp->nir, tx, rx, oi);
746 if (tp->flags & HAS_INTR_MITIGATION) {
750 } else if (tp->chip_id == LC82C168) {
753 mod_timer(&tp->timer, RUN_AT(HZ/50));
788 entry = tp->dirty_rx % RX_RING_SIZE;
789 if (tp->rx_buffers[entry].skb == NULL) {
792 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
793 tp->nir, tp->cur_rx, tp->ttimer, rx);
794 if (tp->chip_id == LC82C168) {
796 mod_timer(&tp->timer, RUN_AT(HZ/50));
798 if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
802 tp->nir);
803 iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
807 tp->ttimer = 1;
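Lines 788-807 handle the out-of-buffers case while the receiver is suspended: if the slot at dirty_rx is still empty after a refill attempt, the driver schedules a retry, either with a software timer on the LC82C168 (which lacks the general-purpose timer) or by unmasking TimerInt so the chip's CSR11 timer raises a fresh interrupt later. A sketch of that backoff; the CSR11 reload value and the debug output are omitted because they are not in the listing, and the CSR7 destination of the last write is inferred.

/* rx-suspend backoff around lines 788-807 (inside the interrupt handler) */
entry = tp->dirty_rx % RX_RING_SIZE;
if (tp->rx_buffers[entry].skb == NULL) {
        if (tp->chip_id == LC82C168) {
                /* no GP timer on the PNIC: poll again in ~20 ms */
                mod_timer(&tp->timer, RUN_AT(HZ / 50));
        } else if (tp->ttimer == 0 ||
                   (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
                /* let the chip's CSR11 timer re-raise the interrupt */
                iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                          ioaddr + CSR7);
                tp->ttimer = 1;
        }
}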