Lines matching defs:sge (file: sge.c)
4 * File: sge.c *
49 #include "sge.h"
233 struct sge *sge;
247 struct sge {
261 u32 sge_control; /* shadow value of sge control reg */
275 static void tx_sched_stop(struct sge *sge)
277 struct sched *s = sge->tx_sched;
290 unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
293 struct sched *s = sge->tx_sched;
313 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
336 void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
338 struct sched *s = sge->tx_sched;
343 t1_sched_update_parms(sge, i, 0, 0);
350 void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
353 struct sched *s = sge->tx_sched;
356 t1_sched_update_parms(sge, port, 0, 0);
364 static int tx_sched_init(struct sge *sge)
375 s->sge = sge;
376 sge->tx_sched = s;
380 t1_sched_update_parms(sge, i, 1500, 1000);
391 static inline int sched_update_avail(struct sge *sge)
393 struct sched *s = sge->tx_sched;
425 static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
428 struct sched *s = sge->tx_sched;
466 if (update-- && sched_update_avail(sge))
474 struct cmdQ *q = &sge->cmdQ[0];
478 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
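The matches above cover the Tx packet scheduler: tx_sched_init() seeds every port through t1_sched_update_parms(), and the two exported setters funnel back through the same routine with zero arguments to keep the current values. A minimal sketch of that call pattern, assuming the signatures shown above (MAX_NPORTS, and the reading of the third and fourth arguments as MTU and speed, are assumptions):

    /* Sketch only: how the listed scheduler entry points relate.
     * Assumes t1_sched_update_parms(sge, port, mtu, speed), with 0
     * meaning "keep the current value", as the (sge, port, 0, 0)
     * calls above suggest; MAX_NPORTS is assumed from the driver. */
    static void example_sched_setup(struct sge *sge)
    {
            unsigned int port;

            /* mirrors the line-380 defaults: 1500-byte MTU, 1000 Mb/s */
            for (port = 0; port < MAX_NPORTS; port++)
                    t1_sched_update_parms(sge, port, 1500, 1000);
    }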
518 static void free_rx_resources(struct sge *sge)
520 struct pci_dev *pdev = sge->adapter->pdev;
523 if (sge->respQ.entries) {
524 size = sizeof(struct respQ_e) * sge->respQ.size;
525 dma_free_coherent(&pdev->dev, size, sge->respQ.entries,
526 sge->respQ.dma_addr);
530 struct freelQ *q = &sge->freelQ[i];
548 static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
550 struct pci_dev *pdev = sge->adapter->pdev;
554 struct freelQ *q = &sge->freelQ[i];
558 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
578 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
580 sge->freelQ[!sge->jumbo_fl].dma_offset;
584 sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
590 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
591 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
593 sge->respQ.genbit = 1;
594 sge->respQ.size = SGE_RESPQ_E_N;
595 sge->respQ.credits = 0;
596 size = sizeof(struct respQ_e) * sge->respQ.size;
597 sge->respQ.entries =
598 dma_alloc_coherent(&pdev->dev, size, &sge->respQ.dma_addr,
600 if (!sge->respQ.entries)
605 free_rx_resources(sge);
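The respQ ring above is allocated and released with the standard coherent-DMA pairing (dma_alloc_coherent() at line 598, dma_free_coherent() at line 525). A self-contained sketch of that pattern, with hypothetical names:

    /* Sketch of the coherent-ring pattern used for sge->respQ above;
     * struct my_ring and both helpers are hypothetical stand-ins. */
    #include <linux/dma-mapping.h>

    struct my_ring {
            void *entries;          /* CPU address of the ring */
            dma_addr_t dma_addr;    /* bus address programmed into HW */
            unsigned int size;      /* number of entries */
    };

    static int my_ring_alloc(struct device *dev, struct my_ring *q,
                             unsigned int nelem, size_t esize)
    {
            q->size = nelem;
            q->entries = dma_alloc_coherent(dev, nelem * esize,
                                            &q->dma_addr, GFP_KERNEL);
            return q->entries ? 0 : -ENOMEM;
    }

    static void my_ring_free(struct device *dev, struct my_ring *q,
                             size_t esize)
    {
            if (q->entries)
                    dma_free_coherent(dev, q->size * esize,
                                      q->entries, q->dma_addr);
    }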
612 static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
615 struct pci_dev *pdev = sge->adapter->pdev;
647 static void free_tx_resources(struct sge *sge)
649 struct pci_dev *pdev = sge->adapter->pdev;
653 struct cmdQ *q = &sge->cmdQ[i];
657 free_cmdQ_buffers(sge, q, q->in_use);
671 static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
673 struct pci_dev *pdev = sge->adapter->pdev;
677 struct cmdQ *q = &sge->cmdQ[i];
706 sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
711 free_tx_resources(sge);
729 struct sge *sge = adapter->sge;
732 sge->sge_control |= F_VLAN_XTRACT;
734 sge->sge_control &= ~F_VLAN_XTRACT;
736 writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
743 * but sge->sge_control is set up and ready to go.
745 static void configure_sge(struct sge *sge, struct sge_params *p)
747 struct adapter *ap = sge->adapter;
750 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
752 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
754 setup_ring_params(ap, sge->freelQ[0].dma_addr,
755 sge->freelQ[0].size, A_SG_FL0BASELWR,
757 setup_ring_params(ap, sge->freelQ[1].dma_addr,
758 sge->freelQ[1].size, A_SG_FL1BASELWR,
764 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
766 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
768 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
771 V_RX_PKT_OFFSET(sge->rx_pkt_pad);
774 sge->sge_control |= F_ENABLE_BIG_ENDIAN;
778 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
780 t1_sge_set_coalesce_params(sge, p);
786 static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
788 return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
789 sge->freelQ[sge->jumbo_fl].dma_offset -
794 * Frees all SGE-related resources and the sge structure itself
796 void t1_sge_destroy(struct sge *sge)
800 for_each_port(sge->adapter, i)
801 free_percpu(sge->port_stats[i]);
803 kfree(sge->tx_sched);
804 free_tx_resources(sge);
805 free_rx_resources(sge);
806 kfree(sge);
817 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
821 static void refill_free_list(struct sge *sge, struct freelQ *q)
823 struct pci_dev *pdev = sge->adapter->pdev;
839 skb_reserve(skb, sge->rx_pkt_pad);
867 static void freelQs_empty(struct sge *sge)
869 struct adapter *adapter = sge->adapter;
873 refill_free_list(sge, &sge->freelQ[0]);
874 refill_free_list(sge, &sge->freelQ[1]);
876 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
877 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
879 irqholdoff_reg = sge->fixed_intrtimer;
883 irqholdoff_reg = sge->intrtimer_nres;
900 void t1_sge_intr_disable(struct sge *sge)
902 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
904 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
905 writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
911 void t1_sge_intr_enable(struct sge *sge)
914 u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
916 if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
918 writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
919 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
925 void t1_sge_intr_clear(struct sge *sge)
927 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
928 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
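t1_sge_intr_enable() and t1_sge_intr_disable() above are a read-modify-write pair over the same A_PL_ENABLE register. A sketch of that gating pattern (the helper name and parameters are hypothetical):

    /* Sketch of the read-modify-write gating used by the two listed
     * entry points; 'regs' is the mapped register base, as in
     * sge->adapter->regs. */
    static void pl_intr_mask_set(void __iomem *regs, u32 mask, bool enable)
    {
            u32 val = readl(regs + A_PL_ENABLE);    /* current enables */

            if (enable)
                    val |= mask;    /* as t1_sge_intr_enable() does */
            else
                    val &= ~mask;   /* as t1_sge_intr_disable() does */
            writel(val, regs + A_PL_ENABLE);
    }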
934 bool t1_sge_intr_error_handler(struct sge *sge)
936 struct adapter *adapter = sge->adapter;
943 sge->stats.respQ_empty++;
945 sge->stats.respQ_overflow++;
950 sge->stats.freelistQ_empty++;
951 freelQs_empty(sge);
954 sge->stats.pkt_too_big++;
959 sge->stats.pkt_mismatch++;
972 const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
974 return &sge->stats;
977 void t1_sge_get_port_stats(const struct sge *sge, int port,
984 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
1290 static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
1297 free_cmdQ_buffers(sge, q, reclaim);
1309 struct sge *sge = s->sge;
1310 struct adapter *adapter = sge->adapter;
1311 struct cmdQ *q = &sge->cmdQ[0];
1316 reclaim_completed_tx(sge, q);
1320 while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
1349 * @sge: the sge structure
1355 static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1359 struct adapter *adapter = sge->adapter;
1363 skb = get_packet(adapter, fl, len - sge->rx_pkt_pad);
1365 sge->stats.rx_drops++;
1376 st = this_cpu_ptr(sge->port_stats[p->iff]);
1410 static void restart_tx_queues(struct sge *sge)
1412 struct adapter *adap = sge->adapter;
1415 if (!enough_free_Tx_descs(&sge->cmdQ[0]))
1421 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
1423 sge->stats.cmdQ_restarted[2]++;
1437 struct sge *sge = adapter->sge;
1438 struct cmdQ *cmdq = &sge->cmdQ[0];
1442 freelQs_empty(sge);
1453 if (sge->tx_sched)
1454 tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
1459 if (unlikely(sge->stopped_tx_queues != 0))
1460 restart_tx_queues(sge);
1471 struct sge *sge = adapter->sge;
1472 struct respQ *q = &sge->respQ;
1494 sge->cmdQ[1].processed += cmdq_processed[1];
1499 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1505 sge_rx(sge, fl, e->BufferLength);
1519 refill_free_list(sge, fl);
1521 sge->stats.pure_rsps++;
1538 sge->cmdQ[1].processed += cmdq_processed[1];
1545 const struct respQ *Q = &adapter->sge->respQ;
1561 struct sge *sge = adapter->sge;
1562 struct respQ *q = &sge->respQ;
1564 const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1590 sge->stats.pure_rsps++;
1594 sge->cmdQ[1].processed += cmdq_processed[1];
1611 writel(adapter->sge->respQ.cidx,
1637 t1_sge_stop(adapter->sge);
1655 struct sge *sge = adapter->sge;
1666 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1679 sge->stats.unhandled_irqs++;
1700 struct sge *sge = adapter->sge;
1701 struct cmdQ *q = &sge->cmdQ[qid];
1706 reclaim_completed_tx(sge, q);
1717 set_bit(dev->if_port, &sge->stopped_tx_queues);
1718 sge->stats.cmdQ_full[2]++;
1728 set_bit(dev->if_port, &sge->stopped_tx_queues);
1729 sge->stats.cmdQ_full[2]++;
1735 if (sge->tx_sched && !qid && skb->dev) {
1741 skb = sched_skb(sge, skb, credits);
1811 struct sge *sge = adapter->sge;
1812 struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
1877 if (unlikely(!adapter->sge->espibug_skb[dev->if_port])) {
1880 adapter->sge->espibug_skb[dev->if_port] = skb;
1925 struct sge *sge = from_timer(sge, t, tx_reclaim_timer);
1928 struct cmdQ *q = &sge->cmdQ[i];
1933 reclaim_completed_tx(sge, q);
1935 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
1939 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
1945 int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
1947 sge->fixed_intrtimer = p->rx_coalesce_usecs *
1948 core_ticks_per_usec(sge->adapter);
1949 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
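For scale: t1_sge_set_coalesce_params() converts microseconds directly into core clock ticks, so assuming a hypothetical 200 MHz core clock (core_ticks_per_usec() == 200), rx_coalesce_usecs = 50 would program A_SG_INTRTIMER with 50 * 200 = 10000 ticks.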
1957 int t1_sge_configure(struct sge *sge, struct sge_params *p)
1959 if (alloc_rx_resources(sge, p))
1961 if (alloc_tx_resources(sge, p)) {
1962 free_rx_resources(sge);
1965 configure_sge(sge, p);
1973 p->large_buf_capacity = jumbo_payload_capacity(sge);
1980 void t1_sge_stop(struct sge *sge)
1983 writel(0, sge->adapter->regs + A_SG_CONTROL);
1984 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
1986 if (is_T2(sge->adapter))
1987 del_timer_sync(&sge->espibug_timer);
1989 del_timer_sync(&sge->tx_reclaim_timer);
1990 if (sge->tx_sched)
1991 tx_sched_stop(sge);
1994 kfree_skb(sge->espibug_skb[i]);
2000 void t1_sge_start(struct sge *sge)
2002 refill_free_list(sge, &sge->freelQ[0]);
2003 refill_free_list(sge, &sge->freelQ[1]);
2005 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
2006 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
2007 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
2009 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2011 if (is_T2(sge->adapter))
2012 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2020 struct sge *sge = from_timer(sge, t, espibug_timer);
2021 struct adapter *adapter = sge->adapter;
2032 struct sk_buff *skb = sge->espibug_skb[i];
2058 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2063 struct sge *sge = from_timer(sge, t, espibug_timer);
2064 struct adapter *adapter = sge->adapter;
2067 struct sk_buff *skb = sge->espibug_skb[0];
2090 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
2096 struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
2098 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
2101 if (!sge)
2104 sge->adapter = adapter;
2105 sge->netdev = adapter->port[0].dev;
2106 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
2107 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
2110 sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
2111 if (!sge->port_stats[i])
2115 timer_setup(&sge->tx_reclaim_timer, sge_tx_reclaim_cb, 0);
2117 if (is_T2(sge->adapter)) {
2118 timer_setup(&sge->espibug_timer,
2123 tx_sched_init(sge);
2125 sge->espibug_timeout = 1;
2128 sge->espibug_timeout = HZ/100;
2134 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
2135 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
2136 if (sge->tx_sched) {
2137 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
2147 return sge;
2150 free_percpu(sge->port_stats[i]);
2153 kfree(sge);
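Taken together, the exported entry points in this listing form the SGE lifecycle: create, configure, start, stop, destroy. A hedged sketch of the expected call order (error handling trimmed; the caller and its placement in the driver are assumptions):

    /* Sketch: ordering of the exported t1_sge_* entry points above. */
    static int example_sge_bringup(struct adapter *adapter,
                                   struct sge_params *p)
    {
            struct sge *sge = t1_sge_create(adapter, p); /* alloc + defaults */

            if (!sge)
                    return -ENOMEM;
            if (t1_sge_configure(sge, p)) {      /* rings + SGE registers */
                    t1_sge_destroy(sge);
                    return -ENOMEM;
            }
            t1_sge_start(sge);   /* fill free lists, enable, arm timers */
            /* ... data path runs (interrupts, Tx/Rx) ... */
            t1_sge_stop(sge);    /* quiesce hardware, stop timers */
            t1_sge_destroy(sge); /* free rings, stats, scheduler, sge */
            return 0;
    }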