Lines matching refs:tnapi in drivers/net/ethernet/broadcom/tg3.c (the Broadcom Tigon3 Ethernet driver). Each entry shows the source line number followed by the matching line.

207 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
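
TG3_TX_WAKEUP_THRESH sets the restart hysteresis for a stopped transmit queue: the queue is only woken once a quarter of the configured descriptors are free again, so it does not bounce between stopped and running one packet at a time. A minimal, compilable demonstration of the arithmetic; the 511 default is an assumption taken from TG3_DEF_TX_RING_PENDING (referenced at line 17833 below).

    #include <stdio.h>

    /* Model of TG3_TX_WAKEUP_THRESH(tnapi): a quarter of tx_pending. */
    #define TX_WAKEUP_THRESH(pending) ((pending) / 4)

    int main(void)
    {
        unsigned int tx_pending = 511;  /* assumed TG3_DEF_TX_RING_PENDING */
        /* Prints 127: the queue stays stopped until 127 slots are free. */
        printf("wake threshold: %u\n", TX_WAKEUP_THRESH(tx_pending));
        return 0;
    }
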
1019 struct tg3_napi *tnapi = &tp->napi[i];
1021 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1023 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1025 tp->coal_now |= tnapi->coal_now;
1038 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1040 struct tg3 *tp = tnapi->tp;
1041 struct tg3_hw_status *sblk = tnapi->hw_status;
1051 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1055 if (tnapi->rx_rcb_prod_idx &&
1056 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
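
Pieced together from the fragments above, tg3_has_work() answers "does this vector have anything for NAPI to do?" by comparing the hardware's consumer/producer indices, which the chip DMA-writes into the status block, against the driver's cached copies. A compilable userspace model of that logic; the struct names here are simplified stand-ins, not the driver's real types, and the source's link-change check is omitted.

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified stand-ins for the status block and per-vector state. */
    struct hw_status { uint32_t tx_consumer; uint32_t rx_producer; };

    struct napi_state {
        struct hw_status *sblk;
        uint32_t tx_cons;              /* last TX index we reclaimed  */
        uint32_t *rx_rcb_prod_idx;     /* NULL on TX-only vectors     */
        uint32_t rx_rcb_ptr;           /* last RX index we processed  */
    };

    static bool has_work(const struct napi_state *n)
    {
        /* TX work: hardware has consumed past our cleanup point. */
        if (n->sblk->tx_consumer != n->tx_cons)
            return true;
        /* RX work: hardware produced entries we have not drained. */
        if (n->rx_rcb_prod_idx && *n->rx_rcb_prod_idx != n->rx_rcb_ptr)
            return true;
        return false;
    }
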
1067 static void tg3_int_reenable(struct tg3_napi *tnapi)
1069 struct tg3 *tp = tnapi->tp;
1071 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1077 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1079 HOSTCC_MODE_ENABLE | tnapi->coal_now);
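
With TAGGED_STATUS, the mailbox write doubles as the acknowledgement: the tag in bits 31:24 tells the chip how much of the status block the driver has consumed, and the chip re-interrupts only if its current tag is newer. Only untagged chips need the explicit coalescing kick when tg3_has_work() still sees something pending. A compilable model of that decision, with the register writes reduced to stubs:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t int_mbox;   /* stand-in for the interrupt mailbox */

    /* Model of tg3_int_reenable(): low bit clear unmasks the vector,
     * bits 31:24 carry the last processed status tag. */
    static void int_reenable(uint32_t last_tag, bool tagged_status,
                             bool work_pending, void (*coal_kick)(void))
    {
        int_mbox = last_tag << 24;

        /* Untagged chips cannot learn "processed up to tag X" from the
         * write above, so a missed interrupt is possible; force a
         * coalescing-now event to regenerate it. */
        if (!tagged_status && work_pending)
            coal_kick();   /* tw32(HOSTCC_MODE, ... | coal_now) */
    }
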
6474 struct tg3_napi *tnapi = &tp->napi[i];
6480 tnapi->hw_status->status,
6481 tnapi->hw_status->status_tag,
6482 tnapi->hw_status->rx_jumbo_consumer,
6483 tnapi->hw_status->rx_consumer,
6484 tnapi->hw_status->rx_mini_consumer,
6485 tnapi->hw_status->idx[0].rx_producer,
6486 tnapi->hw_status->idx[0].tx_consumer);
6491 tnapi->last_tag, tnapi->last_irq_tag,
6492 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6493 tnapi->rx_rcb_ptr,
6494 tnapi->prodring.rx_std_prod_idx,
6495 tnapi->prodring.rx_std_cons_idx,
6496 tnapi->prodring.rx_jmb_prod_idx,
6497 tnapi->prodring.rx_jmb_cons_idx);
6521 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6525 return tnapi->tx_pending -
6526 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
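
tg3_tx_avail() is the standard wrap-safe ring computation: with unsigned arithmetic, (prod - cons) & (RING_SIZE - 1) yields the number of in-flight descriptors even after the indices wrap, provided the ring size is a power of two and tx_pending does not exceed it. A runnable demonstration; the 512 ring size follows TG3_TX_RING_SIZE, but verify against your tree.

    #include <stdio.h>
    #include <stdint.h>

    #define TX_RING_SIZE 512u              /* TG3_TX_RING_SIZE */

    static uint32_t tx_avail(uint32_t pending, uint32_t prod, uint32_t cons)
    {
        /* Unsigned subtraction keeps this correct across index wrap. */
        return pending - ((prod - cons) & (TX_RING_SIZE - 1));
    }

    int main(void)
    {
        /* Normal case: 100 descriptors in flight out of 511. */
        printf("%u\n", tx_avail(511, 100, 0));               /* 411 */
        /* Wrapped case: prod passed the ring end; 10 in flight. */
        printf("%u\n", tx_avail(511, 5, TX_RING_SIZE - 5));  /* 501 */
        return 0;
    }
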
6533 static void tg3_tx(struct tg3_napi *tnapi)
6535 struct tg3 *tp = tnapi->tp;
6536 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6537 u32 sw_idx = tnapi->tx_cons;
6539 int index = tnapi - tp->napi;
6548 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6557 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6575 ri = &tnapi->tx_buffers[sw_idx];
6581 ri = &tnapi->tx_buffers[sw_idx];
6593 ri = &tnapi->tx_buffers[sw_idx];
6612 tnapi->tx_cons = sw_idx;
6622 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6625 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
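
Lines 6612-6625 are the completion side of the classic stop/wake race. In the source, the tx_cons store is separated from the stopped-queue test by smp_mb(), and the test is repeated under the queue lock. A hedged reconstruction of that path; the barrier and lock calls come from the surrounding source, not the matched lines above.

    /* Reconstruction of the completion-side wake logic in tg3_tx(). */
    tnapi->tx_cons = sw_idx;

    /* Pairs with the barrier in tg3_start_xmit(): make the new tx_cons
     * visible before reading the stopped bit, or the queue could stay
     * stopped forever. */
    smp_mb();

    if (unlikely(netif_tx_queue_stopped(txq) &&
                 tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) {
        __netif_tx_lock(txq, smp_processor_id());
        /* Re-check under the lock: xmit may have stopped the queue
         * again after our unlocked test. */
        if (netif_tx_queue_stopped(txq) &&
            tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
            netif_tx_wake_queue(txq);
        __netif_tx_unlock(txq);
    }
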
6732 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6737 struct tg3 *tp = tnapi->tp;
6802 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6804 struct tg3 *tp = tnapi->tp;
6807 u32 sw_idx = tnapi->rx_rcb_ptr;
6810 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6812 hw_idx = *(tnapi->rx_rcb_prod_idx);
6824 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6852 tg3_recycle_rx(tnapi, tpr, opaque_key,
6856 tnapi->rx_dropped++;
6901 tg3_recycle_rx(tnapi, tpr, opaque_key,
6946 napi_gro_receive(&tnapi->napi, skb);
6968 hw_idx = *(tnapi->rx_rcb_prod_idx);
6974 tnapi->rx_rcb_ptr = sw_idx;
6975 tw32_rx_mbox(tnapi->consmbox, sw_idx);
7003 if (tnapi != &tp->napi[1]) {
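
tg3_rx() (lines 6802-6975) drains the return ring: it walks entries between the driver's consumer index (rx_rcb_ptr) and the producer index in the status block, hands good packets to napi_gro_receive(), refreshes the producer index when it catches up (line 6968), and finally publishes the new consumer index through the consumer mailbox. The line-7003 check reflects RSS operation, where tp->napi[1] owns the buffer-producer rings and other vectors defer refills to it. A compilable model of the drain loop; process_descriptor() and mailbox_write() are hypothetical stand-ins, and the ring size here is illustrative.

    #include <stdint.h>

    #define RCB_RING_SIZE 1024u   /* illustrative; real size is chip-dependent */

    struct rx_state {
        volatile uint32_t *rx_rcb_prod_idx;  /* producer idx in status block */
        uint32_t rx_rcb_ptr;                 /* our consumer position */
    };

    static int rx_poll(struct rx_state *rs, int budget,
                       void (*process_descriptor)(uint32_t idx),
                       void (*mailbox_write)(uint32_t idx))
    {
        uint32_t hw_idx = *rs->rx_rcb_prod_idx;
        uint32_t sw_idx = rs->rx_rcb_ptr;
        int work_done = 0;

        while (sw_idx != hw_idx && work_done < budget) {
            process_descriptor(sw_idx);
            work_done++;
            sw_idx = (sw_idx + 1) & (RCB_RING_SIZE - 1);
            /* The chip may have produced more while we worked. */
            if (sw_idx == hw_idx)
                hw_idx = *rs->rx_rcb_prod_idx;
        }

        rs->rx_rcb_ptr = sw_idx;
        mailbox_write(sw_idx);   /* tw32_rx_mbox(consmbox, sw_idx) */
        return work_done;
    }
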
7162 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7164 struct tg3 *tp = tnapi->tp;
7167 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7168 tg3_tx(tnapi);
7173 if (!tnapi->rx_rcb_prod_idx)
7180 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7181 work_done += tg3_rx(tnapi, budget - work_done);
7183 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
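
tg3_poll_work() stitches the two halves together: TX reclaim runs first and is not charged against the NAPI budget, then whatever budget remains goes to RX. The rx_rcb_prod_idx NULL test at line 7173 covers TX-only MSI-X vectors. A compilable model, with callbacks standing in for tg3_tx() and tg3_rx():

    #include <stdint.h>

    /* Model of tg3_poll_work(): TX reclaim first, then RX under budget. */
    static int poll_work(uint32_t hw_tx_cons, uint32_t sw_tx_cons,
                         const uint32_t *rx_prod, uint32_t rx_cons,
                         int work_done, int budget,
                         void (*reclaim_tx)(void),
                         int (*drain_rx)(int budget))
    {
        /* TX completion is not counted against the NAPI budget. */
        if (hw_tx_cons != sw_tx_cons)
            reclaim_tx();

        /* TX-only vectors have no RX return ring (rx_prod is NULL). */
        if (rx_prod && *rx_prod != rx_cons)
            work_done += drain_rx(budget - work_done);

        return work_done;
    }
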
7226 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7227 struct tg3 *tp = tnapi->tp;
7229 struct tg3_hw_status *sblk = tnapi->hw_status;
7232 work_done = tg3_poll_work(tnapi, work_done, budget);
7244 tnapi->last_tag = sblk->status_tag;
7245 tnapi->last_irq_tag = tnapi->last_tag;
7249 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7250 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7255 if (tnapi == &tp->napi[1] && tp->rx_refill)
7260 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7265 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7268 tnapi->coal_now);
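
The ordering in tg3_poll_msix() (lines 7244-7260) is the heart of tagged-status NAPI: status_tag is sampled before the final work check (the source has rmb() between the two), so any work the chip posts after the sample either fails the check, forcing another poll loop, or bumps the chip's tag past the one acked below, re-raising the interrupt. A hedged reconstruction of that completion step:

    /* Reconstruction of the tagged NAPI completion, tg3_poll_msix(). */
    tnapi->last_tag = sblk->status_tag;
    tnapi->last_irq_tag = tnapi->last_tag;
    rmb();                     /* order the tag read before the checks */

    if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
        *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
        napi_complete_done(napi, work_done);
        /* Ack up to last_tag and unmask in a single mailbox write. */
        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
    }
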
7320 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7321 struct tg3 *tp = tnapi->tp;
7323 struct tg3_hw_status *sblk = tnapi->hw_status;
7331 work_done = tg3_poll_work(tnapi, work_done, budget);
7344 tnapi->last_tag = sblk->status_tag;
7345 tnapi->last_irq_tag = tnapi->last_tag;
7350 if (likely(!tg3_has_work(tnapi))) {
7352 tg3_int_reenable(tnapi);
7468 struct tg3_napi *tnapi = dev_id;
7469 struct tg3 *tp = tnapi->tp;
7471 prefetch(tnapi->hw_status);
7472 if (tnapi->rx_rcb)
7473 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7476 napi_schedule(&tnapi->napi);
7487 struct tg3_napi *tnapi = dev_id;
7488 struct tg3 *tp = tnapi->tp;
7490 prefetch(tnapi->hw_status);
7491 if (tnapi->rx_rcb)
7492 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7500 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7502 napi_schedule(&tnapi->napi);
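
Two MSI flavors appear above: the 1-shot variant (lines 7468-7476) relies on the hardware auto-masking after each message and simply schedules NAPI, while the plain variant (lines 7487-7502) masks by hand with the 0x00000001 mailbox write. A hedged reconstruction of the plain handler; since MSIs are never shared there is no ownership check, and the flush details of the real handler are omitted.

    /* Reconstruction of the plain MSI handler. Writing 0x00000001 to the
     * interrupt mailbox masks the vector until NAPI completion rewrites
     * it with last_tag << 24 (low bit clear => unmasked). */
    static irqreturn_t msi_handler(int irq, void *dev_id)
    {
        struct tg3_napi *tnapi = dev_id;

        prefetch(tnapi->hw_status);
        if (tnapi->rx_rcb)
            prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        tw32_mailbox(tnapi->int_mbox, 0x00000001);  /* mask */
        napi_schedule(&tnapi->napi);
        return IRQ_HANDLED;
    }
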
7509 struct tg3_napi *tnapi = dev_id;
7510 struct tg3 *tp = tnapi->tp;
7511 struct tg3_hw_status *sblk = tnapi->hw_status;
7542 if (likely(tg3_has_work(tnapi))) {
7543 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7544 napi_schedule(&tnapi->napi);
7558 struct tg3_napi *tnapi = dev_id;
7559 struct tg3 *tp = tnapi->tp;
7560 struct tg3_hw_status *sblk = tnapi->hw_status;
7568 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7595 tnapi->last_irq_tag = sblk->status_tag;
7600 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7602 napi_schedule(&tnapi->napi);
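
The tagged INTx handler (lines 7558-7602) uses the tag for deduplication on a shared line: an unchanged status_tag means the chip has posted nothing since the last acknowledged interrupt, so the IRQ is probably another device's; the source confirms this by reading the PCI state register, which also flushes a possibly in-flight status block. A hedged reconstruction of the core flow:

    /* Reconstruction of tg3_interrupt_tagged()'s dedup and hand-off. */
    if (sblk->status_tag == tnapi->last_irq_tag) {
        if (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)
            return IRQ_NONE;            /* someone else's interrupt */
    }

    tw32_mailbox(tnapi->int_mbox, 0x00000001);  /* mask until NAPI done */
    tnapi->last_irq_tag = sblk->status_tag;     /* remember what we saw */
    napi_schedule(&tnapi->napi);
    return IRQ_HANDLED;
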
7611 struct tg3_napi *tnapi = dev_id;
7612 struct tg3 *tp = tnapi->tp;
7613 struct tg3_hw_status *sblk = tnapi->hw_status;
7694 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7698 struct tg3 *tp = tnapi->tp;
7726 tnapi->tx_buffers[*entry].fragmented = true;
7728 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7739 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7745 tnapi->tx_buffers[prvidx].fragmented = false;
7749 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7757 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7761 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7766 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7772 txb = &tnapi->tx_buffers[entry];
7779 txb = &tnapi->tx_buffers[entry];
7781 dma_unmap_page(&tnapi->tp->pdev->dev,
7788 txb = &tnapi->tx_buffers[entry];
7794 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7799 struct tg3 *tp = tnapi->tp;
7829 tnapi->tx_buffers[*entry].skb = new_skb;
7830 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7833 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7836 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7848 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7853 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7861 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7868 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7877 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7886 tnapi->tx_dropped++;
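
tg3_tso_bug() works around chips that mishandle certain TSO frames by segmenting the skb in software and transmitting the pieces individually; each resulting packet consumes ring descriptors, so tg3_tso_bug_gso_check() (line 7853) only allows the fallback when the segment count is well under ring capacity. A runnable model of the guard; 511 is again the assumed default pending count.

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of tg3_tso_bug_gso_check(): the software-GSO fallback emits
     * one packet per segment, so require segs < tx_pending / 3. */
    static bool gso_fallback_fits(unsigned int gso_segs,
                                  unsigned int tx_pending)
    {
        return gso_segs < tx_pending / 3;
    }

    int main(void)
    {
        /* With the assumed default of 511 pending, the limit is 170. */
        printf("%d\n", gso_fallback_fits(64, 511));   /* 1: fits     */
        printf("%d\n", gso_fallback_fits(200, 511));  /* 0: too many */
        return 0;
    }
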
7909 struct tg3_napi *tnapi;
7918 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7920 tnapi++;
7922 budget = tg3_tx_avail(tnapi);
7940 entry = tnapi->tx_prod;
7960 if (tg3_tso_bug_gso_check(tnapi, skb))
7961 return tg3_tso_bug(tp, tnapi, txq, skb);
7968 if (tg3_tso_bug_gso_check(tnapi, skb))
7969 return tg3_tso_bug(tp, tnapi, txq, skb);
8053 tnapi->tx_buffers[entry].skb = skb;
8054 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8061 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8084 tnapi->tx_buffers[entry].skb = NULL;
8085 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8091 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8102 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8104 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8113 return tg3_tso_bug(tp, tnapi, txq, skb);
8119 entry = tnapi->tx_prod;
8120 budget = tg3_tx_avail(tnapi);
8121 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8132 tnapi->tx_prod = entry;
8133 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8142 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8148 tw32_tx_mbox(tnapi->prodmbox, entry);
8154 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8155 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8159 tnapi->tx_dropped++;
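
Lines 8132-8148 are the transmit side of the stop/wake protocol: after publishing the new producer index, the queue is stopped when fewer than a worst-case packet's worth of descriptors (MAX_SKB_FRAGS + 1) remain, then availability is re-checked so a completion that raced the stop can undo it. A hedged reconstruction; the smp_mb() appears in the surrounding source and pairs with the one in tg3_tx().

    /* Reconstruction of the xmit-side stop logic in tg3_start_xmit(). */
    tnapi->tx_prod = entry;
    if (unlikely(tg3_tx_avail(tnapi) <= MAX_SKB_FRAGS + 1)) {
        netif_tx_stop_queue(txq);

        /* Make the stop bit visible before re-reading tx_cons; pairs
         * with the barrier in the completion path. */
        smp_mb();
        if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
            netif_tx_wake_queue(txq);   /* completion raced us; undo */
    }

    tw32_tx_mbox(tnapi->prodmbox, entry);   /* doorbell: publish prod */
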
8541 struct tg3_napi *tnapi = &tp->napi[j];
8543 tg3_rx_prodring_free(tp, &tnapi->prodring);
8545 if (!tnapi->tx_buffers)
8549 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8554 tg3_tx_skb_unmap(tnapi, i,
8578 struct tg3_napi *tnapi = &tp->napi[i];
8580 tnapi->last_tag = 0;
8581 tnapi->last_irq_tag = 0;
8582 tnapi->hw_status->status = 0;
8583 tnapi->hw_status->status_tag = 0;
8584 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8586 tnapi->tx_prod = 0;
8587 tnapi->tx_cons = 0;
8588 if (tnapi->tx_ring)
8589 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8591 tnapi->rx_rcb_ptr = 0;
8592 if (tnapi->rx_rcb)
8593 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8595 if (tnapi->prodring.rx_std &&
8596 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8610 struct tg3_napi *tnapi = &tp->napi[i];
8612 if (tnapi->tx_ring) {
8614 tnapi->tx_ring, tnapi->tx_desc_mapping);
8615 tnapi->tx_ring = NULL;
8618 kfree(tnapi->tx_buffers);
8619 tnapi->tx_buffers = NULL;
8626 struct tg3_napi *tnapi = &tp->napi[0];
8632 tnapi++;
8634 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8635 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8638 if (!tnapi->tx_buffers)
8641 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8643 &tnapi->tx_desc_mapping,
8645 if (!tnapi->tx_ring)
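
The per-queue TX allocation (lines 8634-8645) pairs a kcalloc'd shadow array, used for skb pointers and DMA-unmap bookkeeping, with a coherent DMA ring that the NIC reads descriptors from. A hedged reconstruction of the allocation step; error unwinding is handled by the caller's release path.

    /* Reconstruction of the TX ring setup in tg3_mem_tx_acquire(). */
    tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
                                sizeof(struct tg3_tx_ring_info),
                                GFP_KERNEL);
    if (!tnapi->tx_buffers)
        goto err_out;

    /* Coherent mapping: driver writes descriptors, device reads them,
     * no per-buffer sync required. */
    tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
                                        &tnapi->tx_desc_mapping,
                                        GFP_KERNEL);
    if (!tnapi->tx_ring)
        goto err_out;
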
8661 struct tg3_napi *tnapi = &tp->napi[i];
8663 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8665 if (!tnapi->rx_rcb)
8670 tnapi->rx_rcb,
8671 tnapi->rx_rcb_mapping);
8672 tnapi->rx_rcb = NULL;
8689 struct tg3_napi *tnapi = &tp->napi[i];
8691 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8701 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8703 &tnapi->rx_rcb_mapping,
8705 if (!tnapi->rx_rcb)
8725 struct tg3_napi *tnapi = &tp->napi[i];
8727 if (tnapi->hw_status) {
8729 tnapi->hw_status,
8730 tnapi->status_mapping);
8731 tnapi->hw_status = NULL;
8764 struct tg3_napi *tnapi = &tp->napi[i];
8767 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8769 &tnapi->status_mapping,
8771 if (!tnapi->hw_status)
8774 sblk = tnapi->hw_status;
8799 tnapi->rx_rcb_prod_idx = prodptr;
8801 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8935 struct tg3_napi *tnapi = &tp->napi[i];
8936 if (tnapi->hw_status)
8937 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9102 struct tg3_napi *tnapi = &tp->napi[i];
9103 if (tnapi->hw_status) {
9104 tnapi->hw_status->status = 0;
9105 tnapi->hw_status->status_tag = 0;
9107 tnapi->last_tag = 0;
9108 tnapi->last_irq_tag = 0;
9361 struct tg3_napi *tnapi = &tp->napi[i];
9363 tnapi->rx_dropped = 0;
9364 tnapi->tx_dropped = 0;
9545 struct tg3_napi *tnapi = &tp->napi[i];
9547 if (!tnapi->tx_ring)
9550 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9589 struct tg3_napi *tnapi = &tp->napi[i];
9591 if (!tnapi->rx_rcb)
9594 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9605 struct tg3_napi *tnapi = &tp->napi[0];
9647 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9651 ((u64) tnapi->status_mapping >> 32));
9653 ((u64) tnapi->status_mapping & 0xffffffff));
9657 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9658 u64 mapping = (u64)tnapi->status_mapping;
9664 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
10964 struct tg3_napi *tnapi = &tp->napi[i];
10966 if (tg3_has_work(tnapi)) {
10967 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10968 tnapi->last_tx_cons == tnapi->tx_cons) {
10969 if (tnapi->chk_msi_cnt < 1) {
10970 tnapi->chk_msi_cnt++;
10973 tg3_msi(0, tnapi);
10976 tnapi->chk_msi_cnt = 0;
10977 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10978 tnapi->last_tx_cons = tnapi->tx_cons;
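
This cluster (lines 10964-10978, run from the driver's periodic timer) is a lost-MSI workaround: if a vector claims work via tg3_has_work() but neither consumer index has moved since the previous tick, it tolerates one such tick, then calls tg3_msi(0, tnapi) directly to substitute for the missing interrupt. A compilable model of the per-vector state machine, with fire_handler standing in for the manual tg3_msi() call:

    #include <stdbool.h>
    #include <stdint.h>

    struct msi_watch {
        uint32_t last_rx_cons, last_tx_cons;
        int chk_msi_cnt;
    };

    /* Model of tg3_chk_msi(): one timer tick per call. */
    static void chk_msi(struct msi_watch *w, bool has_work,
                        uint32_t rx_cons, uint32_t tx_cons,
                        void (*fire_handler)(void))
    {
        if (has_work &&
            w->last_rx_cons == rx_cons && w->last_tx_cons == tx_cons) {
            /* Work pending but no progress since the last tick. */
            if (w->chk_msi_cnt < 1) {
                w->chk_msi_cnt++;   /* give it one more tick */
                return;
            }
            fire_handler();         /* assume the MSI was lost */
        }
        w->chk_msi_cnt = 0;
        w->last_rx_cons = rx_cons;
        w->last_tx_cons = tx_cons;
    }
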
11240 struct tg3_napi *tnapi = &tp->napi[irq_num];
11245 name = &tnapi->irq_lbl[0];
11246 if (tnapi->tx_buffers && tnapi->rx_rcb)
11249 else if (tnapi->tx_buffers)
11252 else if (tnapi->rx_rcb)
11273 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11278 struct tg3_napi *tnapi = &tp->napi[0];
11288 free_irq(tnapi->irq_vec, tnapi);
11299 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11300 IRQF_SHARED, dev->name, tnapi);
11304 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11308 tnapi->coal_now);
11313 int_mbox = tr32_mailbox(tnapi->int_mbox);
11323 tnapi->hw_status->status_tag != tnapi->last_tag)
11324 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11331 free_irq(tnapi->irq_vec, tnapi);
11597 struct tg3_napi *tnapi = &tp->napi[i];
11599 free_irq(tnapi->irq_vec, tnapi);
11668 struct tg3_napi *tnapi = &tp->napi[i];
11669 free_irq(tnapi->irq_vec, tnapi);
11707 struct tg3_napi *tnapi = &tp->napi[i];
11708 free_irq(tnapi->irq_vec, tnapi);
11977 struct tg3_napi *tnapi = &tp->napi[i];
11979 rx_dropped += tnapi->rx_dropped;
11980 tx_dropped += tnapi->tx_dropped;
13423 struct tg3_napi *tnapi, *rnapi;
13426 tnapi = &tp->napi[0];
13432 tnapi = &tp->napi[1];
13434 coal_now = tnapi->coal_now | rnapi->coal_now;
13511 val = tnapi->tx_prod;
13512 tnapi->tx_buffers[val].skb = skb;
13513 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13522 budget = tg3_tx_avail(tnapi);
13523 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13525 tnapi->tx_buffers[val].skb = NULL;
13530 tnapi->tx_prod++;
13535 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13536 tr32_mailbox(tnapi->prodmbox);
13547 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13549 if ((tx_idx == tnapi->tx_prod) &&
13554 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13557 if (tx_idx != tnapi->tx_prod)
17830 struct tg3_napi *tnapi = &tp->napi[i];
17832 tnapi->tp = tp;
17833 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17835 tnapi->int_mbox = intmbx;
17838 tnapi->consmbox = rcvmbx;
17839 tnapi->prodmbox = sndmbx;
17842 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17844 tnapi->coal_now = HOSTCC_MODE_NOW;
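
Finally, the probe-time init (lines 17830-17844) gives each vector its own "coalesce now" bit so tg3_int_reenable() and the loopback test can kick exactly one vector's coalescing engine: vector 0 uses the global NOW bit, and each MSI-X vector i >= 1 gets COAL_VEC1_NOW shifted up by i - 1. A runnable model of the assignment; the bit positions below are illustrative placeholders, not the real HOSTCC register layout.

    #include <stdio.h>
    #include <stdint.h>

    #define HOSTCC_MODE_NOW       (1u << 2)   /* placeholder value */
    #define HOSTCC_MODE_VEC1_NOW  (1u << 8)   /* placeholder value */

    int main(void)
    {
        /* One distinct bit per vector, so kicks never overlap. */
        for (int i = 0; i < 5; i++) {
            uint32_t coal_now = i ? HOSTCC_MODE_VEC1_NOW << (i - 1)
                                  : HOSTCC_MODE_NOW;
            printf("vector %d: coal_now = 0x%08x\n", i, coal_now);
        }
        return 0;
    }
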