Lines matching refs:tnapi: every cross-reference to the per-vector NAPI context tnapi (struct tg3_napi) in the Broadcom tg3 gigabit Ethernet driver. Each match below is prefixed with its line number in the driver source.

206 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
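The very first match is the transmit-queue wake threshold: once the queue has been stopped for lack of descriptors, it is restarted only after at least a quarter of the configured ring depth (tx_pending) is free again, giving the stop/wake decision hysteresis so one reclaimed packet cannot make the queue flap. A minimal standalone model of that decision (the struct, names, and the depth of 511, which I believe matches TG3_DEF_TX_RING_PENDING, are illustrative stand-ins, not driver code):

    /* Illustrative model of the TG3_TX_WAKEUP_THRESH hysteresis. */
    #include <stdio.h>

    struct model_napi { unsigned int tx_pending; };    /* hypothetical stand-in */
    #define MODEL_WAKEUP_THRESH(n) ((n)->tx_pending / 4)

    int main(void)
    {
        struct model_napi n = { .tx_pending = 511 };
        unsigned int avail;

        /* The queue stays stopped until avail crosses tx_pending / 4. */
        for (avail = 126; avail <= 128; avail++)
            printf("avail=%3u -> %s\n", avail,
                   avail > MODEL_WAKEUP_THRESH(&n) ? "wake queue"
                                                   : "stay stopped");
        return 0;
    }
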
1018 struct tg3_napi *tnapi = &tp->napi[i];
1020 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1022 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1024 tp->coal_now |= tnapi->coal_now;
1037 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1039 struct tg3 *tp = tnapi->tp;
1040 struct tg3_hw_status *sblk = tnapi->hw_status;
1050 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1054 if (tnapi->rx_rcb_prod_idx &&
1055 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
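The group above is tg3_has_work(), which decides from the status block whether this vector has anything to do: transmit work if the hardware's tx consumer index has moved past the driver's cached tx_cons, receive work if the rx return-ring producer index (read through rx_rcb_prod_idx, which is NULL on tx-only vectors) differs from the cached rx_rcb_ptr. A standalone sketch of those two comparisons (types and field names are illustrative):

    /* Sketch of the tg3_has_work() comparisons against the status block. */
    #include <stdbool.h>
    #include <stdint.h>

    struct model_status { uint32_t tx_consumer; uint32_t rx_producer; };

    struct model_vec {
        struct model_status *sblk;      /* shared with the NIC via DMA        */
        uint32_t tx_cons;               /* last tx index the driver reclaimed */
        uint32_t *rx_rcb_prod_idx;      /* NULL on tx-only vectors            */
        uint32_t rx_rcb_ptr;            /* last rx index the driver consumed  */
    };

    static bool model_has_work(const struct model_vec *v)
    {
        if (v->sblk->tx_consumer != v->tx_cons)
            return true;                /* completed tx packets to reclaim */
        if (v->rx_rcb_prod_idx && *v->rx_rcb_prod_idx != v->rx_rcb_ptr)
            return true;                /* received packets to process */
        return false;
    }

    int main(void)
    {
        struct model_status s = { .tx_consumer = 5, .rx_producer = 9 };
        struct model_vec v = { &s, 5, &s.rx_producer, 9 };
        return model_has_work(&v);      /* 0 here: nothing pending */
    }
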
1066 static void tg3_int_reenable(struct tg3_napi *tnapi)
1068 struct tg3 *tp = tnapi->tp;
1070 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1076 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1078 HOSTCC_MODE_ENABLE | tnapi->coal_now);
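tg3_int_reenable() unmasks a vector by writing the last seen status tag, shifted into the top byte, back to the interrupt mailbox; on chips without TAGGED_STATUS it also kicks the coalescing engine (HOSTCC_MODE_ENABLE | coal_now) if tg3_has_work() says something arrived while the vector was masked, since an untagged status block offers no other way to close that race. The tag protocol, paraphrasing the driver's own comments rather than any hardware specification: the NIC stamps each status-block update with an incrementing tag, and the host's tag << 24 write tells it which update has been processed, so only newer updates raise a fresh interrupt. A toy model of the handshake:

    /* Toy model of the tagged-status interrupt handshake. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct model_nic {
        uint8_t status_tag;    /* bumped by "hardware" on each status update */
        uint8_t acked_tag;     /* last tag the host wrote back (bits 31:24)  */
    };

    static void model_int_reenable(struct model_nic *nic, uint8_t last_tag)
    {
        uint32_t mbox = (uint32_t)last_tag << 24;   /* tag << 24, as above */
        nic->acked_tag = mbox >> 24;
    }

    static bool model_irq_pending(const struct model_nic *nic)
    {
        return nic->status_tag != nic->acked_tag;
    }

    int main(void)
    {
        struct model_nic nic = { .status_tag = 3, .acked_tag = 2 };
        printf("pending before ack: %d\n", model_irq_pending(&nic));  /* 1 */
        model_int_reenable(&nic, 3);
        printf("pending after ack:  %d\n", model_irq_pending(&nic));  /* 0 */
        return 0;
    }
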
6489 struct tg3_napi *tnapi = &tp->napi[i];
6495 tnapi->hw_status->status,
6496 tnapi->hw_status->status_tag,
6497 tnapi->hw_status->rx_jumbo_consumer,
6498 tnapi->hw_status->rx_consumer,
6499 tnapi->hw_status->rx_mini_consumer,
6500 tnapi->hw_status->idx[0].rx_producer,
6501 tnapi->hw_status->idx[0].tx_consumer);
6506 tnapi->last_tag, tnapi->last_irq_tag,
6507 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6508 tnapi->rx_rcb_ptr,
6509 tnapi->prodring.rx_std_prod_idx,
6510 tnapi->prodring.rx_std_cons_idx,
6511 tnapi->prodring.rx_jmb_prod_idx,
6512 tnapi->prodring.rx_jmb_cons_idx);
6536 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6540 return tnapi->tx_pending -
6541 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
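tg3_tx_avail() is the classic power-of-two ring occupancy calculation: (tx_prod - tx_cons) & (TG3_TX_RING_SIZE - 1) gives the number of in-flight descriptors even after the unsigned indices wrap, and subtracting that from the configured depth tx_pending leaves the free count. A worked standalone example of the wraparound arithmetic (512 is what I'd expect TG3_TX_RING_SIZE to be, but treat it as an assumption):

    /* Worked example of the ring-occupancy arithmetic in tg3_tx_avail(). */
    #include <stdint.h>
    #include <stdio.h>

    #define MODEL_RING_SIZE 512u        /* assumed TG3_TX_RING_SIZE */

    static uint32_t model_tx_avail(uint32_t tx_pending,
                                   uint32_t tx_prod, uint32_t tx_cons)
    {
        return tx_pending - ((tx_prod - tx_cons) & (MODEL_RING_SIZE - 1));
    }

    int main(void)
    {
        /* No wrap: 100 descriptors in flight, 411 free. */
        printf("%u\n", model_tx_avail(511, 100, 0));
        /* Indices wrapped past 2^32: unsigned subtraction plus the mask
         * still yields the true in-flight count of 6, so 505 free. */
        printf("%u\n", model_tx_avail(511, 2, 0xfffffffcu));
        return 0;
    }
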
6548 static void tg3_tx(struct tg3_napi *tnapi)
6550 struct tg3 *tp = tnapi->tp;
6551 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6552 u32 sw_idx = tnapi->tx_cons;
6554 int index = tnapi - tp->napi;
6563 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6572 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6592 ri = &tnapi->tx_buffers[sw_idx];
6598 ri = &tnapi->tx_buffers[sw_idx];
6610 ri = &tnapi->tx_buffers[sw_idx];
6629 tnapi->tx_cons = sw_idx;
6639 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6642 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
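tg3_tx() above is the completion path: it walks the software consumer index from tx_cons up to the hardware's idx[0].tx_consumer snapshot, unmapping and freeing each completed skb (head descriptor plus its fragment entries), publishes the new tx_cons, and wakes a stopped queue only once tg3_tx_avail() clears the quarter-ring threshold from line 206. A compressed standalone model of that walk, with plain mallocs standing in for DMA-mapped skbs:

    /* Model of the tx-completion walk in tg3_tx(). */
    #include <stdio.h>
    #include <stdlib.h>

    #define MODEL_RING_SIZE 8u          /* tiny ring, for demonstration */

    int main(void)
    {
        void *buffers[MODEL_RING_SIZE] = { 0 };
        unsigned int sw_idx = 5, hw_idx = 2;   /* hw completed 5,6,7,0,1 */
        unsigned int i;

        for (i = sw_idx; i != hw_idx; i = (i + 1) & (MODEL_RING_SIZE - 1))
            buffers[i] = malloc(64);           /* pretend in-flight skbs */

        /* Reclaim everything hardware reports as consumed. */
        while (sw_idx != hw_idx) {
            free(buffers[sw_idx]);             /* unmap + free in the driver */
            buffers[sw_idx] = NULL;
            sw_idx = (sw_idx + 1) & (MODEL_RING_SIZE - 1);
        }
        printf("tx_cons now %u\n", sw_idx);    /* 2 */
        return 0;
    }
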
6751 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6756 struct tg3 *tp = tnapi->tp;
6821 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6823 struct tg3 *tp = tnapi->tp;
6826 u32 sw_idx = tnapi->rx_rcb_ptr;
6829 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6831 hw_idx = *(tnapi->rx_rcb_prod_idx);
6843 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6871 tg3_recycle_rx(tnapi, tpr, opaque_key,
6875 tnapi->rx_dropped++;
6917 tg3_recycle_rx(tnapi, tpr, opaque_key,
6960 napi_gro_receive(&tnapi->napi, skb);
6982 hw_idx = *(tnapi->rx_rcb_prod_idx);
6988 tnapi->rx_rcb_ptr = sw_idx;
6989 tw32_rx_mbox(tnapi->consmbox, sw_idx);
7017 if (tnapi != &tp->napi[1]) {
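tg3_rx() mirrors the tx side with the roles reversed: hardware produces into the rx return ring (hw_idx, read through rx_rcb_prod_idx in the status block), the driver consumes from rx_rcb_ptr up to the NAPI budget, hands each frame to napi_gro_receive(), and finally posts its new consumer index to the consumer mailbox (tw32_rx_mbox) so the NIC can reuse those return-ring entries; note the re-read of the producer index near the end of the loop to pick up frames that arrived meanwhile. A hedged sketch of that control flow (ring size and names are illustrative):

    /* Sketch of the rx return-ring consumption loop in tg3_rx(). */
    #include <stdint.h>
    #include <stdio.h>

    #define MODEL_RCB_RING_SIZE 1024u   /* illustrative return-ring size */

    static uint32_t model_consmbox;     /* stand-in for tw32_rx_mbox() */

    static int model_rx(volatile uint32_t *rx_rcb_prod_idx,
                        uint32_t *rx_rcb_ptr, int budget)
    {
        uint32_t hw_idx = *rx_rcb_prod_idx;    /* snapshot of NIC producer */
        uint32_t sw_idx = *rx_rcb_ptr;
        int received = 0;

        while (sw_idx != hw_idx && received < budget) {
            /* ...unmap the buffer, build an skb, napi_gro_receive()... */
            received++;
            sw_idx = (sw_idx + 1) % MODEL_RCB_RING_SIZE;
            if (sw_idx == hw_idx)
                hw_idx = *rx_rcb_prod_idx;     /* re-check for late arrivals */
        }
        *rx_rcb_ptr = sw_idx;
        model_consmbox = sw_idx;        /* tell the NIC how far we consumed */
        return received;
    }

    int main(void)
    {
        uint32_t prod = 5, ptr = 0;
        printf("received %d\n", model_rx(&prod, &ptr, 64));   /* 5 */
        return 0;
    }
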
7176 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7178 struct tg3 *tp = tnapi->tp;
7181 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7182 tg3_tx(tnapi);
7187 if (!tnapi->rx_rcb_prod_idx)
7194 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7195 work_done += tg3_rx(tnapi, budget - work_done);
7197 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7240 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7241 struct tg3 *tp = tnapi->tp;
7243 struct tg3_hw_status *sblk = tnapi->hw_status;
7246 work_done = tg3_poll_work(tnapi, work_done, budget);
7258 tnapi->last_tag = sblk->status_tag;
7259 tnapi->last_irq_tag = tnapi->last_tag;
7263 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7264 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7269 if (tnapi == &tp->napi[1] && tp->rx_refill)
7274 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7279 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7282 tnapi->coal_now);
7334 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7335 struct tg3 *tp = tnapi->tp;
7337 struct tg3_hw_status *sblk = tnapi->hw_status;
7345 work_done = tg3_poll_work(tnapi, work_done, budget);
7358 tnapi->last_tag = sblk->status_tag;
7359 tnapi->last_irq_tag = tnapi->last_tag;
7364 if (likely(!tg3_has_work(tnapi))) {
7366 tg3_int_reenable(tnapi);
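Both NAPI poll routines above drive the shared tg3_poll_work() helper (tx reclaim first, then rx against the remaining budget, with the first rx vector also refilling producer rings when RSS is on; see the tnapi == &tp->napi[1] and rx_refill matches) in a loop until the budget is spent or no work remains. The difference is the completion test: tg3_poll() uses tg3_has_work() and tg3_int_reenable(), while the MSI-X variant latches sblk->status_tag into last_tag/last_irq_tag before re-checking the tx and rx indices, so a status update landing between that check and the last_tag << 24 mailbox write carries a newer tag and still raises an interrupt. The control-flow skeleton common to both, reduced to a standalone model:

    /* Control-flow skeleton shared by tg3_poll() and the MSI-X poll. */
    #include <stdbool.h>
    #include <stdio.h>

    static int pending = 10;            /* fake count of outstanding packets */

    static int model_poll_work(int work_done, int budget)
    {
        while (work_done < budget && pending) {
            pending--;                  /* "process" one packet */
            work_done++;
        }
        return work_done;
    }

    static bool model_has_work(void) { return pending != 0; }

    int main(void)
    {
        int budget = 4, work_done = 0;

        for (;;) {
            work_done = model_poll_work(work_done, budget);
            if (work_done >= budget)
                break;                  /* budget spent: stay scheduled */
            if (!model_has_work()) {
                /* napi_complete() here, then re-enable the interrupt */
                break;
            }
        }
        printf("work_done=%d pending=%d\n", work_done, pending);  /* 4 6 */
        return 0;
    }
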
7482 struct tg3_napi *tnapi = dev_id;
7483 struct tg3 *tp = tnapi->tp;
7485 prefetch(tnapi->hw_status);
7486 if (tnapi->rx_rcb)
7487 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7490 napi_schedule(&tnapi->napi);
7501 struct tg3_napi *tnapi = dev_id;
7502 struct tg3 *tp = tnapi->tp;
7504 prefetch(tnapi->hw_status);
7505 if (tnapi->rx_rcb)
7506 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7514 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7516 napi_schedule(&tnapi->napi);
7523 struct tg3_napi *tnapi = dev_id;
7524 struct tg3 *tp = tnapi->tp;
7525 struct tg3_hw_status *sblk = tnapi->hw_status;
7556 if (likely(tg3_has_work(tnapi))) {
7557 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7558 napi_schedule(&tnapi->napi);
7572 struct tg3_napi *tnapi = dev_id;
7573 struct tg3 *tp = tnapi->tp;
7574 struct tg3_hw_status *sblk = tnapi->hw_status;
7582 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7609 tnapi->last_irq_tag = sblk->status_tag;
7614 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7616 napi_schedule(&tnapi->napi);
7625 struct tg3_napi *tnapi = dev_id;
7626 struct tg3 *tp = tnapi->tp;
7627 struct tg3_hw_status *sblk = tnapi->hw_status;
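The five dev_id groups above are the interrupt handlers in increasing order of sophistication: tg3_msi_1shot() only prefetches and schedules NAPI (the hardware auto-masks), tg3_msi() additionally masks further irqs by writing 0x00000001 to the interrupt mailbox, tg3_interrupt() copes with shared INTx lines, tg3_interrupt_tagged() answers the shared-line "is it ours?" question cheaply by comparing tags, and the last group appears to be the self-test ISR. The tagged ownership test is worth spelling out: if status_tag still equals last_irq_tag, the status block has not changed since the last acknowledgment, so the interrupt must belong to another device. A sketch:

    /* Sketch of the shared-irq ownership test in tg3_interrupt_tagged(). */
    #include <stdint.h>
    #include <stdio.h>

    enum model_irqreturn { MODEL_IRQ_NONE, MODEL_IRQ_HANDLED };

    struct model_vec {
        uint8_t last_irq_tag;           /* tag acknowledged last time around */
    };

    static enum model_irqreturn model_isr(struct model_vec *v, uint8_t status_tag)
    {
        if (status_tag == v->last_irq_tag)
            return MODEL_IRQ_NONE;      /* no new status: not our interrupt */

        v->last_irq_tag = status_tag;   /* remember what we have seen... */
        /* ...then mask via the mailbox, prefetch the rx ring, napi_schedule() */
        return MODEL_IRQ_HANDLED;
    }

    int main(void)
    {
        struct model_vec v = { .last_irq_tag = 7 };
        printf("%d\n", model_isr(&v, 7));   /* 0: someone else's interrupt */
        printf("%d\n", model_isr(&v, 8));   /* 1: ours, NAPI gets scheduled */
        return 0;
    }
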
7708 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7712 struct tg3 *tp = tnapi->tp;
7740 tnapi->tx_buffers[*entry].fragmented = true;
7742 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7753 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7759 tnapi->tx_buffers[prvidx].fragmented = false;
7763 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7771 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7775 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7780 pci_unmap_single(tnapi->tp->pdev,
7788 txb = &tnapi->tx_buffers[entry];
7795 txb = &tnapi->tx_buffers[entry];
7797 pci_unmap_page(tnapi->tp->pdev,
7804 txb = &tnapi->tx_buffers[entry];
7810 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7815 struct tg3 *tp = tnapi->tp;
7845 tnapi->tx_buffers[*entry].skb = new_skb;
7846 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7849 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7852 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7864 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7869 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7877 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7884 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7893 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7902 tnapi->tx_dropped++;
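tg3_tso_bug() works around chips that mishandle certain TSO frames by segmenting them in software (via skb_gso_segment(), if memory serves) and retransmitting the pieces, which only helps if the ring can absorb them: tg3_tso_bug_gso_check() requires gso_segs to stay under a third of tx_pending, and tg3_tso_bug() itself stops the queue, or drops and counts tx_dropped, when the estimated descriptor count does not fit. The gating heuristic in isolation:

    /* The tg3_tso_bug_gso_check() gate, in isolation. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool model_gso_check(unsigned int gso_segs, unsigned int tx_pending)
    {
        /* Each software segment needs descriptors of its own, so only take
         * the software-GSO fallback when the ring has ample headroom. */
        return gso_segs < tx_pending / 3;
    }

    int main(void)
    {
        printf("%d\n", model_gso_check(44, 511));    /* 1: fallback is safe */
        printf("%d\n", model_gso_check(200, 511));   /* 0: don't take it    */
        return 0;
    }
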
7925 struct tg3_napi *tnapi;
7934 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7936 tnapi++;
7938 budget = tg3_tx_avail(tnapi);
7956 entry = tnapi->tx_prod;
7976 if (tg3_tso_bug_gso_check(tnapi, skb))
7977 return tg3_tso_bug(tp, tnapi, txq, skb);
7984 if (tg3_tso_bug_gso_check(tnapi, skb))
7985 return tg3_tso_bug(tp, tnapi, txq, skb);
8068 tnapi->tx_buffers[entry].skb = skb;
8069 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8076 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8099 tnapi->tx_buffers[entry].skb = NULL;
8100 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8106 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8117 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8119 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8128 return tg3_tso_bug(tp, tnapi, txq, skb);
8134 entry = tnapi->tx_prod;
8135 budget = tg3_tx_avail(tnapi);
8136 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8147 tnapi->tx_prod = entry;
8148 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8157 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8163 tw32_tx_mbox(tnapi->prodmbox, entry);
8169 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8170 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8174 tnapi->tx_dropped++;
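The large group above is tg3_start_xmit(), the usual lockless producer pattern: snapshot budget = tg3_tx_avail(), map the head and each fragment with tg3_tx_frag_set(), record every skb in tx_buffers[] so tg3_tx() can unmap it later, advance tx_prod, stop the queue while no more than MAX_SKB_FRAGS + 1 descriptors remain (the worst case one future skb could need), and post the new producer index to the producer mailbox. The stop/restart discipline, modeled on its own (the MAX_SKB_FRAGS-like constant is illustrative):

    /* Model of the queue stop/restart discipline in tg3_start_xmit(). */
    #include <stdbool.h>
    #include <stdio.h>

    #define MODEL_MAX_SKB_FRAGS 17u     /* illustrative MAX_SKB_FRAGS value */

    static bool queue_stopped;

    static void model_after_xmit(unsigned int avail_then, unsigned int avail_now,
                                 unsigned int tx_pending)
    {
        if (avail_then <= MODEL_MAX_SKB_FRAGS + 1) {
            queue_stopped = true;           /* netif_tx_stop_queue() */
            /* tg3_tx() may have reclaimed space in between; re-read the
             * free count and undo the stop if it cleared the threshold. */
            if (avail_now > tx_pending / 4)
                queue_stopped = false;      /* netif_tx_wake_queue() */
        }
    }

    int main(void)
    {
        model_after_xmit(17, 17, 511);
        printf("stopped=%d\n", queue_stopped);   /* 1: ring nearly full */
        model_after_xmit(17, 200, 511);
        printf("stopped=%d\n", queue_stopped);   /* 0: completion raced us */
        return 0;
    }
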
8556 struct tg3_napi *tnapi = &tp->napi[j];
8558 tg3_rx_prodring_free(tp, &tnapi->prodring);
8560 if (!tnapi->tx_buffers)
8564 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8569 tg3_tx_skb_unmap(tnapi, i,
8593 struct tg3_napi *tnapi = &tp->napi[i];
8595 tnapi->last_tag = 0;
8596 tnapi->last_irq_tag = 0;
8597 tnapi->hw_status->status = 0;
8598 tnapi->hw_status->status_tag = 0;
8599 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8601 tnapi->tx_prod = 0;
8602 tnapi->tx_cons = 0;
8603 if (tnapi->tx_ring)
8604 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8606 tnapi->rx_rcb_ptr = 0;
8607 if (tnapi->rx_rcb)
8608 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8610 if (tnapi->prodring.rx_std &&
8611 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8625 struct tg3_napi *tnapi = &tp->napi[i];
8627 if (tnapi->tx_ring) {
8629 tnapi->tx_ring, tnapi->tx_desc_mapping);
8630 tnapi->tx_ring = NULL;
8633 kfree(tnapi->tx_buffers);
8634 tnapi->tx_buffers = NULL;
8641 struct tg3_napi *tnapi = &tp->napi[0];
8647 tnapi++;
8649 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8650 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8653 if (!tnapi->tx_buffers)
8656 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8658 &tnapi->tx_desc_mapping,
8660 if (!tnapi->tx_ring)
8676 struct tg3_napi *tnapi = &tp->napi[i];
8678 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8680 if (!tnapi->rx_rcb)
8685 tnapi->rx_rcb,
8686 tnapi->rx_rcb_mapping);
8687 tnapi->rx_rcb = NULL;
8704 struct tg3_napi *tnapi = &tp->napi[i];
8706 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8716 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8718 &tnapi->rx_rcb_mapping,
8720 if (!tnapi->rx_rcb)
8740 struct tg3_napi *tnapi = &tp->napi[i];
8742 if (tnapi->hw_status) {
8744 tnapi->hw_status,
8745 tnapi->status_mapping);
8746 tnapi->hw_status = NULL;
8779 struct tg3_napi *tnapi = &tp->napi[i];
8782 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8784 &tnapi->status_mapping,
8786 if (!tnapi->hw_status)
8789 sblk = tnapi->hw_status;
8814 tnapi->rx_rcb_prod_idx = prodptr;
8816 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
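The alloc/free groups above pair every ring and status block with dma_alloc_coherent(), so NIC and CPU share the memory without explicit sync calls, and tg3_alloc_consistent() points rx_rcb_prod_idx at wherever this particular chip reports the rx return-ring producer: a separate producer area (prodptr) on some multi-vector parts, otherwise straight into the per-vector status block at &sblk->idx[0].rx_producer. A userspace model of that pointer selection, with calloc() standing in for dma_alloc_coherent() and an invented status-block layout:

    /* Model of the rx_rcb_prod_idx selection in tg3_alloc_consistent(). */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct model_status_idx { uint32_t rx_producer; uint32_t tx_consumer; };
    struct model_status {
        uint32_t status;
        uint32_t status_tag;
        struct model_status_idx idx[16];    /* invented layout */
    };

    int main(void)
    {
        struct model_status *sblk = calloc(1, sizeof(*sblk));
        uint32_t *ext_prod = calloc(1, sizeof(*ext_prod));
        int uses_external_producer = 0;     /* chip-dependent in the driver */
        uint32_t *rx_rcb_prod_idx;

        rx_rcb_prod_idx = uses_external_producer ? ext_prod
                                                 : &sblk->idx[0].rx_producer;

        sblk->idx[0].rx_producer = 42;      /* pretend the NIC DMA-wrote this */
        printf("producer seen by driver: %u\n", *rx_rcb_prod_idx);   /* 42 */
        free(ext_prod);
        free(sblk);
        return 0;
    }
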
8950 struct tg3_napi *tnapi = &tp->napi[i];
8951 if (tnapi->hw_status)
8952 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9117 struct tg3_napi *tnapi = &tp->napi[i];
9118 if (tnapi->hw_status) {
9119 tnapi->hw_status->status = 0;
9120 tnapi->hw_status->status_tag = 0;
9122 tnapi->last_tag = 0;
9123 tnapi->last_irq_tag = 0;
9376 struct tg3_napi *tnapi = &tp->napi[i];
9378 tnapi->rx_dropped = 0;
9379 tnapi->tx_dropped = 0;
9560 struct tg3_napi *tnapi = &tp->napi[i];
9562 if (!tnapi->tx_ring)
9565 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9604 struct tg3_napi *tnapi = &tp->napi[i];
9606 if (!tnapi->rx_rcb)
9609 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9620 struct tg3_napi *tnapi = &tp->napi[0];
9662 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9666 ((u64) tnapi->status_mapping >> 32));
9668 ((u64) tnapi->status_mapping & 0xffffffff));
9672 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9673 u64 mapping = (u64)tnapi->status_mapping;
9679 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
10980 struct tg3_napi *tnapi = &tp->napi[i];
10982 if (tg3_has_work(tnapi)) {
10983 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10984 tnapi->last_tx_cons == tnapi->tx_cons) {
10985 if (tnapi->chk_msi_cnt < 1) {
10986 tnapi->chk_msi_cnt++;
10989 tg3_msi(0, tnapi);
10992 tnapi->chk_msi_cnt = 0;
10993 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10994 tnapi->last_tx_cons = tnapi->tx_cons;
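tg3_chk_missed_msi() above is a timer-driven safety net for lost MSIs: if a vector still has work pending (tg3_has_work()) and neither its rx nor its tx consumer position has moved since the previous timer tick, it grants one grace period via chk_msi_cnt and then calls tg3_msi() by hand to get NAPI running again. The detection logic as a standalone model:

    /* Model of the stuck-vector detection in tg3_chk_missed_msi(). */
    #include <stdbool.h>
    #include <stdio.h>

    struct model_vec {
        unsigned int rx_ptr, tx_cons;              /* current positions      */
        unsigned int last_rx_cons, last_tx_cons;   /* positions at last tick */
        unsigned int chk_msi_cnt;
    };

    /* Returns true when a manual tg3_msi()-style kick is warranted. */
    static bool model_check(struct model_vec *v, bool has_work)
    {
        if (has_work &&
            v->last_rx_cons == v->rx_ptr && v->last_tx_cons == v->tx_cons) {
            if (v->chk_msi_cnt < 1) {
                v->chk_msi_cnt++;                  /* one grace period */
                return false;
            }
            return true;                           /* MSI presumed lost */
        }
        v->chk_msi_cnt = 0;                        /* progress was made */
        v->last_rx_cons = v->rx_ptr;
        v->last_tx_cons = v->tx_cons;
        return false;
    }

    int main(void)
    {
        struct model_vec v = { 3, 4, 3, 4, 0 };
        int first = model_check(&v, true);         /* 0: grace period    */
        int second = model_check(&v, true);        /* 1: kick the vector */
        printf("%d %d\n", first, second);
        return 0;
    }
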
11260 struct tg3_napi *tnapi = &tp->napi[irq_num];
11265 name = &tnapi->irq_lbl[0];
11266 if (tnapi->tx_buffers && tnapi->rx_rcb)
11269 else if (tnapi->tx_buffers)
11272 else if (tnapi->rx_rcb)
11293 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11298 struct tg3_napi *tnapi = &tp->napi[0];
11308 free_irq(tnapi->irq_vec, tnapi);
11319 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11320 IRQF_SHARED, dev->name, tnapi);
11324 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11328 tnapi->coal_now);
11333 int_mbox = tr32_mailbox(tnapi->int_mbox);
11343 tnapi->hw_status->status_tag != tnapi->last_tag)
11344 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11351 free_irq(tnapi->irq_vec, tnapi);
11617 struct tg3_napi *tnapi = &tp->napi[i];
11619 free_irq(tnapi->irq_vec, tnapi);
11688 struct tg3_napi *tnapi = &tp->napi[i];
11689 free_irq(tnapi->irq_vec, tnapi);
11727 struct tg3_napi *tnapi = &tp->napi[i];
11728 free_irq(tnapi->irq_vec, tnapi);
11997 struct tg3_napi *tnapi = &tp->napi[i];
11999 rx_dropped += tnapi->rx_dropped;
12000 tx_dropped += tnapi->tx_dropped;
13474 struct tg3_napi *tnapi, *rnapi;
13477 tnapi = &tp->napi[0];
13483 tnapi = &tp->napi[1];
13485 coal_now = tnapi->coal_now | rnapi->coal_now;
13562 val = tnapi->tx_prod;
13563 tnapi->tx_buffers[val].skb = skb;
13564 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13573 budget = tg3_tx_avail(tnapi);
13574 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13576 tnapi->tx_buffers[val].skb = NULL;
13581 tnapi->tx_prod++;
13586 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13587 tr32_mailbox(tnapi->prodmbox);
13598 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13600 if ((tx_idx == tnapi->tx_prod) &&
13605 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13608 if (tx_idx != tnapi->tx_prod)
17905 struct tg3_napi *tnapi = &tp->napi[i];
17907 tnapi->tp = tp;
17908 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17910 tnapi->int_mbox = intmbx;
17916 tnapi->consmbox = rcvmbx;
17917 tnapi->prodmbox = sndmbx;
17920 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17922 tnapi->coal_now = HOSTCC_MODE_NOW;
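The final group is probe-time setup: each vector gets tp as its back pointer, the default ring depth, and its own interrupt, receive-consumer, and send-producer mailbox addresses (intmbx, rcvmbx, sndmbx advance between iterations of the surrounding loop, which is not part of this listing), plus a private coalescing bit: HOSTCC_MODE_NOW for vector 0, HOSTCC_MODE_COAL_VEC1_NOW << (i - 1) for the rest, so each vector can kick the coalescing engine independently. A sketch of the striding with invented register offsets (the real mailbox map lives in tg3.h and is not reproduced here):

    /* Sketch of the per-vector mailbox/coal_now striding at probe time. */
    #include <stdio.h>

    #define FAKE_INT_MBOX0  0x0200u     /* invented offsets, shape only  */
    #define FAKE_RCV_MBOX0  0x0280u
    #define FAKE_SND_MBOX0  0x0300u
    #define FAKE_COAL_NOW   0x0004u     /* stands in for HOSTCC_MODE_NOW */
    #define FAKE_COAL_VEC1  0x0400u     /* for HOSTCC_MODE_COAL_VEC1_NOW */

    int main(void)
    {
        unsigned int intmbx = FAKE_INT_MBOX0;
        unsigned int rcvmbx = FAKE_RCV_MBOX0;
        unsigned int sndmbx = FAKE_SND_MBOX0;
        int i, irq_cnt = 4;

        for (i = 0; i < irq_cnt; i++) {
            unsigned int coal_now = i ? FAKE_COAL_VEC1 << (i - 1)
                                      : FAKE_COAL_NOW;
            printf("vec%d int=%#x rcv=%#x snd=%#x coal=%#x\n",
                   i, intmbx, rcvmbx, sndmbx, coal_now);
            intmbx += 0x8;              /* placeholder strides */
            rcvmbx += 0x8;
            sndmbx += 0xc;
        }
        return 0;
    }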