Lines matching refs: txq

31  * @txq: txq on which skb was output
39 struct hfi1_ipoib_txq *txq;
47 struct hfi1_ipoib_txq *txq;
59 static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
61 return hfi1_ipoib_txreqs(txq->sent_txreqs,
62 atomic64_read(&txq->complete_txreqs));
65 static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
67 trace_hfi1_txq_stop(txq);
68 if (atomic_inc_return(&txq->stops) == 1)
69 netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
72 static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
74 trace_hfi1_txq_wake(txq);
75 if (atomic_dec_and_test(&txq->stops))
76 netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
79 static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
81 return min_t(uint, txq->priv->netdev->tx_queue_len,
82 txq->tx_ring.max_items - 1);
85 static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
87 return min_t(uint, txq->priv->netdev->tx_queue_len,
88 txq->tx_ring.max_items) >> 1;
91 static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
93 ++txq->sent_txreqs;
94 if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
95 !atomic_xchg(&txq->ring_full, 1)) {
96 trace_hfi1_txq_full(txq);
97 hfi1_ipoib_stop_txq(txq);
101 static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
103 struct net_device *dev = txq->priv->netdev;
119 if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
120 atomic_xchg(&txq->ring_full, 0)) {
121 trace_hfi1_txq_xmit_unstopped(txq);
122 hfi1_ipoib_wake_txq(txq);
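Lines 59-122 above are the per-txq flow control: the outstanding count is the difference between sent_txreqs and complete_txreqs, the netdev subqueue is stopped once that count reaches the high watermark (lines 79-82), and it is woken again only after completions drain it below the low watermark (lines 85-88), with atomic_xchg() on ring_full making each stop/wake transition fire exactly once. A condensed sketch of that pattern, reusing the watermark and stop/wake helpers shown above; the trace points and the netdev registration check of the real functions are omitted:

    static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
    {
            /* outstanding requests = sent - completed */
            return hfi1_ipoib_txreqs(txq->sent_txreqs,
                                     atomic64_read(&txq->complete_txreqs));
    }

    static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
    {
            ++txq->sent_txreqs;
            /* Stop the subqueue at the high watermark, but only on the
             * 0 -> 1 transition of ring_full.
             */
            if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
                !atomic_xchg(&txq->ring_full, 1))
                    hfi1_ipoib_stop_txq(txq);
    }

    static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
    {
            /* Wake only after draining below the low watermark, and only
             * on the 1 -> 0 transition of ring_full.
             */
            if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
                atomic_xchg(&txq->ring_full, 0))
                    hfi1_ipoib_wake_txq(txq);
    }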
135 "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
137 le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
138 tx->txq->sde->this_idx);
146 static int hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq, int budget)
148 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
169 atomic64_add(work_done, &txq->complete_txreqs);
176 hfi1_ipoib_check_queue_stopped(txq);
184 struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis];
186 int work_done = hfi1_ipoib_drain_tx_ring(txq, budget);
196 struct hfi1_ipoib_circ_buf *tx_ring = &tx->txq->tx_ring;
212 napi_schedule(tx->txq->napi);
214 struct hfi1_ipoib_txq *txq = tx->txq;
219 atomic64_inc(&txq->complete_txreqs);
220 dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx);
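Lines 146-220 cover the completion side: the SDMA completion handler pushes finished txreqs into the per-txq circular buffer and schedules that txq's NAPI (line 212), or, if the ring has no space, frees the request inline and bumps complete_txreqs directly (lines 219-220); the NAPI poll then drains up to the budget, credits the freed requests to complete_txreqs, and re-evaluates the stopped state (lines 169-176). A minimal sketch of the poll side; the function name and the hfi1_ipoib_priv() netdev-to-priv lookup are assumptions, and only the drain helper's signature from line 146 is taken from the listing:

    static int hfi1_ipoib_poll_tx_ring(struct napi_struct *napi, int budget)
    {
            struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(napi->dev);
            /* Each tx NAPI context maps 1:1 onto a txq by array index. */
            struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis];
            int work_done = hfi1_ipoib_drain_tx_ring(txq, budget);

            if (work_done < budget)
                    napi_complete_done(napi, work_done);

            return work_done;
    }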
366 ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->sent_txreqs));
401 tx->txq = txp->txq;
409 if (txp->txq->flow.as_int != txp->flow.as_int) {
410 txp->txq->flow.tx_queue = txp->flow.tx_queue;
411 txp->txq->flow.sc5 = txp->flow.sc5;
412 txp->txq->sde =
416 trace_hfi1_flow_switch(txp->txq);
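Lines 366-416 show how a txq tracks its transmit flow: the BTH PSN is derived from the per-txq sent_txreqs counter (line 366), and the (tx_queue, sc5) pair is cached as one integer (flow.as_int) so a change in either field is caught with a single compare, at which point a new SDMA engine is selected for the txq. A sketch of that check, wrapped in a hypothetical helper for readability; the driver does this inline in its tx build path:

    static void hfi1_ipoib_update_flow(struct hfi1_ipoib_dev_priv *priv,
                                       struct ipoib_txparms *txp)
    {
            /* One compare covers both tx_queue and sc5. */
            if (txp->txq->flow.as_int != txp->flow.as_int) {
                    txp->txq->flow.tx_queue = txp->flow.tx_queue;
                    txp->txq->flow.sc5 = txp->flow.sc5;
                    /* Re-select the SDMA engine for the new flow. */
                    txp->txq->sde =
                            sdma_select_engine_sc(priv->dd,
                                                  txp->flow.tx_queue,
                                                  txp->flow.sc5);
            }
    }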
429 struct hfi1_ipoib_txq *txq)
434 ret = sdma_send_txlist(txq->sde,
435 iowait_get_ib_work(&txq->wait),
436 &txq->tx_list,
441 dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret);
447 struct hfi1_ipoib_txq *txq)
451 if (!list_empty(&txq->tx_list)) {
453 ret = hfi1_ipoib_submit_tx_list(dev, txq);
463 static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq,
468 ret = sdma_send_txreq(txq->sde,
469 iowait_get_ib_work(&txq->wait),
471 txq->pkts_sent);
473 txq->pkts_sent = true;
474 iowait_starve_clear(txq->pkts_sent, &txq->wait);
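Lines 429-474 show the two submit paths into SDMA: a parked batch on txq->tx_list is pushed with sdma_send_txlist() against the txq's iowait work (lines 434-436), while a single request uses sdma_send_txreq(). A condensed sketch of the single-request submit from lines 463-474, assuming this driver's ipoib_txreq with an embedded sdma_txreq:

    static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq,
                                    struct ipoib_txreq *tx)
    {
            int ret;

            ret = sdma_send_txreq(txq->sde,
                                  iowait_get_ib_work(&txq->wait),
                                  &tx->txreq,
                                  txq->pkts_sent);
            if (likely(!ret)) {
                    /* Record forward progress for iowait starvation tracking. */
                    txq->pkts_sent = true;
                    iowait_starve_clear(txq->pkts_sent, &txq->wait);
            }

            return ret;
    }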
485 struct hfi1_ipoib_txq *txq = txp->txq;
503 ret = hfi1_ipoib_submit_tx(txq, tx);
509 hfi1_ipoib_check_queue_depth(txq);
513 txq->pkts_sent = false;
530 struct hfi1_ipoib_txq *txq = txp->txq;
534 if (txq->flow.as_int != txp->flow.as_int) {
537 trace_hfi1_flow_flush(txq);
538 ret = hfi1_ipoib_flush_tx_list(dev, txq);
560 list_add_tail(&tx->txreq.list, &txq->tx_list);
562 hfi1_ipoib_check_queue_depth(txq);
569 (void)hfi1_ipoib_flush_tx_list(dev, txq);
607 txp.txq = &priv->txqs[skb_get_queue_mapping(skb)];
613 if (netdev_xmit_more() || !list_empty(&txp.txq->tx_list))
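Lines 485-613 tie the transmit paths together: the skb's queue mapping selects the txq (line 607), and if the stack signals further packets (netdev_xmit_more()) or a batch is already parked on txq->tx_list, the request is appended to the list and flushed only when the batch ends (lines 560-569); otherwise it is handed to SDMA immediately, with hfi1_ipoib_check_queue_depth() run in either case. A sketch of that dispatch with a condensed signature; the _dma_list()/_dma_single() helper names and the hfi1_ipoib_priv() lookup are assumed, not taken from the matched lines:

    static netdev_tx_t hfi1_ipoib_dispatch_tx(struct net_device *dev,
                                              struct sk_buff *skb,
                                              struct ipoib_txparms *txp)
    {
            struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);

            /* One txq per stack tx queue. */
            txp->txq = &priv->txqs[skb_get_queue_mapping(skb)];

            /* Batch while the stack promises more packets, or while an
             * earlier batch is still parked on the list.
             */
            if (netdev_xmit_more() || !list_empty(&txp->txq->tx_list))
                    return hfi1_ipoib_send_dma_list(dev, skb, txp);

            return hfi1_ipoib_send_dma_single(dev, skb, txp);
    }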
633 struct hfi1_ipoib_txq *txq =
638 if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) {
646 list_add_tail(&txreq->list, &txq->tx_list);
647 if (list_empty(&txq->wait.list)) {
648 if (!atomic_xchg(&txq->no_desc, 1)) {
649 trace_hfi1_txq_queued(txq);
650 hfi1_ipoib_stop_txq(txq);
671 struct hfi1_ipoib_txq *txq =
674 trace_hfi1_txq_wakeup(txq);
675 if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
684 struct hfi1_ipoib_txq *txq =
686 struct net_device *dev = txq->priv->netdev;
689 likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
690 if (atomic_xchg(&txq->no_desc, 0))
691 hfi1_ipoib_wake_txq(txq);
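Lines 633-691 are the descriptor-exhaustion handshake with the SDMA layer: when the engine's sleep callback fires, the pending txreq is parked on txq->tx_list, no_desc is latched with atomic_xchg() and the subqueue stopped (lines 646-650); the later SDMA wakeup (lines 671-675) schedules the txq's iowait work, which re-flushes the parked list and, only if that succeeds, clears no_desc and wakes the subqueue (lines 684-691). A condensed sketch of that flush work; intermediate variable names are paraphrased and the sleep callback's seqlock and sdma_progress() recheck are not shown:

    static void hfi1_ipoib_flush_txq(struct work_struct *work)
    {
            struct iowait_work *ioww =
                    container_of(work, struct iowait_work, iowork);
            struct iowait *wait = iowait_ioww_to_iow(ioww);
            struct hfi1_ipoib_txq *txq =
                    container_of(wait, struct hfi1_ipoib_txq, wait);
            struct net_device *dev = txq->priv->netdev;

            /* Wake the subqueue only once the parked list has actually
             * been handed back to the SDMA engine.
             */
            if (likely(dev->reg_state == NETREG_REGISTERED) &&
                likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
                    if (atomic_xchg(&txq->no_desc, 0))
                            hfi1_ipoib_wake_txq(txq);
    }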
731 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
733 iowait_init(&txq->wait,
741 txq->priv = priv;
742 txq->sde = NULL;
743 INIT_LIST_HEAD(&txq->tx_list);
744 atomic64_set(&txq->complete_txreqs, 0);
745 atomic_set(&txq->stops, 0);
746 atomic_set(&txq->ring_full, 0);
747 atomic_set(&txq->no_desc, 0);
748 txq->q_idx = i;
749 txq->flow.tx_queue = 0xff;
750 txq->flow.sc5 = 0xff;
751 txq->pkts_sent = false;
756 txq->tx_ring.items =
760 if (!txq->tx_ring.items)
763 spin_lock_init(&txq->tx_ring.producer_lock);
764 spin_lock_init(&txq->tx_ring.consumer_lock);
765 txq->tx_ring.max_items = tx_ring_size;
767 txq->napi = &priv->tx_napis[i];
768 netif_tx_napi_add(dev, txq->napi,
777 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
779 netif_napi_del(txq->napi);
780 kfree(txq->tx_ring.items);
796 static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
800 atomic64_t *complete_txreqs = &txq->complete_txreqs;
802 list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) {
807 sdma_txclean(txq->priv->dd, &tx->txreq);
809 kmem_cache_free(txq->priv->txreq_cache, tx);
813 if (hfi1_ipoib_used(txq))
814 dd_dev_warn(txq->priv->dd,
815 "txq %d not empty found %llu requests\n",
816 txq->q_idx,
817 hfi1_ipoib_txreqs(txq->sent_txreqs,
826 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
828 iowait_cancel_work(&txq->wait);
829 iowait_sdma_drain(&txq->wait);
830 hfi1_ipoib_drain_tx_list(txq);
831 netif_napi_del(txq->napi);
832 (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
833 kfree(txq->tx_ring.items);
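Lines 796-833 handle teardown: any txreqs still parked on tx_list are cleaned and freed, the per-txq counters are reconciled (with a warning if sent and completed still disagree), and each txq's iowait work, NAPI context, and ring storage are torn down in order (lines 828-833). Condensed from lines 796-817, a sketch of the list drain; the dev_kfree_skb_any() call is an assumption the listing elides:

    static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
    {
            struct sdma_txreq *txreq;
            struct sdma_txreq *txreq_tmp;
            atomic64_t *complete_txreqs = &txq->complete_txreqs;

            list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) {
                    struct ipoib_txreq *tx =
                            container_of(txreq, struct ipoib_txreq, txreq);

                    list_del(&txreq->list);
                    sdma_txclean(txq->priv->dd, &tx->txreq);
                    dev_kfree_skb_any(tx->skb);
                    kmem_cache_free(txq->priv->txreq_cache, tx);
                    atomic64_inc(complete_txreqs);
            }

            /* sent and completed should now agree; warn if they do not. */
            if (hfi1_ipoib_used(txq))
                    dd_dev_warn(txq->priv->dd,
                                "txq %d not empty found %llu requests\n",
                                txq->q_idx,
                                hfi1_ipoib_txreqs(txq->sent_txreqs,
                                                  atomic64_read(complete_txreqs)));
    }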
852 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
854 napi_enable(txq->napi);
864 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
866 napi_disable(txq->napi);
867 (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);