Lines Matching refs:txq

29 	struct hfi1_ipoib_txq      *txq;
47 static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
49 return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs,
50 txq->tx_ring.complete_txreqs);
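
The in-flight count above is just the difference of two free-running u64 counters (sent minus completed); unsigned subtraction keeps that correct even if the counters ever wrap. A minimal user-space sketch of the arithmetic, with hypothetical toy_* names:

    #include <stdint.h>
    #include <stdio.h>

    /* In-flight requests as the difference of two free-running counters;
     * the u64 subtraction stays correct even across wraparound. */
    static uint64_t toy_txreqs_used(uint64_t sent, uint64_t completed)
    {
            return sent - completed;
    }

    int main(void)
    {
            /* sent has wrapped past zero, yet the delta is still 3 */
            printf("%llu\n", (unsigned long long)toy_txreqs_used(2, UINT64_MAX));
            return 0;
    }
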
53 static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
55 trace_hfi1_txq_stop(txq);
56 if (atomic_inc_return(&txq->tx_ring.stops) == 1)
57 netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
60 static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
62 trace_hfi1_txq_wake(txq);
63 if (atomic_dec_and_test(&txq->tx_ring.stops))
64 netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
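
hfi1_ipoib_stop_txq() and hfi1_ipoib_wake_txq() reference-count the reasons for stopping: only the 0->1 transition of tx_ring.stops stops the subqueue and only the 1->0 transition wakes it, so the ring-full and no-descriptor conditions can overlap without a premature wake. A user-space model of that pattern with C11 atomics; the toy_* names and puts() output are placeholders, not driver code:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int stops;                    /* models txq->tx_ring.stops */

    static void toy_stop_txq(void)
    {
            if (atomic_fetch_add(&stops, 1) + 1 == 1)
                    puts("netif_stop_subqueue");        /* only the first reason stops */
    }

    static void toy_wake_txq(void)
    {
            if (atomic_fetch_sub(&stops, 1) - 1 == 0)
                    puts("netif_wake_subqueue");        /* only the last reason wakes */
    }

    int main(void)
    {
            toy_stop_txq();     /* ring full        -> subqueue stopped */
            toy_stop_txq();     /* no descriptors   -> already stopped, no-op */
            toy_wake_txq();     /* descriptors back -> one stop reason still held */
            toy_wake_txq();     /* ring drained     -> subqueue woken */
            return 0;
    }
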
67 static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
69 return min_t(uint, txq->priv->netdev->tx_queue_len,
70 txq->tx_ring.max_items - 1);
73 static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
75 return min_t(uint, txq->priv->netdev->tx_queue_len,
76 txq->tx_ring.max_items) >> 1;
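
Together these two helpers give the queue hysteresis: submission stops one entry short of a full ring and resumes only after it has drained to half, with both thresholds clamped to the netdev's tx_queue_len. A standalone sketch of the arithmetic under assumed sizes:

    #include <stdio.h>

    #define MIN(a, b)       ((a) < (b) ? (a) : (b))

    int main(void)
    {
            unsigned int tx_queue_len = 1024;   /* assumed netdev tx_queue_len */
            unsigned int max_items = 512;       /* assumed ring size */

            /* Stop submitting one entry short of a completely full ring... */
            unsigned int hwat = MIN(tx_queue_len, max_items - 1);
            /* ...and wake again only once the ring has drained to half. */
            unsigned int lwat = MIN(tx_queue_len, max_items) >> 1;

            printf("hwat=%u lwat=%u\n", hwat, lwat);        /* hwat=511 lwat=256 */
            return 0;
    }
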
79 static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
81 ++txq->tx_ring.sent_txreqs;
82 if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
83 !atomic_xchg(&txq->tx_ring.ring_full, 1)) {
84 trace_hfi1_txq_full(txq);
85 hfi1_ipoib_stop_txq(txq);
89 static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
91 struct net_device *dev = txq->priv->netdev;
107 if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
108 atomic_xchg(&txq->tx_ring.ring_full, 0)) {
109 trace_hfi1_txq_xmit_unstopped(txq);
110 hfi1_ipoib_wake_txq(txq);
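
hfi1_ipoib_check_queue_depth() and hfi1_ipoib_check_queue_stopped() pair the watermark tests with atomic_xchg() on ring_full, so the stop and its matching wake fire exactly once per full/drained episode even if several contexts cross the threshold at the same time. A C11 model of that edge-triggered guard; toy_* names are hypothetical:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int ring_full;                /* models txq->tx_ring.ring_full */

    /* True exactly once per "ring crossed the high watermark" episode. */
    static bool toy_note_full(void)
    {
            return atomic_exchange(&ring_full, 1) == 0;
    }

    /* True exactly once per "ring drained below the low watermark" episode. */
    static bool toy_note_drained(void)
    {
            return atomic_exchange(&ring_full, 0) == 1;
    }

    int main(void)
    {
            printf("%d\n", toy_note_full());    /* 1: this caller stops the queue */
            printf("%d\n", toy_note_full());    /* 0: already marked full */
            printf("%d\n", toy_note_drained()); /* 1: one wake matches the one stop */
            return 0;
    }
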
116 struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
123 "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
125 le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx,
126 tx->txq->sde->this_idx);
134 static void hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq)
136 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
145 sdma_txclean(txq->priv->dd, &tx->txreq);
151 tx_ring->avail = hfi1_ipoib_ring_hwat(txq);
156 struct hfi1_ipoib_txq *txq =
158 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
164 trace_hfi1_txq_poll(txq);
180 hfi1_ipoib_check_queue_stopped(txq);
192 trace_hfi1_txq_complete(tx->txq);
196 napi_schedule_irqoff(&tx->txq->napi);
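
The SDMA completion callback (lines 192/196 above) only notes the completion and schedules NAPI; hfi1_ipoib_poll_tx_ring() then reaps finished ring entries in softirq context and re-runs the queue-stopped check. A stripped-down user-space model of such a reap loop; the toy_ring layout is illustrative, not the driver's:

    #include <stdio.h>

    #define RING_SIZE 8                         /* power of two, like tx_ring.max_items */

    struct toy_ring {
            unsigned int head;                  /* next slot to reap */
            unsigned int tail;                  /* next slot to post */
            int complete[RING_SIZE];            /* set when the (simulated) DMA finishes */
    };

    /* Model of the NAPI poll: reap completed slots, up to the budget. */
    static int toy_poll(struct toy_ring *r, int budget)
    {
            int done = 0;

            while (done < budget && r->head != r->tail &&
                   r->complete[r->head & (RING_SIZE - 1)]) {
                    r->complete[r->head & (RING_SIZE - 1)] = 0;
                    r->head++;
                    done++;
            }
            return done;    /* a real poll would re-check the stopped queue here */
    }

    int main(void)
    {
            struct toy_ring ring = { .tail = 3, .complete = { 1, 1, 0 } };

            printf("reaped %d\n", toy_poll(&ring, 64));     /* reaped 2 */
            return 0;
    }
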
259 struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
330 ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->tx_ring.sent_txreqs));
353 struct hfi1_ipoib_txq *txq = txp->txq;
355 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
362 if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq))
368 min_t(u32, hfi1_ipoib_ring_hwat(txq),
374 trace_hfi1_txq_alloc_tx(txq);
378 tx->txq = txq;
386 if (txq->flow.as_int != txp->flow.as_int) {
387 txq->flow.tx_queue = txp->flow.tx_queue;
388 txq->flow.sc5 = txp->flow.sc5;
389 txq->sde =
393 trace_hfi1_flow_switch(txq);
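
Each allocated tx is bound to its txq, and the txq caches the current (tx_queue, sc5) flow together with the SDMA engine chosen for it; only when the packed flow.as_int value changes is a new sde selected and the switch traced. A hedged sketch of that compare-the-packed-key idea; union toy_flow is modelled on, but is not, the driver's type:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Packed flow key: both fields compare in a single integer test. */
    union toy_flow {
            struct {
                    uint8_t tx_queue;
                    uint8_t sc5;
            };
            uint16_t as_int;
    };

    static bool toy_flow_changed(union toy_flow *cached, union toy_flow next)
    {
            if (cached->as_int == next.as_int)
                    return false;               /* same flow: keep the cached SDMA engine */
            *cached = next;                     /* new flow: caller re-selects txq->sde */
            return true;
    }

    int main(void)
    {
            union toy_flow cached = { .tx_queue = 0xff, .sc5 = 0xff };  /* "no flow yet" */
            union toy_flow next = { .tx_queue = 2, .sc5 = 1 };

            printf("%d\n", toy_flow_changed(&cached, next));    /* 1: switch engines */
            printf("%d\n", toy_flow_changed(&cached, next));    /* 0: stay put */
            return 0;
    }
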
405 struct hfi1_ipoib_txq *txq)
410 ret = sdma_send_txlist(txq->sde,
411 iowait_get_ib_work(&txq->wait),
412 &txq->tx_list,
417 dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret);
423 struct hfi1_ipoib_txq *txq)
427 if (!list_empty(&txq->tx_list)) {
429 ret = hfi1_ipoib_submit_tx_list(dev, txq);
439 static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq,
444 ret = sdma_send_txreq(txq->sde,
445 iowait_get_ib_work(&txq->wait),
447 txq->pkts_sent);
449 txq->pkts_sent = true;
450 iowait_starve_clear(txq->pkts_sent, &txq->wait);
460 struct hfi1_ipoib_txq *txq = txp->txq;
479 tx_ring = &txq->tx_ring;
483 ret = hfi1_ipoib_submit_tx(txq, tx);
486 trace_sdma_output_ibhdr(txq->priv->dd,
489 hfi1_ipoib_check_queue_depth(txq);
493 txq->pkts_sent = false;
500 napi_schedule(&tx->txq->napi);
511 struct hfi1_ipoib_txq *txq = txp->txq;
516 if (txq->flow.as_int != txp->flow.as_int) {
519 trace_hfi1_flow_flush(txq);
520 ret = hfi1_ipoib_flush_tx_list(dev, txq);
542 tx_ring = &txq->tx_ring;
546 list_add_tail(&tx->txreq.list, &txq->tx_list);
548 hfi1_ipoib_check_queue_depth(txq);
550 trace_sdma_output_ibhdr(txq->priv->dd,
555 (void)hfi1_ipoib_flush_tx_list(dev, txq);
593 txp.txq = &priv->txqs[skb_get_queue_mapping(skb)];
599 if (netdev_xmit_more() || !list_empty(&txp.txq->tx_list))
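
The transmit entry point chooses between two paths: if netdev_xmit_more() is set or requests are already parked on txq->tx_list, the new request joins the list and the batch is flushed later; otherwise it goes to the SDMA engine directly. A trivial sketch of that decision only; the strings describe the assumed behaviour of each path rather than driver calls:

    #include <stdbool.h>
    #include <stdio.h>

    /* Decide between the direct and the batched path, modelled on the
     * netdev_xmit_more() / tx_list test in the transmit entry point. */
    static const char *toy_pick_path(bool xmit_more, bool tx_list_nonempty)
    {
            if (xmit_more || tx_list_nonempty)
                    return "append to txq->tx_list, flush once xmit_more clears";
            return "hand the request to the SDMA engine directly";
    }

    int main(void)
    {
            printf("%s\n", toy_pick_path(true, false));
            printf("%s\n", toy_pick_path(false, true));     /* stay behind the backlog */
            printf("%s\n", toy_pick_path(false, false));
            return 0;
    }
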
619 struct hfi1_ipoib_txq *txq =
624 if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) {
632 list_add_tail(&txreq->list, &txq->tx_list);
633 if (list_empty(&txq->wait.list)) {
636 if (!atomic_xchg(&txq->tx_ring.no_desc, 1)) {
637 trace_hfi1_txq_queued(txq);
638 hfi1_ipoib_stop_txq(txq);
660 struct hfi1_ipoib_txq *txq =
663 trace_hfi1_txq_wakeup(txq);
664 if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
673 struct hfi1_ipoib_txq *txq =
675 struct net_device *dev = txq->priv->netdev;
678 likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
679 if (atomic_xchg(&txq->tx_ring.no_desc, 0))
680 hfi1_ipoib_wake_txq(txq);
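
Descriptor shortage reuses the same stop/wake bookkeeping: the sdma sleep callback parks the request on txq->tx_list and stops the queue once via the no_desc flag, while the flush work run after the wakeup callback resubmits the list and issues the single matching wake. A C11 model of that hand-off; toy_* names and puts() output are placeholders:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int no_desc;                  /* models txq->tx_ring.no_desc */

    /* Sleep callback: the SDMA engine ran out of descriptors mid-submit. */
    static void toy_sdma_sleep(void)
    {
            puts("park txreq on txq->tx_list");
            if (atomic_exchange(&no_desc, 1) == 0)
                    puts("hfi1_ipoib_stop_txq()");      /* add one stop reason, once */
    }

    /* Flush work, run after the wakeup callback says descriptors are back. */
    static void toy_flush_work(void)
    {
            puts("flush txq->tx_list to the SDMA engine");
            if (atomic_exchange(&no_desc, 0) == 1)
                    puts("hfi1_ipoib_wake_txq()");      /* drop that stop reason, once */
    }

    int main(void)
    {
            toy_sdma_sleep();
            toy_sdma_sleep();                   /* second shortage: no double stop */
            toy_flush_work();                   /* one wake balances the one stop */
            return 0;
    }
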
705 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
708 tx_ring = &txq->tx_ring;
709 iowait_init(&txq->wait,
717 txq->priv = priv;
718 txq->sde = NULL;
719 INIT_LIST_HEAD(&txq->tx_list);
720 atomic_set(&txq->tx_ring.stops, 0);
721 atomic_set(&txq->tx_ring.ring_full, 0);
722 atomic_set(&txq->tx_ring.no_desc, 0);
723 txq->q_idx = i;
724 txq->flow.tx_queue = 0xff;
725 txq->flow.sc5 = 0xff;
726 txq->pkts_sent = false;
731 txq->tx_ring.items =
734 if (!txq->tx_ring.items)
737 txq->tx_ring.max_items = tx_ring_size;
738 txq->tx_ring.shift = ilog2(tx_item_size);
739 txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
740 tx_ring = &txq->tx_ring;
749 netif_napi_add_tx(dev, &txq->napi, hfi1_ipoib_poll_tx_ring);
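
Ring setup allocates a flat array of max_items entries and records shift = ilog2(item size), so a slot can be located with a shift instead of a multiply, and avail starts at the high watermark. A user-space sketch of that layout under assumed sizes, with __builtin_clz() standing in for the kernel's ilog2():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned int max_items = 512;       /* assumed tx_ring_size */
            unsigned int item_size = 128;       /* assumed padded per-slot size */
            unsigned int shift = 31 - __builtin_clz(item_size);    /* stand-in for ilog2() */
            uint8_t *items = calloc(max_items, item_size);

            if (!items)
                    return 1;
            /* A slot is found by shifting, not multiplying: items + (index << shift). */
            printf("slot 5 sits at byte offset %u\n", 5u << shift); /* 640 */
            free(items);
            return 0;
    }
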
756 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
758 netif_napi_del(&txq->napi);
759 tx_ring = &txq->tx_ring;
770 static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
775 list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) {
780 sdma_txclean(txq->priv->dd, &tx->txreq);
783 txq->tx_ring.complete_txreqs++;
786 if (hfi1_ipoib_used(txq))
787 dd_dev_warn(txq->priv->dd,
788 "txq %d not empty found %u requests\n",
789 txq->q_idx,
790 hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs,
791 txq->tx_ring.complete_txreqs));
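
At teardown, any request still parked on tx_list never reached the hardware, so the drain walks the list with the safe iterator, cleans and frees each entry, and counts it as complete so the sent/complete counters balance (the warning above fires if they still do not). A user-space model of that safe walk; the toy_txreq list is illustrative only:

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_txreq {
            struct toy_txreq *next;
    };

    /* Drain requests that never reached the hardware: clean, free, and count
     * each one as complete so the sent/complete counters balance again. */
    static unsigned long toy_drain_list(struct toy_txreq *head)
    {
            unsigned long completed = 0;
            struct toy_txreq *next;

            for (; head; head = next) {
                    next = head->next;  /* grab next first, as list_for_each_entry_safe does */
                    free(head);
                    completed++;
            }
            return completed;
    }

    int main(void)
    {
            struct toy_txreq *a = calloc(1, sizeof(*a));
            struct toy_txreq *b = calloc(1, sizeof(*b));

            if (!a || !b)
                    return 1;
            a->next = b;
            printf("drained %lu\n", toy_drain_list(a));     /* drained 2 */
            return 0;
    }
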
799 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
800 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
802 iowait_cancel_work(&txq->wait);
803 iowait_sdma_drain(&txq->wait);
804 hfi1_ipoib_drain_tx_list(txq);
805 netif_napi_del(&txq->napi);
806 hfi1_ipoib_drain_tx_ring(txq);
822 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
824 napi_enable(&txq->napi);
834 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
836 napi_disable(&txq->napi);
837 hfi1_ipoib_drain_tx_ring(txq);
844 struct hfi1_ipoib_txq *txq = &priv->txqs[q];
846 dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n",
847 txq, q,
848 __netif_subqueue_stopped(dev, txq->q_idx),
849 atomic_read(&txq->tx_ring.stops),
850 atomic_read(&txq->tx_ring.no_desc),
851 atomic_read(&txq->tx_ring.ring_full));
853 txq->sde,
854 txq->sde ? txq->sde->this_idx : 0);
855 dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
857 txq->tx_ring.sent_txreqs, txq->tx_ring.complete_txreqs,
858 hfi1_ipoib_used(txq));
860 dev->tx_queue_len, txq->tx_ring.max_items);
862 txq->tx_ring.head, txq->tx_ring.tail);
864 !list_empty(&txq->wait.list));
866 list_empty(&txq->tx_list));