Lines matching refs: txq

56 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
59 int tx_flags, struct ath_txq *txq,
62 struct ath_txq *txq, struct list_head *bf_q,
65 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
73 struct ath_txq *txq,
107 void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
108 __releases(&txq->axq_lock)
115 skb_queue_splice_init(&txq->complete_q, &q);
116 spin_unlock_bh(&txq->axq_lock);
135 struct ath_txq *txq = tid->txq;
141 ath_txq_lock(sc, txq);
142 ath_txq_schedule(sc, txq);
143 ath_txq_unlock(sc, txq);
211 static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
215 int q = fi->txq;
220 txq = sc->tx.txq_map[q];
221 if (WARN_ON(--txq->pending_frames < 0))
222 txq->pending_frames = 0;
236 struct ieee80211_txq *txq = container_of((void*)tid, struct ieee80211_txq, drv_priv);
240 .txq = tid->txq,
247 skb = ieee80211_tx_dequeue(hw, txq);
258 if (tid->txq == sc->tx.txq_map[q]) {
260 fi->txq = q;
261 ++tid->txq->pending_frames;
281 struct ath_txq *txq = tid->txq;
297 ath_txq_skb_done(sc, txq, skb);
308 ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
312 ath_txq_unlock(sc, txq);
314 ath_txq_lock(sc, txq);
363 static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
382 ath_tx_complete(sc, skb, ATH_TX_ERROR, txq, NULL);
387 ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
391 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
399 TX_STAT_INC(sc, txq->axq_qnum, a_retries);
488 static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
528 ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, ts, 0);
601 ath_tx_set_retry(sc, txq, bf->bf_mpdu,
637 ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
657 ath_tx_complete_buf(sc, bf, txq,
697 ath_txq_unlock(sc, txq);
699 ath_txq_lock(sc, txq);
730 static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
743 txq->axq_tx_inprogress = false;
745 txq->axq_depth--;
747 txq->axq_ampdu_depth--;
771 ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
773 ath_tx_complete_aggr(sc, txq, bf, bf_head, sta, tid, ts, txok);
776 ath_txq_schedule(sc, txq);
809 int q = tid->txq->mac80211_qnum;
943 ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
961 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
966 ath_txq_skb_done(sc, txq, skb);
1017 ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
1032 ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
1090 ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf);
1105 TX_STAT_INC(sc, txq->axq_qnum, a_aggr);
1409 struct ath_txq *txq, int len)
1420 info.qcu = txq->axq_qnum;
1440 txq == sc->tx.uapsdq)
1501 ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
1520 ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf);
1534 static int ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
1545 ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf);
1551 if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
1552 (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
1559 aggr_len = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf);
1561 ath_tx_form_burst(sc, txq, tid, &bf_q, bf);
1571 ath_tx_fill_desc(sc, bf, txq, aggr_len);
1572 ath_tx_txqaddbuf(sc, txq, &bf_q, false);
1581 struct ath_txq *txq;
1589 txq = txtid->txq;
1591 ath_txq_lock(sc, txq);
1611 ath_txq_unlock_complete(sc, txq);
1621 struct ath_txq *txq = txtid->txq;
1625 ath_txq_lock(sc, txq);
1628 ath_txq_unlock_complete(sc, txq);
1653 struct ath_txq *txq;
1660 txq = tid->txq;
1662 ath_txq_lock(sc, txq);
1666 ath_txq_schedule(sc, txq);
1668 ath_txq_unlock_complete(sc, txq);
1697 struct ath_txq *txq = sc->tx.uapsdq;
1712 ath_txq_lock(sc, tid->txq);
1729 TX_STAT_INC(sc, txq->axq_qnum, a_queued_hw);
1734 ath_txq_unlock_complete(sc, tid->txq);
1747 ath_txq_lock(sc, txq);
1748 ath_tx_fill_desc(sc, bf, txq, 0);
1749 ath_tx_txqaddbuf(sc, txq, &bf_q, false);
1750 ath_txq_unlock(sc, txq);
1809 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
1811 txq->axq_qnum = axq_qnum;
1812 txq->mac80211_qnum = -1;
1813 txq->axq_link = NULL;
1814 __skb_queue_head_init(&txq->complete_q);
1815 INIT_LIST_HEAD(&txq->axq_q);
1816 spin_lock_init(&txq->axq_lock);
1817 txq->axq_depth = 0;
1818 txq->axq_ampdu_depth = 0;
1819 txq->axq_tx_inprogress = false;
1822 txq->txq_headidx = txq->txq_tailidx = 0;
1824 INIT_LIST_HEAD(&txq->txq_fifo[i]);
1826 return &sc->tx.txq[axq_qnum];
1836 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
1871 static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1894 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
1904 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
1907 ath_txq_lock(sc, txq);
1910 int idx = txq->txq_tailidx;
1912 while (!list_empty(&txq->txq_fifo[idx])) {
1913 ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);
1917 txq->txq_tailidx = idx;
1920 txq->axq_link = NULL;
1921 txq->axq_tx_inprogress = false;
1922 ath_drain_txq_list(sc, txq, &txq->axq_q);
1924 ath_txq_unlock_complete(sc, txq);
1932 struct ath_txq *txq;
1946 if (!sc->tx.txq[i].axq_depth)
1949 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum))
1963 txq = &sc->tx.txq[i];
1964 ath_draintxq(sc, txq);
1970 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1972 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1973 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1979 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1987 if (txq->mac80211_qnum < 0)
1993 ieee80211_txq_schedule_start(hw, txq->mac80211_qnum);
2000 while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
2005 ret = ath_tx_sched_aggr(sc, txq, tid);
2015 ieee80211_txq_schedule_end(hw, txq->mac80211_qnum);
2020 struct ath_txq *txq;
2024 txq = sc->tx.txq_map[i];
2026 spin_lock_bh(&txq->axq_lock);
2027 ath_txq_schedule(sc, txq);
2028 spin_unlock_bh(&txq->axq_lock);
2037 * Insert a chain of ath_buf (descriptors) on a txq and
2040 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
2061 ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
2062 txq->axq_qnum, txq->axq_depth);
2064 if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
2065 list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
2066 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
2069 list_splice_tail_init(head, &txq->axq_q);
2071 if (txq->axq_link) {
2072 ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
2074 txq->axq_qnum, txq->axq_link,
2079 txq->axq_link = bf_last->bf_desc;
2083 TX_STAT_INC(sc, txq->axq_qnum, puttxbuf);
2084 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
2086 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
2090 TX_STAT_INC(sc, txq->axq_qnum, txstart);
2091 ath9k_hw_txstart(ah, txq->axq_qnum);
2096 txq->axq_depth++;
2098 txq->axq_ampdu_depth++;
2107 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
2125 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
2126 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
2127 TX_STAT_INC(sc, txq->axq_qnum, queued);
2174 fi->txq = -1;
2213 struct ath_txq *txq,
2341 struct ath_txq *txq = txctl->txq;
2362 txq = sc->tx.uapsdq;
2369 ath_txq_lock(sc, txq);
2370 if (txq == sc->tx.txq_map[q]) {
2371 fi->txq = q;
2372 ++txq->pending_frames;
2375 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
2377 ath_txq_skb_done(sc, txq, skb);
2391 ath_tx_send_normal(sc, txq, tid, skb);
2394 ath_txq_unlock(sc, txq);
2404 .txq = sc->beacon.cabq
2423 bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
2454 ath_txq_lock(sc, txctl.txq);
2455 ath_tx_fill_desc(sc, bf, txctl.txq, 0);
2456 ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
2457 TX_STAT_INC(sc, txctl.txq->axq_qnum, queued);
2458 ath_txq_unlock(sc, txctl.txq);
2466 int tx_flags, struct ath_txq *txq,
2501 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
2512 ath_txq_skb_done(sc, txq, skb);
2514 __skb_queue_tail(&txq->complete_q, skb);
2518 struct ath_txq *txq, struct list_head *bf_q,
2546 ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
2547 ath_tx_complete(sc, skb, tx_flags, txq, sta);
2630 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2641 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2642 txq->axq_link);
2644 ath_txq_lock(sc, txq);
2649 if (list_empty(&txq->axq_q)) {
2650 txq->axq_link = NULL;
2651 ath_txq_schedule(sc, txq);
2654 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2667 if (list_is_last(&bf_held->list, &txq->axq_q))
2682 TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);
2685 * Remove ath_buf's of the same transmit unit from txq,
2693 &txq->axq_q, lastbf->list.prev);
2700 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
2702 ath_txq_unlock_complete(sc, txq);
2714 ath_tx_processq(sc, &sc->tx.txq[i]);
2724 struct ath_txq *txq;
2757 txq = &sc->tx.txq[ts.qid];
2759 ath_txq_lock(sc, txq);
2761 TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);
2763 fifo_list = &txq->txq_fifo[txq->txq_tailidx];
2765 ath_txq_unlock(sc, txq);
2781 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2783 if (!list_empty(&txq->axq_q)) {
2787 txq->axq_link = NULL;
2788 list_splice_tail_init(&txq->axq_q, &bf_q);
2789 ath_tx_txqaddbuf(sc, txq, &bf_q, true);
2798 ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
2799 ath_txq_unlock_complete(sc, txq);
2881 tid->txq = sc->tx.txq_map[acno];
2891 struct ath_txq *txq;
2898 txq = tid->txq;
2900 ath_txq_lock(sc, txq);
2905 ath_tid_drain(sc, txq, tid);
2908 ath_txq_unlock(sc, txq);
2946 bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
2955 ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);
2957 ath_tx_send_normal(sc, txctl->txq, NULL, skb);
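
The listing above repeatedly shows the same access pattern around a transmit queue: take the queue lock, do the work (schedule, drain, or process completions), then drop it, with ath_txq_unlock_complete() (lines 107-116) splicing txq->complete_q onto a private list under the lock and completing the frames only after the lock is released. The sketch below is a minimal user-space model of that pattern, assuming nothing beyond what the listed lines show; every name in it is an illustrative stand-in, not the ath9k API, and it uses a pthread mutex in place of the driver's spinlock.

/*
 * Illustrative model of the txq locking pattern visible in the listing
 * (ath_txq_lock / ath_txq_unlock / ath_txq_unlock_complete): completions
 * queued while the lock is held are detached and handled only after the
 * lock is dropped. All identifiers are stand-ins, not driver code.
 */
#include <pthread.h>
#include <stdio.h>

struct pkt {
	int id;
	struct pkt *next;
};

struct model_txq {
	pthread_mutex_t lock;     /* stands in for txq->axq_lock */
	struct pkt *complete_q;   /* stands in for txq->complete_q */
};

static void txq_lock(struct model_txq *q)   { pthread_mutex_lock(&q->lock); }
static void txq_unlock(struct model_txq *q) { pthread_mutex_unlock(&q->lock); }

/* Mirrors the shape of ath_txq_unlock_complete(): detach the completion
 * list under the lock, release the lock, then run the completion work,
 * so completion handling never executes with the queue lock held. */
static void txq_unlock_complete(struct model_txq *q)
{
	struct pkt *done = q->complete_q;

	q->complete_q = NULL;
	pthread_mutex_unlock(&q->lock);

	while (done) {
		struct pkt *next = done->next;
		printf("completed pkt %d\n", done->id);
		done = next;
	}
}

int main(void)
{
	static struct pkt p = { .id = 1, .next = NULL };
	struct model_txq q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.complete_q = NULL,
	};

	txq_lock(&q);
	q.complete_q = &p;	/* a frame finished while the lock was held */
	txq_unlock_complete(&q);

	txq_lock(&q);
	/* ... schedule pending traffic, cf. ath_txq_schedule() ... */
	txq_unlock(&q);
	return 0;
}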