Lines matching refs: wcid (the function names indicate drivers/net/wireless/mediatek/mt76/tx.c)
65 struct mt76_wcid *wcid;
67 wcid = rcu_dereference(dev->wcid[cb->wcid]);
68 if (wcid) {
69 status.sta = wcid_to_sta(wcid);
70 if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
71 rs.rate_idx = wcid->rate;
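
Lines 65-71 sit in the TX-status completion path (mt76_tx_status_unlock()): each completed skb carries its station's wcid index in the tx control block, the wcid is resolved under RCU, and any cached per-station rate is reported back to mac80211. A hedged sketch of the surrounding loop, reconstructed from recent mt76/tx.c (not verbatim; details vary across kernel versions):

    while ((skb = __skb_dequeue(list)) != NULL) {
            struct ieee80211_tx_status status = {
                    .skb = skb,
                    .info = IEEE80211_SKB_CB(skb),
            };
            struct ieee80211_rate_status rs = {};
            struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
            struct mt76_wcid *wcid;

            /* cb->wcid was recorded at mt76_tx_status_skb_add() time */
            wcid = rcu_dereference(dev->wcid[cb->wcid]);
            if (wcid) {
                    status.sta = wcid_to_sta(wcid);
                    /* report the last rate cached for this station, if any */
                    if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
                            rs.rate_idx = wcid->rate;
                            status.rates = &rs;
                            status.n_rates = 1;
                    }
            }
            /* ... hand the result to mac80211 via ieee80211_tx_status_ext() ... */
    }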
121 mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
131 if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
149 pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
156 cb->wcid = wcid->idx;
159 if (list_empty(&wcid->list))
160 list_add_tail(&wcid->list, &dev->wcid_list);
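
Lines 121-160 are in mt76_tx_status_skb_add(), which registers an skb for TX-status tracking. A hedged sketch of its core (not verbatim): a packet ID is allocated in the per-station IDR so the later status event can find the skb, the wcid index is stored in the skb's control block, and the station is added to the device-wide list that the status walker scans:

    spin_lock_bh(&dev->status_lock);

    /* reserve a packet ID the hardware will echo back in its TX status */
    pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
                    MT_PACKET_ID_MASK, GFP_ATOMIC);
    if (pid < 0) {
            pid = MT_PACKET_ID_NO_SKB;
            goto out;
    }

    cb->wcid = wcid->idx;
    cb->pktid = pid;

    /* make sure the status walker visits this station */
    if (list_empty(&wcid->list))
            list_add_tail(&wcid->list, &dev->wcid_list);

    out:
    spin_unlock_bh(&dev->status_lock);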
170 mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
178 skb = idr_remove(&wcid->pktid, pktid);
182 /* look for stale entries in the wcid idr queue */
183 idr_for_each_entry(&wcid->pktid, skb, id) {
198 idr_remove(&wcid->pktid, cb->pktid);
204 if (idr_is_empty(&wcid->pktid))
205 list_del_init(&wcid->list);
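
Lines 170-205 are in mt76_tx_status_skb_get(): the fast path removes the skb matching the reported packet ID from the per-station IDR (line 178); on a miss, the loop at line 183 reaps stale entries whose status event never arrived, and once the IDR is empty the station is dropped from dev->wcid_list (line 205) so the walker stops visiting it.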
214 struct mt76_wcid *wcid, *tmp;
218 list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
219 mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
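
Lines 214-219 are the status walker (mt76_tx_status_check()), run periodically and on teardown. A hedged sketch (not verbatim): the flush argument selects between reclaiming every pending entry (pktid -1) and only timing out stale ones (pktid 0):

    void mt76_tx_status_check(struct mt76_dev *dev, bool flush)
    {
            struct mt76_wcid *wcid, *tmp;
            struct sk_buff_head list;

            mt76_tx_status_lock(dev, &list);
            list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
                    mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
            mt76_tx_status_unlock(dev, &list);
    }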
225 mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
231 if (!wcid || info->tx_time_est)
234 pending = atomic_dec_return(&wcid->non_aql_packets);
236 atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
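
Lines 225-236 are mt76_tx_check_non_aql(): frames without a tx_time estimate are invisible to mac80211's airtime queue limit (AQL), so mt76 counts them per station. On completion the counter is decremented, and the cmpxchg at line 236 clamps it back to zero if a race drove it negative.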
247 struct mt76_wcid *wcid = NULL;
253 if (wcid_idx < ARRAY_SIZE(dev->wcid))
254 wcid = rcu_dereference(dev->wcid[wcid_idx]);
256 mt76_tx_check_non_aql(dev, wcid, skb);
276 status.sta = wcid_to_sta(wcid);
277 if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
278 rs.rate_idx = wcid->rate;
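
Lines 247-278 are in __mt76_tx_complete_skb(). The wcid index comes from the completed queue entry (ultimately from hardware), so it is bounds-checked against ARRAY_SIZE(dev->wcid) before the RCU dereference at line 254; the non-AQL counter is then released via mt76_tx_check_non_aql(), and lines 276-278 repeat the rate-reporting pattern from the status path above.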
300 struct mt76_wcid *wcid, struct ieee80211_sta *sta,
311 idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
315 wcid = (struct mt76_wcid *)sta->drv_priv;
316 q->entry[idx].wcid = wcid->idx;
321 pending = atomic_inc_return(&wcid->non_aql_packets);
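
Lines 300-321 are in __mt76_tx_queue_skb(). A hedged sketch of its tail (not verbatim): once the driver backend accepts the frame, the wcid index is saved in the queue entry for completion-time lookup, and AQL-exempt frames bump the per-station counter whose cap ends a scheduling burst:

    idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
    if (idx < 0 || !sta)
            return idx;

    /* remember which station this queue slot belongs to */
    wcid = (struct mt76_wcid *)sta->drv_priv;
    q->entry[idx].wcid = wcid->idx;

    if (!non_aql)
            return idx;

    /* tell the caller to stop bursting once the non-AQL cap is reached */
    pending = atomic_inc_return(&wcid->non_aql_packets);
    if (stop && pending >= MT_MAX_NON_AQL_PKT)
            *stop = true;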
330 struct mt76_wcid *wcid, struct sk_buff *skb)
355 if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
363 __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
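
Lines 330-363 are in mt76_tx(), the direct transmit path. The check at line 355 suggests that when MT_WCID_TX_INFO_SET is set the driver already carries rate information in the per-station WCID entry, so mac80211's ieee80211_get_tx_rates() is only consulted when it is not.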
390 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
399 __mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
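
Lines 390-399 are in the power-save delivery helper used by mt76_release_buffered_frames(): the wcid is taken from sta->drv_priv, and the released frames go out on the dedicated MT_TXQ_PSD queue rather than the normal per-AC queues.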
456 struct mt76_txq *mtxq, struct mt76_wcid *wcid)
467 if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
470 if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
478 if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
483 idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
500 if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
505 idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
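
Lines 456-505 are mt76_txq_send_burst(), the scheduler's per-station burst. A hedged sketch of the entry checks (not verbatim): nothing is burst to a station in power save, and the burst is refused outright once too many AQL-exempt frames are in flight; lines 483 and 505 then queue the first and subsequent frames through __mt76_tx_queue_skb(), whose stop flag ends the loop:

    if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
            return 0;

    /* back off if this station already has too many non-AQL frames queued */
    if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
            return 0;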
527 struct mt76_wcid *wcid;
546 wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
547 if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
562 n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);
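
Lines 527-562 are in mt76_txq_schedule_list(): each mac80211 txq handed out by the scheduler is resolved back to its station through mtxq->wcid under RCU (line 546), stations that have disappeared or entered power save are skipped (line 547), and the rest get a burst via mt76_txq_send_burst() (line 562).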