// SPDX-License-Identifier: GPL-2.0-or-later
/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
	 */
	frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

	/*
	 * The payload should be aligned to a 4-byte boundary, which
	 * means we need at least 3 bytes of headroom for moving the
	 * frame to the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for ICV data in the tailroom.
	 */
	if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	/*
	 * Reserve the headroom and set the frame length so the
	 * requested bytes remain available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
		dma_addr_t skb_dma;

		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		skbdesc->skb_dma = skb_dma;
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

int rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
		return -ENOMEM;

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
	rt2x00lib_dmadone(entry);
	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

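/*
 * Align the start of the frame to a 4-byte boundary by moving it up
 * into the skb headroom.
 */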
void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

/*
 * H/W needs L2 padding between the header and the payload if the header
 * size is not 4-byte aligned.
 */
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

	if (!l2pad)
		return;

	skb_push(skb, l2pad);
	memmove(skb->data, skb->data + l2pad, hdr_len);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, hdr_len);
	skb_pull(skb, l2pad);
}

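/*
 * Decide whether the sequence number for this frame is assigned by the
 * hardware or generated in software, and fill it in for the latter case.
 */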
static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
		/*
		 * rt2800 has a H/W (or F/W) bug: the device incorrectly
		 * increases the seqno on retransmitted data (non-QoS) and
		 * management frames. To work around the problem let's
		 * generate the seqno in software, except for beacons which
		 * are transmitted periodically by H/W and hence must have
		 * their seqno assigned by hardware.
		 */
		if (ieee80211_is_beacon(hdr->frame_control)) {
			__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
			/* H/W will generate sequence number */
			return;
		}

		__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
	}

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
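	/*
	 * The sequence number occupies the upper 12 bits of seq_ctrl, so
	 * stepping to the next frame means adding 0x10; further fragments
	 * of the same frame reuse the current value.
	 */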
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}

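/*
 * Fill the legacy (PLCP) part of the TX descriptor: IFS, SIGNAL,
 * SERVICE and LENGTH fields for CCK/OFDM rates.
 */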
static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When a short preamble is used we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}

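/*
 * Fill the HT part of the TX descriptor: WCID, MCS, STBC, AMPDU,
 * channel width, guard interval and TXOP.
 */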
static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						struct ieee80211_sta *sta,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;
	u8 density = 0;

	if (sta) {
		sta_priv = sta_to_rt2x00_sta(sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
		density = sta->ht_cap.ampdu_density;
	}

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
	 * MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one TX stream (>MCS7).
		 */
		if (sta && txdesc->u.ht.mcs > 7 &&
		    sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;

		/* Leave all other settings at zero. */
		return;
	}

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
		txdesc->u.ht.mpdu_density = density;
		txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
	}

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc,
					     struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						   sta, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		rt2x00_err(rt2x00dev,
			   "Corrupt queue %d, accessing entry which is not ours\n"
			   "Please file bug report to %s\n",
			   entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
	    rt2x00queue_map_txskb(entry))
		return -ENOMEM;

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}

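/*
 * If the outgoing frame is a BlockAckReq, remember it so the RX path
 * can match the corresponding BlockAck against it later.
 */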
static void rt2x00queue_bar_check(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *) (entry->skb->data +
				    rt2x00dev->extra_tx_headroom);
	struct rt2x00_bar_list_entry *bar_entry;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return;

	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

	/*
	 * If the alloc fails we still send the BAR out, but we don't track
	 * it in our bar list, and as a result we will report it back to
	 * mac80211 as failed.
	 */
	if (!bar_entry)
		return;

	bar_entry->entry = entry;
	bar_entry->block_acked = 0;

	/*
	 * Copy the relevant parts of the 802.11 BAR into our check list
	 * such that we can use RCU for less-overhead in the RX path since
	 * sending BARs and processing the according BlockAck should be
	 * the exception.
	 */
	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
	bar_entry->control = bar->control;
	bar_entry->start_seq_num = bar->start_seq_num;

	/*
	 * Insert BAR into our BAR check list.
	 */
	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       struct ieee80211_sta *sta, bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
		rt2x00queue_align_frame(skb);

	/*
	 * This function must be called with bottom halves disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
			   queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		rt2x00_err(queue->rt2x00dev,
			   "Arrived at non-free entry in the non-full queue %d\n"
			   "Please file bug report to %s\n",
			   queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	entry->skb = skb;

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	/*
	 * Put BlockAckReqs into our check list for driver BA processing.
	 */
	rt2x00queue_bar_check(entry);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	/*
	 * Pausing the queue has to be serialized with rt2x00lib_txdone(), so
	 * we do this under queue->tx_lock. Bottom halves were already
	 * disabled before the ieee80211_xmit() call.
	 */
	if (rt2x00queue_threshold(queue))
		rt2x00queue_pause_queue(queue);

	spin_unlock(&queue->tx_lock);
	return ret;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Entry requested from invalid index range (%d - %d)\n",
			   start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
			   index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

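	/*
	 * Q_INDEX advances when a frame is handed to the device (the queue
	 * grows), Q_INDEX_DONE advances when a frame has completed (the
	 * queue shrinks); count is the lifetime total of completed frames.
	 */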
	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	rt2x00queue_pause_queue_nocheck(queue);
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue_nocheck(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	if (rt2x00queue_empty(queue))
		return;

	/*
	 * If we are not supposed to drop any pending
	 * frames, this means we must force a start (=kick)
	 * to the queue to make sure the hardware will
	 * start transmitting.
	 */
	if (!drop && tx_queue)
		queue->rt2x00dev->ops->lib->kick_queue(queue);

	/*
	 * Check if driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
			    queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting everything down
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
10578c2ecf20Sopenharmony_ci
10588c2ecf20Sopenharmony_civoid rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
10598c2ecf20Sopenharmony_ci{
10608c2ecf20Sopenharmony_ci	struct data_queue *queue;
10618c2ecf20Sopenharmony_ci
10628c2ecf20Sopenharmony_ci	tx_queue_for_each(rt2x00dev, queue)
10638c2ecf20Sopenharmony_ci		rt2x00queue_flush_queue(queue, drop);
10648c2ecf20Sopenharmony_ci
10658c2ecf20Sopenharmony_ci	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
10668c2ecf20Sopenharmony_ci}
10678c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
10688c2ecf20Sopenharmony_ci
10698c2ecf20Sopenharmony_cistatic void rt2x00queue_reset(struct data_queue *queue)
10708c2ecf20Sopenharmony_ci{
10718c2ecf20Sopenharmony_ci	unsigned long irqflags;
10728c2ecf20Sopenharmony_ci	unsigned int i;
10738c2ecf20Sopenharmony_ci
10748c2ecf20Sopenharmony_ci	spin_lock_irqsave(&queue->index_lock, irqflags);
10758c2ecf20Sopenharmony_ci
10768c2ecf20Sopenharmony_ci	queue->count = 0;
10778c2ecf20Sopenharmony_ci	queue->length = 0;
10788c2ecf20Sopenharmony_ci
10798c2ecf20Sopenharmony_ci	for (i = 0; i < Q_INDEX_MAX; i++)
10808c2ecf20Sopenharmony_ci		queue->index[i] = 0;
10818c2ecf20Sopenharmony_ci
10828c2ecf20Sopenharmony_ci	spin_unlock_irqrestore(&queue->index_lock, irqflags);
10838c2ecf20Sopenharmony_ci}
10848c2ecf20Sopenharmony_ci
10858c2ecf20Sopenharmony_civoid rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
10868c2ecf20Sopenharmony_ci{
10878c2ecf20Sopenharmony_ci	struct data_queue *queue;
10888c2ecf20Sopenharmony_ci	unsigned int i;
10898c2ecf20Sopenharmony_ci
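	/*
	 * Reset the bookkeeping of every queue and let the driver
	 * re-initialise each individual entry through its clear_entry()
	 * callback.
	 */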
	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + queue->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))
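	/*
	 * The allocation above packs the fixed-size entry array first and
	 * the per-entry driver private data directly behind it:
	 *
	 *   entries: [entry 0] ... [entry limit-1][priv 0] ... [priv limit-1]
	 *
	 * QUEUE_ENTRY_PRIV_OFFSET() therefore skips the whole entry array
	 * (__limit * __esize) and then indexes into the private area
	 * (__index * __psize).
	 */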

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), queue->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

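	/*
	 * Pre-allocate one receive buffer per queue entry. A failure here
	 * leaves the earlier entries populated; rt2x00queue_initialize()
	 * deals with that by unwinding through rt2x00queue_uninitialize(),
	 * which releases them again via rt2x00queue_free_skbs().
	 */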
	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

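	/*
	 * Allocate the entry arrays for the RX, TX, beacon and (when the
	 * hardware requires it) ATIM queues, and pre-load the RX queue with
	 * receive buffers. Any failure is unwound below through
	 * rt2x00queue_uninitialize().
	 */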
	status = rt2x00queue_alloc_entries(rt2x00dev->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
	if (status)
		goto exit;

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

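	/*
	 * Default parameters: cw_min and cw_max are stored as exponents
	 * (2^5 and 2^10), matching the description in rt2x00queue_allocate()
	 * below.
	 */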
	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;

	rt2x00dev->ops->queue_init(queue);

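	/*
	 * Use roughly 10% of the queue size as the threshold below which
	 * the queue is considered (almost) full by the rest of rt2x00lib.
	 */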
	queue->threshold = DIV_ROUND_UP(queue->limit, 10);
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
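	/*
	 * The resulting layout of the single allocation above is:
	 *   queue[0]                  RX
	 *   queue[1 .. tx_queues]     TX
	 *   queue[1 + tx_queues]      Beacon
	 *   queue[2 + tx_queues]      ATIM (only when required)
	 */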

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
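	/*
	 * rt2x00queue_allocate() created all queues in one contiguous array
	 * with rt2x00dev->rx as its base, so freeing that pointer releases
	 * the RX, TX, beacon and (if present) ATIM queues in one go.
	 */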
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}