/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <net/mac80211.h>

#include "rate.h"
#include "scb.h"
#include "phy/phy_hal.h"
#include "antsel.h"
#include "main.h"
#include "ampdu.h"
#include "debug.h"
#include "brcms_trace_events.h"

/* max number of mpdus in an ampdu */
#define AMPDU_MAX_MPDU			32
/* max number of mpdus in an ampdu to a legacy */
#define AMPDU_NUM_MPDU_LEGACY		16
/* max Tx ba window size (in pdu) */
#define AMPDU_TX_BA_MAX_WSIZE		64
/* default Tx ba window size (in pdu) */
#define AMPDU_TX_BA_DEF_WSIZE		64
/* default Rx ba window size (in pdu) */
#define AMPDU_RX_BA_DEF_WSIZE		64
/* max Rx ba window size (in pdu) */
#define AMPDU_RX_BA_MAX_WSIZE		64
/* max dur of tx ampdu (in msec) */
#define	AMPDU_MAX_DUR			5
/* default tx retry limit */
#define AMPDU_DEF_RETRY_LIMIT		5
/* default tx retry limit at reg rate */
#define AMPDU_DEF_RR_RETRY_LIMIT	2
/* default ffpld reserved bytes */
#define AMPDU_DEF_FFPLD_RSVD		2048
/* # of inis to be freed on detach */
#define AMPDU_INI_FREE			10
/* max # of mpdus released at a time */
#define	AMPDU_SCB_MAX_RELEASE		20

#define NUM_FFPLD_FIFO 4	/* number of fifos affected by pre-loading */
#define FFPLD_TX_MAX_UNFL   200	/* default value of the average number of ampdu
				 * without underflows
				 */
#define FFPLD_MPDU_SIZE 1800	/* estimate of maximum mpdu size */
#define FFPLD_MAX_MCS 23	/* we don't deal with mcs 32 */
#define FFPLD_PLD_INCR 1000	/* increments in bytes */
#define FFPLD_MAX_AMPDU_CNT 5000	/* maximum number of ampdu we
					 * accumulate between resets.
					 */

#define AMPDU_DELIMITER_LEN	4

/* max allowed number of mpdus in an ampdu (2 streams) */
#define AMPDU_NUM_MPDU		16

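/*
 * Map an 802.11 sequence number to an index in [0, AMPDU_TX_BA_MAX_WSIZE);
 * used below to index the per-tid txretry[] state.
 */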
#define TX_SEQ_TO_INDEX(seq) ((seq) % AMPDU_TX_BA_MAX_WSIZE)

/* max possible overhead per mpdu in the ampdu; 3 is for roundup if needed */
#define AMPDU_MAX_MPDU_OVERHEAD (FCS_LEN + DOT11_ICV_AES_LEN +\
	AMPDU_DELIMITER_LEN + 3\
	+ DOT11_A4_HDR_LEN + DOT11_QOS_LEN + DOT11_IV_MAX_LEN)

/* modulo add/sub, bound = 2^k */
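/*
 * e.g. MODSUB_POW2(2, 0xffe, 0x1000) == 4: with a power-of-two bound the
 * subtraction wraps correctly, which the block-ack handling below relies
 * on when computing bitmap offsets with bound = SEQNUM_MAX.
 */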
#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))

/* structure to hold tx fifo information and pre-loading state
 * counters specific to tx underflows of ampdus
 * some counters might be redundant with the ones in wlc or ampdu structures.
 * This makes it possible to maintain a specific state independently of
 * how often and/or when the wlc counters are updated.
 *
 * ampdu_pld_size: number of bytes to be pre-loaded
 * mcs2ampdu_table: per-mcs max # of mpdus in an ampdu
 * prev_txfunfl: num of underflows last read from the HW macstats counter
 * accum_txfunfl: num of underflows since we modified pld params
 * accum_txampdu: num of tx ampdu since we modified pld params
 * prev_txampdu: previous reading of tx ampdu
 * dmaxferrate: estimated dma avg xfer rate in kbits/sec
 */
struct brcms_fifo_info {
	u16 ampdu_pld_size;
	u8 mcs2ampdu_table[FFPLD_MAX_MCS + 1];
	u16 prev_txfunfl;
	u32 accum_txfunfl;
	u32 accum_txampdu;
	u32 prev_txampdu;
	u32 dmaxferrate;
};

/* AMPDU module specific state
 *
 * wlc: pointer to main wlc structure
 * scb_handle: scb cubby handle to retrieve data from scb
 * ini_enable: per-tid initiator enable/disable of ampdu
 * ba_tx_wsize: Tx ba window size (in pdu)
 * ba_rx_wsize: Rx ba window size (in pdu)
 * retry_limit: mpdu transmit retry limit
 * rr_retry_limit: mpdu transmit retry limit at regular rate
 * retry_limit_tid: per-tid mpdu transmit retry limit
 * rr_retry_limit_tid: per-tid mpdu transmit retry limit at regular rate
 * mpdu_density: min mpdu spacing (0-7) ==> 2^(x-1)/8 usec
 * max_pdu: max pdus allowed in ampdu
 * dur: max duration of an ampdu (in msec)
 * rx_factor: maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes
 * ffpld_rsvd: number of bytes to reserve for preload
 * max_txlen: max size of ampdu per mcs, bw and sgi
 * mfbr: enable multiple fallback rate
 * tx_max_funl: underflows should be kept such that
 *		(tx_max_funl*underflows) < tx frames
 * fifo_tb: table of fifo infos
 */
struct ampdu_info {
	struct brcms_c_info *wlc;
	int scb_handle;
	u8 ini_enable[AMPDU_MAX_SCB_TID];
	u8 ba_tx_wsize;
	u8 ba_rx_wsize;
	u8 retry_limit;
	u8 rr_retry_limit;
	u8 retry_limit_tid[AMPDU_MAX_SCB_TID];
	u8 rr_retry_limit_tid[AMPDU_MAX_SCB_TID];
	u8 mpdu_density;
	s8 max_pdu;
	u8 dur;
	u8 rx_factor;
	u32 ffpld_rsvd;
	u32 max_txlen[MCS_TABLE_SIZE][2][2];
	bool mfbr;
	u32 tx_max_funl;
	struct brcms_fifo_info fifo_tb[NUM_FFPLD_FIFO];
};

/* used for flushing ampdu packets */
struct cb_del_ampdu_pars {
	struct ieee80211_sta *sta;
	u16 tid;
};

static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
{
	u32 rate, mcs;

	for (mcs = 0; mcs < MCS_TABLE_SIZE; mcs++) {
		/* rate is in Kbps; dur is in msec ==> len = (rate * dur) / 8 */
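		/*
		 * e.g. with the default dur of 5 msec (AMPDU_MAX_DUR),
		 * MCS 7 at 40 MHz with SGI (150000 Kbps) gives
		 * 150000 * 5 / 8 = 93750 bytes.
		 */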
		/* 20MHz, No SGI */
		rate = mcs_2_rate(mcs, false, false);
		ampdu->max_txlen[mcs][0][0] = (rate * dur) >> 3;
		/* 40 MHz, No SGI */
		rate = mcs_2_rate(mcs, true, false);
		ampdu->max_txlen[mcs][1][0] = (rate * dur) >> 3;
		/* 20MHz, SGI */
		rate = mcs_2_rate(mcs, false, true);
		ampdu->max_txlen[mcs][0][1] = (rate * dur) >> 3;
		/* 40 MHz, SGI */
		rate = mcs_2_rate(mcs, true, true);
		ampdu->max_txlen[mcs][1][1] = (rate * dur) >> 3;
	}
}

static bool brcms_c_ampdu_cap(struct ampdu_info *ampdu)
{
	if (BRCMS_PHY_11N_CAP(ampdu->wlc->band))
		return true;
	else
		return false;
}

static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on)
{
	struct brcms_c_info *wlc = ampdu->wlc;
	struct bcma_device *core = wlc->hw->d11core;

	wlc->pub->_ampdu = false;

	if (on) {
		if (!(wlc->pub->_n_enab & SUPPORT_11N)) {
			brcms_err(core, "wl%d: driver not nmode enabled\n",
				  wlc->pub->unit);
			return -ENOTSUPP;
		}
		if (!brcms_c_ampdu_cap(ampdu)) {
			brcms_err(core, "wl%d: device not ampdu capable\n",
				  wlc->pub->unit);
			return -ENOTSUPP;
		}
		wlc->pub->_ampdu = on;
	}

	return 0;
}

static void brcms_c_ffpld_init(struct ampdu_info *ampdu)
{
	int i, j;
	struct brcms_fifo_info *fifo;

	for (j = 0; j < NUM_FFPLD_FIFO; j++) {
		fifo = (ampdu->fifo_tb + j);
		fifo->ampdu_pld_size = 0;
		for (i = 0; i <= FFPLD_MAX_MCS; i++)
			fifo->mcs2ampdu_table[i] = 255;
		fifo->dmaxferrate = 0;
		fifo->accum_txampdu = 0;
		fifo->prev_txfunfl = 0;
		fifo->accum_txfunfl = 0;
	}
}

struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
{
	struct ampdu_info *ampdu;
	int i;

	ampdu = kzalloc(sizeof(struct ampdu_info), GFP_ATOMIC);
	if (!ampdu)
		return NULL;

	ampdu->wlc = wlc;

	for (i = 0; i < AMPDU_MAX_SCB_TID; i++)
		ampdu->ini_enable[i] = true;
	/* Disable ampdu for VO by default */
	ampdu->ini_enable[PRIO_8021D_VO] = false;
	ampdu->ini_enable[PRIO_8021D_NC] = false;

	/* Disable ampdu for BK by default since not enough fifo space */
	ampdu->ini_enable[PRIO_8021D_NONE] = false;
	ampdu->ini_enable[PRIO_8021D_BK] = false;

	ampdu->ba_tx_wsize = AMPDU_TX_BA_DEF_WSIZE;
	ampdu->ba_rx_wsize = AMPDU_RX_BA_DEF_WSIZE;
	ampdu->mpdu_density = AMPDU_DEF_MPDU_DENSITY;
	ampdu->max_pdu = AUTO;
	ampdu->dur = AMPDU_MAX_DUR;

	ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD;
	/*
	 * bump max ampdu rcv size to 64k for all 11n
	 * devices except 4321A0 and 4321A1
	 */
	if (BRCMS_ISNPHY(wlc->band) && NREV_LT(wlc->band->phyrev, 2))
		ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_32K;
	else
		ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ampdu->retry_limit = AMPDU_DEF_RETRY_LIMIT;
	ampdu->rr_retry_limit = AMPDU_DEF_RR_RETRY_LIMIT;

	for (i = 0; i < AMPDU_MAX_SCB_TID; i++) {
		ampdu->retry_limit_tid[i] = ampdu->retry_limit;
		ampdu->rr_retry_limit_tid[i] = ampdu->rr_retry_limit;
	}

	brcms_c_scb_ampdu_update_max_txlen(ampdu, ampdu->dur);
	ampdu->mfbr = false;
	/* try to set ampdu to the default value */
	brcms_c_ampdu_set(ampdu, wlc->pub->_ampdu);

	ampdu->tx_max_funl = FFPLD_TX_MAX_UNFL;
	brcms_c_ffpld_init(ampdu);

	return ampdu;
}

void brcms_c_ampdu_detach(struct ampdu_info *ampdu)
{
	kfree(ampdu);
}

static void brcms_c_scb_ampdu_update_config(struct ampdu_info *ampdu,
					    struct scb *scb)
{
	struct scb_ampdu *scb_ampdu = &scb->scb_ampdu;
	int i;

	scb_ampdu->max_pdu = AMPDU_NUM_MPDU;

	/* go back to legacy size if some preloading is occurring */
	for (i = 0; i < NUM_FFPLD_FIFO; i++) {
		if (ampdu->fifo_tb[i].ampdu_pld_size > FFPLD_PLD_INCR)
			scb_ampdu->max_pdu = AMPDU_NUM_MPDU_LEGACY;
	}

	/* apply user override */
	if (ampdu->max_pdu != AUTO)
		scb_ampdu->max_pdu = (u8) ampdu->max_pdu;

	scb_ampdu->release = min_t(u8, scb_ampdu->max_pdu,
				   AMPDU_SCB_MAX_RELEASE);

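	/*
	 * Note: max_rx_ampdu_bytes is the peer's advertised rx A-MPDU byte
	 * limit; dividing by 1600 (a rough per-MPDU byte estimate) converts
	 * it into an approximate frame count for capping the release size.
	 */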
	if (scb_ampdu->max_rx_ampdu_bytes)
		scb_ampdu->release = min_t(u8, scb_ampdu->release,
			scb_ampdu->max_rx_ampdu_bytes / 1600);

	scb_ampdu->release = min(scb_ampdu->release,
				 ampdu->fifo_tb[TX_AC_BE_FIFO].
				 mcs2ampdu_table[FFPLD_MAX_MCS]);
}

static void brcms_c_scb_ampdu_update_config_all(struct ampdu_info *ampdu)
{
	brcms_c_scb_ampdu_update_config(ampdu, &ampdu->wlc->pri_scb);
}

static void brcms_c_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
{
	int i;
	u32 phy_rate, dma_rate, tmp;
	u8 max_mpdu;
	struct brcms_fifo_info *fifo = (ampdu->fifo_tb + f);

	/* recompute the dma rate */
	/* note : we divide/multiply by 100 to avoid integer overflows */
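	/*
	 * The estimate scales the max-MCS phy rate by the fraction of the
	 * ampdu that still has to be fetched over DMA (total bytes minus
	 * the pre-loaded bytes); see the matching calculation in
	 * brcms_c_ffpld_check_txfunfl().
	 */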
	max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
			 AMPDU_NUM_MPDU_LEGACY);
	phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
	dma_rate =
	    (((phy_rate / 100) *
	      (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
	     / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
	fifo->dmaxferrate = dma_rate;

	/* fill up the mcs2ampdu table; do not recalc the last mcs */
	dma_rate = dma_rate >> 7;
	for (i = 0; i < FFPLD_MAX_MCS; i++) {
		/* shifting to keep it within integer range */
		phy_rate = mcs_2_rate(i, true, false) >> 7;
		if (phy_rate > dma_rate) {
			tmp = ((fifo->ampdu_pld_size * phy_rate) /
			       ((phy_rate - dma_rate) * FFPLD_MPDU_SIZE)) + 1;
			tmp = min_t(u32, tmp, 255);
			fifo->mcs2ampdu_table[i] = (u8) tmp;
		}
	}
}

/* evaluate the dma transfer rate using the tx underflows as feedback.
 * If necessary, increase tx fifo preloading. If not enough,
 * decrease maximum ampdu size for each mcs till underflows stop
 * Return 1 if pre-loading not active, -1 if not an underflow event,
 * 0 if pre-loading module took care of the event.
 */
static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
{
	struct ampdu_info *ampdu = wlc->ampdu;
	u32 phy_rate = mcs_2_rate(FFPLD_MAX_MCS, true, false);
	u32 txunfl_ratio;
	u8 max_mpdu;
	u32 current_ampdu_cnt = 0;
	u16 max_pld_size;
	u32 new_txunfl;
	struct brcms_fifo_info *fifo = (ampdu->fifo_tb + fid);
	uint xmtfifo_sz;
	u16 cur_txunfl;

	/* return if we got here for a different reason than underflows */
	cur_txunfl = brcms_b_read_shm(wlc->hw,
				      M_UCODE_MACSTAT +
				      offsetof(struct macstat, txfunfl[fid]));
	new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
	if (new_txunfl == 0) {
		brcms_dbg_ht(wlc->hw->d11core,
			     "TX status FRAG set but no tx underflows\n");
		return -1;
	}
	fifo->prev_txfunfl = cur_txunfl;

	if (!ampdu->tx_max_funl)
		return 1;

	/* check if fifo is big enough */
	if (brcms_b_xmtfifo_sz_get(wlc->hw, fid, &xmtfifo_sz))
		return -1;

	if ((TXFIFO_SIZE_UNIT * (u32) xmtfifo_sz) <= ampdu->ffpld_rsvd)
		return 1;

	max_pld_size = TXFIFO_SIZE_UNIT * xmtfifo_sz - ampdu->ffpld_rsvd;
	fifo->accum_txfunfl += new_txunfl;

	/* we need to wait for at least 10 underflows */
	if (fifo->accum_txfunfl < 10)
		return 0;

	brcms_dbg_ht(wlc->hw->d11core, "ampdu_count %d  tx_underflows %d\n",
		     current_ampdu_cnt, fifo->accum_txfunfl);

	/*
	 * compute the current ratio of tx unfl per ampdu.
	 * When the current ampdu count becomes too
	 * big while the ratio remains small, we reset
	 * the current count in order to not
	 * introduce too big of a latency in detecting a
	 * large amount of tx underflows later.
	 */

	txunfl_ratio = current_ampdu_cnt / fifo->accum_txfunfl;

	if (txunfl_ratio > ampdu->tx_max_funl) {
		if (current_ampdu_cnt >= FFPLD_MAX_AMPDU_CNT)
			fifo->accum_txfunfl = 0;

		return 0;
	}
	max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
			 AMPDU_NUM_MPDU_LEGACY);

	/* In case max value max_pdu is already lower than
	 * the fifo depth, there is nothing more we can do.
	 */

	if (fifo->ampdu_pld_size >= max_mpdu * FFPLD_MPDU_SIZE) {
		fifo->accum_txfunfl = 0;
		return 0;
	}

	if (fifo->ampdu_pld_size < max_pld_size) {
		/* increment by FFPLD_PLD_INCR bytes */
		fifo->ampdu_pld_size += FFPLD_PLD_INCR;
		if (fifo->ampdu_pld_size > max_pld_size)
			fifo->ampdu_pld_size = max_pld_size;

		/* update scb release size */
		brcms_c_scb_ampdu_update_config_all(ampdu);

		/*
		 * compute a new dma xfer rate for max_mpdu @ max mcs.
		 * This is the minimum dma rate that can achieve no
		 * underflow condition for the current mpdu size.
		 *
		 * note : we divide/multiply by 100 to avoid integer overflows
		 */
		fifo->dmaxferrate =
		    (((phy_rate / 100) *
		      (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
		     / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;

		brcms_dbg_ht(wlc->hw->d11core,
			     "DMA estimated transfer rate %d; "
			     "pre-load size %d\n",
			     fifo->dmaxferrate, fifo->ampdu_pld_size);
	} else {
		/* decrease ampdu size */
		if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] > 1) {
			if (fifo->mcs2ampdu_table[FFPLD_MAX_MCS] == 255)
				fifo->mcs2ampdu_table[FFPLD_MAX_MCS] =
				    AMPDU_NUM_MPDU_LEGACY - 1;
			else
				fifo->mcs2ampdu_table[FFPLD_MAX_MCS] -= 1;

			/* recompute the table */
			brcms_c_ffpld_calc_mcs2ampdu_table(ampdu, fid);

			/* update scb release size */
			brcms_c_scb_ampdu_update_config_all(ampdu);
		}
	}
	fifo->accum_txfunfl = 0;
	return 0;
}

void
brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
	u8 ba_wsize,		/* negotiated ba window size (in pdu) */
	uint max_rx_ampdu_bytes) /* from ht_cap in beacon */
{
	struct scb_ampdu *scb_ampdu;
	struct scb_ampdu_tid_ini *ini;
	struct ampdu_info *ampdu = wlc->ampdu;
	struct scb *scb = &wlc->pri_scb;
	scb_ampdu = &scb->scb_ampdu;

	if (!ampdu->ini_enable[tid]) {
		brcms_err(wlc->hw->d11core, "%s: Rejecting tid %d\n",
			  __func__, tid);
		return;
	}

	ini = &scb_ampdu->ini[tid];
	ini->tid = tid;
	ini->scb = scb_ampdu->scb;
	ini->ba_wsize = ba_wsize;
	scb_ampdu->max_rx_ampdu_bytes = max_rx_ampdu_bytes;
}

void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
				 struct brcms_c_info *wlc)
{
	session->wlc = wlc;
	skb_queue_head_init(&session->skb_list);
	session->max_ampdu_len = 0;    /* determined from first MPDU */
	session->max_ampdu_frames = 0; /* determined from first MPDU */
	session->ampdu_len = 0;
	session->dma_len = 0;
}

/*
 * Preps the given packet for AMPDU based on the session data. If the
 * frame cannot be accommodated in the current session, -ENOSPC is
 * returned.
 */
int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
			    struct sk_buff *p)
{
	struct brcms_c_info *wlc = session->wlc;
	struct ampdu_info *ampdu = wlc->ampdu;
	struct scb *scb = &wlc->pri_scb;
	struct scb_ampdu *scb_ampdu = &scb->scb_ampdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
	struct ieee80211_tx_rate *txrate = tx_info->status.rates;
	struct d11txh *txh = (struct d11txh *)p->data;
	unsigned ampdu_frames;
	u8 ndelim, tid;
	u8 *plcp;
	uint len;
	u16 mcl;
	bool fbr_iscck;
	bool rr;

	ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
	plcp = (u8 *)(txh + 1);
	fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
	len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
			  BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
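	/*
	 * Each MPDU contributes its length rounded up to a 4-byte boundary
	 * plus (ndelim + 1) four-byte A-MPDU delimiters to the aggregate.
	 */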
	len = roundup(len, 4) + (ndelim + 1) * AMPDU_DELIMITER_LEN;

	ampdu_frames = skb_queue_len(&session->skb_list);
	if (ampdu_frames != 0) {
		struct sk_buff *first;

		if (ampdu_frames + 1 > session->max_ampdu_frames ||
		    session->ampdu_len + len > session->max_ampdu_len)
			return -ENOSPC;

		/*
		 * We aren't really out of space if the new frame is of
		 * a different priority, but we want the same behaviour
		 * so return -ENOSPC anyway.
		 *
		 * XXX: The old AMPDU code did this, but is it really
		 * necessary?
		 */
		first = skb_peek(&session->skb_list);
		if (p->priority != first->priority)
			return -ENOSPC;
	}

	/*
	 * Now that we're sure this frame can be accommodated, update the
	 * session information.
	 */
	session->ampdu_len += len;
	session->dma_len += p->len;

	tid = (u8)p->priority;

	/* Handle retry limits */
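	/*
	 * Attempts up to rr_retry_limit_tid are charged to the primary
	 * (regular) rate, later attempts to the fallback rate; rr also
	 * selects which PLCP header is used to size the ampdu below.
	 */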
	if (txrate[0].count <= ampdu->rr_retry_limit_tid[tid]) {
		txrate[0].count++;
		rr = true;
	} else {
		txrate[1].count++;
		rr = false;
	}

	if (ampdu_frames == 0) {
		u8 plcp0, plcp3, is40, sgi, mcs;
		uint fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
		struct brcms_fifo_info *f = &ampdu->fifo_tb[fifo];

		if (rr) {
			plcp0 = plcp[0];
			plcp3 = plcp[3];
		} else {
			plcp0 = txh->FragPLCPFallback[0];
			plcp3 = txh->FragPLCPFallback[3];
		}

		/* Limit AMPDU size based on MCS */
		is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
		sgi = plcp3_issgi(plcp3) ? 1 : 0;
		mcs = plcp0 & ~MIMO_PLCP_40MHZ;
		session->max_ampdu_len = min(scb_ampdu->max_rx_ampdu_bytes,
					     ampdu->max_txlen[mcs][is40][sgi]);

		session->max_ampdu_frames = scb_ampdu->max_pdu;
		if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
			session->max_ampdu_frames =
				min_t(u16, f->mcs2ampdu_table[mcs],
				      session->max_ampdu_frames);
		}
	}

	/*
	 * Treat all frames as "middle" frames of AMPDU here. First and
	 * last frames must be fixed up after all MPDUs have been prepped.
	 */
	mcl = le16_to_cpu(txh->MacTxControlLow);
	mcl &= ~TXC_AMPDU_MASK;
	mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT);
	mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS);
	txh->MacTxControlLow = cpu_to_le16(mcl);
	txh->PreloadSize = 0;	/* always default to 0 */

	skb_queue_tail(&session->skb_list, p);

	return 0;
}

void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session)
{
	struct brcms_c_info *wlc = session->wlc;
	struct ampdu_info *ampdu = wlc->ampdu;
	struct sk_buff *first, *last;
	struct d11txh *txh;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *txrate;
	u8 ndelim;
	u8 *plcp;
	uint len;
	uint fifo;
	struct brcms_fifo_info *f;
	u16 mcl;
	bool fbr;
	bool fbr_iscck;
	struct ieee80211_rts *rts;
	bool use_rts = false, use_cts = false;
	u16 dma_len = session->dma_len;
	u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
	u32 rspec = 0, rspec_fallback = 0;
	u32 rts_rspec = 0, rts_rspec_fallback = 0;
	u8 plcp0, is40, mcs;
	u16 mch;
	u8 preamble_type = BRCMS_GF_PREAMBLE;
	u8 fbr_preamble_type = BRCMS_GF_PREAMBLE;
	u8 rts_preamble_type = BRCMS_LONG_PREAMBLE;
	u8 rts_fbr_preamble_type = BRCMS_LONG_PREAMBLE;

	if (skb_queue_empty(&session->skb_list))
		return;

	first = skb_peek(&session->skb_list);
	last = skb_peek_tail(&session->skb_list);

	/* Need to fix up last MPDU first to adjust AMPDU length */
	txh = (struct d11txh *)last->data;
	fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
	f = &ampdu->fifo_tb[fifo];

	mcl = le16_to_cpu(txh->MacTxControlLow);
	mcl &= ~TXC_AMPDU_MASK;
	mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
	txh->MacTxControlLow = cpu_to_le16(mcl);

	/* remove the null delimiter after last mpdu */
	ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
	txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0;
	session->ampdu_len -= ndelim * AMPDU_DELIMITER_LEN;

	/* remove the pad len from last mpdu */
	fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0);
	len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
			  BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
	session->ampdu_len -= roundup(len, 4) - len;

	/* Now fix up the first MPDU */
	tx_info = IEEE80211_SKB_CB(first);
	txrate = tx_info->status.rates;
	txh = (struct d11txh *)first->data;
	plcp = (u8 *)(txh + 1);
	rts = (struct ieee80211_rts *)&txh->rts_frame;

	mcl = le16_to_cpu(txh->MacTxControlLow);
	/* If only one MPDU leave it marked as last */
	if (first != last) {
		mcl &= ~TXC_AMPDU_MASK;
		mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT);
	}
	mcl |= TXC_STARTMSDU;
	if (ieee80211_is_rts(rts->frame_control)) {
		mcl |= TXC_SENDRTS;
		use_rts = true;
	}
	if (ieee80211_is_cts(rts->frame_control)) {
		mcl |= TXC_SENDCTS;
		use_cts = true;
	}
	txh->MacTxControlLow = cpu_to_le16(mcl);

	fbr = txrate[1].count > 0;
	if (!fbr)
		plcp0 = plcp[0];
	else
		plcp0 = txh->FragPLCPFallback[0];

	is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
	mcs = plcp0 & ~MIMO_PLCP_40MHZ;

	if (is40) {
		if (CHSPEC_SB_UPPER(wlc_phy_chanspec_get(wlc->band->pi)))
			mimo_ctlchbw = PHY_TXC1_BW_20MHZ_UP;
		else
			mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
	}

	/* rebuild the rspec and rspec_fallback */
	rspec = RSPEC_MIMORATE;
	rspec |= plcp[0] & ~MIMO_PLCP_40MHZ;
	if (plcp[0] & MIMO_PLCP_40MHZ)
		rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);

	fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
	if (fbr_iscck) {
		rspec_fallback =
			cck_rspec(cck_phy2mac_rate(txh->FragPLCPFallback[0]));
	} else {
		rspec_fallback = RSPEC_MIMORATE;
		rspec_fallback |= txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ;
		if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ)
			rspec_fallback |= PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT;
	}

	if (use_rts || use_cts) {
		rts_rspec =
			brcms_c_rspec_to_rts_rspec(wlc, rspec,
						   false, mimo_ctlchbw);
		rts_rspec_fallback =
			brcms_c_rspec_to_rts_rspec(wlc, rspec_fallback,
						   false, mimo_ctlchbw);
	}

	BRCMS_SET_MIMO_PLCP_LEN(plcp, session->ampdu_len);
	/* mark plcp to indicate ampdu */
	BRCMS_SET_MIMO_PLCP_AMPDU(plcp);

	/* reset the mixed mode header durations */
	if (txh->MModeLen) {
		u16 mmodelen = brcms_c_calc_lsig_len(wlc, rspec,
						     session->ampdu_len);
		txh->MModeLen = cpu_to_le16(mmodelen);
		preamble_type = BRCMS_MM_PREAMBLE;
	}
	if (txh->MModeFbrLen) {
		u16 mmfbrlen = brcms_c_calc_lsig_len(wlc, rspec_fallback,
						     session->ampdu_len);
		txh->MModeFbrLen = cpu_to_le16(mmfbrlen);
		fbr_preamble_type = BRCMS_MM_PREAMBLE;
	}

	/* set the preload length */
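	/*
	 * If the phy rate at this MCS is at least the estimated DMA
	 * transfer rate, the DMA may not keep the fifo filled, so cap the
	 * preload at the fifo's configured pre-load size.
	 */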
	if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
		dma_len = min(dma_len, f->ampdu_pld_size);
		txh->PreloadSize = cpu_to_le16(dma_len);
	} else {
		txh->PreloadSize = 0;
	}

	mch = le16_to_cpu(txh->MacTxControlHigh);

	/* update RTS dur fields */
	if (use_rts || use_cts) {
		u16 durid;
		if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
		    TXC_PREAMBLE_RTS_MAIN_SHORT)
			rts_preamble_type = BRCMS_SHORT_PREAMBLE;

		if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
		     TXC_PREAMBLE_RTS_FB_SHORT)
			rts_fbr_preamble_type = BRCMS_SHORT_PREAMBLE;

		durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec,
						   rspec, rts_preamble_type,
						   preamble_type,
						   session->ampdu_len, true);
		rts->duration = cpu_to_le16(durid);
		durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
						   rts_rspec_fallback,
						   rspec_fallback,
						   rts_fbr_preamble_type,
						   fbr_preamble_type,
						   session->ampdu_len, true);
		txh->RTSDurFallback = cpu_to_le16(durid);
		/* set TxFesTimeNormal */
		txh->TxFesTimeNormal = rts->duration;
		/* set fallback rate version of TxFesTimeNormal */
		txh->TxFesTimeFallback = txh->RTSDurFallback;
	}

	/* set flag and plcp for fallback rate */
	if (fbr) {
		mch |= TXC_AMPDU_FBR;
		txh->MacTxControlHigh = cpu_to_le16(mch);
		BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
		BRCMS_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
	}

	brcms_dbg_ht(wlc->hw->d11core, "wl%d: count %d ampdu_len %d\n",
		     wlc->pub->unit, skb_queue_len(&session->skb_list),
		     session->ampdu_len);
}

static void
brcms_c_ampdu_rate_status(struct brcms_c_info *wlc,
			  struct ieee80211_tx_info *tx_info,
			  struct tx_status *txs, u8 mcs)
{
	struct ieee80211_tx_rate *txrate = tx_info->status.rates;
	int i;

	/* clear the rest of the rates */
	for (i = 2; i < IEEE80211_TX_MAX_RATES; i++) {
		txrate[i].idx = -1;
		txrate[i].count = 0;
	}
}

static void
brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
			      struct sk_buff *p, struct tx_status *txs,
			      u32 s1, u32 s2)
{
	struct scb_ampdu *scb_ampdu;
	struct brcms_c_info *wlc = ampdu->wlc;
	struct scb_ampdu_tid_ini *ini;
	u8 bitmap[8], queue, tid;
	struct d11txh *txh;
	u8 *plcp;
	struct ieee80211_hdr *h;
	u16 seq, start_seq = 0, bindex, index, mcl;
	u8 mcs = 0;
	bool ba_recd = false, ack_recd = false;
	u8 suc_mpdu = 0, tot_mpdu = 0;
	uint supr_status;
	bool retry = true;
	u16 mimoantsel = 0;
	u8 retry_limit;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);

#ifdef DEBUG
	u8 hole[AMPDU_MAX_MPDU];
	memset(hole, 0, sizeof(hole));
#endif

	scb_ampdu = &scb->scb_ampdu;
	tid = (u8) (p->priority);

	ini = &scb_ampdu->ini[tid];
	retry_limit = ampdu->retry_limit_tid[tid];
	memset(bitmap, 0, sizeof(bitmap));
	queue = txs->frameid & TXFID_QUEUE_MASK;
	supr_status = txs->status & TX_STATUS_SUPR_MASK;

	if (txs->status & TX_STATUS_ACK_RCV) {
		WARN_ON(!(txs->status & TX_STATUS_INTERMEDIATE));
		start_seq = txs->sequence >> SEQNUM_SHIFT;
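		/*
		 * Reassemble the 64-bit block-ack bitmap from the tx status:
		 * bits 0..3 come from the status word, bits 4..31 from s1
		 * and bits 32..63 from s2.
		 */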
		bitmap[0] = (txs->status & TX_STATUS_BA_BMAP03_MASK) >>
		    TX_STATUS_BA_BMAP03_SHIFT;

		WARN_ON(s1 & TX_STATUS_INTERMEDIATE);
		WARN_ON(!(s1 & TX_STATUS_AMPDU));

		bitmap[0] |=
		    (s1 & TX_STATUS_BA_BMAP47_MASK) <<
		    TX_STATUS_BA_BMAP47_SHIFT;
		bitmap[1] = (s1 >> 8) & 0xff;
		bitmap[2] = (s1 >> 16) & 0xff;
		bitmap[3] = (s1 >> 24) & 0xff;

		bitmap[4] = s2 & 0xff;
		bitmap[5] = (s2 >> 8) & 0xff;
		bitmap[6] = (s2 >> 16) & 0xff;
		bitmap[7] = (s2 >> 24) & 0xff;

		ba_recd = true;
	} else {
		if (supr_status) {
			if (supr_status == TX_STATUS_SUPR_BADCH) {
				brcms_dbg_ht(wlc->hw->d11core,
					  "%s: Pkt tx suppressed, illegal channel possibly %d\n",
					  __func__, CHSPEC_CHANNEL(
					  wlc->default_bss->chanspec));
			} else {
				if (supr_status != TX_STATUS_SUPR_FRAG)
					brcms_err(wlc->hw->d11core,
						  "%s: supr_status 0x%x\n",
						  __func__, supr_status);
			}
			/* no need to retry for badch; will fail again */
			if (supr_status == TX_STATUS_SUPR_BADCH ||
			    supr_status == TX_STATUS_SUPR_EXPTIME) {
				retry = false;
			} else if (supr_status == TX_STATUS_SUPR_EXPTIME) {
				/* TX underflow:
				 *   try tuning pre-loading or ampdu size
				 */
			} else if (supr_status == TX_STATUS_SUPR_FRAG) {
				/*
				 * if there were underflows, but pre-loading
				 * is not active, notify rate adaptation.
				 */
				brcms_c_ffpld_check_txfunfl(wlc, queue);
			}
		} else if (txs->phyerr) {
			brcms_dbg_ht(wlc->hw->d11core,
				     "%s: ampdu tx phy error (0x%x)\n",
				     __func__, txs->phyerr);
		}
	}

	/* loop through all pkts and retry if not acked */
	while (p) {
		tx_info = IEEE80211_SKB_CB(p);
		txh = (struct d11txh *) p->data;
		mcl = le16_to_cpu(txh->MacTxControlLow);
		plcp = (u8 *) (txh + 1);
		h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
		seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;

		trace_brcms_txdesc(&wlc->hw->d11core->dev, txh, sizeof(*txh));

		if (tot_mpdu == 0) {
			mcs = plcp[0] & MIMO_PLCP_MCS_MASK;
			mimoantsel = le16_to_cpu(txh->ABI_MimoAntSel);
		}

		index = TX_SEQ_TO_INDEX(seq);
		ack_recd = false;
		if (ba_recd) {
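			/*
			 * bindex is this MPDU's offset from the block-ack
			 * start sequence, modulo the sequence-number space,
			 * and is used to index the acked bitmap above.
			 */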
			bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
			brcms_dbg_ht(wlc->hw->d11core,
				     "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n",
				     tid, seq, start_seq, bindex,
				     isset(bitmap, bindex), index);
			/* if acked then clear bit and free packet */
			if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
			    && isset(bitmap, bindex)) {
				ini->txretry[index] = 0;

				/*
				 * ampdu_ack_len:
				 *   number of acked aggregated frames
				 */
				/* ampdu_len: number of aggregated frames */
				brcms_c_ampdu_rate_status(wlc, tx_info, txs,
							  mcs);
				tx_info->flags |= IEEE80211_TX_STAT_ACK;
				tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
				tx_info->status.ampdu_ack_len =
					tx_info->status.ampdu_len = 1;

				skb_pull(p, D11_PHY_HDR_LEN);
				skb_pull(p, D11_TXH_LEN);

				ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
							    p);
				ack_recd = true;
				suc_mpdu++;
			}
		}
		/* either retransmit or send bar if ack not recd */
		if (!ack_recd) {
			if (retry && (ini->txretry[index] < (int)retry_limit)) {
				int ret;
				ini->txretry[index]++;
				ret = brcms_c_txfifo(wlc, queue, p);
				/*
				 * We shouldn't be out of space in the DMA
				 * ring here since we're reinserting a frame
				 * that was just pulled out.
				 */
				WARN_ONCE(ret, "queue %d out of txds\n", queue);
			} else {
				/* Retry timeout */
				ieee80211_tx_info_clear_status(tx_info);
				tx_info->status.ampdu_ack_len = 0;
				tx_info->status.ampdu_len = 1;
				tx_info->flags |=
				    IEEE80211_TX_STAT_AMPDU_NO_BACK;
				skb_pull(p, D11_PHY_HDR_LEN);
				skb_pull(p, D11_TXH_LEN);
				brcms_dbg_ht(wlc->hw->d11core,
					     "BA Timeout, seq %d\n",
					     seq);
				ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
							    p);
			}
		}
		tot_mpdu++;

		/* break out if last packet of ampdu */
		if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
		    TXC_AMPDU_LAST)
			break;

		p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
	}

	/* update rate state */
	brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);
}

void
brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
		     struct sk_buff *p, struct tx_status *txs)
{
	struct brcms_c_info *wlc = ampdu->wlc;
	u32 s1 = 0, s2 = 0;

	/* BMAC_NOTE: For the split driver, second level txstatus comes later
	 * So if the ACK was received then wait for the second level else just
	 * call the first one
	 */
	if (txs->status & TX_STATUS_ACK_RCV) {
		u8 status_delay = 0;

		/* wait till the next 8 bytes of txstatus is available */
		s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
		while ((s1 & TXS_V) == 0) {
			udelay(1);
			status_delay++;
			if (status_delay > 10)
				return; /* error condition */
			s1 = bcma_read32(wlc->hw->d11core,
					 D11REGOFFS(frmtxstatus));
		}

		s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));
	}

	if (scb) {
		brcms_c_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
	} else {
		/* loop through all pkts and free */
		u8 queue = txs->frameid & TXFID_QUEUE_MASK;
		struct d11txh *txh;
		u16 mcl;
		while (p) {
			txh = (struct d11txh *) p->data;
			trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
					   sizeof(*txh));
			mcl = le16_to_cpu(txh->MacTxControlLow);
			brcmu_pkt_buf_free_skb(p);
			/* break out if last packet of ampdu */
			if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
			    TXC_AMPDU_LAST)
				break;
			p = dma_getnexttxp(wlc->hw->di[queue],
					   DMA_RANGE_TRANSMITTED);
		}
	}
}

void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc)
{
	char template[T_RAM_ACCESS_SZ * 2];

	/* driver needs to write the ta in the template; ta is at offset 16 */
	memset(template, 0, sizeof(template));
	memcpy(template, wlc->pub->cur_etheraddr, ETH_ALEN);
	brcms_b_write_template_ram(wlc->hw, (T_BA_TPL_BASE + 16),
				  (T_RAM_ACCESS_SZ * 2),
				  template);
}

bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid)
{
	return wlc->ampdu->ini_enable[tid];
}

void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu)
{
	struct brcms_c_info *wlc = ampdu->wlc;

	/*
	 * Extend ucode internal watchdog timer to
	 * match larger received frames
	 */
	if ((ampdu->rx_factor & IEEE80211_HT_AMPDU_PARM_FACTOR) ==
	    IEEE80211_HT_MAX_AMPDU_64K) {
		brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_MAX);
		brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_MAX);
	} else {
		brcms_b_write_shm(wlc->hw, M_MIMO_MAXSYM, MIMO_MAXSYM_DEF);
		brcms_b_write_shm(wlc->hw, M_WATCHDOG_8TU, WATCHDOG_8TU_DEF);
	}
}

/*
 * callback function that helps invalidate ampdu packets in a DMA queue
 */
static void dma_cb_fn_ampdu(void *txi, void *arg_a)
{
	struct ieee80211_sta *sta = arg_a;
	struct ieee80211_tx_info *tx_info = (struct ieee80211_tx_info *)txi;

	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    (tx_info->rate_driver_data[0] == sta || sta == NULL))
		tx_info->rate_driver_data[0] = NULL;
}

/*
 * When a remote party is no longer available for ampdu communication, any
 * pending tx ampdu packets in the driver have to be flushed.
 */
void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
		     struct ieee80211_sta *sta, u16 tid)
{
	brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu);
}