/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * Newer versions of the ADD_STA command only added new fields at the end of
 * the structure, so sending the size of the relevant API's structure is
 * enough to support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
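
/*
 * Usage sketch (mirroring the ADD_STA calls below in this file): callers
 * never hardcode the command size but let this helper pick it, e.g.
 *
 *	struct iwl_mvm_add_sta_cmd cmd = { .sta_id = sta_id };
 *
 *	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
 *				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
 *
 * so firmware with the older API simply receives the shorter v7 layout.
 */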

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. Reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	/* D6.0 10.12.2 A-MPDU length limit rules
	 * A STA indicates the maximum length of the A-MPDU pre-EOF padding
	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
	 * Exponent field in its HT Capabilities, VHT Capabilities,
	 * and HE 6 GHz Band Capabilities elements (if present) and the
	 * Maximum A-MPDU Length Exponent Extension field in its HE
	 * Capabilities element.
	 */
	if (sta->he_cap.has_he)
		agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3],
					IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

	/* Limit to max A-MPDU supported by FW */
	if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
		agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
			    STA_FLG_MAX_AGG_SIZE_SHIFT);

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
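		/*
		 * Sketch of the encoding (inferred from the shift below, not
		 * spelled out in this file): the low nibble of uapsd_acs
		 * carries the trigger-enabled ACs and the high nibble the
		 * delivery-enabled ACs, which are the same set for us.
		 * max_sp counts service-period frames in units of two, and
		 * 0 ("deliver all frames") is encoded here as 128.
		 */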
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

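/*
 * RX BA session watchdog: the timer is armed for twice the negotiated
 * session timeout; on each expiry we compare against the actual last-RX
 * time and either re-arm for the remainder or let mac80211 tear the
 * session down.
 */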
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}

static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       u16 *queueptr, u8 tid, u8 flags)
{
	int queue = *queueptr;
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		iwl_trans_txq_free(mvm->trans, queue);
		*queueptr = IWL_MVM_INVALID_QUEUE;

		return 0;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	/* Even if this was a reserved TXQ for a STA - mark it as unreserved */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA
 * agreement, and it doesn't disable the queue either.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}

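/*
 * Free a queue that the inactivity checker found reusable: unmap it from
 * its current station, tear down any aggregations that were running on
 * it, disable it in the SCD and, if it is being handed to a different
 * station, tell the firmware to remove it from the old station's set.
 */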
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	u16 queue_tmp = queue;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case; otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than the current one - the FIFO needs to be
	 * redirected to the lowest one of the streams in the queue. Check if
	 * this is needed here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK has
	 * value 3 and VO has value 0; to check if AC X is lower than AC Y,
	 * we need to check if the numerical value of X is LARGER than that
	 * of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	txq->stopped = true;

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there of
	 * shared queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	txq->stopped = false;

	return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
		 "max queue %d >= num_of_queues (%d)", maxq,
		 mvm->trans->trans_cfg->base_params->num_of_queues))
		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
				mvm->trans->cfg->min_256_ba_txq_size);

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	}

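	/*
	 * The transport may not be able to allocate the full requested
	 * queue size; retry, halving the size each time, down to 16
	 * entries before giving up.
	 */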
	do {
		__le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);

		queue = iwl_trans_txq_alloc(mvm->trans, enable,
					    sta_id, tid, SCD_QUEUE_CFG,
					    size, timeout);

		if (queue < 0)
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
					    size, sta_id, tid, queue);
		size /= 2;
	} while (queue < 0 && size >= 16);

	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	return queue;
}

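/*
 * On the new (TVQM) TX path there is no queue sharing: each station/TID
 * pair simply gets a queue of its own from the transport.
 */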
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}

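/*
 * Enable a TXQ in the pre-TVQM SCD. Returns true if the SSN had to be
 * incremented while enabling the queue, in which case the caller must
 * bump its sequence number accordingly to stay in sync.
 */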
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive.
 * If only some of the queue TIDs are inactive - unmap them from the queue.
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hasn't been served recently and
		 * is in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	rcu_read_unlock();

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}

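/*
 * Allocate a TXQ for a station/TID on the pre-TVQM DQA path. The order
 * tried here: a free MGMT queue (for the MGMT TID), the station's
 * reserved queue, a free DATA queue, evicting an inactive queue, and
 * finally sharing an existing DATA queue.
 */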
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	u16 queue_tmp;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This allows us to avoid re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there of
	 * shared queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	queue_tmp = queue;
	iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);

	return ret;
}

int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
			     struct ieee80211_txq *txq)
{
	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
	int ret = -EINVAL;

	lockdep_assert_held(&mvm->mutex);

	if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
	    !txq->sta) {
		return 0;
	}

	if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		ret = 0;
	}

	local_bh_disable();
	spin_lock(&mvm->add_stream_lock);
	if (!list_empty(&mvmtxq->list))
		list_del_init(&mvmtxq->list);
	spin_unlock(&mvm->add_stream_lock);
	local_bh_enable();

	return ret;
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		/*
		 * We can't really do much here; if this fails we can't
		 * transmit anyway, so just don't transmit the frame and let
		 * the frames back up. We've tried our best to allocate a
		 * queue in the function itself.
		 */
		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
			list_del_init(&mvmtxq->list);
			continue;
		}

		list_del_init(&mvmtxq->list);
		local_bh_disable();
		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
							 i, wdg);
			/*
			 * On failure, just set it to IWL_MVM_INVALID_QUEUE
			 * so we try again later; we have no other good way
			 * of failing here.
			 */
			if (txq_id < 0)
				txq_id = IWL_MVM_INVALID_QUEUE;
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and
			 * HW sets it now, a FW reset causes the seq num to
			 * start at 0 again, so the driver needs to update it
			 * internally as well to keep it in sync with the
			 * real value.
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

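/*
 * Add an "internal" station (e.g. auxiliary or broadcast) that has no
 * mac80211 counterpart. TX starts out disabled on all TIDs
 * (tid_disable_tx = 0xffff); callers enable what they need afterwards.
 */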
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;

	if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA,
				  0) >= 12 &&
	    sta->type == IWL_STA_AUX_ACTIVITY)
		cmd.mac_id_n_color = cpu_to_le32(mac_id);
	else
		cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
								     color));

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->trans_cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

1688	/* HW restart, don't assume the memory has been zeroed */
1689	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
1690	mvm_sta->tfd_queue_msk = 0;
1691
1692	/* for HW restart - reset everything but the sequence number */
1693	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1694		u16 seq = mvm_sta->tid_data[i].seq_number;
1695		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1696		mvm_sta->tid_data[i].seq_number = seq;
1697
1698		/*
1699		 * Mark all queues for this STA as unallocated and defer TX
1700		 * frames until the queue is allocated
1701		 */
1702		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1703	}
1704
1705	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1706		struct iwl_mvm_txq *mvmtxq =
1707			iwl_mvm_txq_from_mac80211(sta->txq[i]);
1708
1709		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1710		INIT_LIST_HEAD(&mvmtxq->list);
1711		atomic_set(&mvmtxq->tx_request, 0);
1712	}
1713
1714	mvm_sta->agg_tids = 0;
1715
1716	if (iwl_mvm_has_new_rx_api(mvm) &&
1717	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1718		int q;
1719
1720		dup_data = kcalloc(mvm->trans->num_rx_queues,
1721				   sizeof(*dup_data), GFP_KERNEL);
1722		if (!dup_data)
1723			return -ENOMEM;
1724		/*
1725		 * Initialize all the last_seq values to 0xffff which can never
1726		 * compare equal to the frame's seq_ctrl in the check in
1727		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1728		 * number and fragmented packets don't reach that function.
1729		 *
1730		 * This thus allows receiving a packet with seqno 0 and the
1731		 * retry bit set as the very first packet on a new TID.
1732		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * If rs is registered with mac80211, then "add station" will be
	 * handled via the corresponding ops; otherwise we need to notify
	 * rate scaling here.
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);
	else
		spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;
	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver
 * (sanity check only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
				    0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		list_del_init(&mvmtxq->list);
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

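	/*
	 * Tear-down order below: set the drain flag so the firmware stops
	 * serving the station, flush its pending frames, wait for the TX
	 * queues to empty, then clear the drain flag again before disabling
	 * the queues and removing the station itself.
	 */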
	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status))
			return -EINVAL;

		*status = IWL_MVM_QUEUE_FREE;
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
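	/*
	 * The empty critical section below acts as a barrier: taking and
	 * releasing the lock guarantees that any concurrent holder of
	 * mvm_sta->lock has finished before the station is removed.
	 */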
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}

static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));

	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
}

static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;

	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

	return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
				       wdg_timeout);
}

static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
					  int maccolor, u8 *addr,
					  struct iwl_mvm_int_sta *sta,
					  u16 *queue, int fifo)
{
	int ret;

	/* Map queue to fifo - needs to happen before adding station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);

	ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
	if (ret) {
		if (!iwl_mvm_has_new_tx_api(mvm))
			iwl_mvm_disable_txq(mvm, NULL, queue,
					    IWL_MAX_TID_COUNT, 0);
		return ret;
	}

	/*
	 * For 22000 firmware and on, we cannot add a queue to a station the
	 * firmware doesn't know about, so enable the queue here, after the
	 * station was added.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int txq;

		txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
		if (txq < 0) {
			iwl_mvm_rm_sta_common(mvm, sta->sta_id);
			return txq;
		}

		*queue = txq;
	}

	return 0;
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/*
	 * On CDB NICs we need to specify which lmac to use for the aux
	 * activity; reuse the mac_id argument to pass the lmac_id to the
	 * function.
	 */
	ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
					     &mvm->aux_sta, &mvm->aux_queue,
					     IWL_MVM_TX_FIFO_MCAST);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
					      NULL, &mvm->snif_sta,
					      &mvm->snif_queue,
					      IWL_MVM_TX_FIFO_BE);
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added; the
 *	broadcast station itself is taken from the vif's private data
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC) {
			queue = mvm->probe_queue;
		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			queue = mvm->p2p_dev_queue;
		} else {
			WARN(1, "Missing required TXQ for adding bcast STA\n");
			return -EINVAL;
		}

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on, we cannot add a queue to a station the
	 * firmware doesn't know about, so enable the queue here, after the
	 * station was added.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		if (queue < 0) {
			iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
			return queue;
		}

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}

static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u16 *queueptr, queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queueptr = &mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queueptr = &mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	queue = *queueptr;
	iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added; the
 *	broadcast station itself is taken from the vif's private data */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = vif->type == NL80211_IFTYPE_AP ?
			IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in the mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;

	/*
	 * While in previous FWs we had to exclude the cab queue from the
	 * TFD queue mask, now it is needed like any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
				   timeout);
		msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		goto err;

	/*
	 * Enable the cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware, which won't accept an
	 * SCD_QUEUE_CFG command with an unknown station id, and for FW
	 * that doesn't support the station API, since there the cab queue
	 * is not included in the tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
						    0,
						    timeout);
		if (queue < 0) {
			ret = queue;
			goto err;
		}
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
				   timeout);

	return 0;
err:
	iwl_mvm_dealloc_int_sta(mvm, msta);
	return ret;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_MVM_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);

	iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_rss_sync_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};

	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);
		/*
		 * Prevent timer re-arm. This prevents a very far-fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u16 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}

int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
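		/*
		 * (Worked example with hypothetical sizes: for a 64-byte
		 * cache line and 16-byte entries, buf_size == 10 gives
		 * 160 bytes, rounded up by ALIGN() to 192, i.e. 12 entries
		 * per queue, so no two queues share a cache line.)
		 */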

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
		/*
		 * Protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will time out (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	u16 txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *	it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
					      IWL_MVM_DQA_MIN_DATA_QUEUE,
					      IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto out;
		}

		txq_id = ret;

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
		ret = -ENXIO;
		IWL_ERR(mvm, "txq_id %d out of range (0, %d)!\n",
			txq_id, IWL_MAX_HW_QUEUES - 1);
		goto out;

	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto out;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bits, so we'll
	 * need to align the wrap-around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->trans_cfg->gen2)
		normalized_ssn &= 0xff;
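	/*
	 * (E.g. an ssn of 0x142 is normalized to 0x42 here, so it can match
	 * the 8-bit next_reclaimed value even after a wrap-around.)
	 */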

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
		ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
	}

out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if the aggregation size is different from the
		 * default size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it is offloaded to firmware.
		 * Note that if the SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	queue_status = mvm->queue_info[queue].status;

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * become smaller than the current one.
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
}

static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
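	/*
	 * (Illustration with hypothetical state: if offsets 1 and 4 are
	 * unused and fw_key_deleted[1] == 2 while fw_key_deleted[4] == 7,
	 * offset 4 is picked, since it was freed longest ago.)
	 */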
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		/* This is a valid situation for IGTK */
		if (sta_id == IWL_MVM_INVALID_STA)
			return 0;

		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
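		/*
		 * (E.g. a receive PN of 00:00:00:00:01:02, with pn[0] the
		 * most significant byte, yields receive_seq_cnt 0x0102.)
		 */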
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}
3485
3486static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3487				 struct ieee80211_vif *vif,
3488				 struct ieee80211_sta *sta,
3489				 struct ieee80211_key_conf *keyconf,
3490				 u8 key_offset,
3491				 bool mcast)
3492{
3493	int ret;
3494	const u8 *addr;
3495	struct ieee80211_key_seq seq;
3496	u16 p1k[5];
3497	u32 sta_id;
3498	bool mfp = false;
3499
3500	if (sta) {
3501		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3502
3503		sta_id = mvm_sta->sta_id;
3504		mfp = sta->mfp;
3505	} else if (vif->type == NL80211_IFTYPE_AP &&
3506		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3507		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3508
3509		sta_id = mvmvif->mcast_sta.sta_id;
3510	} else {
3511		IWL_ERR(mvm, "Failed to find station id\n");
3512		return -EINVAL;
3513	}
3514
3515	switch (keyconf->cipher) {
3516	case WLAN_CIPHER_SUITE_TKIP:
3517		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3518		/* get phase 1 key from mac80211 */
3519		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3520		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3521		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3522					   seq.tkip.iv32, p1k, 0, key_offset,
3523					   mfp);
3524		break;
3525	case WLAN_CIPHER_SUITE_CCMP:
3526	case WLAN_CIPHER_SUITE_WEP40:
3527	case WLAN_CIPHER_SUITE_WEP104:
3528	case WLAN_CIPHER_SUITE_GCMP:
3529	case WLAN_CIPHER_SUITE_GCMP_256:
3530		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3531					   0, NULL, 0, key_offset, mfp);
3532		break;
3533	default:
3534		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3535					   0, NULL, 0, key_offset, mfp);
3536	}
3537
3538	return ret;
3539}
3540
3541int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3542			struct ieee80211_vif *vif,
3543			struct ieee80211_sta *sta,
3544			struct ieee80211_key_conf *keyconf,
3545			u8 key_offset)
3546{
3547	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3548	struct iwl_mvm_sta *mvm_sta;
3549	u8 sta_id = IWL_MVM_INVALID_STA;
3550	int ret;
3551	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3552
3553	lockdep_assert_held(&mvm->mutex);
3554
3555	if (vif->type != NL80211_IFTYPE_AP ||
3556	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3557		/* Get the station id from the mvm local station table */
3558		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3559		if (!mvm_sta) {
3560			IWL_ERR(mvm, "Failed to find station\n");
3561			return -EINVAL;
3562		}
3563		sta_id = mvm_sta->sta_id;
3564
3565		/*
3566		 * It is possible that the 'sta' parameter is NULL, and thus
3567		 * there is a need to retrieve the sta from the local station
3568		 * table.
3569		 */
3570		if (!sta) {
3571			sta = rcu_dereference_protected(
3572				mvm->fw_id_to_mac_id[sta_id],
3573				lockdep_is_held(&mvm->mutex));
3574			if (IS_ERR_OR_NULL(sta)) {
3575				IWL_ERR(mvm, "Invalid station id\n");
3576				return -EINVAL;
3577			}
3578		}
3579
3580		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3581			return -EINVAL;
3582	} else {
3583		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3584
3585		sta_id = mvmvif->mcast_sta.sta_id;
3586	}
3587
	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use.  In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0).  In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

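/*
 * Remove a previously installed key: free its offset in the driver's
 * firmware key table bitmap and tell the firmware to forget the key.
 */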
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

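/*
 * Push a new TKIP phase-1 key to the firmware after IV32 changed.
 * This may run in atomic context, so the key is sent with CMD_ASYNC
 * under the RCU read lock instead of the mvm mutex.
 */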
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}

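/*
 * Tell the firmware that a station woke up from power save: clear
 * STA_FLG_PS (mask set, flag bit zero) so transmission can resume.
 */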
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

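/*
 * Release buffered frames to a sleeping station for a U-APSD service
 * period or a PS-Poll response. The firmware counts the released
 * frames, so the count sent must match what the queues actually hold.
 */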
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(sleep_tx_count == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW has updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

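/*
 * Firmware notification that the last released frame was transmitted:
 * report the end of the service period to mac80211.
 */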
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

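/*
 * Set or clear STA_FLG_DISABLE_TX for a station so the firmware stops
 * (or resumes) transmitting to it.
 */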
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

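/*
 * Same as above for a client of our AP, additionally keeping the driver
 * state and mac80211's buffering (ieee80211_sta_block_awake()) in sync.
 */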
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

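/*
 * Variant for internal (broadcast/multicast) stations, which have no
 * ieee80211_sta and take the MAC id/color from the interface instead.
 */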
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

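/*
 * Block or unblock TX to all stations of the given interface, e.g.
 * around a channel-switch quiet period. On firmware with the STA_TYPE
 * API this must also cover the internal multicast/broadcast stations.
 */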
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

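/*
 * During a channel switch, stop transmitting to the AP station
 * (ap_sta_id) until the switch has completed.
 */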
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

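/*
 * Return the number of frames still queued for a TID: the distance
 * between the driver's last assigned sequence number and the firmware's
 * next_reclaimed pointer.
 */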
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->trans_cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}

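/*
 * Add an internal station for PASN (pre-association security
 * negotiation, used for secure ranging): allocate the station and a
 * queue, then install the pairwise key from a temporary key config.
 */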
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
			 u8 *key, u32 key_len)
{
	int ret;
	u16 queue;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_key_conf *keyconf;

	ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_LINK);
	if (ret)
		return ret;

	ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
					     addr, sta, &queue,
					     IWL_MVM_TX_FIFO_BE);
	if (ret)
		goto out;

	keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
	if (!keyconf) {
		ret = -ENOBUFS;
		goto out;
	}

	keyconf->cipher = cipher;
	memcpy(keyconf->key, key, key_len);
	keyconf->keylen = key_len;

	ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
				   0, NULL, 0, 0, true);
	kfree(keyconf);
	if (ret)
		goto out;

	return 0;
out:
	iwl_mvm_dealloc_int_sta(mvm, sta);
	return ret;
}
