1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2012-2015, 2018-2023 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
6 */
7#include <net/mac80211.h>
8
9#include "mvm.h"
10#include "sta.h"
11#include "rs.h"
12
13/*
14 * New version of ADD_STA_sta command added new fields at the end of the
15 * structure, so sending the size of the relevant API's structure is enough to
16 * support both API versions.
17 */
18static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
19{
20	if (iwl_mvm_has_new_rx_api(mvm) ||
21	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
22		return sizeof(struct iwl_mvm_add_sta_cmd);
23	else
24		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
25}
26
27int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype)
28{
29	int sta_id;
30	u32 reserved_ids = 0;
31
32	BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
33	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
34
35	lockdep_assert_held(&mvm->mutex);
36
37	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
38	if (iftype != NL80211_IFTYPE_STATION)
39		reserved_ids = BIT(0);
40
41	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
42	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
43		if (BIT(sta_id) & reserved_ids)
44			continue;
45
46		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
47					       lockdep_is_held(&mvm->mutex)))
48			return sta_id;
49	}
50	return IWL_MVM_INVALID_STA;
51}
52
53/* Calculate the ampdu density and max size */
54u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
55			       struct ieee80211_bss_conf *link_conf,
56			       u32 *_agg_size)
57{
58	u32 agg_size = 0, mpdu_dens = 0;
59
60	if (WARN_ON(!link_sta))
61		return 0;
62
63	/* Note that we always use only legacy & highest supported PPDUs, so
64	 * of Draft P802.11be D.30 Table 10-12a--Fields used for calculating
65	 * the maximum A-MPDU size of various PPDU types in different bands,
66	 * we only need to worry about the highest supported PPDU type here.
67	 */
68
69	if (link_sta->ht_cap.ht_supported) {
70		agg_size = link_sta->ht_cap.ampdu_factor;
71		mpdu_dens = link_sta->ht_cap.ampdu_density;
72	}
73
74	if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
75		/* overwrite HT values on 6 GHz */
76		mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
77					  IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
78		agg_size = le16_get_bits(link_sta->he_6ghz_capa.capa,
79					 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
80	} else if (link_sta->vht_cap.vht_supported) {
81		/* if VHT supported overwrite HT value */
82		agg_size = u32_get_bits(link_sta->vht_cap.cap,
83					IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
84	}
85
86	/* D6.0 10.12.2 A-MPDU length limit rules
87	 * A STA indicates the maximum length of the A-MPDU preEOF padding
88	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
89	 * Exponent field in its HT Capabilities, VHT Capabilities,
90	 * and HE 6 GHz Band Capabilities elements (if present) and the
91	 * Maximum AMPDU Length Exponent Extension field in its HE
92	 * Capabilities element
93	 */
94	if (link_sta->he_cap.has_he)
95		agg_size +=
96			u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
97				    IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
98
99	if (link_sta->eht_cap.has_eht)
100		agg_size += u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
101					IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);
102
103	/* Limit to max A-MPDU supported by FW */
104	agg_size = min_t(u32, agg_size,
105			 STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);
106
107	*_agg_size = agg_size;
108	return mpdu_dens;
109}
110
111u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta)
112{
113	u8 uapsd_acs = 0;
114
115	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
116		uapsd_acs |= BIT(AC_BK);
117	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
118		uapsd_acs |= BIT(AC_BE);
119	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
120		uapsd_acs |= BIT(AC_VI);
121	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
122		uapsd_acs |= BIT(AC_VO);
123
124	return uapsd_acs | uapsd_acs << 4;
125}
126
127/* send station add/update command to firmware */
128int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
129			   bool update, unsigned int flags)
130{
131	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
132	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
133		.sta_id = mvm_sta->deflink.sta_id,
134		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
135		.add_modify = update ? 1 : 0,
136		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
137						 STA_FLG_MIMO_EN_MSK |
138						 STA_FLG_RTS_MIMO_PROT),
139		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
140	};
141	int ret;
142	u32 status;
143	u32 agg_size = 0, mpdu_dens = 0;
144
145	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
146		add_sta_cmd.station_type = mvm_sta->sta_type;
147
148	if (!update || (flags & STA_MODIFY_QUEUES)) {
149		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
150
151		if (!iwl_mvm_has_new_tx_api(mvm)) {
152			add_sta_cmd.tfd_queue_msk =
153				cpu_to_le32(mvm_sta->tfd_queue_msk);
154
155			if (flags & STA_MODIFY_QUEUES)
156				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
157		} else {
158			WARN_ON(flags & STA_MODIFY_QUEUES);
159		}
160	}
161
162	switch (sta->deflink.bandwidth) {
163	case IEEE80211_STA_RX_BW_320:
164	case IEEE80211_STA_RX_BW_160:
165		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
166		fallthrough;
167	case IEEE80211_STA_RX_BW_80:
168		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
169		fallthrough;
170	case IEEE80211_STA_RX_BW_40:
171		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
172		fallthrough;
173	case IEEE80211_STA_RX_BW_20:
174		if (sta->deflink.ht_cap.ht_supported)
175			add_sta_cmd.station_flags |=
176				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
177		break;
178	}
179
180	switch (sta->deflink.rx_nss) {
181	case 1:
182		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
183		break;
184	case 2:
185		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
186		break;
187	case 3 ... 8:
188		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
189		break;
190	}
191
192	switch (sta->deflink.smps_mode) {
193	case IEEE80211_SMPS_AUTOMATIC:
194	case IEEE80211_SMPS_NUM_MODES:
195		WARN_ON(1);
196		break;
197	case IEEE80211_SMPS_STATIC:
198		/* override NSS */
199		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
200		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
201		break;
202	case IEEE80211_SMPS_DYNAMIC:
203		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
204		break;
205	case IEEE80211_SMPS_OFF:
206		/* nothing */
207		break;
208	}
209
210	if (sta->deflink.ht_cap.ht_supported ||
211	    mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ)
212		add_sta_cmd.station_flags_msk |=
213			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
214				    STA_FLG_AGG_MPDU_DENS_MSK);
215
216	mpdu_dens = iwl_mvm_get_sta_ampdu_dens(&sta->deflink,
217					       &mvm_sta->vif->bss_conf,
218					       &agg_size);
219	add_sta_cmd.station_flags |=
220		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
221	add_sta_cmd.station_flags |=
222		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
223
224	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
225		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
226
227	if (sta->wme) {
228		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
229		add_sta_cmd.uapsd_acs = iwl_mvm_get_sta_uapsd_acs(sta);
230		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
231	}
232
233	status = ADD_STA_SUCCESS;
234	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
235					  iwl_mvm_add_sta_cmd_size(mvm),
236					  &add_sta_cmd, &status);
237	if (ret)
238		return ret;
239
240	switch (status & IWL_ADD_STA_STATUS_MASK) {
241	case ADD_STA_SUCCESS:
242		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
243		break;
244	default:
245		ret = -EIO;
246		IWL_ERR(mvm, "ADD_STA failed\n");
247		break;
248	}
249
250	return ret;
251}
252
253static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
254{
255	struct iwl_mvm_baid_data *data =
256		from_timer(data, t, session_timer);
257	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
258	struct iwl_mvm_baid_data *ba_data;
259	struct ieee80211_sta *sta;
260	struct iwl_mvm_sta *mvm_sta;
261	unsigned long timeout;
262	unsigned int sta_id;
263
264	rcu_read_lock();
265
266	ba_data = rcu_dereference(*rcu_ptr);
267
268	if (WARN_ON(!ba_data))
269		goto unlock;
270
271	if (!ba_data->timeout)
272		goto unlock;
273
274	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
275	if (time_is_after_jiffies(timeout)) {
276		mod_timer(&ba_data->session_timer, timeout);
277		goto unlock;
278	}
279
280	/* Timer expired */
281	sta_id = ffs(ba_data->sta_mask) - 1; /* don't care which one */
282	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[sta_id]);
283
284	/*
285	 * sta should be valid unless the following happens:
286	 * The firmware asserts which triggers a reconfig flow, but
287	 * the reconfig fails before we set the pointer to sta into
288	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
289	 * A-MDPU and hence the timer continues to run. Then, the
290	 * timer expires and sta is NULL.
291	 */
292	if (IS_ERR_OR_NULL(sta))
293		goto unlock;
294
295	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
296	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
297				      sta->addr, ba_data->tid);
298unlock:
299	rcu_read_unlock();
300}
301
302/* Disable aggregations for a bitmap of TIDs for a given station */
303static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
304					unsigned long disable_agg_tids,
305					bool remove_queue)
306{
307	struct iwl_mvm_add_sta_cmd cmd = {};
308	struct ieee80211_sta *sta;
309	struct iwl_mvm_sta *mvmsta;
310	u32 status;
311	u8 sta_id;
312
313	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
314		return -EINVAL;
315
316	sta_id = mvm->queue_info[queue].ra_sta_id;
317
318	rcu_read_lock();
319
320	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
321
322	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
323		rcu_read_unlock();
324		return -EINVAL;
325	}
326
327	mvmsta = iwl_mvm_sta_from_mac80211(sta);
328
329	mvmsta->tid_disable_agg |= disable_agg_tids;
330
331	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
332	cmd.sta_id = mvmsta->deflink.sta_id;
333	cmd.add_modify = STA_MODE_MODIFY;
334	cmd.modify_mask = STA_MODIFY_QUEUES;
335	if (disable_agg_tids)
336		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
337	if (remove_queue)
338		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
339	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
340	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
341
342	rcu_read_unlock();
343
344	/* Notify FW of queue removal from the STA queues */
345	status = ADD_STA_SUCCESS;
346	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
347					   iwl_mvm_add_sta_cmd_size(mvm),
348					   &cmd, &status);
349}
350
351static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
352			       int sta_id, u16 *queueptr, u8 tid)
353{
354	int queue = *queueptr;
355	struct iwl_scd_txq_cfg_cmd cmd = {
356		.scd_queue = queue,
357		.action = SCD_CFG_DISABLE_QUEUE,
358	};
359	int ret;
360
361	lockdep_assert_held(&mvm->mutex);
362
363	if (iwl_mvm_has_new_tx_api(mvm)) {
364		if (mvm->sta_remove_requires_queue_remove) {
365			u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
366					     SCD_QUEUE_CONFIG_CMD);
367			struct iwl_scd_queue_cfg_cmd remove_cmd = {
368				.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
369				.u.remove.sta_mask = cpu_to_le32(BIT(sta_id)),
370			};
371
372			if (tid == IWL_MAX_TID_COUNT)
373				tid = IWL_MGMT_TID;
374
375			remove_cmd.u.remove.tid = cpu_to_le32(tid);
376
377			ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
378						   sizeof(remove_cmd),
379						   &remove_cmd);
380		} else {
381			ret = 0;
382		}
383
384		iwl_trans_txq_free(mvm->trans, queue);
385		*queueptr = IWL_MVM_INVALID_QUEUE;
386
387		return ret;
388	}
389
390	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
391		return 0;
392
393	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
394
395	cmd.action = mvm->queue_info[queue].tid_bitmap ?
396		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
397	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
398		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
399
400	IWL_DEBUG_TX_QUEUES(mvm,
401			    "Disabling TXQ #%d tids=0x%x\n",
402			    queue,
403			    mvm->queue_info[queue].tid_bitmap);
404
405	/* If the queue is still enabled - nothing left to do in this func */
406	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
407		return 0;
408
409	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
410	cmd.tid = mvm->queue_info[queue].txq_tid;
411
412	/* Make sure queue info is correct even though we overwrite it */
413	WARN(mvm->queue_info[queue].tid_bitmap,
414	     "TXQ #%d info out-of-sync - tids=0x%x\n",
415	     queue, mvm->queue_info[queue].tid_bitmap);
416
417	/* If we are here - the queue is freed and we can zero out these vals */
418	mvm->queue_info[queue].tid_bitmap = 0;
419
420	if (sta) {
421		struct iwl_mvm_txq *mvmtxq =
422			iwl_mvm_txq_from_tid(sta, tid);
423
424		spin_lock_bh(&mvm->add_stream_lock);
425		list_del_init(&mvmtxq->list);
426		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
427		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
428		spin_unlock_bh(&mvm->add_stream_lock);
429	}
430
431	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
432	mvm->queue_info[queue].reserved = false;
433
434	iwl_trans_txq_disable(mvm->trans, queue, false);
435	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
436				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
437
438	if (ret)
439		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
440			queue, ret);
441	return ret;
442}
443
444static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
445{
446	struct ieee80211_sta *sta;
447	struct iwl_mvm_sta *mvmsta;
448	unsigned long tid_bitmap;
449	unsigned long agg_tids = 0;
450	u8 sta_id;
451	int tid;
452
453	lockdep_assert_held(&mvm->mutex);
454
455	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
456		return -EINVAL;
457
458	sta_id = mvm->queue_info[queue].ra_sta_id;
459	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
460
461	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
462					lockdep_is_held(&mvm->mutex));
463
464	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
465		return -EINVAL;
466
467	mvmsta = iwl_mvm_sta_from_mac80211(sta);
468
469	spin_lock_bh(&mvmsta->lock);
470	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
471		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
472			agg_tids |= BIT(tid);
473	}
474	spin_unlock_bh(&mvmsta->lock);
475
476	return agg_tids;
477}
478
479/*
480 * Remove a queue from a station's resources.
481 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
482 * doesn't disable the queue
483 */
484static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
485{
486	struct ieee80211_sta *sta;
487	struct iwl_mvm_sta *mvmsta;
488	unsigned long tid_bitmap;
489	unsigned long disable_agg_tids = 0;
490	u8 sta_id;
491	int tid;
492
493	lockdep_assert_held(&mvm->mutex);
494
495	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
496		return -EINVAL;
497
498	sta_id = mvm->queue_info[queue].ra_sta_id;
499	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
500
501	rcu_read_lock();
502
503	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
504
505	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
506		rcu_read_unlock();
507		return 0;
508	}
509
510	mvmsta = iwl_mvm_sta_from_mac80211(sta);
511
512	spin_lock_bh(&mvmsta->lock);
513	/* Unmap MAC queues and TIDs from this queue */
514	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
515		struct iwl_mvm_txq *mvmtxq =
516			iwl_mvm_txq_from_tid(sta, tid);
517
518		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
519			disable_agg_tids |= BIT(tid);
520		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
521
522		spin_lock_bh(&mvm->add_stream_lock);
523		list_del_init(&mvmtxq->list);
524		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
525		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
526		spin_unlock_bh(&mvm->add_stream_lock);
527	}
528
529	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
530	spin_unlock_bh(&mvmsta->lock);
531
532	rcu_read_unlock();
533
534	/*
535	 * The TX path may have been using this TXQ_ID from the tid_data,
536	 * so make sure it's no longer running so that we can safely reuse
537	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
538	 * above, but nothing guarantees we've stopped using them. Thus,
539	 * without this, we could get to iwl_mvm_disable_txq() and remove
540	 * the queue while still sending frames to it.
541	 */
542	synchronize_net();
543
544	return disable_agg_tids;
545}
546
547static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
548				       struct ieee80211_sta *old_sta,
549				       u8 new_sta_id)
550{
551	struct iwl_mvm_sta *mvmsta;
552	u8 sta_id, tid;
553	unsigned long disable_agg_tids = 0;
554	bool same_sta;
555	u16 queue_tmp = queue;
556	int ret;
557
558	lockdep_assert_held(&mvm->mutex);
559
560	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
561		return -EINVAL;
562
563	sta_id = mvm->queue_info[queue].ra_sta_id;
564	tid = mvm->queue_info[queue].txq_tid;
565
566	same_sta = sta_id == new_sta_id;
567
568	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
569	if (WARN_ON(!mvmsta))
570		return -EINVAL;
571
572	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
573	/* Disable the queue */
574	if (disable_agg_tids)
575		iwl_mvm_invalidate_sta_queue(mvm, queue,
576					     disable_agg_tids, false);
577
578	ret = iwl_mvm_disable_txq(mvm, old_sta, sta_id, &queue_tmp, tid);
579	if (ret) {
580		IWL_ERR(mvm,
581			"Failed to free inactive queue %d (ret=%d)\n",
582			queue, ret);
583
584		return ret;
585	}
586
587	/* If TXQ is allocated to another STA, update removal in FW */
588	if (!same_sta)
589		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
590
591	return 0;
592}
593
594static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
595				    unsigned long tfd_queue_mask, u8 ac)
596{
597	int queue = 0;
598	u8 ac_to_queue[IEEE80211_NUM_ACS];
599	int i;
600
601	/*
602	 * This protects us against grabbing a queue that's being reconfigured
603	 * by the inactivity checker.
604	 */
605	lockdep_assert_held(&mvm->mutex);
606
607	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
608		return -EINVAL;
609
610	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
611
612	/* See what ACs the existing queues for this STA have */
613	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
614		/* Only DATA queues can be shared */
615		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
616		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
617			continue;
618
619		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
620	}
621
622	/*
623	 * The queue to share is chosen only from DATA queues as follows (in
624	 * descending priority):
625	 * 1. An AC_BE queue
626	 * 2. Same AC queue
627	 * 3. Highest AC queue that is lower than new AC
628	 * 4. Any existing AC (there always is at least 1 DATA queue)
629	 */
630
631	/* Priority 1: An AC_BE queue */
632	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
633		queue = ac_to_queue[IEEE80211_AC_BE];
634	/* Priority 2: Same AC queue */
635	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
636		queue = ac_to_queue[ac];
637	/* Priority 3a: If new AC is VO and VI exists - use VI */
638	else if (ac == IEEE80211_AC_VO &&
639		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
640		queue = ac_to_queue[IEEE80211_AC_VI];
641	/* Priority 3b: No BE so only AC less than the new one is BK */
642	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
643		queue = ac_to_queue[IEEE80211_AC_BK];
644	/* Priority 4a: No BE nor BK - use VI if exists */
645	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
646		queue = ac_to_queue[IEEE80211_AC_VI];
647	/* Priority 4b: No BE, BK nor VI - use VO if exists */
648	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
649		queue = ac_to_queue[IEEE80211_AC_VO];
650
651	/* Make sure queue found (or not) is legal */
652	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
653	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
654	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
655		IWL_ERR(mvm, "No DATA queues available to share\n");
656		return -ENOSPC;
657	}
658
659	return queue;
660}
661
662/* Re-configure the SCD for a queue that has already been configured */
663static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
664				int sta_id, int tid, int frame_limit, u16 ssn)
665{
666	struct iwl_scd_txq_cfg_cmd cmd = {
667		.scd_queue = queue,
668		.action = SCD_CFG_ENABLE_QUEUE,
669		.window = frame_limit,
670		.sta_id = sta_id,
671		.ssn = cpu_to_le16(ssn),
672		.tx_fifo = fifo,
673		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
674			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
675		.tid = tid,
676	};
677	int ret;
678
679	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
680		return -EINVAL;
681
682	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
683		 "Trying to reconfig unallocated queue %d\n", queue))
684		return -ENXIO;
685
686	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
687
688	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
689	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
690		  queue, fifo, ret);
691
692	return ret;
693}
694
695/*
696 * If a given queue has a higher AC than the TID stream that is being compared
697 * to, the queue needs to be redirected to the lower AC. This function does that
698 * in such a case, otherwise - if no redirection required - it does nothing,
699 * unless the %force param is true.
700 */
701static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
702				  int ac, int ssn, unsigned int wdg_timeout,
703				  bool force, struct iwl_mvm_txq *txq)
704{
705	struct iwl_scd_txq_cfg_cmd cmd = {
706		.scd_queue = queue,
707		.action = SCD_CFG_DISABLE_QUEUE,
708	};
709	bool shared_queue;
710	int ret;
711
712	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
713		return -EINVAL;
714
715	/*
716	 * If the AC is lower than current one - FIFO needs to be redirected to
717	 * the lowest one of the streams in the queue. Check if this is needed
718	 * here.
719	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
720	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
721	 * we need to check if the numerical value of X is LARGER than of Y.
722	 */
723	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
724		IWL_DEBUG_TX_QUEUES(mvm,
725				    "No redirection needed on TXQ #%d\n",
726				    queue);
727		return 0;
728	}
729
730	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
731	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
732	cmd.tid = mvm->queue_info[queue].txq_tid;
733	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
734
735	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
736			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
737
738	/* Stop the queue and wait for it to empty */
739	set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
740
741	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
742	if (ret) {
743		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
744			queue);
745		ret = -EIO;
746		goto out;
747	}
748
749	/* Before redirecting the queue we need to de-activate it */
750	iwl_trans_txq_disable(mvm->trans, queue, false);
751	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
752	if (ret)
753		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
754			ret);
755
756	/* Make sure the SCD wrptr is correctly set before reconfiguring */
757	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
758
759	/* Update the TID "owner" of the queue */
760	mvm->queue_info[queue].txq_tid = tid;
761
762	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
763
764	/* Redirect to lower AC */
765	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
766			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
767
768	/* Update AC marking of the queue */
769	mvm->queue_info[queue].mac80211_ac = ac;
770
771	/*
772	 * Mark queue as shared in transport if shared
773	 * Note this has to be done after queue enablement because enablement
774	 * can also set this value, and there is no indication there to shared
775	 * queues
776	 */
777	if (shared_queue)
778		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
779
780out:
781	/* Continue using the queue */
782	clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
783
784	return ret;
785}
786
787static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
788				   u8 minq, u8 maxq)
789{
790	int i;
791
792	lockdep_assert_held(&mvm->mutex);
793
794	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
795		 "max queue %d >= num_of_queues (%d)", maxq,
796		 mvm->trans->trans_cfg->base_params->num_of_queues))
797		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;
798
799	/* This should not be hit with new TX path */
800	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
801		return -ENOSPC;
802
803	/* Start by looking for a free queue */
804	for (i = minq; i <= maxq; i++)
805		if (mvm->queue_info[i].tid_bitmap == 0 &&
806		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
807			return i;
808
809	return -ENOSPC;
810}
811
812static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta)
813{
814	int max_size = IWL_DEFAULT_QUEUE_SIZE;
815	unsigned int link_id;
816
817	/* this queue isn't used for traffic (cab_queue) */
818	if (!sta)
819		return IWL_MGMT_QUEUE_SIZE;
820
821	rcu_read_lock();
822
823	for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
824		struct ieee80211_link_sta *link =
825			rcu_dereference(sta->link[link_id]);
826
827		if (!link)
828			continue;
829
830		/* support for 1k ba size */
831		if (link->eht_cap.has_eht &&
832		    max_size < IWL_DEFAULT_QUEUE_SIZE_EHT)
833			max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;
834
835		/* support for 256 ba size */
836		if (link->he_cap.has_he &&
837		    max_size < IWL_DEFAULT_QUEUE_SIZE_HE)
838			max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
839	}
840
841	rcu_read_unlock();
842	return max_size;
843}
844
845int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
846			    struct ieee80211_sta *sta,
847			    u8 sta_id, u8 tid, unsigned int timeout)
848{
849	int queue, size;
850	u32 sta_mask = 0;
851
852	if (tid == IWL_MAX_TID_COUNT) {
853		tid = IWL_MGMT_TID;
854		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
855			     mvm->trans->cfg->min_txq_size);
856	} else {
857		size = iwl_mvm_get_queue_size(sta);
858	}
859
860	/* take the min with bc tbl entries allowed */
861	size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
862
863	/* size needs to be power of 2 values for calculating read/write pointers */
864	size = rounddown_pow_of_two(size);
865
866	if (sta) {
867		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
868		unsigned int link_id;
869
870		for (link_id = 0;
871		     link_id < ARRAY_SIZE(mvmsta->link);
872		     link_id++) {
873			struct iwl_mvm_link_sta *link =
874				rcu_dereference_protected(mvmsta->link[link_id],
875							  lockdep_is_held(&mvm->mutex));
876
877			if (!link)
878				continue;
879
880			sta_mask |= BIT(link->sta_id);
881		}
882	} else {
883		sta_mask |= BIT(sta_id);
884	}
885
886	if (!sta_mask)
887		return -EINVAL;
888
889	do {
890		queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
891					    tid, size, timeout);
892
893		if (queue < 0)
894			IWL_DEBUG_TX_QUEUES(mvm,
895					    "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %d\n",
896					    size, sta_mask, tid, queue);
897		size /= 2;
898	} while (queue < 0 && size >= 16);
899
900	if (queue < 0)
901		return queue;
902
903	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
904			    queue, sta_mask, tid);
905
906	return queue;
907}
908
909static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
910					struct ieee80211_sta *sta, u8 ac,
911					int tid)
912{
913	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
914	struct iwl_mvm_txq *mvmtxq =
915		iwl_mvm_txq_from_tid(sta, tid);
916	unsigned int wdg_timeout =
917		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
918	int queue = -1;
919
920	lockdep_assert_held(&mvm->mutex);
921
922	IWL_DEBUG_TX_QUEUES(mvm,
923			    "Allocating queue for sta %d on tid %d\n",
924			    mvmsta->deflink.sta_id, tid);
925	queue = iwl_mvm_tvqm_enable_txq(mvm, sta, mvmsta->deflink.sta_id,
926					tid, wdg_timeout);
927	if (queue < 0)
928		return queue;
929
930	mvmtxq->txq_id = queue;
931	mvm->tvqm_info[queue].txq_tid = tid;
932	mvm->tvqm_info[queue].sta_id = mvmsta->deflink.sta_id;
933
934	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
935
936	spin_lock_bh(&mvmsta->lock);
937	mvmsta->tid_data[tid].txq_id = queue;
938	spin_unlock_bh(&mvmsta->lock);
939
940	return 0;
941}
942
943static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
944				       struct ieee80211_sta *sta,
945				       int queue, u8 sta_id, u8 tid)
946{
947	bool enable_queue = true;
948
949	/* Make sure this TID isn't already enabled */
950	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
951		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
952			queue, tid);
953		return false;
954	}
955
956	/* Update mappings and refcounts */
957	if (mvm->queue_info[queue].tid_bitmap)
958		enable_queue = false;
959
960	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
961	mvm->queue_info[queue].ra_sta_id = sta_id;
962
963	if (enable_queue) {
964		if (tid != IWL_MAX_TID_COUNT)
965			mvm->queue_info[queue].mac80211_ac =
966				tid_to_mac80211_ac[tid];
967		else
968			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
969
970		mvm->queue_info[queue].txq_tid = tid;
971	}
972
973	if (sta) {
974		struct iwl_mvm_txq *mvmtxq =
975			iwl_mvm_txq_from_tid(sta, tid);
976
977		mvmtxq->txq_id = queue;
978	}
979
980	IWL_DEBUG_TX_QUEUES(mvm,
981			    "Enabling TXQ #%d tids=0x%x\n",
982			    queue, mvm->queue_info[queue].tid_bitmap);
983
984	return enable_queue;
985}
986
987static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
988			       int queue, u16 ssn,
989			       const struct iwl_trans_txq_scd_cfg *cfg,
990			       unsigned int wdg_timeout)
991{
992	struct iwl_scd_txq_cfg_cmd cmd = {
993		.scd_queue = queue,
994		.action = SCD_CFG_ENABLE_QUEUE,
995		.window = cfg->frame_limit,
996		.sta_id = cfg->sta_id,
997		.ssn = cpu_to_le16(ssn),
998		.tx_fifo = cfg->fifo,
999		.aggregate = cfg->aggregate,
1000		.tid = cfg->tid,
1001	};
1002	bool inc_ssn;
1003
1004	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1005		return false;
1006
1007	/* Send the enabling command if we need to */
1008	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
1009		return false;
1010
1011	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
1012					   NULL, wdg_timeout);
1013	if (inc_ssn)
1014		le16_add_cpu(&cmd.ssn, 1);
1015
1016	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
1017	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
1018
1019	return inc_ssn;
1020}
1021
1022static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
1023{
1024	struct iwl_scd_txq_cfg_cmd cmd = {
1025		.scd_queue = queue,
1026		.action = SCD_CFG_UPDATE_QUEUE_TID,
1027	};
1028	int tid;
1029	unsigned long tid_bitmap;
1030	int ret;
1031
1032	lockdep_assert_held(&mvm->mutex);
1033
1034	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1035		return;
1036
1037	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1038
1039	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
1040		return;
1041
1042	/* Find any TID for queue */
1043	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
1044	cmd.tid = tid;
1045	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
1046
1047	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
1048	if (ret) {
1049		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
1050			queue, ret);
1051		return;
1052	}
1053
1054	mvm->queue_info[queue].txq_tid = tid;
1055	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
1056			    queue, tid);
1057}
1058
1059static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
1060{
1061	struct ieee80211_sta *sta;
1062	struct iwl_mvm_sta *mvmsta;
1063	u8 sta_id;
1064	int tid = -1;
1065	unsigned long tid_bitmap;
1066	unsigned int wdg_timeout;
1067	int ssn;
1068	int ret = true;
1069
1070	/* queue sharing is disabled on new TX path */
1071	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1072		return;
1073
1074	lockdep_assert_held(&mvm->mutex);
1075
1076	sta_id = mvm->queue_info[queue].ra_sta_id;
1077	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1078
1079	/* Find TID for queue, and make sure it is the only one on the queue */
1080	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
1081	if (tid_bitmap != BIT(tid)) {
1082		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
1083			queue, tid_bitmap);
1084		return;
1085	}
1086
1087	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
1088			    tid);
1089
1090	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1091					lockdep_is_held(&mvm->mutex));
1092
1093	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
1094		return;
1095
1096	mvmsta = iwl_mvm_sta_from_mac80211(sta);
1097	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1098
1099	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1100
1101	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
1102				     tid_to_mac80211_ac[tid], ssn,
1103				     wdg_timeout, true,
1104				     iwl_mvm_txq_from_tid(sta, tid));
1105	if (ret) {
1106		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
1107		return;
1108	}
1109
1110	/* If aggs should be turned back on - do it */
1111	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
1112		struct iwl_mvm_add_sta_cmd cmd = {0};
1113
1114		mvmsta->tid_disable_agg &= ~BIT(tid);
1115
1116		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1117		cmd.sta_id = mvmsta->deflink.sta_id;
1118		cmd.add_modify = STA_MODE_MODIFY;
1119		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1120		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
1121		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
1122
1123		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
1124					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
1125		if (!ret) {
1126			IWL_DEBUG_TX_QUEUES(mvm,
1127					    "TXQ #%d is now aggregated again\n",
1128					    queue);
1129
1130			/* Mark queue intenally as aggregating again */
1131			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
1132		}
1133	}
1134
1135	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1136}
1137
1138/*
1139 * Remove inactive TIDs of a given queue.
1140 * If all queue TIDs are inactive - mark the queue as inactive
1141 * If only some the queue TIDs are inactive - unmap them from the queue
1142 *
1143 * Returns %true if all TIDs were removed and the queue could be reused.
1144 */
1145static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1146					 struct iwl_mvm_sta *mvmsta, int queue,
1147					 unsigned long tid_bitmap,
1148					 unsigned long *unshare_queues,
1149					 unsigned long *changetid_queues)
1150{
1151	unsigned int tid;
1152
1153	lockdep_assert_held(&mvmsta->lock);
1154	lockdep_assert_held(&mvm->mutex);
1155
1156	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1157		return false;
1158
1159	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
1160	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1161		/* If some TFDs are still queued - don't mark TID as inactive */
1162		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1163			tid_bitmap &= ~BIT(tid);
1164
1165		/* Don't mark as inactive any TID that has an active BA */
1166		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1167			tid_bitmap &= ~BIT(tid);
1168	}
1169
1170	/* If all TIDs in the queue are inactive - return it can be reused */
1171	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1172		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
1173		return true;
1174	}
1175
1176	/*
1177	 * If we are here, this is a shared queue and not all TIDs timed-out.
1178	 * Remove the ones that did.
1179	 */
1180	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1181		u16 q_tid_bitmap;
1182
1183		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1184		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1185
1186		q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1187
1188		/*
1189		 * We need to take into account a situation in which a TXQ was
1190		 * allocated to TID x, and then turned shared by adding TIDs y
1191		 * and z. If TID x becomes inactive and is removed from the TXQ,
1192		 * ownership must be given to one of the remaining TIDs.
1193		 * This is mainly because if TID x continues - a new queue can't
1194		 * be allocated for it as long as it is an owner of another TXQ.
1195		 *
1196		 * Mark this queue in the right bitmap, we'll send the command
1197		 * to the firmware later.
1198		 */
1199		if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
1200			set_bit(queue, changetid_queues);
1201
1202		IWL_DEBUG_TX_QUEUES(mvm,
1203				    "Removing inactive TID %d from shared Q:%d\n",
1204				    tid, queue);
1205	}
1206
1207	IWL_DEBUG_TX_QUEUES(mvm,
1208			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
1209			    mvm->queue_info[queue].tid_bitmap);
1210
1211	/*
1212	 * There may be different TIDs with the same mac queues, so make
1213	 * sure all TIDs have existing corresponding mac queues enabled
1214	 */
1215	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1216
1217	/* If the queue is marked as shared - "unshare" it */
1218	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
1219	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1220		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1221				    queue);
1222		set_bit(queue, unshare_queues);
1223	}
1224
1225	return false;
1226}
1227
1228/*
1229 * Check for inactivity - this includes checking if any queue
1230 * can be unshared and finding one (and only one) that can be
1231 * reused.
1232 * This function is also invoked as a sort of clean-up task,
1233 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
1234 *
1235 * Returns the queue number, or -ENOSPC.
1236 */
1237static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
1238{
1239	unsigned long now = jiffies;
1240	unsigned long unshare_queues = 0;
1241	unsigned long changetid_queues = 0;
1242	int i, ret, free_queue = -ENOSPC;
1243	struct ieee80211_sta *queue_owner  = NULL;
1244
1245	lockdep_assert_held(&mvm->mutex);
1246
1247	if (iwl_mvm_has_new_tx_api(mvm))
1248		return -ENOSPC;
1249
1250	rcu_read_lock();
1251
1252	/* we skip the CMD queue below by starting at 1 */
1253	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
1254
1255	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
1256		struct ieee80211_sta *sta;
1257		struct iwl_mvm_sta *mvmsta;
1258		u8 sta_id;
1259		int tid;
1260		unsigned long inactive_tid_bitmap = 0;
1261		unsigned long queue_tid_bitmap;
1262
1263		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
1264		if (!queue_tid_bitmap)
1265			continue;
1266
1267		/* If TXQ isn't in active use anyway - nothing to do here... */
1268		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
1269		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
1270			continue;
1271
1272		/* Check to see if there are inactive TIDs on this queue */
1273		for_each_set_bit(tid, &queue_tid_bitmap,
1274				 IWL_MAX_TID_COUNT + 1) {
1275			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1276				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1277				continue;
1278
1279			inactive_tid_bitmap |= BIT(tid);
1280		}
1281
1282		/* If all TIDs are active - finish check on this queue */
1283		if (!inactive_tid_bitmap)
1284			continue;
1285
1286		/*
1287		 * If we are here - the queue hadn't been served recently and is
1288		 * in use
1289		 */
1290
1291		sta_id = mvm->queue_info[i].ra_sta_id;
1292		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1293
1294		/*
1295		 * If the STA doesn't exist anymore, it isn't an error. It could
1296		 * be that it was removed since getting the queues, and in this
1297		 * case it should've inactivated its queues anyway.
1298		 */
1299		if (IS_ERR_OR_NULL(sta))
1300			continue;
1301
1302		mvmsta = iwl_mvm_sta_from_mac80211(sta);
1303
1304		spin_lock_bh(&mvmsta->lock);
1305		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
1306						   inactive_tid_bitmap,
1307						   &unshare_queues,
1308						   &changetid_queues);
1309		if (ret && free_queue < 0) {
1310			queue_owner = sta;
1311			free_queue = i;
1312		}
1313		/* only unlock sta lock - we still need the queue info lock */
1314		spin_unlock_bh(&mvmsta->lock);
1315	}
1316
1317
1318	/* Reconfigure queues requiring reconfiguation */
1319	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
1320		iwl_mvm_unshare_queue(mvm, i);
1321	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
1322		iwl_mvm_change_queue_tid(mvm, i);
1323
1324	rcu_read_unlock();
1325
1326	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
1327		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
1328						  alloc_for_sta);
1329		if (ret)
1330			return ret;
1331	}
1332
1333	return free_queue;
1334}
1335
1336static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
1337				   struct ieee80211_sta *sta, u8 ac, int tid)
1338{
1339	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1340	struct iwl_trans_txq_scd_cfg cfg = {
1341		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
1342		.sta_id = mvmsta->deflink.sta_id,
1343		.tid = tid,
1344		.frame_limit = IWL_FRAME_LIMIT,
1345	};
1346	unsigned int wdg_timeout =
1347		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1348	int queue = -1;
1349	u16 queue_tmp;
1350	unsigned long disable_agg_tids = 0;
1351	enum iwl_mvm_agg_state queue_state;
1352	bool shared_queue = false, inc_ssn;
1353	int ssn;
1354	unsigned long tfd_queue_mask;
1355	int ret;
1356
1357	lockdep_assert_held(&mvm->mutex);
1358
1359	if (iwl_mvm_has_new_tx_api(mvm))
1360		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
1361
1362	spin_lock_bh(&mvmsta->lock);
1363	tfd_queue_mask = mvmsta->tfd_queue_msk;
1364	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1365	spin_unlock_bh(&mvmsta->lock);
1366
1367	if (tid == IWL_MAX_TID_COUNT) {
1368		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
1369						IWL_MVM_DQA_MIN_MGMT_QUEUE,
1370						IWL_MVM_DQA_MAX_MGMT_QUEUE);
1371		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
1372			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
1373					    queue);
1374
1375		/* If no such queue is found, we'll use a DATA queue instead */
1376	}
1377
1378	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
1379	    (mvm->queue_info[mvmsta->reserved_queue].status ==
1380			IWL_MVM_QUEUE_RESERVED)) {
1381		queue = mvmsta->reserved_queue;
1382		mvm->queue_info[queue].reserved = true;
1383		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
1384	}
1385
1386	if (queue < 0)
1387		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
1388						IWL_MVM_DQA_MIN_DATA_QUEUE,
1389						IWL_MVM_DQA_MAX_DATA_QUEUE);
1390	if (queue < 0) {
1391		/* try harder - perhaps kill an inactive queue */
1392		queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
1393	}
1394
1395	/* No free queue - we'll have to share */
1396	if (queue <= 0) {
1397		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
1398		if (queue > 0) {
1399			shared_queue = true;
1400			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
1401		}
1402	}
1403
1404	/*
1405	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
1406	 * to make sure no one else takes it.
1407	 * This will allow avoiding re-acquiring the lock at the end of the
1408	 * configuration. On error we'll mark it back as free.
1409	 */
1410	if (queue > 0 && !shared_queue)
1411		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1412
1413	/* This shouldn't happen - out of queues */
1414	if (WARN_ON(queue <= 0)) {
1415		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
1416			tid, cfg.sta_id);
1417		return queue;
1418	}
1419
1420	/*
1421	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
1422	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
1423	 * as aggregatable.
1424	 * Mark all DATA queues as allowing to be aggregated at some point
1425	 */
1426	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1427			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1428
1429	IWL_DEBUG_TX_QUEUES(mvm,
1430			    "Allocating %squeue #%d to sta %d on tid %d\n",
1431			    shared_queue ? "shared " : "", queue,
1432			    mvmsta->deflink.sta_id, tid);
1433
1434	if (shared_queue) {
1435		/* Disable any open aggs on this queue */
1436		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
1437
1438		if (disable_agg_tids) {
1439			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
1440					    queue);
1441			iwl_mvm_invalidate_sta_queue(mvm, queue,
1442						     disable_agg_tids, false);
1443		}
1444	}
1445
1446	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
1447
1448	/*
1449	 * Mark queue as shared in transport if shared
1450	 * Note this has to be done after queue enablement because enablement
1451	 * can also set this value, and there is no indication there to shared
1452	 * queues
1453	 */
1454	if (shared_queue)
1455		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
1456
1457	spin_lock_bh(&mvmsta->lock);
1458	/*
1459	 * This looks racy, but it is not. We have only one packet for
1460	 * this ra/tid in our Tx path since we stop the Qdisc when we
1461	 * need to allocate a new TFD queue.
1462	 */
1463	if (inc_ssn) {
1464		mvmsta->tid_data[tid].seq_number += 0x10;
1465		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
1466	}
1467	mvmsta->tid_data[tid].txq_id = queue;
1468	mvmsta->tfd_queue_msk |= BIT(queue);
1469	queue_state = mvmsta->tid_data[tid].state;
1470
1471	if (mvmsta->reserved_queue == queue)
1472		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
1473	spin_unlock_bh(&mvmsta->lock);
1474
1475	if (!shared_queue) {
1476		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
1477		if (ret)
1478			goto out_err;
1479
1480		/* If we need to re-enable aggregations... */
1481		if (queue_state == IWL_AGG_ON) {
1482			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1483			if (ret)
1484				goto out_err;
1485		}
1486	} else {
1487		/* Redirect queue, if needed */
1488		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
1489					     wdg_timeout, false,
1490					     iwl_mvm_txq_from_tid(sta, tid));
1491		if (ret)
1492			goto out_err;
1493	}
1494
1495	return 0;
1496
1497out_err:
1498	queue_tmp = queue;
1499	iwl_mvm_disable_txq(mvm, sta, mvmsta->deflink.sta_id, &queue_tmp, tid);
1500
1501	return ret;
1502}
1503
1504void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1505{
1506	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1507					   add_stream_wk);
1508
1509	mutex_lock(&mvm->mutex);
1510
1511	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1512
1513	while (!list_empty(&mvm->add_stream_txqs)) {
1514		struct iwl_mvm_txq *mvmtxq;
1515		struct ieee80211_txq *txq;
1516		u8 tid;
1517
1518		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
1519					  struct iwl_mvm_txq, list);
1520
1521		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
1522				   drv_priv);
1523		tid = txq->tid;
1524		if (tid == IEEE80211_NUM_TIDS)
1525			tid = IWL_MAX_TID_COUNT;
1526
1527		/*
1528		 * We can't really do much here, but if this fails we can't
1529		 * transmit anyway - so just don't transmit the frame etc.
1530		 * and let them back up ... we've tried our best to allocate
1531		 * a queue in the function itself.
1532		 */
1533		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
1534			spin_lock_bh(&mvm->add_stream_lock);
1535			list_del_init(&mvmtxq->list);
1536			spin_unlock_bh(&mvm->add_stream_lock);
1537			continue;
1538		}
1539
1540		/* now we're ready, any remaining races/concurrency will be
1541		 * handled in iwl_mvm_mac_itxq_xmit()
1542		 */
1543		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
1544
1545		local_bh_disable();
1546		spin_lock(&mvm->add_stream_lock);
1547		list_del_init(&mvmtxq->list);
1548		spin_unlock(&mvm->add_stream_lock);
1549
1550		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
1551		local_bh_enable();
1552	}
1553
1554	mutex_unlock(&mvm->mutex);
1555}
1556
1557static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1558				      struct ieee80211_sta *sta,
1559				      enum nl80211_iftype vif_type)
1560{
1561	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1562	int queue;
1563
1564	/* queue reserving is disabled on new TX path */
1565	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1566		return 0;
1567
1568	/* run the general cleanup/unsharing of queues */
1569	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1570
1571	/* Make sure we have free resources for this STA */
1572	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1573	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
1574	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1575	     IWL_MVM_QUEUE_FREE))
1576		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1577	else
1578		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
1579						IWL_MVM_DQA_MIN_DATA_QUEUE,
1580						IWL_MVM_DQA_MAX_DATA_QUEUE);
1581	if (queue < 0) {
1582		/* try again - this time kick out a queue if needed */
1583		queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
1584		if (queue < 0) {
1585			IWL_ERR(mvm, "No available queues for new station\n");
1586			return -ENOSPC;
1587		}
1588	}
1589	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
1590
1591	mvmsta->reserved_queue = queue;
1592
1593	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1594			    queue, mvmsta->deflink.sta_id);
1595
1596	return 0;
1597}
1598
1599/*
1600 * In DQA mode, after a HW restart the queues should be allocated as before, in
1601 * order to avoid race conditions when there are shared queues. This function
1602 * does the re-mapping and queue allocation.
1603 *
1604 * Note that re-enabling aggregations isn't done in this function.
1605 */
1606void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1607					  struct ieee80211_sta *sta)
1608{
1609	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1610	unsigned int wdg =
1611		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1612	int i;
1613	struct iwl_trans_txq_scd_cfg cfg = {
1614		.sta_id = mvm_sta->deflink.sta_id,
1615		.frame_limit = IWL_FRAME_LIMIT,
1616	};
1617
1618	/* Make sure reserved queue is still marked as such (if allocated) */
1619	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1620		mvm->queue_info[mvm_sta->reserved_queue].status =
1621			IWL_MVM_QUEUE_RESERVED;
1622
1623	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1624		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1625		int txq_id = tid_data->txq_id;
1626		int ac;
1627
1628		if (txq_id == IWL_MVM_INVALID_QUEUE)
1629			continue;
1630
1631		ac = tid_to_mac80211_ac[i];
1632
1633		if (iwl_mvm_has_new_tx_api(mvm)) {
1634			IWL_DEBUG_TX_QUEUES(mvm,
1635					    "Re-mapping sta %d tid %d\n",
1636					    mvm_sta->deflink.sta_id, i);
1637			txq_id = iwl_mvm_tvqm_enable_txq(mvm, sta,
1638							 mvm_sta->deflink.sta_id,
1639							 i, wdg);
1640			/*
1641			 * on failures, just set it to IWL_MVM_INVALID_QUEUE
1642			 * to try again later, we have no other good way of
1643			 * failing here
1644			 */
1645			if (txq_id < 0)
1646				txq_id = IWL_MVM_INVALID_QUEUE;
1647			tid_data->txq_id = txq_id;
1648
1649			/*
1650			 * Since we don't set the seq number after reset, and HW
1651			 * sets it now, FW reset will cause the seq num to start
1652			 * at 0 again, so driver will need to update it
1653			 * internally as well, so it keeps in sync with real val
1654			 */
1655			tid_data->seq_number = 0;
1656		} else {
1657			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1658
1659			cfg.tid = i;
1660			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
1661			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1662					 txq_id ==
1663					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1664
1665			IWL_DEBUG_TX_QUEUES(mvm,
1666					    "Re-mapping sta %d tid %d to queue %d\n",
1667					    mvm_sta->deflink.sta_id, i,
1668					    txq_id);
1669
1670			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
1671			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
1672		}
1673	}
1674}
1675
1676static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1677				      struct iwl_mvm_int_sta *sta,
1678				      const u8 *addr,
1679				      u16 mac_id, u16 color)
1680{
1681	struct iwl_mvm_add_sta_cmd cmd;
1682	int ret;
1683	u32 status = ADD_STA_SUCCESS;
1684
1685	lockdep_assert_held(&mvm->mutex);
1686
1687	memset(&cmd, 0, sizeof(cmd));
1688	cmd.sta_id = sta->sta_id;
1689
1690	if (iwl_mvm_has_new_station_api(mvm->fw) &&
1691	    sta->type == IWL_STA_AUX_ACTIVITY)
1692		cmd.mac_id_n_color = cpu_to_le32(mac_id);
1693	else
1694		cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1695								     color));
1696
1697	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1698		cmd.station_type = sta->type;
1699
1700	if (!iwl_mvm_has_new_tx_api(mvm))
1701		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1702	cmd.tid_disable_tx = cpu_to_le16(0xffff);
1703
1704	if (addr)
1705		memcpy(cmd.addr, addr, ETH_ALEN);
1706
1707	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1708					  iwl_mvm_add_sta_cmd_size(mvm),
1709					  &cmd, &status);
1710	if (ret)
1711		return ret;
1712
1713	switch (status & IWL_ADD_STA_STATUS_MASK) {
1714	case ADD_STA_SUCCESS:
1715		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1716		return 0;
1717	default:
1718		ret = -EIO;
1719		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1720			status);
1721		break;
1722	}
1723	return ret;
1724}
1725
1726/* Initialize driver data of a new sta */
1727int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1728		     struct ieee80211_sta *sta, int sta_id, u8 sta_type)
1729{
1730	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1731	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1732	struct iwl_mvm_rxq_dup_data *dup_data;
1733	int i, ret = 0;
1734
1735	lockdep_assert_held(&mvm->mutex);
1736
1737	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1738						      mvmvif->color);
1739	mvm_sta->vif = vif;
1740
1741	/* for MLD sta_id(s) should be allocated for each link before calling
1742	 * this function
1743	 */
1744	if (!mvm->mld_api_is_used) {
1745		if (WARN_ON(sta_id == IWL_MVM_INVALID_STA))
1746			return -EINVAL;
1747
1748		mvm_sta->deflink.sta_id = sta_id;
1749		rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink);
1750
1751		if (!mvm->trans->trans_cfg->gen2)
1752			mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
1753				LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1754		else
1755			mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
1756				LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
1757	}
1758
1759	mvm_sta->tt_tx_protection = false;
1760	mvm_sta->sta_type = sta_type;
1761
1762	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
1763
1764	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1765		/*
1766		 * Mark all queues for this STA as unallocated and defer TX
1767		 * frames until the queue is allocated
1768		 */
1769		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1770	}
1771
1772	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1773		struct iwl_mvm_txq *mvmtxq =
1774			iwl_mvm_txq_from_mac80211(sta->txq[i]);
1775
1776		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1777		INIT_LIST_HEAD(&mvmtxq->list);
1778		atomic_set(&mvmtxq->tx_request, 0);
1779	}
1780
1781	if (iwl_mvm_has_new_rx_api(mvm)) {
1782		int q;
1783
1784		dup_data = kcalloc(mvm->trans->num_rx_queues,
1785				   sizeof(*dup_data), GFP_KERNEL);
1786		if (!dup_data)
1787			return -ENOMEM;
1788		/*
1789		 * Initialize all the last_seq values to 0xffff which can never
1790		 * compare equal to the frame's seq_ctrl in the check in
1791		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1792		 * number and fragmented packets don't reach that function.
1793		 *
1794		 * This thus allows receiving a packet with seqno 0 and the
1795		 * retry bit set as the very first packet on a new TID.
1796		 */
1797		for (q = 0; q < mvm->trans->num_rx_queues; q++)
1798			memset(dup_data[q].last_seq, 0xff,
1799			       sizeof(dup_data[q].last_seq));
1800		mvm_sta->dup_data = dup_data;
1801	}
1802
1803	if (!iwl_mvm_has_new_tx_api(mvm)) {
1804		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1805						 ieee80211_vif_type_p2p(vif));
1806		if (ret)
1807			return ret;
1808	}
1809
1810	/*
1811	 * if rs is registered with mac80211, then "add station" will be handled
1812	 * via the corresponding ops, otherwise need to notify rate scaling here
1813	 */
1814	if (iwl_mvm_has_tlc_offload(mvm))
1815		iwl_mvm_rs_add_sta(mvm, mvm_sta);
1816	else
1817		spin_lock_init(&mvm_sta->deflink.lq_sta.rs_drv.pers.lock);
1818
1819	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
1820
1821	return 0;
1822}
1823
1824int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1825		    struct ieee80211_vif *vif,
1826		    struct ieee80211_sta *sta)
1827{
1828	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1829	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1830	int ret, sta_id;
1831	bool sta_update = false;
1832	unsigned int sta_flags = 0;
1833
1834	lockdep_assert_held(&mvm->mutex);
1835
1836	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1837		sta_id = iwl_mvm_find_free_sta_id(mvm,
1838						  ieee80211_vif_type_p2p(vif));
1839	else
1840		sta_id = mvm_sta->deflink.sta_id;
1841
1842	if (sta_id == IWL_MVM_INVALID_STA)
1843		return -ENOSPC;
1844
1845	spin_lock_init(&mvm_sta->lock);
1846
1847	/* if this is a HW restart re-alloc existing queues */
1848	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1849		struct iwl_mvm_int_sta tmp_sta = {
1850			.sta_id = sta_id,
1851			.type = mvm_sta->sta_type,
1852		};
1853
1854		/* First add an empty station since allocating
1855		 * a queue requires a valid station
1856		 */
1857		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1858						 mvmvif->id, mvmvif->color);
1859		if (ret)
1860			goto err;
1861
1862		iwl_mvm_realloc_queues_after_restart(mvm, sta);
1863		sta_update = true;
1864		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
1865		goto update_fw;
1866	}
1867
1868	ret = iwl_mvm_sta_init(mvm, vif, sta, sta_id,
1869			       sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK);
1870	if (ret)
1871		goto err;
1872
1873update_fw:
1874	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
1875	if (ret)
1876		goto err;
1877
1878	if (vif->type == NL80211_IFTYPE_STATION) {
1879		if (!sta->tdls) {
1880			WARN_ON(mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA);
1881			mvmvif->deflink.ap_sta_id = sta_id;
1882		} else {
1883			WARN_ON(mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA);
1884		}
1885	}
1886
1887	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1888
1889	return 0;
1890
1891err:
1892	return ret;
1893}
1894
1895int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1896		      bool drain)
1897{
1898	struct iwl_mvm_add_sta_cmd cmd = {};
1899	int ret;
1900	u32 status;
1901
1902	lockdep_assert_held(&mvm->mutex);
1903
1904	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1905	cmd.sta_id = mvmsta->deflink.sta_id;
1906	cmd.add_modify = STA_MODE_MODIFY;
1907	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1908	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1909
1910	status = ADD_STA_SUCCESS;
1911	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1912					  iwl_mvm_add_sta_cmd_size(mvm),
1913					  &cmd, &status);
1914	if (ret)
1915		return ret;
1916
1917	switch (status & IWL_ADD_STA_STATUS_MASK) {
1918	case ADD_STA_SUCCESS:
1919		IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
1920			       mvmsta->deflink.sta_id);
1921		break;
1922	default:
1923		ret = -EIO;
1924		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1925			mvmsta->deflink.sta_id);
1926		break;
1927	}
1928
1929	return ret;
1930}
1931
1932/*
1933 * Remove a station from the FW table. Before sending the command to remove
1934 * the station validate that the station is indeed known to the driver (sanity
1935 * only).
1936 */
1937static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1938{
1939	struct ieee80211_sta *sta;
1940	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1941		.sta_id = sta_id,
1942	};
1943	int ret;
1944
1945	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1946					lockdep_is_held(&mvm->mutex));
1947
1948	/* Note: internal stations are marked as error values */
1949	if (!sta) {
1950		IWL_ERR(mvm, "Invalid station id\n");
1951		return -EINVAL;
1952	}
1953
1954	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1955				   sizeof(rm_sta_cmd), &rm_sta_cmd);
1956	if (ret) {
1957		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1958		return ret;
1959	}
1960
1961	return 0;
1962}
1963
1964static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1965				       struct ieee80211_vif *vif,
1966				       struct ieee80211_sta *sta)
1967{
1968	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1969	int i;
1970
1971	lockdep_assert_held(&mvm->mutex);
1972
1973	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1974		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1975			continue;
1976
1977		iwl_mvm_disable_txq(mvm, sta, mvm_sta->deflink.sta_id,
1978				    &mvm_sta->tid_data[i].txq_id, i);
1979		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1980	}
1981
1982	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1983		struct iwl_mvm_txq *mvmtxq =
1984			iwl_mvm_txq_from_mac80211(sta->txq[i]);
1985
1986		spin_lock_bh(&mvm->add_stream_lock);
1987		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1988		list_del_init(&mvmtxq->list);
1989		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
1990		spin_unlock_bh(&mvm->add_stream_lock);
1991	}
1992}
1993
1994int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1995				  struct iwl_mvm_sta *mvm_sta)
1996{
1997	int i;
1998
1999	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
2000		u16 txq_id;
2001		int ret;
2002
2003		spin_lock_bh(&mvm_sta->lock);
2004		txq_id = mvm_sta->tid_data[i].txq_id;
2005		spin_unlock_bh(&mvm_sta->lock);
2006
2007		if (txq_id == IWL_MVM_INVALID_QUEUE)
2008			continue;
2009
2010		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
2011		if (ret)
2012			return ret;
2013	}
2014
2015	return 0;
2016}
2017
2018/* Execute the common part for both MLD and non-MLD modes.
2019 * Returns if we're done with removing the station, either
2020 * with error or success
2021 */
2022bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2023		     struct ieee80211_sta *sta,
2024		     struct ieee80211_link_sta *link_sta, int *ret)
2025{
2026	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2027	struct iwl_mvm_vif_link_info *mvm_link =
2028		mvmvif->link[link_sta->link_id];
2029	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2030	struct iwl_mvm_link_sta *mvm_link_sta;
2031	u8 sta_id;
2032
2033	lockdep_assert_held(&mvm->mutex);
2034
2035	mvm_link_sta =
2036		rcu_dereference_protected(mvm_sta->link[link_sta->link_id],
2037					  lockdep_is_held(&mvm->mutex));
2038	sta_id = mvm_link_sta->sta_id;
2039
2040	/* If there is a TXQ still marked as reserved - free it */
2041	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
2042		u8 reserved_txq = mvm_sta->reserved_queue;
2043		enum iwl_mvm_queue_status *status;
2044
2045		/*
2046		 * If no traffic has gone through the reserved TXQ - it
2047		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
2048		 * should be manually marked as free again
2049		 */
2050		status = &mvm->queue_info[reserved_txq].status;
2051		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
2052			 (*status != IWL_MVM_QUEUE_FREE),
2053			 "sta_id %d reserved txq %d status %d",
2054			 sta_id, reserved_txq, *status)) {
2055			*ret = -EINVAL;
2056			return true;
2057		}
2058
2059		*status = IWL_MVM_QUEUE_FREE;
2060	}
2061
2062	if (vif->type == NL80211_IFTYPE_STATION &&
2063	    mvm_link->ap_sta_id == sta_id) {
2064		/* if associated - we can't remove the AP STA now */
2065		if (vif->cfg.assoc)
2066			return true;
2067
2068		/* first remove remaining keys */
2069		iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, 0);
2070
2071		/* unassoc - go ahead - remove the AP STA now */
2072		mvm_link->ap_sta_id = IWL_MVM_INVALID_STA;
2073	}
2074
2075	/*
2076	 * This shouldn't happen - the TDLS channel switch should be canceled
2077	 * before the STA is removed.
2078	 */
2079	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
2080		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
2081		cancel_delayed_work(&mvm->tdls_cs.dwork);
2082	}
2083
2084	return false;
2085}
2086
2087int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
2088		   struct ieee80211_vif *vif,
2089		   struct ieee80211_sta *sta)
2090{
2091	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2092	int ret;
2093
2094	lockdep_assert_held(&mvm->mutex);
2095
2096	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
2097	if (ret)
2098		return ret;
2099
2100	/* flush its queues here since we are freeing mvm_sta */
2101	ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id,
2102				mvm_sta->tfd_queue_msk);
2103	if (ret)
2104		return ret;
2105	if (iwl_mvm_has_new_tx_api(mvm)) {
2106		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
2107	} else {
2108		u32 q_mask = mvm_sta->tfd_queue_msk;
2109
2110		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2111						     q_mask);
2112	}
2113	if (ret)
2114		return ret;
2115
2116	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
2117
2118	iwl_mvm_disable_sta_queues(mvm, vif, sta);
2119
2120	if (iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink, &ret))
2121		return ret;
2122
2123	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id);
2124	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL);
2125
2126	return ret;
2127}
2128
2129int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
2130		      struct ieee80211_vif *vif,
2131		      u8 sta_id)
2132{
2133	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
2134
2135	lockdep_assert_held(&mvm->mutex);
2136
2137	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
2138	return ret;
2139}
2140
2141int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
2142			     struct iwl_mvm_int_sta *sta,
2143			     u32 qmask, enum nl80211_iftype iftype,
2144			     u8 type)
2145{
2146	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
2147	    sta->sta_id == IWL_MVM_INVALID_STA) {
2148		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
2149		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
2150			return -ENOSPC;
2151	}
2152
2153	sta->tfd_queue_msk = qmask;
2154	sta->type = type;
2155
2156	/* put a non-NULL value so iterating over the stations won't stop */
2157	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
2158	return 0;
2159}
2160
2161void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
2162{
2163	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
2164	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
2165	sta->sta_id = IWL_MVM_INVALID_STA;
2166}
2167
2168static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
2169					  u8 sta_id, u8 fifo)
2170{
2171	unsigned int wdg_timeout =
2172		mvm->trans->trans_cfg->base_params->wd_timeout;
2173	struct iwl_trans_txq_scd_cfg cfg = {
2174		.fifo = fifo,
2175		.sta_id = sta_id,
2176		.tid = IWL_MAX_TID_COUNT,
2177		.aggregate = false,
2178		.frame_limit = IWL_FRAME_LIMIT,
2179	};
2180
2181	WARN_ON(iwl_mvm_has_new_tx_api(mvm));
2182
2183	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2184}
2185
2186static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
2187{
2188	unsigned int wdg_timeout =
2189		mvm->trans->trans_cfg->base_params->wd_timeout;
2190
2191	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2192
2193	return iwl_mvm_tvqm_enable_txq(mvm, NULL, sta_id, IWL_MAX_TID_COUNT,
2194				       wdg_timeout);
2195}
2196
2197static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
2198					  int maccolor, u8 *addr,
2199					  struct iwl_mvm_int_sta *sta,
2200					  u16 *queue, int fifo)
2201{
2202	int ret;
2203
2204	/* Map queue to fifo - needs to happen before adding station */
2205	if (!iwl_mvm_has_new_tx_api(mvm))
2206		iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
2207
2208	ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
2209	if (ret) {
2210		if (!iwl_mvm_has_new_tx_api(mvm))
2211			iwl_mvm_disable_txq(mvm, NULL, sta->sta_id, queue,
2212					    IWL_MAX_TID_COUNT);
2213		return ret;
2214	}
2215
2216	/*
2217	 * For 22000 firmware and on we cannot add queue to a station unknown
2218	 * to firmware so enable queue here - after the station was added
2219	 */
2220	if (iwl_mvm_has_new_tx_api(mvm)) {
2221		int txq;
2222
2223		txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
2224		if (txq < 0) {
2225			iwl_mvm_rm_sta_common(mvm, sta->sta_id);
2226			return txq;
2227		}
2228
2229		*queue = txq;
2230	}
2231
2232	return 0;
2233}
2234
2235int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
2236{
2237	int ret;
2238	u32 qmask = mvm->aux_queue == IWL_MVM_INVALID_QUEUE ? 0 :
2239		BIT(mvm->aux_queue);
2240
2241	lockdep_assert_held(&mvm->mutex);
2242
2243	/* Allocate aux station and assign to it the aux queue */
2244	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, qmask,
2245				       NL80211_IFTYPE_UNSPECIFIED,
2246				       IWL_STA_AUX_ACTIVITY);
2247	if (ret)
2248		return ret;
2249
2250	/*
2251	 * In CDB NICs we need to specify which lmac to use for aux activity
2252	 * using the mac_id argument place to send lmac_id to the function
2253	 */
2254	ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
2255					     &mvm->aux_sta, &mvm->aux_queue,
2256					     IWL_MVM_TX_FIFO_MCAST);
2257	if (ret) {
2258		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2259		return ret;
2260	}
2261
2262	return 0;
2263}
2264
2265int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2266{
2267	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2268
2269	lockdep_assert_held(&mvm->mutex);
2270
2271	return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
2272					      NULL, &mvm->snif_sta,
2273					      &mvm->snif_queue,
2274					      IWL_MVM_TX_FIFO_BE);
2275}
2276
2277int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2278{
2279	int ret;
2280
2281	lockdep_assert_held(&mvm->mutex);
2282
2283	if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
2284		return -EINVAL;
2285
2286	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id,
2287			    &mvm->snif_queue, IWL_MAX_TID_COUNT);
2288	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2289	if (ret)
2290		IWL_WARN(mvm, "Failed sending remove station\n");
2291
2292	return ret;
2293}
2294
2295int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
2296{
2297	int ret;
2298
2299	lockdep_assert_held(&mvm->mutex);
2300
2301	if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
2302		return -EINVAL;
2303
2304	iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id,
2305			    &mvm->aux_queue, IWL_MAX_TID_COUNT);
2306	ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
2307	if (ret)
2308		IWL_WARN(mvm, "Failed sending remove station\n");
2309	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2310
2311	return ret;
2312}
2313
2314void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2315{
2316	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2317}
2318
2319/*
2320 * Send the add station command for the vif's broadcast station.
2321 * Assumes that the station was already allocated.
2322 *
2323 * @mvm: the mvm component
2324 * @vif: the interface to which the broadcast station is added
2325 * @bsta: the broadcast station to add.
2326 */
2327int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2328{
2329	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2330	struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
2331	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2332	const u8 *baddr = _baddr;
2333	int queue;
2334	int ret;
2335	unsigned int wdg_timeout =
2336		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2337	struct iwl_trans_txq_scd_cfg cfg = {
2338		.fifo = IWL_MVM_TX_FIFO_VO,
2339		.sta_id = mvmvif->deflink.bcast_sta.sta_id,
2340		.tid = IWL_MAX_TID_COUNT,
2341		.aggregate = false,
2342		.frame_limit = IWL_FRAME_LIMIT,
2343	};
2344
2345	lockdep_assert_held(&mvm->mutex);
2346
2347	if (!iwl_mvm_has_new_tx_api(mvm)) {
2348		if (vif->type == NL80211_IFTYPE_AP ||
2349		    vif->type == NL80211_IFTYPE_ADHOC) {
2350			queue = mvm->probe_queue;
2351		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2352			queue = mvm->p2p_dev_queue;
2353		} else {
2354			WARN(1, "Missing required TXQ for adding bcast STA\n");
2355			return -EINVAL;
2356		}
2357
2358		bsta->tfd_queue_msk |= BIT(queue);
2359
2360		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2361	}
2362
2363	if (vif->type == NL80211_IFTYPE_ADHOC)
2364		baddr = vif->bss_conf.bssid;
2365
2366	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2367		return -ENOSPC;
2368
2369	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2370					 mvmvif->id, mvmvif->color);
2371	if (ret)
2372		return ret;
2373
2374	/*
2375	 * For 22000 firmware and on we cannot add queue to a station unknown
2376	 * to firmware so enable queue here - after the station was added
2377	 */
2378	if (iwl_mvm_has_new_tx_api(mvm)) {
2379		queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, bsta->sta_id,
2380						IWL_MAX_TID_COUNT,
2381						wdg_timeout);
2382		if (queue < 0) {
2383			iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
2384			return queue;
2385		}
2386
2387		if (vif->type == NL80211_IFTYPE_AP ||
2388		    vif->type == NL80211_IFTYPE_ADHOC) {
2389			/* for queue management */
2390			mvm->probe_queue = queue;
2391			/* for use in TX */
2392			mvmvif->deflink.mgmt_queue = queue;
2393		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2394			mvm->p2p_dev_queue = queue;
2395		}
2396	} else if (vif->type == NL80211_IFTYPE_AP ||
2397		   vif->type == NL80211_IFTYPE_ADHOC) {
2398		/* set it for use in TX */
2399		mvmvif->deflink.mgmt_queue = mvm->probe_queue;
2400	}
2401
2402	return 0;
2403}
2404
2405void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2406				   struct ieee80211_vif *vif)
2407{
2408	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2409	u16 *queueptr, queue;
2410
2411	lockdep_assert_held(&mvm->mutex);
2412
2413	iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
2414			  mvmvif->deflink.bcast_sta.tfd_queue_msk);
2415
2416	switch (vif->type) {
2417	case NL80211_IFTYPE_AP:
2418	case NL80211_IFTYPE_ADHOC:
2419		queueptr = &mvm->probe_queue;
2420		break;
2421	case NL80211_IFTYPE_P2P_DEVICE:
2422		queueptr = &mvm->p2p_dev_queue;
2423		break;
2424	default:
2425		WARN(1, "Can't free bcast queue on vif type %d\n",
2426		     vif->type);
2427		return;
2428	}
2429
2430	queue = *queueptr;
2431	iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.bcast_sta.sta_id,
2432			    queueptr, IWL_MAX_TID_COUNT);
2433
2434	if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC)
2435		mvmvif->deflink.mgmt_queue = mvm->probe_queue;
2436
2437	if (iwl_mvm_has_new_tx_api(mvm))
2438		return;
2439
2440	WARN_ON(!(mvmvif->deflink.bcast_sta.tfd_queue_msk & BIT(queue)));
2441	mvmvif->deflink.bcast_sta.tfd_queue_msk &= ~BIT(queue);
2442}
2443
2444/* Send the FW a request to remove the station from it's internal data
2445 * structures, but DO NOT remove the entry from the local data structures. */
2446int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2447{
2448	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2449	int ret;
2450
2451	lockdep_assert_held(&mvm->mutex);
2452
2453	iwl_mvm_free_bcast_sta_queues(mvm, vif);
2454
2455	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.bcast_sta.sta_id);
2456	if (ret)
2457		IWL_WARN(mvm, "Failed sending remove station\n");
2458	return ret;
2459}
2460
2461int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2462{
2463	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2464
2465	lockdep_assert_held(&mvm->mutex);
2466
2467	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.bcast_sta, 0,
2468					ieee80211_vif_type_p2p(vif),
2469					IWL_STA_GENERAL_PURPOSE);
2470}
2471
2472/* Allocate a new station entry for the broadcast station to the given vif,
2473 * and send it to the FW.
2474 * Note that each P2P mac should have its own broadcast station.
2475 *
2476 * @mvm: the mvm component
2477 * @vif: the interface to which the broadcast station is added
2478 * @bsta: the broadcast station to add. */
2479int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2480{
2481	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2482	struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
2483	int ret;
2484
2485	lockdep_assert_held(&mvm->mutex);
2486
2487	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2488	if (ret)
2489		return ret;
2490
2491	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2492
2493	if (ret)
2494		iwl_mvm_dealloc_int_sta(mvm, bsta);
2495
2496	return ret;
2497}
2498
2499void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2500{
2501	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2502
2503	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.bcast_sta);
2504}
2505
2506/*
2507 * Send the FW a request to remove the station from it's internal data
2508 * structures, and in addition remove it from the local data structure.
2509 */
2510int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2511{
2512	int ret;
2513
2514	lockdep_assert_held(&mvm->mutex);
2515
2516	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2517
2518	iwl_mvm_dealloc_bcast_sta(mvm, vif);
2519
2520	return ret;
2521}
2522
2523/*
2524 * Allocate a new station entry for the multicast station to the given vif,
2525 * and send it to the FW.
2526 * Note that each AP/GO mac should have its own multicast station.
2527 *
2528 * @mvm: the mvm component
2529 * @vif: the interface to which the multicast station is added
2530 */
2531int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2532{
2533	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2534	struct iwl_mvm_int_sta *msta = &mvmvif->deflink.mcast_sta;
2535	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2536	const u8 *maddr = _maddr;
2537	struct iwl_trans_txq_scd_cfg cfg = {
2538		.fifo = vif->type == NL80211_IFTYPE_AP ?
2539			IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2540		.sta_id = msta->sta_id,
2541		.tid = 0,
2542		.aggregate = false,
2543		.frame_limit = IWL_FRAME_LIMIT,
2544	};
2545	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2546	int ret;
2547
2548	lockdep_assert_held(&mvm->mutex);
2549
2550	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2551		    vif->type != NL80211_IFTYPE_ADHOC))
2552		return -ENOTSUPP;
2553
2554	/*
2555	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2556	 * invalid, so make sure we use the queue we want.
2557	 * Note that this is done here as we want to avoid making DQA
2558	 * changes in mac80211 layer.
2559	 */
2560	if (vif->type == NL80211_IFTYPE_ADHOC)
2561		mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2562
2563	/*
2564	 * While in previous FWs we had to exclude cab queue from TFD queue
2565	 * mask, now it is needed as any other queue.
2566	 */
2567	if (!iwl_mvm_has_new_tx_api(mvm) &&
2568	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2569		iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
2570				   &cfg,
2571				   timeout);
2572		msta->tfd_queue_msk |= BIT(mvmvif->deflink.cab_queue);
2573	}
2574	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2575					 mvmvif->id, mvmvif->color);
2576	if (ret)
2577		goto err;
2578
2579	/*
2580	 * Enable cab queue after the ADD_STA command is sent.
2581	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
2582	 * command with unknown station id, and for FW that doesn't support
2583	 * station API since the cab queue is not included in the
2584	 * tfd_queue_mask.
2585	 */
2586	if (iwl_mvm_has_new_tx_api(mvm)) {
2587		int queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, msta->sta_id,
2588						    0, timeout);
2589		if (queue < 0) {
2590			ret = queue;
2591			goto err;
2592		}
2593		mvmvif->deflink.cab_queue = queue;
2594	} else if (!fw_has_api(&mvm->fw->ucode_capa,
2595			       IWL_UCODE_TLV_API_STA_TYPE))
2596		iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
2597				   &cfg,
2598				   timeout);
2599
2600	return 0;
2601err:
2602	iwl_mvm_dealloc_int_sta(mvm, msta);
2603	return ret;
2604}
2605
2606static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2607				    struct ieee80211_key_conf *keyconf,
2608				    bool mcast)
2609{
2610	union {
2611		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2612		struct iwl_mvm_add_sta_key_cmd cmd;
2613	} u = {};
2614	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2615				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2616	__le16 key_flags;
2617	int ret, size;
2618	u32 status;
2619
2620	/* This is a valid situation for GTK removal */
2621	if (sta_id == IWL_MVM_INVALID_STA)
2622		return 0;
2623
2624	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2625				 STA_KEY_FLG_KEYID_MSK);
2626	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2627	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2628
2629	if (mcast)
2630		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2631
2632	/*
2633	 * The fields assigned here are in the same location at the start
2634	 * of the command, so we can do this union trick.
2635	 */
2636	u.cmd.common.key_flags = key_flags;
2637	u.cmd.common.key_offset = keyconf->hw_key_idx;
2638	u.cmd.common.sta_id = sta_id;
2639
2640	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2641
2642	status = ADD_STA_SUCCESS;
2643	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2644					  &status);
2645
2646	switch (status) {
2647	case ADD_STA_SUCCESS:
2648		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2649		break;
2650	default:
2651		ret = -EIO;
2652		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2653		break;
2654	}
2655
2656	return ret;
2657}
2658
2659/*
2660 * Send the FW a request to remove the station from it's internal data
2661 * structures, and in addition remove it from the local data structure.
2662 */
2663int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2664{
2665	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2666	int ret;
2667
2668	lockdep_assert_held(&mvm->mutex);
2669
2670	iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id,
2671			  mvmvif->deflink.mcast_sta.tfd_queue_msk);
2672
2673	iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id,
2674			    &mvmvif->deflink.cab_queue, 0);
2675
2676	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.mcast_sta.sta_id);
2677	if (ret)
2678		IWL_WARN(mvm, "Failed sending remove station\n");
2679
2680	return ret;
2681}
2682
2683static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2684{
2685	struct iwl_mvm_delba_data notif = {
2686		.baid = baid,
2687	};
2688
2689	iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
2690					&notif, sizeof(notif));
2691};
2692
2693static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2694				 struct iwl_mvm_baid_data *data)
2695{
2696	int i;
2697
2698	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2699
2700	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2701		int j;
2702		struct iwl_mvm_reorder_buffer *reorder_buf =
2703			&data->reorder_buf[i];
2704		struct iwl_mvm_reorder_buf_entry *entries =
2705			&data->entries[i * data->entries_per_queue];
2706
2707		spin_lock_bh(&reorder_buf->lock);
2708		if (likely(!reorder_buf->num_stored)) {
2709			spin_unlock_bh(&reorder_buf->lock);
2710			continue;
2711		}
2712
2713		/*
2714		 * This shouldn't happen in regular DELBA since the internal
2715		 * delBA notification should trigger a release of all frames in
2716		 * the reorder buffer.
2717		 */
2718		WARN_ON(1);
2719
2720		for (j = 0; j < reorder_buf->buf_size; j++)
2721			__skb_queue_purge(&entries[j].e.frames);
2722		/*
2723		 * Prevent timer re-arm. This prevents a very far fetched case
2724		 * where we timed out on the notification. There may be prior
2725		 * RX frames pending in the RX queue before the notification
2726		 * that might get processed between now and the actual deletion
2727		 * and we would re-arm the timer although we are deleting the
2728		 * reorder buffer.
2729		 */
2730		reorder_buf->removed = true;
2731		spin_unlock_bh(&reorder_buf->lock);
2732		del_timer_sync(&reorder_buf->reorder_timer);
2733	}
2734}
2735
2736static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2737					struct iwl_mvm_baid_data *data,
2738					u16 ssn, u16 buf_size)
2739{
2740	int i;
2741
2742	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2743		struct iwl_mvm_reorder_buffer *reorder_buf =
2744			&data->reorder_buf[i];
2745		struct iwl_mvm_reorder_buf_entry *entries =
2746			&data->entries[i * data->entries_per_queue];
2747		int j;
2748
2749		reorder_buf->num_stored = 0;
2750		reorder_buf->head_sn = ssn;
2751		reorder_buf->buf_size = buf_size;
2752		/* rx reorder timer */
2753		timer_setup(&reorder_buf->reorder_timer,
2754			    iwl_mvm_reorder_timer_expired, 0);
2755		spin_lock_init(&reorder_buf->lock);
2756		reorder_buf->mvm = mvm;
2757		reorder_buf->queue = i;
2758		reorder_buf->valid = false;
2759		for (j = 0; j < reorder_buf->buf_size; j++)
2760			__skb_queue_head_init(&entries[j].e.frames);
2761	}
2762}
2763
2764static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
2765				  struct ieee80211_sta *sta,
2766				  bool start, int tid, u16 ssn,
2767				  u16 buf_size)
2768{
2769	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2770	struct iwl_mvm_add_sta_cmd cmd = {
2771		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
2772		.sta_id = mvm_sta->deflink.sta_id,
2773		.add_modify = STA_MODE_MODIFY,
2774	};
2775	u32 status;
2776	int ret;
2777
2778	if (start) {
2779		cmd.add_immediate_ba_tid = tid;
2780		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2781		cmd.rx_ba_window = cpu_to_le16(buf_size);
2782		cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
2783	} else {
2784		cmd.remove_immediate_ba_tid = tid;
2785		cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
2786	}
2787
2788	status = ADD_STA_SUCCESS;
2789	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2790					  iwl_mvm_add_sta_cmd_size(mvm),
2791					  &cmd, &status);
2792	if (ret)
2793		return ret;
2794
2795	switch (status & IWL_ADD_STA_STATUS_MASK) {
2796	case ADD_STA_SUCCESS:
2797		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2798			     start ? "start" : "stopp");
2799		if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
2800			    !(status & IWL_ADD_STA_BAID_VALID_MASK)))
2801			return -EINVAL;
2802		return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
2803	case ADD_STA_IMMEDIATE_BA_FAILURE:
2804		IWL_WARN(mvm, "RX BA Session refused by fw\n");
2805		return -ENOSPC;
2806	default:
2807		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2808			start ? "start" : "stopp", status);
2809		return -EIO;
2810	}
2811}
2812
2813static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
2814				  struct ieee80211_sta *sta,
2815				  bool start, int tid, u16 ssn,
2816				  u16 buf_size, int baid)
2817{
2818	struct iwl_rx_baid_cfg_cmd cmd = {
2819		.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
2820				  cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
2821	};
2822	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
2823	int ret;
2824
2825	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
2826
2827	if (start) {
2828		cmd.alloc.sta_id_mask =
2829			cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
2830		cmd.alloc.tid = tid;
2831		cmd.alloc.ssn = cpu_to_le16(ssn);
2832		cmd.alloc.win_size = cpu_to_le16(buf_size);
2833		baid = -EIO;
2834	} else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
2835		cmd.remove_v1.baid = cpu_to_le32(baid);
2836		BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
2837	} else {
2838		cmd.remove.sta_id_mask =
2839			cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
2840		cmd.remove.tid = cpu_to_le32(tid);
2841	}
2842
2843	ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
2844					  &cmd, &baid);
2845	if (ret)
2846		return ret;
2847
2848	if (!start) {
2849		/* ignore firmware baid on remove */
2850		baid = 0;
2851	}
2852
2853	IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2854		     start ? "start" : "stopp");
2855
2856	if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
2857		return -EINVAL;
2858
2859	return baid;
2860}
2861
2862static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2863			      bool start, int tid, u16 ssn, u16 buf_size,
2864			      int baid)
2865{
2866	if (fw_has_capa(&mvm->fw->ucode_capa,
2867			IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
2868		return iwl_mvm_fw_baid_op_cmd(mvm, sta, start,
2869					      tid, ssn, buf_size, baid);
2870
2871	return iwl_mvm_fw_baid_op_sta(mvm, sta, start,
2872				      tid, ssn, buf_size);
2873}
2874
2875int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2876		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2877{
2878	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2879	struct iwl_mvm_baid_data *baid_data = NULL;
2880	int ret, baid;
2881	u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
2882							       IWL_MAX_BAID_OLD;
2883
2884	lockdep_assert_held(&mvm->mutex);
2885
2886	if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
2887		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2888		return -ENOSPC;
2889	}
2890
2891	if (iwl_mvm_has_new_rx_api(mvm) && start) {
2892		u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2893
2894		/* sparse doesn't like the __align() so don't check */
2895#ifndef __CHECKER__
2896		/*
2897		 * The division below will be OK if either the cache line size
2898		 * can be divided by the entry size (ALIGN will round up) or if
2899		 * if the entry size can be divided by the cache line size, in
2900		 * which case the ALIGN() will do nothing.
2901		 */
2902		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2903			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2904#endif
2905
2906		/*
2907		 * Upward align the reorder buffer size to fill an entire cache
2908		 * line for each queue, to avoid sharing cache lines between
2909		 * different queues.
2910		 */
2911		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2912
2913		/*
2914		 * Allocate here so if allocation fails we can bail out early
2915		 * before starting the BA session in the firmware
2916		 */
2917		baid_data = kzalloc(sizeof(*baid_data) +
2918				    mvm->trans->num_rx_queues *
2919				    reorder_buf_size,
2920				    GFP_KERNEL);
2921		if (!baid_data)
2922			return -ENOMEM;
2923
2924		/*
2925		 * This division is why we need the above BUILD_BUG_ON(),
2926		 * if that doesn't hold then this will not be right.
2927		 */
2928		baid_data->entries_per_queue =
2929			reorder_buf_size / sizeof(baid_data->entries[0]);
2930	}
2931
2932	if (iwl_mvm_has_new_rx_api(mvm) && !start) {
2933		baid = mvm_sta->tid_to_baid[tid];
2934	} else {
2935		/* we don't really need it in this case */
2936		baid = -1;
2937	}
2938
2939	/* Don't send command to remove (start=0) BAID during restart */
2940	if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2941		baid = iwl_mvm_fw_baid_op(mvm, sta, start, tid, ssn, buf_size,
2942					  baid);
2943
2944	if (baid < 0) {
2945		ret = baid;
2946		goto out_free;
2947	}
2948
2949	if (start) {
2950		mvm->rx_ba_sessions++;
2951
2952		if (!iwl_mvm_has_new_rx_api(mvm))
2953			return 0;
2954
2955		baid_data->baid = baid;
2956		baid_data->timeout = timeout;
2957		baid_data->last_rx = jiffies;
2958		baid_data->rcu_ptr = &mvm->baid_map[baid];
2959		timer_setup(&baid_data->session_timer,
2960			    iwl_mvm_rx_agg_session_expired, 0);
2961		baid_data->mvm = mvm;
2962		baid_data->tid = tid;
2963		baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
2964
2965		mvm_sta->tid_to_baid[tid] = baid;
2966		if (timeout)
2967			mod_timer(&baid_data->session_timer,
2968				  TU_TO_EXP_TIME(timeout * 2));
2969
2970		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2971		/*
2972		 * protect the BA data with RCU to cover a case where our
2973		 * internal RX sync mechanism will timeout (not that it's
2974		 * supposed to happen) and we will free the session data while
2975		 * RX is being processed in parallel
2976		 */
2977		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2978			     mvm_sta->deflink.sta_id, tid, baid);
2979		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2980		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2981	} else  {
2982		baid = mvm_sta->tid_to_baid[tid];
2983
2984		if (mvm->rx_ba_sessions > 0)
2985			/* check that restart flow didn't zero the counter */
2986			mvm->rx_ba_sessions--;
2987		if (!iwl_mvm_has_new_rx_api(mvm))
2988			return 0;
2989
2990		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2991			return -EINVAL;
2992
2993		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2994		if (WARN_ON(!baid_data))
2995			return -EINVAL;
2996
2997		/* synchronize all rx queues so we can safely delete */
2998		iwl_mvm_free_reorder(mvm, baid_data);
2999		timer_shutdown_sync(&baid_data->session_timer);
3000		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
3001		kfree_rcu(baid_data, rcu_head);
3002		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
3003
3004		/*
3005		 * After we've deleted it, do another queue sync
3006		 * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
3007		 * running it won't find a new session in the old
3008		 * BAID. It can find the NULL pointer for the BAID,
3009		 * but we must not have it find a different session.
3010		 */
3011		iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
3012						true, NULL, 0);
3013	}
3014	return 0;
3015
3016out_free:
3017	kfree(baid_data);
3018	return ret;
3019}
3020
3021int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3022		       int tid, u8 queue, bool start)
3023{
3024	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3025	struct iwl_mvm_add_sta_cmd cmd = {};
3026	int ret;
3027	u32 status;
3028
3029	lockdep_assert_held(&mvm->mutex);
3030
3031	if (start) {
3032		mvm_sta->tfd_queue_msk |= BIT(queue);
3033		mvm_sta->tid_disable_agg &= ~BIT(tid);
3034	} else {
3035		/* In DQA-mode the queue isn't removed on agg termination */
3036		mvm_sta->tid_disable_agg |= BIT(tid);
3037	}
3038
3039	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
3040	cmd.sta_id = mvm_sta->deflink.sta_id;
3041	cmd.add_modify = STA_MODE_MODIFY;
3042	if (!iwl_mvm_has_new_tx_api(mvm))
3043		cmd.modify_mask = STA_MODIFY_QUEUES;
3044	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
3045	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
3046	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
3047
3048	status = ADD_STA_SUCCESS;
3049	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
3050					  iwl_mvm_add_sta_cmd_size(mvm),
3051					  &cmd, &status);
3052	if (ret)
3053		return ret;
3054
3055	switch (status & IWL_ADD_STA_STATUS_MASK) {
3056	case ADD_STA_SUCCESS:
3057		break;
3058	default:
3059		ret = -EIO;
3060		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
3061			start ? "start" : "stopp", status);
3062		break;
3063	}
3064
3065	return ret;
3066}
3067
3068const u8 tid_to_mac80211_ac[] = {
3069	IEEE80211_AC_BE,
3070	IEEE80211_AC_BK,
3071	IEEE80211_AC_BK,
3072	IEEE80211_AC_BE,
3073	IEEE80211_AC_VI,
3074	IEEE80211_AC_VI,
3075	IEEE80211_AC_VO,
3076	IEEE80211_AC_VO,
3077	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
3078};
3079
3080static const u8 tid_to_ucode_ac[] = {
3081	AC_BE,
3082	AC_BK,
3083	AC_BK,
3084	AC_BE,
3085	AC_VI,
3086	AC_VI,
3087	AC_VO,
3088	AC_VO,
3089};
3090
3091int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3092			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
3093{
3094	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3095	struct iwl_mvm_tid_data *tid_data;
3096	u16 normalized_ssn;
3097	u16 txq_id;
3098	int ret;
3099
3100	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
3101		return -EINVAL;
3102
3103	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
3104	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
3105		IWL_ERR(mvm,
3106			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
3107			mvmsta->tid_data[tid].state);
3108		return -ENXIO;
3109	}
3110
3111	lockdep_assert_held(&mvm->mutex);
3112
3113	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
3114	    iwl_mvm_has_new_tx_api(mvm)) {
3115		u8 ac = tid_to_mac80211_ac[tid];
3116
3117		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
3118		if (ret)
3119			return ret;
3120	}
3121
3122	spin_lock_bh(&mvmsta->lock);
3123
3124	/*
3125	 * Note the possible cases:
3126	 *  1. An enabled TXQ - TXQ needs to become agg'ed
3127	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
3128	 *	it as reserved
3129	 */
3130	txq_id = mvmsta->tid_data[tid].txq_id;
3131	if (txq_id == IWL_MVM_INVALID_QUEUE) {
3132		ret = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
3133					      IWL_MVM_DQA_MIN_DATA_QUEUE,
3134					      IWL_MVM_DQA_MAX_DATA_QUEUE);
3135		if (ret < 0) {
3136			IWL_ERR(mvm, "Failed to allocate agg queue\n");
3137			goto out;
3138		}
3139
3140		txq_id = ret;
3141
3142		/* TXQ hasn't yet been enabled, so mark it only as reserved */
3143		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
3144	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
3145		ret = -ENXIO;
3146		IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
3147			tid, IWL_MAX_HW_QUEUES - 1);
3148		goto out;
3149
3150	} else if (unlikely(mvm->queue_info[txq_id].status ==
3151			    IWL_MVM_QUEUE_SHARED)) {
3152		ret = -ENXIO;
3153		IWL_DEBUG_TX_QUEUES(mvm,
3154				    "Can't start tid %d agg on shared queue!\n",
3155				    tid);
3156		goto out;
3157	}
3158
3159	IWL_DEBUG_TX_QUEUES(mvm,
3160			    "AGG for tid %d will be on queue #%d\n",
3161			    tid, txq_id);
3162
3163	tid_data = &mvmsta->tid_data[tid];
3164	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3165	tid_data->txq_id = txq_id;
3166	*ssn = tid_data->ssn;
3167
3168	IWL_DEBUG_TX_QUEUES(mvm,
3169			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
3170			    mvmsta->deflink.sta_id, tid, txq_id,
3171			    tid_data->ssn,
3172			    tid_data->next_reclaimed);
3173
3174	/*
3175	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
3176	 * to align the wrap around of ssn so we compare relevant values.
3177	 */
3178	normalized_ssn = tid_data->ssn;
3179	if (mvm->trans->trans_cfg->gen2)
3180		normalized_ssn &= 0xff;
3181
3182	if (normalized_ssn == tid_data->next_reclaimed) {
3183		tid_data->state = IWL_AGG_STARTING;
3184		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
3185	} else {
3186		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
3187		ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
3188	}
3189
3190out:
3191	spin_unlock_bh(&mvmsta->lock);
3192
3193	return ret;
3194}
3195
3196int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3197			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
3198			    bool amsdu)
3199{
3200	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3201	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3202	unsigned int wdg_timeout =
3203		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
3204	int queue, ret;
3205	bool alloc_queue = true;
3206	enum iwl_mvm_queue_status queue_status;
3207	u16 ssn;
3208
3209	struct iwl_trans_txq_scd_cfg cfg = {
3210		.sta_id = mvmsta->deflink.sta_id,
3211		.tid = tid,
3212		.frame_limit = buf_size,
3213		.aggregate = true,
3214	};
3215
3216	/*
3217	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
3218	 * manager, so this function should never be called in this case.
3219	 */
3220	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
3221		return -EINVAL;
3222
3223	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
3224		     != IWL_MAX_TID_COUNT);
3225
3226	spin_lock_bh(&mvmsta->lock);
3227	ssn = tid_data->ssn;
3228	queue = tid_data->txq_id;
3229	tid_data->state = IWL_AGG_ON;
3230	mvmsta->agg_tids |= BIT(tid);
3231	tid_data->ssn = 0xffff;
3232	tid_data->amsdu_in_ampdu_allowed = amsdu;
3233	spin_unlock_bh(&mvmsta->lock);
3234
3235	if (iwl_mvm_has_new_tx_api(mvm)) {
3236		/*
3237		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
3238		 * would have failed, so if we are here there is no need to
3239		 * allocate a queue.
3240		 * However, if aggregation size is different than the default
3241		 * size, the scheduler should be reconfigured.
3242		 * We cannot do this with the new TX API, so return unsupported
3243		 * for now, until it will be offloaded to firmware..
3244		 * Note that if SCD default value changes - this condition
3245		 * should be updated as well.
3246		 */
3247		if (buf_size < IWL_FRAME_LIMIT)
3248			return -ENOTSUPP;
3249
3250		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3251		if (ret)
3252			return -EIO;
3253		goto out;
3254	}
3255
3256	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
3257
3258	queue_status = mvm->queue_info[queue].status;
3259
3260	/* Maybe there is no need to even alloc a queue... */
3261	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
3262		alloc_queue = false;
3263
3264	/*
3265	 * Only reconfig the SCD for the queue if the window size has
3266	 * changed from current (become smaller)
3267	 */
3268	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
3269		/*
3270		 * If reconfiguring an existing queue, it first must be
3271		 * drained
3272		 */
3273		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
3274						     BIT(queue));
3275		if (ret) {
3276			IWL_ERR(mvm,
3277				"Error draining queue before reconfig\n");
3278			return ret;
3279		}
3280
3281		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
3282					   mvmsta->deflink.sta_id, tid,
3283					   buf_size, ssn);
3284		if (ret) {
3285			IWL_ERR(mvm,
3286				"Error reconfiguring TXQ #%d\n", queue);
3287			return ret;
3288		}
3289	}
3290
3291	if (alloc_queue)
3292		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
3293				   &cfg, wdg_timeout);
3294
3295	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
3296	if (queue_status != IWL_MVM_QUEUE_SHARED) {
3297		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3298		if (ret)
3299			return -EIO;
3300	}
3301
3302	/* No need to mark as reserved */
3303	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
3304
3305out:
3306	/*
3307	 * Even though in theory the peer could have different
3308	 * aggregation reorder buffer sizes for different sessions,
3309	 * our ucode doesn't allow for that and has a global limit
3310	 * for each station. Therefore, use the minimum of all the
3311	 * aggregation sessions and our default value.
3312	 */
3313	mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
3314		min(mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize,
3315		    buf_size);
3316	mvmsta->deflink.lq_sta.rs_drv.lq.agg_frame_cnt_limit =
3317		mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize;
3318
3319	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3320		     sta->addr, tid);
3321
3322	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->deflink.lq_sta.rs_drv.lq);
3323}
3324
3325static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3326					struct iwl_mvm_sta *mvmsta,
3327					struct iwl_mvm_tid_data *tid_data)
3328{
3329	u16 txq_id = tid_data->txq_id;
3330
3331	lockdep_assert_held(&mvm->mutex);
3332
3333	if (iwl_mvm_has_new_tx_api(mvm))
3334		return;
3335
3336	/*
3337	 * The TXQ is marked as reserved only if no traffic came through yet
3338	 * This means no traffic has been sent on this TID (agg'd or not), so
3339	 * we no longer have use for the queue. Since it hasn't even been
3340	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
3341	 * free.
3342	 */
3343	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3344		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3345		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3346	}
3347}
3348
3349int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3350			    struct ieee80211_sta *sta, u16 tid)
3351{
3352	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3353	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3354	u16 txq_id;
3355	int err;
3356
3357	/*
3358	 * If mac80211 is cleaning its state, then say that we finished since
3359	 * our state has been cleared anyway.
3360	 */
3361	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3362		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3363		return 0;
3364	}
3365
3366	spin_lock_bh(&mvmsta->lock);
3367
3368	txq_id = tid_data->txq_id;
3369
3370	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3371			    mvmsta->deflink.sta_id, tid, txq_id,
3372			    tid_data->state);
3373
3374	mvmsta->agg_tids &= ~BIT(tid);
3375
3376	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3377
3378	switch (tid_data->state) {
3379	case IWL_AGG_ON:
3380		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3381
3382		IWL_DEBUG_TX_QUEUES(mvm,
3383				    "ssn = %d, next_recl = %d\n",
3384				    tid_data->ssn, tid_data->next_reclaimed);
3385
3386		tid_data->ssn = 0xffff;
3387		tid_data->state = IWL_AGG_OFF;
3388		spin_unlock_bh(&mvmsta->lock);
3389
3390		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3391
3392		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3393		return 0;
3394	case IWL_AGG_STARTING:
3395	case IWL_EMPTYING_HW_QUEUE_ADDBA:
3396		/*
3397		 * The agg session has been stopped before it was set up. This
3398		 * can happen when the AddBA timer times out for example.
3399		 */
3400
3401		/* No barriers since we are under mutex */
3402		lockdep_assert_held(&mvm->mutex);
3403
3404		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3405		tid_data->state = IWL_AGG_OFF;
3406		err = 0;
3407		break;
3408	default:
3409		IWL_ERR(mvm,
3410			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3411			mvmsta->deflink.sta_id, tid, tid_data->state);
3412		IWL_ERR(mvm,
3413			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
3414		err = -EINVAL;
3415	}
3416
3417	spin_unlock_bh(&mvmsta->lock);
3418
3419	return err;
3420}
3421
3422int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3423			    struct ieee80211_sta *sta, u16 tid)
3424{
3425	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3426	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3427	u16 txq_id;
3428	enum iwl_mvm_agg_state old_state;
3429
3430	/*
3431	 * First set the agg state to OFF to avoid calling
3432	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3433	 */
3434	spin_lock_bh(&mvmsta->lock);
3435	txq_id = tid_data->txq_id;
3436	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3437			    mvmsta->deflink.sta_id, tid, txq_id,
3438			    tid_data->state);
3439	old_state = tid_data->state;
3440	tid_data->state = IWL_AGG_OFF;
3441	mvmsta->agg_tids &= ~BIT(tid);
3442	spin_unlock_bh(&mvmsta->lock);
3443
3444	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3445
3446	if (old_state >= IWL_AGG_ON) {
3447		iwl_mvm_drain_sta(mvm, mvmsta, true);
3448
3449		if (iwl_mvm_has_new_tx_api(mvm)) {
3450			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->deflink.sta_id,
3451						   BIT(tid)))
3452				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3453			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3454		} else {
3455			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
3456				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3457			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3458		}
3459
3460		iwl_mvm_drain_sta(mvm, mvmsta, false);
3461
3462		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3463	}
3464
3465	return 0;
3466}
3467
3468static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3469{
3470	int i, max = -1, max_offs = -1;
3471
3472	lockdep_assert_held(&mvm->mutex);
3473
3474	/* Pick the unused key offset with the highest 'deleted'
3475	 * counter. Every time a key is deleted, all the counters
3476	 * are incremented and the one that was just deleted is
3477	 * reset to zero. Thus, the highest counter is the one
3478	 * that was deleted longest ago. Pick that one.
3479	 */
3480	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3481		if (test_bit(i, mvm->fw_key_table))
3482			continue;
3483		if (mvm->fw_key_deleted[i] > max) {
3484			max = mvm->fw_key_deleted[i];
3485			max_offs = i;
3486		}
3487	}
3488
3489	if (max_offs < 0)
3490		return STA_KEY_IDX_INVALID;
3491
3492	return max_offs;
3493}
3494
3495static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3496					       struct ieee80211_vif *vif,
3497					       struct ieee80211_sta *sta)
3498{
3499	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3500
3501	if (sta)
3502		return iwl_mvm_sta_from_mac80211(sta);
3503
3504	/*
3505	 * The device expects GTKs for station interfaces to be
3506	 * installed as GTKs for the AP station. If we have no
3507	 * station ID, then use AP's station ID.
3508	 */
3509	if (vif->type == NL80211_IFTYPE_STATION &&
3510	    mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) {
3511		u8 sta_id = mvmvif->deflink.ap_sta_id;
3512
3513		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3514					    lockdep_is_held(&mvm->mutex));
3515
3516		/*
3517		 * It is possible that the 'sta' parameter is NULL,
3518		 * for example when a GTK is removed - the sta_id will then
3519		 * be the AP ID, and no station was passed by mac80211.
3520		 */
3521		if (IS_ERR_OR_NULL(sta))
3522			return NULL;
3523
3524		return iwl_mvm_sta_from_mac80211(sta);
3525	}
3526
3527	return NULL;
3528}
3529
3530static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
3531{
3532	int i;
3533
3534	for (i = len - 1; i >= 0; i--) {
3535		if (pn1[i] > pn2[i])
3536			return 1;
3537		if (pn1[i] < pn2[i])
3538			return -1;
3539	}
3540
3541	return 0;
3542}
3543
3544static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3545				u32 sta_id,
3546				struct ieee80211_key_conf *key, bool mcast,
3547				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3548				u8 key_offset, bool mfp)
3549{
3550	union {
3551		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3552		struct iwl_mvm_add_sta_key_cmd cmd;
3553	} u = {};
3554	__le16 key_flags;
3555	int ret;
3556	u32 status;
3557	u16 keyidx;
3558	u64 pn = 0;
3559	int i, size;
3560	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3561				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3562	int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
3563					    new_api ? 2 : 1);
3564
3565	if (sta_id == IWL_MVM_INVALID_STA)
3566		return -EINVAL;
3567
3568	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3569		 STA_KEY_FLG_KEYID_MSK;
3570	key_flags = cpu_to_le16(keyidx);
3571	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3572
3573	switch (key->cipher) {
3574	case WLAN_CIPHER_SUITE_TKIP:
3575		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3576		if (api_ver >= 2) {
3577			memcpy((void *)&u.cmd.tx_mic_key,
3578			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3579			       IWL_MIC_KEY_SIZE);
3580
3581			memcpy((void *)&u.cmd.rx_mic_key,
3582			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3583			       IWL_MIC_KEY_SIZE);
3584			pn = atomic64_read(&key->tx_pn);
3585
3586		} else {
3587			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3588			for (i = 0; i < 5; i++)
3589				u.cmd_v1.tkip_rx_ttak[i] =
3590					cpu_to_le16(tkip_p1k[i]);
3591		}
3592		memcpy(u.cmd.common.key, key->key, key->keylen);
3593		break;
3594	case WLAN_CIPHER_SUITE_CCMP:
3595		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3596		memcpy(u.cmd.common.key, key->key, key->keylen);
3597		if (api_ver >= 2)
3598			pn = atomic64_read(&key->tx_pn);
3599		break;
3600	case WLAN_CIPHER_SUITE_WEP104:
3601		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
3602		fallthrough;
3603	case WLAN_CIPHER_SUITE_WEP40:
3604		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
3605		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3606		break;
3607	case WLAN_CIPHER_SUITE_GCMP_256:
3608		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3609		fallthrough;
3610	case WLAN_CIPHER_SUITE_GCMP:
3611		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3612		memcpy(u.cmd.common.key, key->key, key->keylen);
3613		if (api_ver >= 2)
3614			pn = atomic64_read(&key->tx_pn);
3615		break;
3616	default:
3617		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3618		memcpy(u.cmd.common.key, key->key, key->keylen);
3619	}
3620
3621	if (mcast)
3622		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3623	if (mfp)
3624		key_flags |= cpu_to_le16(STA_KEY_MFP);
3625
3626	u.cmd.common.key_offset = key_offset;
3627	u.cmd.common.key_flags = key_flags;
3628	u.cmd.common.sta_id = sta_id;
3629
3630	if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
3631		i = 0;
3632	else
3633		i = -1;
3634
3635	for (; i < IEEE80211_NUM_TIDS; i++) {
3636		struct ieee80211_key_seq seq = {};
3637		u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
3638		int rx_pn_len = 8;
3639		/* there's a hole at 2/3 in FW format depending on version */
3640		int hole = api_ver >= 3 ? 0 : 2;
3641
3642		ieee80211_get_key_rx_seq(key, i, &seq);
3643
3644		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
3645			rx_pn[0] = seq.tkip.iv16;
3646			rx_pn[1] = seq.tkip.iv16 >> 8;
3647			rx_pn[2 + hole] = seq.tkip.iv32;
3648			rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
3649			rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
3650			rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
3651		} else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
3652			rx_pn = seq.hw.seq;
3653			rx_pn_len = seq.hw.seq_len;
3654		} else {
3655			rx_pn[0] = seq.ccmp.pn[0];
3656			rx_pn[1] = seq.ccmp.pn[1];
3657			rx_pn[2 + hole] = seq.ccmp.pn[2];
3658			rx_pn[3 + hole] = seq.ccmp.pn[3];
3659			rx_pn[4 + hole] = seq.ccmp.pn[4];
3660			rx_pn[5 + hole] = seq.ccmp.pn[5];
3661		}
3662
3663		if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
3664				   rx_pn_len) > 0)
3665			memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
3666			       rx_pn_len);
3667	}
3668
3669	if (api_ver >= 2) {
3670		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3671		size = sizeof(u.cmd);
3672	} else {
3673		size = sizeof(u.cmd_v1);
3674	}
3675
3676	status = ADD_STA_SUCCESS;
3677	if (cmd_flags & CMD_ASYNC)
3678		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3679					   &u.cmd);
3680	else
3681		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3682						  &u.cmd, &status);
3683
3684	switch (status) {
3685	case ADD_STA_SUCCESS:
3686		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3687		break;
3688	default:
3689		ret = -EIO;
3690		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3691		break;
3692	}
3693
3694	return ret;
3695}
3696
3697static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3698				 struct ieee80211_key_conf *keyconf,
3699				 u8 sta_id, bool remove_key)
3700{
3701	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3702
3703	/* verify the key details match the required command's expectations */
3704	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3705		    (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
3706		     keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
3707		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3708		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3709		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3710		return -EINVAL;
3711
3712	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3713		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3714		return -EINVAL;
3715
3716	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3717	igtk_cmd.sta_id = cpu_to_le32(sta_id);
3718
3719	if (remove_key) {
3720		/* This is a valid situation for IGTK */
3721		if (sta_id == IWL_MVM_INVALID_STA)
3722			return 0;
3723
3724		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3725	} else {
3726		struct ieee80211_key_seq seq;
3727		const u8 *pn;
3728
3729		switch (keyconf->cipher) {
3730		case WLAN_CIPHER_SUITE_AES_CMAC:
3731			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3732			break;
3733		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3734		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3735			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3736			break;
3737		default:
3738			return -EINVAL;
3739		}
3740
3741		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3742		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3743			igtk_cmd.ctrl_flags |=
3744				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3745		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3746		pn = seq.aes_cmac.pn;
3747		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3748						       ((u64) pn[4] << 8) |
3749						       ((u64) pn[3] << 16) |
3750						       ((u64) pn[2] << 24) |
3751						       ((u64) pn[1] << 32) |
3752						       ((u64) pn[0] << 40));
3753	}
3754
3755	IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
3756		       remove_key ? "removing" : "installing",
3757		       keyconf->keyidx >= 6 ? "B" : "",
3758		       keyconf->keyidx, igtk_cmd.sta_id);
3759
3760	if (!iwl_mvm_has_new_rx_api(mvm)) {
3761		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3762			.ctrl_flags = igtk_cmd.ctrl_flags,
3763			.key_id = igtk_cmd.key_id,
3764			.sta_id = igtk_cmd.sta_id,
3765			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
3766		};
3767
3768		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3769		       ARRAY_SIZE(igtk_cmd_v1.igtk));
3770		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3771					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3772	}
3773	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3774				    sizeof(igtk_cmd), &igtk_cmd);
3775}
3776
3777
3778static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3779				       struct ieee80211_vif *vif,
3780				       struct ieee80211_sta *sta)
3781{
3782	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3783
3784	if (sta)
3785		return sta->addr;
3786
3787	if (vif->type == NL80211_IFTYPE_STATION &&
3788	    mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) {
3789		u8 sta_id = mvmvif->deflink.ap_sta_id;
3790		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3791						lockdep_is_held(&mvm->mutex));
3792		if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
3793			return NULL;
3794
3795		return sta->addr;
3796	}
3797
3798
3799	return NULL;
3800}
3801
3802static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3803				 struct ieee80211_vif *vif,
3804				 struct ieee80211_sta *sta,
3805				 struct ieee80211_key_conf *keyconf,
3806				 u8 key_offset,
3807				 bool mcast)
3808{
3809	const u8 *addr;
3810	struct ieee80211_key_seq seq;
3811	u16 p1k[5];
3812	u32 sta_id;
3813	bool mfp = false;
3814
3815	if (sta) {
3816		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3817
3818		sta_id = mvm_sta->deflink.sta_id;
3819		mfp = sta->mfp;
3820	} else if (vif->type == NL80211_IFTYPE_AP &&
3821		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3822		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3823
3824		sta_id = mvmvif->deflink.mcast_sta.sta_id;
3825	} else {
3826		IWL_ERR(mvm, "Failed to find station id\n");
3827		return -EINVAL;
3828	}
3829
3830	if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
3831		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3832		if (!addr) {
3833			IWL_ERR(mvm, "Failed to find mac address\n");
3834			return -EINVAL;
3835		}
3836
3837		/* get phase 1 key from mac80211 */
3838		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3839		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3840
3841		return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3842					    seq.tkip.iv32, p1k, 0, key_offset,
3843					    mfp);
3844	}
3845
3846	return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3847				    0, NULL, 0, key_offset, mfp);
3848}
3849
3850int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3851			struct ieee80211_vif *vif,
3852			struct ieee80211_sta *sta,
3853			struct ieee80211_key_conf *keyconf,
3854			u8 key_offset)
3855{
3856	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3857	struct iwl_mvm_sta *mvm_sta;
3858	u8 sta_id = IWL_MVM_INVALID_STA;
3859	int ret;
3860	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3861
3862	lockdep_assert_held(&mvm->mutex);
3863
3864	if (vif->type != NL80211_IFTYPE_AP ||
3865	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3866		/* Get the station id from the mvm local station table */
3867		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3868		if (!mvm_sta) {
3869			IWL_ERR(mvm, "Failed to find station\n");
3870			return -EINVAL;
3871		}
3872		sta_id = mvm_sta->deflink.sta_id;
3873
3874		/*
3875		 * It is possible that the 'sta' parameter is NULL, and thus
3876		 * there is a need to retrieve the sta from the local station
3877		 * table.
3878		 */
3879		if (!sta) {
3880			sta = rcu_dereference_protected(
3881				mvm->fw_id_to_mac_id[sta_id],
3882				lockdep_is_held(&mvm->mutex));
3883			if (IS_ERR_OR_NULL(sta)) {
3884				IWL_ERR(mvm, "Invalid station id\n");
3885				return -EINVAL;
3886			}
3887		}
3888
3889		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3890			return -EINVAL;
3891	} else {
3892		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3893
3894		sta_id = mvmvif->deflink.mcast_sta.sta_id;
3895	}
3896
3897	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3898	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3899	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3900		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3901		goto end;
3902	}
3903
3904	/* If the key_offset is not pre-assigned, we need to find a
3905	 * new offset to use.  In normal cases, the offset is not
3906	 * pre-assigned, but during HW_RESTART we want to reuse the
3907	 * same indices, so we pass them when this function is called.
3908	 *
3909	 * In D3 entry, we need to hardcoded the indices (because the
3910	 * firmware hardcodes the PTK offset to 0).  In this case, we
3911	 * need to make sure we don't overwrite the hw_key_idx in the
3912	 * keyconf structure, because otherwise we cannot configure
3913	 * the original ones back when resuming.
3914	 */
3915	if (key_offset == STA_KEY_IDX_INVALID) {
3916		key_offset  = iwl_mvm_set_fw_key_idx(mvm);
3917		if (key_offset == STA_KEY_IDX_INVALID)
3918			return -ENOSPC;
3919		keyconf->hw_key_idx = key_offset;
3920	}
3921
3922	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3923	if (ret)
3924		goto end;
3925
3926	/*
3927	 * For WEP, the same key is used for multicast and unicast. Upload it
3928	 * again, using the same key offset, and now pointing the other one
3929	 * to the same key slot (offset).
3930	 * If this fails, remove the original as well.
3931	 */
3932	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3933	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3934	    sta) {
3935		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3936					    key_offset, !mcast);
3937		if (ret) {
3938			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3939			goto end;
3940		}
3941	}
3942
3943	__set_bit(key_offset, mvm->fw_key_table);
3944
3945end:
3946	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3947		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3948		      sta ? sta->addr : zero_addr, ret);
3949	return ret;
3950}
3951
3952int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3953			   struct ieee80211_vif *vif,
3954			   struct ieee80211_sta *sta,
3955			   struct ieee80211_key_conf *keyconf)
3956{
3957	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3958	struct iwl_mvm_sta *mvm_sta;
3959	u8 sta_id = IWL_MVM_INVALID_STA;
3960	int ret, i;
3961
3962	lockdep_assert_held(&mvm->mutex);
3963
3964	/* Get the station from the mvm local station table */
3965	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3966	if (mvm_sta)
3967		sta_id = mvm_sta->deflink.sta_id;
3968	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3969		sta_id = iwl_mvm_vif_from_mac80211(vif)->deflink.mcast_sta.sta_id;
3970
3971
3972	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3973		      keyconf->keyidx, sta_id);
3974
3975	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3976	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3977	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3978		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3979
3980	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3981		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3982			keyconf->hw_key_idx);
3983		return -ENOENT;
3984	}
3985
3986	/* track which key was deleted last */
3987	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3988		if (mvm->fw_key_deleted[i] < U8_MAX)
3989			mvm->fw_key_deleted[i]++;
3990	}
3991	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3992
3993	if (sta && !mvm_sta) {
3994		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3995		return 0;
3996	}
3997
3998	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3999	if (ret)
4000		return ret;
4001
4002	/* delete WEP key twice to get rid of (now useless) offset */
4003	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
4004	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
4005		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
4006
4007	return ret;
4008}
4009
4010void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
4011			     struct ieee80211_vif *vif,
4012			     struct ieee80211_key_conf *keyconf,
4013			     struct ieee80211_sta *sta, u32 iv32,
4014			     u16 *phase1key)
4015{
4016	struct iwl_mvm_sta *mvm_sta;
4017	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
4018	bool mfp = sta ? sta->mfp : false;
4019
4020	rcu_read_lock();
4021
4022	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
4023	if (WARN_ON_ONCE(!mvm_sta))
4024		goto unlock;
4025	iwl_mvm_send_sta_key(mvm, mvm_sta->deflink.sta_id, keyconf, mcast,
4026			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
4027			     mfp);
4028
4029 unlock:
4030	rcu_read_unlock();
4031}
4032
4033void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
4034				struct ieee80211_sta *sta)
4035{
4036	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4037	struct iwl_mvm_add_sta_cmd cmd = {
4038		.add_modify = STA_MODE_MODIFY,
4039		.sta_id = mvmsta->deflink.sta_id,
4040		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
4041		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
4042	};
4043	int ret;
4044
4045	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4046				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4047	if (ret)
4048		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4049}
4050
4051void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
4052				       struct ieee80211_sta *sta,
4053				       enum ieee80211_frame_release_type reason,
4054				       u16 cnt, u16 tids, bool more_data,
4055				       bool single_sta_queue)
4056{
4057	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4058	struct iwl_mvm_add_sta_cmd cmd = {
4059		.add_modify = STA_MODE_MODIFY,
4060		.sta_id = mvmsta->deflink.sta_id,
4061		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
4062		.sleep_tx_count = cpu_to_le16(cnt),
4063		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
4064	};
4065	int tid, ret;
4066	unsigned long _tids = tids;
4067
4068	/* convert TIDs to ACs - we don't support TSPEC so that's OK
4069	 * Note that this field is reserved and unused by firmware not
4070	 * supporting GO uAPSD, so it's safe to always do this.
4071	 */
4072	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
4073		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
4074
4075	/* If we're releasing frames from aggregation or dqa queues then check
4076	 * if all the queues that we're releasing frames from, combined, have:
4077	 *  - more frames than the service period, in which case more_data
4078	 *    needs to be set
4079	 *  - fewer than 'cnt' frames, in which case we need to adjust the
4080	 *    firmware command (but do that unconditionally)
4081	 */
4082	if (single_sta_queue) {
4083		int remaining = cnt;
4084		int sleep_tx_count;
4085
4086		spin_lock_bh(&mvmsta->lock);
4087		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
4088			struct iwl_mvm_tid_data *tid_data;
4089			u16 n_queued;
4090
4091			tid_data = &mvmsta->tid_data[tid];
4092
4093			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
4094			if (n_queued > remaining) {
4095				more_data = true;
4096				remaining = 0;
4097				break;
4098			}
4099			remaining -= n_queued;
4100		}
4101		sleep_tx_count = cnt - remaining;
4102		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
4103			mvmsta->sleep_tx_count = sleep_tx_count;
4104		spin_unlock_bh(&mvmsta->lock);
4105
4106		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
4107		if (WARN_ON(cnt - remaining == 0)) {
4108			ieee80211_sta_eosp(sta);
4109			return;
4110		}
4111	}
4112
4113	/* Note: this is ignored by firmware not supporting GO uAPSD */
4114	if (more_data)
4115		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
4116
4117	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
4118		mvmsta->next_status_eosp = true;
4119		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
4120	} else {
4121		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
4122	}
4123
4124	/* block the Tx queues until the FW updated the sleep Tx count */
4125	iwl_trans_block_txq_ptrs(mvm->trans, true);
4126
4127	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
4128				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
4129				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4130	if (ret)
4131		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4132}
4133
4134void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
4135			   struct iwl_rx_cmd_buffer *rxb)
4136{
4137	struct iwl_rx_packet *pkt = rxb_addr(rxb);
4138	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
4139	struct ieee80211_sta *sta;
4140	u32 sta_id = le32_to_cpu(notif->sta_id);
4141
4142	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
4143		return;
4144
4145	rcu_read_lock();
4146	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
4147	if (!IS_ERR_OR_NULL(sta))
4148		ieee80211_sta_eosp(sta);
4149	rcu_read_unlock();
4150}
4151
4152void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
4153				   struct iwl_mvm_sta *mvmsta,
4154				   bool disable)
4155{
4156	struct iwl_mvm_add_sta_cmd cmd = {
4157		.add_modify = STA_MODE_MODIFY,
4158		.sta_id = mvmsta->deflink.sta_id,
4159		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
4160		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
4161		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
4162	};
4163	int ret;
4164
4165	if (mvm->mld_api_is_used) {
4166		iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable);
4167		return;
4168	}
4169
4170	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4171				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4172	if (ret)
4173		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4174}
4175
4176void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
4177				      struct ieee80211_sta *sta,
4178				      bool disable)
4179{
4180	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4181
4182	if (mvm->mld_api_is_used) {
4183		iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable);
4184		return;
4185	}
4186
4187	spin_lock_bh(&mvm_sta->lock);
4188
4189	if (mvm_sta->disable_tx == disable) {
4190		spin_unlock_bh(&mvm_sta->lock);
4191		return;
4192	}
4193
4194	mvm_sta->disable_tx = disable;
4195
4196	/*
4197	 * If sta PS state is handled by mac80211, tell it to start/stop
4198	 * queuing tx for this station.
4199	 */
4200	if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
4201		ieee80211_sta_block_awake(mvm->hw, sta, disable);
4202
4203	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
4204
4205	spin_unlock_bh(&mvm_sta->lock);
4206}
4207
4208static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
4209					      struct iwl_mvm_vif *mvmvif,
4210					      struct iwl_mvm_int_sta *sta,
4211					      bool disable)
4212{
4213	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
4214	struct iwl_mvm_add_sta_cmd cmd = {
4215		.add_modify = STA_MODE_MODIFY,
4216		.sta_id = sta->sta_id,
4217		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
4218		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
4219		.mac_id_n_color = cpu_to_le32(id),
4220	};
4221	int ret;
4222
4223	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4224				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4225	if (ret)
4226		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4227}
4228
4229void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
4230				       struct iwl_mvm_vif *mvmvif,
4231				       bool disable)
4232{
4233	struct ieee80211_sta *sta;
4234	struct iwl_mvm_sta *mvm_sta;
4235	int i;
4236
4237	if (mvm->mld_api_is_used) {
4238		iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif, disable);
4239		return;
4240	}
4241
4242	rcu_read_lock();
4243
4244	/* Block/unblock all the stations of the given mvmvif */
4245	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
4246		sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
4247		if (IS_ERR_OR_NULL(sta))
4248			continue;
4249
4250		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4251		if (mvm_sta->mac_id_n_color !=
4252		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
4253			continue;
4254
4255		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
4256	}
4257
4258	rcu_read_unlock();
4259
4260	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
4261		return;
4262
4263	/* Need to block/unblock also multicast station */
4264	if (mvmvif->deflink.mcast_sta.sta_id != IWL_MVM_INVALID_STA)
4265		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4266						  &mvmvif->deflink.mcast_sta,
4267						  disable);
4268
4269	/*
4270	 * Only unblock the broadcast station (FW blocks it for immediate
4271	 * quiet, not the driver)
4272	 */
4273	if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_MVM_INVALID_STA)
4274		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4275						  &mvmvif->deflink.bcast_sta,
4276						  disable);
4277}
4278
4279void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
4280{
4281	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4282	struct iwl_mvm_sta *mvmsta;
4283
4284	rcu_read_lock();
4285
4286	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->deflink.ap_sta_id);
4287
4288	if (mvmsta)
4289		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
4290
4291	rcu_read_unlock();
4292}
4293
4294u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
4295{
4296	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
4297
4298	/*
4299	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
4300	 * to align the wrap around of ssn so we compare relevant values.
4301	 */
4302	if (mvm->trans->trans_cfg->gen2)
4303		sn &= 0xff;
4304
4305	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
4306}
4307
4308int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
4309			 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
4310			 u8 *key, u32 key_len)
4311{
4312	int ret;
4313	u16 queue;
4314	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4315	struct ieee80211_key_conf *keyconf;
4316	unsigned int wdg_timeout =
4317		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
4318	bool mld = iwl_mvm_has_mld_api(mvm->fw);
4319	u32 type = mld ? STATION_TYPE_PEER : IWL_STA_LINK;
4320
4321	ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
4322				       NL80211_IFTYPE_UNSPECIFIED, type);
4323	if (ret)
4324		return ret;
4325
4326	if (mld)
4327		ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, sta, addr,
4328							 mvmvif->deflink.fw_link_id,
4329							 &queue,
4330							 IWL_MAX_TID_COUNT,
4331							 &wdg_timeout);
4332	else
4333		ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id,
4334						     mvmvif->color, addr, sta,
4335						     &queue,
4336						     IWL_MVM_TX_FIFO_BE);
4337	if (ret)
4338		goto out;
4339
4340	keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
4341	if (!keyconf) {
4342		ret = -ENOBUFS;
4343		goto out;
4344	}
4345
4346	keyconf->cipher = cipher;
4347	memcpy(keyconf->key, key, key_len);
4348	keyconf->keylen = key_len;
4349	keyconf->flags = IEEE80211_KEY_FLAG_PAIRWISE;
4350
4351	if (mld) {
4352		/* The MFP flag is set according to the station mfp field. Since
4353		 * we don't have a station, set it manually.
4354		 */
4355		u32 key_flags =
4356			iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
4357			IWL_SEC_KEY_FLAG_MFP;
4358		u32 sta_mask = BIT(sta->sta_id);
4359
4360		ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf);
4361	} else {
4362		ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
4363					   0, NULL, 0, 0, true);
4364	}
4365
4366	kfree(keyconf);
4367	return 0;
4368out:
4369	iwl_mvm_dealloc_int_sta(mvm, sta);
4370	return ret;
4371}
4372
4373void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
4374				   struct ieee80211_vif *vif,
4375				   u32 id)
4376{
4377	struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
4378		.id = cpu_to_le32(id),
4379	};
4380	int ret;
4381
4382	ret = iwl_mvm_send_cmd_pdu(mvm,
4383				   WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
4384				   CMD_ASYNC,
4385				   sizeof(cancel_channel_switch_cmd),
4386				   &cancel_channel_switch_cmd);
4387	if (ret)
4388		IWL_ERR(mvm, "Failed to cancel the channel switch\n");
4389}
4390