// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
#include "mvm.h"
#include "fw/api/rs.h"
#include "fw/img.h"

/*
 * Will return 0 even if the cmd failed when RFKILL is asserted unless
 * CMD_WANT_SKB is set in cmd->flags.
 */
int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
{
	int ret;

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Synchronous commands from this op-mode must hold
	 * the mutex; this ensures we don't try to send two
	 * (or more) synchronous commands at a time.
	 */
	if (!(cmd->flags & CMD_ASYNC))
		lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_send_cmd(mvm->trans, cmd);

	/*
	 * If the caller wants the SKB, then don't hide any problems, the
	 * caller might access the response buffer which will be NULL if
	 * the command failed.
	 */
	if (cmd->flags & CMD_WANT_SKB)
		return ret;

	/*
	 * Silently ignore failures if RFKILL is asserted or
	 * we are in the suspend/resume process.
	 */
	if (!ret || ret == -ERFKILL || ret == -EHOSTDOWN)
		return 0;
	return ret;
}

int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
			 u32 flags, u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_mvm_send_cmd(mvm, &cmd);
}
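
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * most fixed-size commands go through the wrapper above. Assuming a
 * command payload "struct iwl_example_cmd" and a command id EXAMPLE_CMD
 * exist:
 *
 *	struct iwl_example_cmd example = {};
 *	int ret;
 *
 *	ret = iwl_mvm_send_cmd_pdu(mvm, EXAMPLE_CMD, 0,
 *				   sizeof(example), &example);
 *	if (ret)
 *		IWL_ERR(mvm, "Failed to send example command: %d\n", ret);
 *
 * With flags == 0 (synchronous, no CMD_WANT_SKB) the caller must hold
 * mvm->mutex, and RFKILL/suspend failures are masked to 0 by
 * iwl_mvm_send_cmd().
 */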

/*
 * We assume that the caller set the status to the success value
 */
int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
			    u32 *status)
{
	struct iwl_rx_packet *pkt;
	struct iwl_cmd_response *resp;
	int ret, resp_len;

	lockdep_assert_held(&mvm->mutex);

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Only synchronous commands can wait for status,
	 * we use WANT_SKB so the caller can't.
	 */
	if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
		      "cmd flags %x", cmd->flags))
		return -EINVAL;

	cmd->flags |= CMD_WANT_SKB;

	ret = iwl_trans_send_cmd(mvm->trans, cmd);
	if (ret == -ERFKILL) {
		/*
		 * The command failed because of RFKILL, don't update
		 * the status, leave it as success and return 0.
		 */
		return 0;
	} else if (ret) {
		return ret;
	}

	pkt = cmd->resp_pkt;

	resp_len = iwl_rx_packet_payload_len(pkt);
	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		ret = -EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32_to_cpu(resp->status);
 out_free_resp:
	iwl_free_resp(cmd);
	return ret;
}

/*
 * We assume that the caller set the status to the success value
 */
int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
				const void *data, u32 *status)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
	};

	return iwl_mvm_send_cmd_status(mvm, &cmd, status);
}
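
/*
 * Illustrative sketch (hypothetical caller): per the comment above, the
 * caller pre-loads *status with the success value before sending, so an
 * RFKILL-masked failure still reads back as success. Assuming a command
 * payload "example" and a status constant EXAMPLE_STATUS_SUCCESS:
 *
 *	u32 status = EXAMPLE_STATUS_SUCCESS;
 *	int ret;
 *
 *	ret = iwl_mvm_send_cmd_pdu_status(mvm, EXAMPLE_CMD,
 *					  sizeof(example), &example,
 *					  &status);
 *	if (ret || status != EXAMPLE_STATUS_SUCCESS)
 *		IWL_ERR(mvm, "example command failed\n");
 */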

int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
					  enum nl80211_band band)
{
	int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
	bool is_LB = band == NL80211_BAND_2GHZ; /* low band == 2.4 GHz */

	if (format == RATE_MCS_LEGACY_OFDM_MSK)
		return is_LB ? rate + IWL_FIRST_OFDM_RATE :
			rate;

	/* CCK is not allowed in the high bands */
	return is_LB ? rate : -1;
}
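
/*
 * Worked example for the mapping above: in 2.4 GHz the mac80211 rate
 * table starts with the CCK rates, so a legacy OFDM hardware index of 0
 * (6 Mbps) maps to IWL_FIRST_OFDM_RATE, while the same index in a high
 * band (whose table has no CCK entries) maps to 0. A CCK frame reported
 * in a high band yields -1.
 */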

int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
					enum nl80211_band band)
{
	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
	int idx;
	int band_offset = 0;

	/* Legacy rate format, search for match in table */
	if (band != NL80211_BAND_2GHZ)
		band_offset = IWL_FIRST_OFDM_RATE;
	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
		if (iwl_fw_rate_idx_to_plcp(idx) == rate)
			return idx - band_offset;

	return -1;
}

u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
{
	if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
		/* In the new rate format, legacy rates are indexed:
		 * 0-3 for CCK and 0-7 for OFDM.
		 */
		return (rate_idx >= IWL_FIRST_OFDM_RATE ?
			rate_idx - IWL_FIRST_OFDM_RATE :
			rate_idx);

	return iwl_fw_rate_idx_to_plcp(rate_idx);
}

u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
{
	static const u8 mac80211_ac_to_ucode_ac[] = {
		AC_VO,
		AC_VI,
		AC_BE,
		AC_BK
	};

	return mac80211_ac_to_ucode_ac[ac];
}
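
/*
 * The table in iwl_mvm_mac80211_ac_to_ucode_ac() above relies on the
 * mac80211 enum ieee80211_ac_numbers ordering (VO = 0, VI = 1, BE = 2,
 * BK = 3) and simply translates those indices to the firmware's AC_*
 * values, e.g. iwl_mvm_mac80211_ac_to_ucode_ac(IEEE80211_AC_BE) == AC_BE.
 */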

void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_error_resp *err_resp = (void *)pkt->data;

	IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
		le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
	IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
		le16_to_cpu(err_resp->bad_cmd_seq_num),
		le32_to_cpu(err_resp->error_service));
	IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
		le64_to_cpu(err_resp->timestamp));
}

/*
 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
 * The parameter should also be a combination of ANT_[ABC].
 */
u8 first_antenna(u8 mask)
{
	BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
	if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
		return BIT(0);
	return BIT(ffs(mask) - 1);
}
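
/*
 * Worked example: with ANT_A == BIT(0), ANT_B == BIT(1) and
 * ANT_C == BIT(2), first_antenna(ANT_B | ANT_C) computes
 * BIT(ffs(0x6) - 1) == BIT(1) == ANT_B.
 */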

#define MAX_ANT_NUM 2
/*
 * Toggles between TX antennas to send the probe request on.
 * Receives the bitmask of valid TX antennas and the *index* used
 * for the last TX, and returns the next valid *index* to use.
 * To set it in the tx_cmd, the caller must use BIT(idx).
 */
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
{
	u8 ind = last_idx;
	int i;

	for (i = 0; i < MAX_ANT_NUM; i++) {
		ind = (ind + 1) % MAX_ANT_NUM;
		if (valid & BIT(ind))
			return ind;
	}

	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
	return last_idx;
}
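
/*
 * Illustrative sketch (hypothetical caller): with valid == ANT_A | ANT_B,
 * repeated calls toggle the index 0 -> 1 -> 0 -> ..., and the result is
 * applied to the TX command as a bit:
 *
 *	idx = iwl_mvm_next_antenna(mvm, ANT_A | ANT_B, idx);
 *	tx_ant = BIT(idx);
 *
 * If only one antenna is valid, e.g. valid == ANT_B, the function keeps
 * returning index 1.
 */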

/**
 * iwl_mvm_send_lq_cmd() - Send link quality command
 * @mvm: Driver data.
 * @lq: Link quality command to send.
 *
 * The link quality command is sent as the last step of station creation.
 * In the special case where init is set, a callback is called to clear
 * the state indicating that station creation is in progress.
 */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
{
	struct iwl_host_cmd cmd = {
		.id = LQ_CMD,
		.len = { sizeof(struct iwl_lq_cmd), },
		.flags = CMD_ASYNC,
		.data = { lq, },
	};

	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
		    iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	return iwl_mvm_send_cmd(mvm, &cmd);
}

/**
 * iwl_mvm_update_smps - Get a request to change the SMPS mode
 * @mvm: Driver data.
 * @vif: Pointer to the ieee80211_vif structure
 * @req_type: The part of the driver that requested the change.
 * @smps_request: The request to change the SMPS mode.
 * @link_id: for MLO link_id, otherwise 0 (deflink)
 *
 * Get a request to change the SMPS mode,
 * and change it according to all other requests in the driver.
 */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 enum iwl_mvm_smps_type_request req_type,
			 enum ieee80211_smps_mode smps_request,
			 unsigned int link_id)
{
	struct iwl_mvm_vif *mvmvif;
	enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (WARN_ON_ONCE(!mvmvif->link[link_id]))
		return;

	mvmvif->link[link_id]->smps_requests[req_type] = smps_request;
	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->link[link_id]->smps_requests[i] ==
		    IEEE80211_SMPS_STATIC) {
			smps_mode = IEEE80211_SMPS_STATIC;
			break;
		}
		if (mvmvif->link[link_id]->smps_requests[i] ==
		    IEEE80211_SMPS_DYNAMIC)
			smps_mode = IEEE80211_SMPS_DYNAMIC;
	}

	/* SMPS is disabled in eSR */
	if (mvmvif->esr_active)
		smps_mode = IEEE80211_SMPS_OFF;

	ieee80211_request_smps(vif, link_id, smps_mode);
}
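
/*
 * The request-merging loop in iwl_mvm_update_smps() above applies the
 * precedence STATIC > DYNAMIC > AUTOMATIC: for example, if one requester
 * asked for IEEE80211_SMPS_DYNAMIC and another for IEEE80211_SMPS_STATIC,
 * the link ends up in static SMPS. An active eSR overrides everything and
 * forces IEEE80211_SMPS_OFF.
 */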

void iwl_mvm_update_smps_on_active_links(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 enum iwl_mvm_smps_type_request req_type,
					 enum ieee80211_smps_mode smps_request)
{
	struct ieee80211_bss_conf *link_conf;
	unsigned int link_id;

	rcu_read_lock();
	for_each_vif_active_link(vif, link_conf, link_id)
		iwl_mvm_update_smps(mvm, vif, req_type, smps_request,
				    link_id);
	rcu_read_unlock();
}

static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
				    struct iwl_rx_packet *pkt, void *data)
{
	WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION);

	return true;
}

int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
{
	struct iwl_statistics_cmd scmd = {
		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
	};

	struct iwl_host_cmd cmd = {
		.id = STATISTICS_CMD,
		.len[0] = sizeof(scmd),
		.data[0] = &scmd,
	};
	int ret;

	/* From version 15 of STATISTICS_NOTIFICATION, the reply to
	 * STATISTICS_CMD is empty, and the data arrives in a separate
	 * STATISTICS_NOTIFICATION instead.
	 */
	if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
				    STATISTICS_NOTIFICATION, 0) < 15) {
		cmd.flags = CMD_WANT_SKB;

		ret = iwl_mvm_send_cmd(mvm, &cmd);
		if (ret)
			return ret;

		iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
		iwl_free_resp(&cmd);
	} else {
		struct iwl_notification_wait stats_wait;
		static const u16 stats_complete[] = {
			STATISTICS_NOTIFICATION,
		};

		iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
					   stats_complete, ARRAY_SIZE(stats_complete),
					   iwl_wait_stats_complete, NULL);

		ret = iwl_mvm_send_cmd(mvm, &cmd);
		if (ret) {
			iwl_remove_notification(&mvm->notif_wait, &stats_wait);
			return ret;
		}

		/* 200ms should be enough for FW to collect data from all
		 * LMACs and send STATISTICS_NOTIFICATION to host
		 */
		ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5);
		if (ret)
			return ret;
	}

	if (clear)
		iwl_mvm_accu_radio_stats(mvm);

	return 0;
}

void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
{
	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
}

struct iwl_mvm_diversity_iter_data {
	struct iwl_mvm_phy_ctxt *ctxt;
	bool result;
};

static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_diversity_iter_data *data = _data;
	int i, link_id;

	for_each_mvm_vif_valid_link(mvmvif, link_id) {
		struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];

		if (link_info->phy_ctxt != data->ctxt)
			continue;

		for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
			if (link_info->smps_requests[i] == IEEE80211_SMPS_STATIC ||
			    link_info->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
				data->result = false;
				break;
			}
		}
	}
}

bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
				  struct iwl_mvm_phy_ctxt *ctxt)
{
	struct iwl_mvm_diversity_iter_data data = {
		.ctxt = ctxt,
		.result = true,
	};

	lockdep_assert_held(&mvm->mutex);

	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
		return false;

	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return false;

	if (mvm->cfg->rx_with_siso_diversity)
		return false;

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_diversity_iter, &data);

	return data.result;
}

void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
				  bool low_latency, u16 mac_id)
{
	struct iwl_mac_low_latency_cmd cmd = {
		.mac_id = cpu_to_le32(mac_id)
	};

	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
		return;

	if (low_latency) {
		/* currently we don't care about the direction */
		cmd.low_latency_rx = 1;
		cmd.low_latency_tx = 1;
	}

	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
				 0, sizeof(cmd), &cmd))
		IWL_ERR(mvm, "Failed to send low latency command\n");
}

int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool low_latency,
			       enum iwl_mvm_low_latency_cause cause)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int res;
	bool prev;

	lockdep_assert_held(&mvm->mutex);

	prev = iwl_mvm_vif_low_latency(mvmvif);
	iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);

	low_latency = iwl_mvm_vif_low_latency(mvmvif);

	if (low_latency == prev)
		return 0;

	iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);

	res = iwl_mvm_update_quotas(mvm, false, NULL);
	if (res)
		return res;

	iwl_mvm_bt_coex_vif_change(mvm);

	return iwl_mvm_power_update_mac(mvm);
}

struct iwl_mvm_low_latency_iter {
	bool result;
	bool result_per_band[NUM_NL80211_BANDS];
};

static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	struct iwl_mvm_low_latency_iter *result = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	enum nl80211_band band;

	if (iwl_mvm_vif_low_latency(mvmvif)) {
		result->result = true;

		if (!mvmvif->deflink.phy_ctxt)
			return;

		band = mvmvif->deflink.phy_ctxt->channel->band;
		result->result_per_band[band] = true;
	}
}

bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
{
	struct iwl_mvm_low_latency_iter data = {};

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_ll_iter, &data);

	return data.result;
}

bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
{
	struct iwl_mvm_low_latency_iter data = {};

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_ll_iter, &data);

	return data.result_per_band[band];
}

struct iwl_bss_iter_data {
	struct ieee80211_vif *vif;
	bool error;
};

static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_bss_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
		return;

	if (data->vif) {
		data->error = true;
		return;
	}

	data->vif = vif;
}

struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
{
	struct iwl_bss_iter_data bss_iter_data = {};

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_bss_iface_iterator, &bss_iter_data);

	if (bss_iter_data.error) {
		IWL_ERR(mvm, "More than one managed interface active!\n");
		return ERR_PTR(-EINVAL);
	}

	return bss_iter_data.vif;
}

struct iwl_bss_find_iter_data {
	struct ieee80211_vif *vif;
	u32 macid;
};

static void iwl_mvm_bss_find_iface_iterator(void *_data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	struct iwl_bss_find_iter_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (mvmvif->id == data->macid)
		data->vif = vif;
}

struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid)
{
	struct iwl_bss_find_iter_data data = {
		.macid = macid,
	};

	lockdep_assert_held(&mvm->mutex);

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_bss_find_iface_iterator, &data);

	return data.vif;
}

struct iwl_sta_iter_data {
	bool assoc;
};

static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_sta_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (vif->cfg.assoc)
		data->assoc = true;
}

bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
{
	struct iwl_sta_iter_data data = {
		.assoc = false,
	};

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_sta_iface_iterator,
						   &data);
	return data.assoc;
}

unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    bool tdls, bool cmd_q)
{
	struct iwl_fw_dbg_trigger_tlv *trigger;
	struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
	unsigned int default_timeout = cmd_q ?
		IWL_DEF_WD_TIMEOUT :
		mvm->trans->trans_cfg->base_params->wd_timeout;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
		/*
		 * We can't know when the station is asleep or awake, so we
		 * must disable the queue hang detection.
		 */
		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
		    vif && vif->type == NL80211_IFTYPE_AP)
			return IWL_WATCHDOG_DISABLED;
		return default_timeout;
	}

	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
	txq_timer = (void *)trigger->data;

	if (tdls)
		return le32_to_cpu(txq_timer->tdls);

	if (cmd_q)
		return le32_to_cpu(txq_timer->command_queue);

	if (WARN_ON(!vif))
		return default_timeout;

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_ADHOC:
		return le32_to_cpu(txq_timer->ibss);
	case NL80211_IFTYPE_STATION:
		return le32_to_cpu(txq_timer->bss);
	case NL80211_IFTYPE_AP:
		return le32_to_cpu(txq_timer->softap);
	case NL80211_IFTYPE_P2P_CLIENT:
		return le32_to_cpu(txq_timer->p2p_client);
	case NL80211_IFTYPE_P2P_GO:
		return le32_to_cpu(txq_timer->p2p_go);
	case NL80211_IFTYPE_P2P_DEVICE:
		return le32_to_cpu(txq_timer->p2p_device);
	case NL80211_IFTYPE_MONITOR:
		return default_timeout;
	default:
		WARN_ON(1);
		return mvm->trans->trans_cfg->base_params->wd_timeout;
	}
}

void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     const char *errmsg)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_MLME);
	if (!trig)
		goto out;

	trig_mlme = (void *)trig->data;

	if (trig_mlme->stop_connection_loss &&
	    --trig_mlme->stop_connection_loss)
		goto out;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);

out:
	ieee80211_connection_loss(vif);
}

void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  const struct ieee80211_sta *sta,
					  u16 tid)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;

	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"Frame from %pM timed out, tid %d",
				sta->addr, tid);
}

u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
{
	if (!elapsed)
		return 0;

	return (100 * airtime / elapsed) / USEC_PER_MSEC;
}
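
/*
 * Worked example: airtime is accumulated in usec while elapsed is in
 * msec, hence the extra division by USEC_PER_MSEC above. With
 * airtime == 50000 usec over elapsed == 1000 msec:
 * (100 * 50000 / 1000) / 1000 == 5, i.e. a 5% load.
 */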

static enum iwl_mvm_traffic_load
iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
{
	u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);

	if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
		return IWL_MVM_TRAFFIC_HIGH;
	if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
		return IWL_MVM_TRAFFIC_MEDIUM;

	return IWL_MVM_TRAFFIC_LOW;
}

static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;

	if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
		return;

	low_latency = mvm->tcm.result.low_latency[mvmvif->id];

	if (!mvm->tcm.result.change[mvmvif->id] &&
	    prev == low_latency) {
		iwl_mvm_update_quotas(mvm, false, NULL);
		return;
	}

	if (prev != low_latency) {
		/* this sends traffic load and updates quota as well */
		iwl_mvm_update_low_latency(mvm, vif, low_latency,
					   LOW_LATENCY_TRAFFIC);
	} else {
		iwl_mvm_update_quotas(mvm, false, NULL);
	}
}

static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
{
	mutex_lock(&mvm->mutex);

	ieee80211_iterate_active_interfaces(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_tcm_iter, mvm);

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
		iwl_mvm_config_scan(mvm);

	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm;
	struct iwl_mvm_vif *mvmvif;
	struct ieee80211_vif *vif;

	mvmvif = container_of(wk, struct iwl_mvm_vif,
			      uapsd_nonagg_detected_wk.work);
	vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
	mvm = mvmvif->mvm;

	if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
		return;

	/* remember that this AP is broken */
	memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
	       vif->bss_conf.bssid, ETH_ALEN);
	mvm->uapsd_noagg_bssid_write_idx++;
	if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
		mvm->uapsd_noagg_bssid_write_idx = 0;

	iwl_mvm_connection_loss(mvm, vif,
				"AP isn't using AMPDU with uAPSD enabled");
}

static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (!vif->cfg.assoc)
		return;

	if (!mvmvif->deflink.queue_params[IEEE80211_AC_VO].uapsd &&
	    !mvmvif->deflink.queue_params[IEEE80211_AC_VI].uapsd &&
	    !mvmvif->deflink.queue_params[IEEE80211_AC_BE].uapsd &&
	    !mvmvif->deflink.queue_params[IEEE80211_AC_BK].uapsd)
		return;

	if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
		return;

	mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
	IWL_INFO(mvm,
		 "detected AP should do aggregation but isn't, likely due to U-APSD\n");
	schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk,
			      15 * HZ);
}

static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
						 unsigned int elapsed,
						 int mac)
{
	u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
	u64 tpt;
	unsigned long rate;
	struct ieee80211_vif *vif;

	rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);

	if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
	    mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
		return;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		tpt = 8 * bytes; /* bits */
		do_div(tpt, elapsed); /* bits/msec == kbps */
		rate *= 1000; /* kbps */
		if (tpt < 22 * rate / 100)
			return;
	} else {
		/*
		 * the rate here is actually the threshold, in 100Kbps units,
		 * so do the needed conversion from bytes to 100Kbps:
		 * 100kb = bits / (100 * 1000),
		 * 100kbps = 100kb / (msecs / 1000) ==
		 *           (bits / (100 * 1000)) / (msecs / 1000) ==
		 *           bits / (100 * msecs)
		 */
		tpt = (8 * bytes);
		do_div(tpt, elapsed * 100);
		if (tpt < rate)
			return;
	}

	rcu_read_lock();
	vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
	if (vif)
		iwl_mvm_uapsd_agg_disconnect(mvm, vif);
	rcu_read_unlock();
}
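
/*
 * Worked example for the legacy (non-new-RX-API) branch above: the ewma
 * "rate" acts as a threshold in 100 kbps units. With bytes == 2500000
 * over elapsed == 1000 msec, tpt == (8 * 2500000) / (1000 * 100) == 200,
 * i.e. 20 Mbps expressed in 100 kbps units. Against a stored threshold
 * of 250 (25 Mbps) the function returns early; against 150 (15 Mbps) it
 * goes on to iwl_mvm_uapsd_agg_disconnect().
 */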

static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
				 struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 *band = _data;

	if (!mvmvif->deflink.phy_ctxt)
		return;

	band[mvmvif->id] = mvmvif->deflink.phy_ctxt->channel->band;
}

static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
					    unsigned long ts,
					    bool handle_uapsd)
{
	unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
	unsigned int uapsd_elapsed =
		jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
	u32 total_airtime = 0;
	u32 band_airtime[NUM_NL80211_BANDS] = {0};
	u32 band[NUM_MAC_INDEX_DRIVER] = {0};
	int ac, mac, i;
	bool low_latency = false;
	enum iwl_mvm_traffic_load load, band_load;
	bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);

	if (handle_ll)
		mvm->tcm.ll_ts = ts;
	if (handle_uapsd)
		mvm->tcm.uapsd_nonagg_ts = ts;

	mvm->tcm.result.elapsed = elapsed;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_tcm_iterator,
						   &band);

	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
		u32 vo_vi_pkts = 0;
		u32 airtime = mdata->rx.airtime + mdata->tx.airtime;

		total_airtime += airtime;
		band_airtime[band[mac]] += airtime;

		load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
		mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
		mvm->tcm.result.load[mac] = load;
		mvm->tcm.result.airtime[mac] = airtime;

		for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
			vo_vi_pkts += mdata->rx.pkts[ac] +
				      mdata->tx.pkts[ac];

		/* enable immediately with enough packets but defer disabling */
		if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
			mvm->tcm.result.low_latency[mac] = true;
		else if (handle_ll)
			mvm->tcm.result.low_latency[mac] = false;

		if (handle_ll) {
			/* clear old data */
			memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
			memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
		}
		low_latency |= mvm->tcm.result.low_latency[mac];

		if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
			iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
							     mac);
		/* clear old data */
		if (handle_uapsd)
			mdata->uapsd_nonagg_detect.rx_bytes = 0;
		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
	}

	load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
	mvm->tcm.result.global_load = load;

	for (i = 0; i < NUM_NL80211_BANDS; i++) {
		band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
		mvm->tcm.result.band_load[i] = band_load;
	}

	/*
	 * If the current load isn't low we need to force re-evaluation
	 * in the TCM period, so that we can return to low load if there
	 * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
	 * triggered by traffic).
	 */
	if (load != IWL_MVM_TRAFFIC_LOW)
		return MVM_TCM_PERIOD;
	/*
	 * If low-latency is active we need to force re-evaluation after
	 * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
	 * when there's no traffic at all.
	 */
	if (low_latency)
		return MVM_LL_PERIOD;
	/*
	 * Otherwise, we don't need to run the work struct because we're
	 * in the default "idle" state - traffic indication is low (which
	 * also covers the "no traffic" case) and low-latency is disabled
	 * so there's no state that may need to be disabled when there's
	 * no traffic at all.
	 *
	 * Note that this has no impact on the regular scheduling of the
	 * updates triggered by traffic - those happen whenever one of the
	 * two timeouts expire (if there's traffic at all.)
	 */
	return 0;
}

void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
{
	unsigned long ts = jiffies;
	bool handle_uapsd =
		time_after(ts, mvm->tcm.uapsd_nonagg_ts +
			       msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));

	spin_lock(&mvm->tcm.lock);
	if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
		spin_unlock(&mvm->tcm.lock);
		return;
	}
	spin_unlock(&mvm->tcm.lock);

	if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
		mutex_lock(&mvm->mutex);
		if (iwl_mvm_request_statistics(mvm, true))
			handle_uapsd = false;
		mutex_unlock(&mvm->mutex);
	}

	spin_lock(&mvm->tcm.lock);
	/* re-check if somebody else won the recheck race */
	if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
		/* calculate statistics */
		unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
								  handle_uapsd);

		/* the memset needs to be visible before the timestamp */
		smp_mb();
		mvm->tcm.ts = ts;
		if (work_delay)
			schedule_delayed_work(&mvm->tcm.work, work_delay);
	}
	spin_unlock(&mvm->tcm.lock);

	iwl_mvm_tcm_results(mvm);
}

void iwl_mvm_tcm_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
					   tcm.work);

	iwl_mvm_recalc_tcm(mvm);
}

void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
{
	spin_lock_bh(&mvm->tcm.lock);
	mvm->tcm.paused = true;
	spin_unlock_bh(&mvm->tcm.lock);
	if (with_cancel)
		cancel_delayed_work_sync(&mvm->tcm.work);
}

void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
{
	int mac;
	bool low_latency = false;

	spin_lock_bh(&mvm->tcm.lock);
	mvm->tcm.ts = jiffies;
	mvm->tcm.ll_ts = jiffies;
	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];

		memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
		memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));

		if (mvm->tcm.result.low_latency[mac])
			low_latency = true;
	}
	/* The TCM data needs to be reset before "paused" flag changes */
	smp_mb();
	mvm->tcm.paused = false;

	/*
	 * if the current load is not low or low latency is active, force
	 * re-evaluation to cover the case of no traffic.
	 */
	if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
		schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
	else if (low_latency)
		schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);

	spin_unlock_bh(&mvm->tcm.lock);
}

void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
			  iwl_mvm_tcm_uapsd_nonagg_detected_wk);
}

void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
}

u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
{
	u32 reg_addr = DEVICE_SYSTEM_TIME_REG;

	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
	    mvm->trans->cfg->gp2_reg_addr)
		reg_addr = mvm->trans->cfg->gp2_reg_addr;

	return iwl_read_prph(mvm->trans, reg_addr);
}

void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
			   u32 *gp2, u64 *boottime, ktime_t *realtime)
{
	bool ps_disabled;

	lockdep_assert_held(&mvm->mutex);

	/* Disable power save when reading GP2 */
	ps_disabled = mvm->ps_disabled;
	if (!ps_disabled) {
		mvm->ps_disabled = true;
		iwl_mvm_power_update_device(mvm);
	}

	*gp2 = iwl_mvm_get_systime(mvm);

	if (clock_type == CLOCK_BOOTTIME && boottime)
		*boottime = ktime_get_boottime_ns();
	else if (clock_type == CLOCK_REALTIME && realtime)
		*realtime = ktime_get_real();

	if (!ps_disabled) {
		mvm->ps_disabled = ps_disabled;
		iwl_mvm_power_update_device(mvm);
	}
}

/* Find if at least two links from different vifs use the same channel
 * FIXME: consider having a refcount array in struct iwl_mvm_vif for
 * used phy_ctxt ids.
 */
bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1,
				     struct iwl_mvm_vif *vif2)
{
	unsigned int i, j;

	for_each_mvm_vif_valid_link(vif1, i) {
		for_each_mvm_vif_valid_link(vif2, j) {
			if (vif1->link[i]->phy_ctxt == vif2->link[j]->phy_ctxt)
				return true;
		}
	}

	return false;
}

bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif)
{
	unsigned int i;

	/* FIXME: can it fail when phy_ctxt is assigned? */
	for_each_mvm_vif_valid_link(mvmvif, i) {
		if (mvmvif->link[i]->phy_ctxt &&
		    mvmvif->link[i]->phy_ctxt->id < NUM_PHY_CTX)
			return true;
	}

	return false;
}