// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "sar.h"
#include "txrx.h"
#include "util.h"

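/* Pick an A-MSDU length limit from the last rate-adaptation report. The
 * thresholds below read naturally if bit_rate is in units of 100 kbps
 * (550 = 55 Mbit/s, just above the top OFDM rate), matching how cfg80211
 * expresses bitrates.
 */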
static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
			     const struct rtw89_ra_report *report)
{
	u32 bit_rate = report->bit_rate;

	/* lower than ofdm, do not aggregate */
	if (bit_rate < 550)
		return 1;

	/* avoid AMSDU for legacy rate */
	if (report->might_fallback_legacy)
		return 1;

	/* lower than 20M vht 2ss mcs8, make it small */
	if (bit_rate < 1800)
		return 1200;

	/* lower than 40M vht 2ss mcs9, make it medium */
	if (bit_rate < 4000)
		return 2600;

	/* not yet 80M vht 2ss mcs8/9, make it twice regular packet size */
	if (bit_rate < 7000)
		return 3500;

	return rtwdev->chip->max_amsdu_limit;
}

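/* Expand a VHT/HE rx MCS map into rate-mask bits. Each spatial stream is a
 * 2-bit field (2 = up to highest_mcs, 1 = up to highest_mcs - gap, 0 = up
 * to highest_mcs - 2 * gap, 3 = stream not supported), and each stream
 * owns a 12-bit slice of the mask above the 12 legacy (CCK+OFDM) bits.
 */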
static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
{
	u64 ra_mask = 0;
	u8 mcs_cap;
	int i, nss;

	for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 12) {
		mcs_cap = mcs_map & 0x3;
		switch (mcs_cap) {
		case 2:
			ra_mask |= GENMASK_ULL(highest_mcs, 0) << nss;
			break;
		case 1:
			ra_mask |= GENMASK_ULL(highest_mcs - gap, 0) << nss;
			break;
		case 0:
			ra_mask |= GENMASK_ULL(highest_mcs - gap * 2, 0) << nss;
			break;
		default:
			break;
		}
	}

	return ra_mask;
}

static u64 get_he_ra_mask(struct ieee80211_sta *sta)
{
	struct ieee80211_sta_he_cap cap = sta->deflink.he_cap;
	u16 mcs_map;

	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		if (cap.he_cap_elem.phy_cap_info[0] &
		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80);
		else
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160);
		break;
	default:
		mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80);
	}

	/* mcs_map value 2/1/0 -> highest MCS 11/9/7 */
	return get_mcs_ra_mask(mcs_map, 11, 2);
}

#define RA_FLOOR_TABLE_SIZE	7
#define RA_FLOOR_UP_GAP		3
static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
				  u8 ratr_state)
{
	u8 rssi_lv_t[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100};
	u8 rssi_lv = 0;
	u8 i;

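	/* avg_rssi is evidently tracked at twice dB resolution (note the
	 * RSSI_FACTOR shift in rtw89_phy_ra_assoc()); halve it before
	 * comparing against the dB thresholds above.
	 */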
	rssi >>= 1;
	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
		if (i >= ratr_state)
			rssi_lv_t[i] += RA_FLOOR_UP_GAP;
		if (rssi < rssi_lv_t[i]) {
			rssi_lv = i;
			break;
		}
	}
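	/* Each floor level prunes more low rates: level 1 drops the CCK
	 * bits, and every further level also masks the slowest remaining
	 * OFDM bits plus the lowest 1SS MCS bits.
	 */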
	if (rssi_lv == 0)
		return 0xffffffffffffffffULL;
	else if (rssi_lv == 1)
		return 0xfffffffffffffff0ULL;
	else if (rssi_lv == 2)
		return 0xffffffffffffefe0ULL;
	else if (rssi_lv == 3)
		return 0xffffffffffffcfc0ULL;
	else if (rssi_lv == 4)
		return 0xffffffffffff8f80ULL;
	else if (rssi_lv >= 5)
		return 0xffffffffffff0f00ULL;

	return 0xffffffffffffffffULL;
}

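/* If the RSSI floor wiped out every HT/VHT/HE rate, or even the whole
 * mask, restore the affected part from the pre-floor backup so the rate
 * adaptation never starts from an empty rate set.
 */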
static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
{
	if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
		ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));

	if (ra_mask == 0)
		ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));

	return ra_mask;
}

static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
				 const struct rtw89_chan *chan)
{
	struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
	struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
	enum nl80211_band band;
	u64 cfg_mask;

	if (!rtwsta->use_cfg_mask)
		return -1;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		band = NL80211_BAND_2GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
					   RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_5G:
		band = NL80211_BAND_5GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_6G:
		band = NL80211_BAND_6GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	default:
		rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
		return -1;
	}

	if (sta->deflink.he_cap.has_he) {
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
					    RA_MASK_HE_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
					    RA_MASK_HE_2SS_RATES);
	} else if (sta->deflink.vht_cap.vht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
					    RA_MASK_VHT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
					    RA_MASK_VHT_2SS_RATES);
	} else if (sta->deflink.ht_cap.ht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
					    RA_MASK_HT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
					    RA_MASK_HT_2SS_RATES);
	}

	return cfg_mask;
}

static const u64
rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES,
			     RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES};
static const u64
rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES,
			      RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES};
static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
			     RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};

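/* Map a fixed HE GI/LTF pair requested via the cfg80211 bitrate mask
 * (he_gi 0/1/2 = 0.8/1.6/3.2 us, he_ltf 0/1/2 = 1x/2x/4x) onto the chip's
 * combined GI+LTF code. Pairs without a matching code leave GI/LTF
 * selection to the rate adaptation.
 */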
static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
				struct rtw89_sta *rtwsta,
				const struct rtw89_chan *chan,
				bool *fix_giltf_en, u8 *fix_giltf)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u8 he_gi = mask->control[nl_band].he_gi;
	u8 he_ltf = mask->control[nl_band].he_ltf;

	if (!rtwsta->use_cfg_mask)
		return;

	if (he_ltf == 2 && he_gi == 2) {
		*fix_giltf = RTW89_GILTF_LGI_4XHE32;
	} else if (he_ltf == 2 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	} else if (he_ltf == 1 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_2XHE16;
	} else if (he_ltf == 1 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_2XHE08;
	} else if (he_ltf == 0 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_1XHE16;
	} else if (he_ltf == 0 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_1XHE08;
	} else {
		*fix_giltf_en = false;
		return;
	}

	*fix_giltf_en = true;
}

static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
				    struct ieee80211_sta *sta, bool csi)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
	struct rtw89_ra_info *ra = &rtwsta->ra;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->sub_entity_idx);
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
	const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
	u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
	u64 ra_mask = 0;
	u64 ra_mask_bak;
	u8 mode = 0;
	u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
	u8 bw_mode = 0;
	u8 stbc_en = 0;
	u8 ldpc_en = 0;
	u8 fix_giltf = 0;
	u8 i;
	bool sgi = false;
	bool fix_giltf_en = false;

	memset(ra, 0, sizeof(*ra));
	/* Set the ra mask from sta's capability */
	if (sta->deflink.he_cap.has_he) {
		mode |= RTW89_RA_MODE_HE;
		csi_mode = RTW89_RA_RPT_MODE_HE;
		ra_mask |= get_he_ra_mask(sta);
		high_rate_masks = rtw89_ra_mask_he_rates;
		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[2] &
		    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
			stbc_en = 1;
		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[1] &
		    IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
			ldpc_en = 1;
		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta, chan, &fix_giltf_en, &fix_giltf);
	} else if (sta->deflink.vht_cap.vht_supported) {
		u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);

		mode |= RTW89_RA_MODE_VHT;
		csi_mode = RTW89_RA_RPT_MODE_VHT;
		/* mcs_map value 2/1/0 -> highest MCS 9/8/7 */
		ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
		high_rate_masks = rtw89_ra_mask_vht_rates;
		if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
			stbc_en = 1;
		if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
			ldpc_en = 1;
	} else if (sta->deflink.ht_cap.ht_supported) {
		mode |= RTW89_RA_MODE_HT;
		csi_mode = RTW89_RA_RPT_MODE_HT;
		ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) |
			   ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) |
			   (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
			   (sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
		high_rate_masks = rtw89_ra_mask_ht_rates;
		if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
			stbc_en = 1;
		if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
			ldpc_en = 1;
	}

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ];
		if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xf)
			mode |= RTW89_RA_MODE_CCK;
		if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xff0)
			mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_5G:
		ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_6G:
		ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_6GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	default:
		rtw89_err(rtwdev, "Unknown band type\n");
		break;
	}

	ra_mask_bak = ra_mask;

	if (mode >= RTW89_RA_MODE_HT) {
		u64 mask = 0;
		for (i = 0; i < rtwdev->hal.tx_nss; i++)
			mask |= high_rate_masks[i];
		if (mode & RTW89_RA_MODE_OFDM)
			mask |= RA_MASK_SUBOFDM_RATES;
		if (mode & RTW89_RA_MODE_CCK)
			mask |= RA_MASK_SUBCCK_RATES;
		ra_mask &= mask;
	} else if (mode & RTW89_RA_MODE_OFDM) {
		ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
	}

	if (mode != RTW89_RA_MODE_CCK)
		ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);

	ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
	ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan);

	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		bw_mode = RTW89_CHANNEL_WIDTH_160;
		sgi = sta->deflink.vht_cap.vht_supported &&
		      (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
		break;
	case IEEE80211_STA_RX_BW_80:
		bw_mode = RTW89_CHANNEL_WIDTH_80;
		sgi = sta->deflink.vht_cap.vht_supported &&
		      (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
		break;
	case IEEE80211_STA_RX_BW_40:
		bw_mode = RTW89_CHANNEL_WIDTH_40;
		sgi = sta->deflink.ht_cap.ht_supported &&
		      (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
		break;
	default:
		bw_mode = RTW89_CHANNEL_WIDTH_20;
		sgi = sta->deflink.ht_cap.ht_supported &&
		      (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
		break;
	}

	if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[3] &
	    IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
		ra->dcm_cap = 1;

	if (rate_pattern->enable && !vif->p2p) {
		ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan);
		ra_mask &= rate_pattern->ra_mask;
		mode = rate_pattern->ra_mode;
	}

	ra->bw_cap = bw_mode;
	ra->er_cap = rtwsta->er_cap;
	ra->mode_ctrl = mode;
	ra->macid = rtwsta->mac_id;
	ra->stbc_cap = stbc_en;
	ra->ldpc_cap = ldpc_en;
	ra->ss_num = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
	ra->en_sgi = sgi;
	ra->ra_mask = ra_mask;
	ra->fix_giltf_en = fix_giltf_en;
	ra->fix_giltf = fix_giltf;

	if (!csi)
		return;

	ra->fixed_csi_rate_en = false;
	ra->ra_csi_rate_en = true;
	ra->cr_tbl_sel = false;
	ra->band_num = rtwvif->phy_idx;
	ra->csi_bw = bw_mode;
	ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32;
	ra->csi_mcs_ss_idx = 5;
	ra->csi_mode = csi_mode;
}

void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
			     u32 changed)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_ra_info *ra = &rtwsta->ra;

	rtw89_phy_ra_sta_update(rtwdev, sta, false);

	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
		ra->upd_mask = 1;
	if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED))
		ra->upd_bw_nss_mask = 1;

	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra update: macid = %d, bw = %d, nss = %d, gi = %d %d",
		    ra->macid,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, false);
}

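/* Evaluate one bitrate-mask field against the candidate fixed-rate
 * pattern: ctrl_skip means "all rates set, nothing to fix", a forced
 * class must select exactly one rate, only one class may win, and the
 * highest set bit becomes the fixed rate.
 */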
static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
				 u16 rate_base, u64 ra_mask, u8 ra_mode,
				 u32 rate_ctrl, u32 ctrl_skip, bool force)
{
	u8 n, c;

	if (rate_ctrl == ctrl_skip)
		return true;

	n = hweight32(rate_ctrl);
	if (n == 0)
		return true;

	if (force && n != 1)
		return false;

	if (next->enable)
		return false;

	c = __fls(rate_ctrl);
	next->rate = rate_base + c;
	next->ra_mode = ra_mode;
	next->ra_mask = ra_mask;
	next->enable = true;

	return true;
}

#define RTW89_HW_RATE_BY_CHIP_GEN(rate) \
	{ \
		[RTW89_CHIP_AX] = RTW89_HW_RATE_ ## rate, \
		[RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \
	}

void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				const struct cfg80211_bitrate_mask *mask)
{
	struct ieee80211_supported_band *sband;
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct rtw89_phy_rate_pattern next_pattern = {0};
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->sub_entity_idx);
	static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS4_MCS0),
	};
	static const u16 hw_rate_vht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS4_MCS0),
	};
	static const u16 hw_rate_ht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS8),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS16),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS24),
	};
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u8 tx_nss = rtwdev->hal.tx_nss;
	u8 i;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_he[i][chip_gen],
					  RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
					  mask->control[nl_band].he_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i][chip_gen],
					  RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
					  mask->control[nl_band].vht_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i][chip_gen],
					  RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
					  mask->control[nl_band].ht_mcs[i],
					  0, true))
			goto out;

	/* Legacy rates cannot be empty for nl80211_parse_tx_bitrate_mask, and
	 * ieee80211_set_bitrate_mask requires at least one basic rate, so the
	 * decision depends only on whether all bitrates are set or not.
	 */
	sband = rtwdev->hw->wiphy->bands[nl_band];
	if (band == RTW89_BAND_2G) {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
					  RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
					  RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	} else {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
					  RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	}

	if (!next_pattern.enable)
		goto out;

	rtwvif->rate_pattern = next_pattern;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n",
		    next_pattern.rate,
		    next_pattern.ra_mask,
		    next_pattern.ra_mode);
	return;

out:
	rtwvif->rate_pattern.enable = false;
	rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
}

static void rtw89_phy_ra_updata_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;

	rtw89_phy_ra_updata_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}

void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_ra_updata_sta_iter,
					  rtwdev);
}

void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_ra_info *ra = &rtwsta->ra;
	u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi) >> RSSI_FACTOR;
	bool csi = rtw89_sta_has_beamformer_cap(sta);

	rtw89_phy_ra_sta_update(rtwdev, sta, csi);

	if (rssi > 40)
		ra->init_rate_lv = 1;
	else if (rssi > 20)
		ra->init_rate_lv = 2;
	else if (rssi > 1)
		ra->init_rate_lv = 3;
	else
		ra->init_rate_lv = 0;
	ra->upd_all = 1;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d",
		    ra->macid,
		    ra->mode_ctrl,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->init_rate_lv);
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d",
		    ra->dcm_cap,
		    ra->er_cap,
		    ra->ldpc_cap,
		    ra->stbc_cap,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, csi);
}

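/* Compute the TX subchannel (TXSC) index for a data bandwidth narrower
 * than the channel bandwidth. Channel numbers advance by 4 per 20 MHz,
 * so |pri_ch - central_ch| >> 1 enumerates the 20 MHz sub-slots around
 * the center; the fixed values cover the 40/80 MHz sub-blocks.
 */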
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
		      const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsc_idx = 0;
	u8 tmp = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsc_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		txsc_idx = pri_ch > central_ch ? 1 : 2;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (pri_ch - central_ch) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 1;
		} else {
			txsc_idx = pri_ch > central_ch ? 9 : 10;
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (pri_ch > central_ch)
			tmp = (pri_ch - central_ch) >> 1;
		else
			tmp = ((central_ch - pri_ch) >> 1) + 1;

		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			txsc_idx = tmp;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			if (tmp == 1 || tmp == 3)
				txsc_idx = 9;
			else if (tmp == 5 || tmp == 7)
				txsc_idx = 11;
			else if (tmp == 2 || tmp == 4)
				txsc_idx = 10;
			else if (tmp == 6 || tmp == 8)
				txsc_idx = 12;
			else
				return 0xff;
		} else {
			txsc_idx = pri_ch > central_ch ? 13 : 14;
		}
		break;
	case RTW89_CHANNEL_WIDTH_80_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (10 - (pri_ch - central_ch)) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 5;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			txsc_idx = pri_ch > central_ch ? 10 : 12;
		} else {
			txsc_idx = 14;
		}
		break;
	default:
		break;
	}

	return txsc_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);

static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
{
	return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
	       !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
}

u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
		      u32 addr, u32 mask)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 val, direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	val = rtw89_phy_read32_mask(rtwdev, direct_addr, mask);

	return val;
}
EXPORT_SYMBOL(rtw89_phy_read_rf);

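/* Indirect RF read through the software serial interface (SWSI): wait
 * for the interface to go idle, post the path/address, then poll the
 * read-done flag before fetching the data word.
 */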
static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	bool busy;
	bool done;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "read rf busy swsi\n");
		return INV_RF_DATA;
	}

	mask &= RFREG_MASK;

	val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
	rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
				       30, false, rtwdev, R_SWSI_V1,
				       B_SWSI_R_DATA_DONE_V1);
	if (ret) {
		rtw89_err(rtwdev, "read swsi busy\n");
		return INV_RF_DATA;
	}

	return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
}

u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (ad_sel)
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
	else
		return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v1);

bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			u32 addr, u32 mask, u32 data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	rtw89_phy_write32_mask(rtwdev, direct_addr, mask, data);

	/* delay to ensure the write completes */
	udelay(1);

	return true;
}
EXPORT_SYMBOL(rtw89_phy_write_rf);

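/* Indirect RF write through the SWSI. For a partial-field write, the
 * mask is programmed into a dedicated register and the data is shifted
 * into position before the write word is posted.
 */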
static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path rf_path, u32 addr, u32 mask,
				 u32 data)
{
	u8 bit_shift;
	u32 val;
	bool busy, b_msk_en = false;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "write rf busy swsi\n");
		return false;
	}

	data &= RFREG_MASK;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		b_msk_en = true;
		rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
				       mask);
		bit_shift = __ffs(mask);
		data = (data << bit_shift) & RFREG_MASK;
	}

	val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
	      FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
	      FIELD_PREP(B_SWSI_DATA_VAL_V1, data);

	rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);

	return true;
}

bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			   u32 addr, u32 mask, u32 data)
{
	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	if (ad_sel)
		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
	else
		return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v1);

static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
{
	return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
}

static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	chip->ops->bb_reset(rtwdev, phy_idx);
}

static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
				    const struct rtw89_reg2_def *reg,
				    enum rtw89_rf_path rf_path,
				    void *extra_data)
{
	if (reg->addr == 0xfe)
		mdelay(50);
	else if (reg->addr == 0xfd)
		mdelay(5);
	else if (reg->addr == 0xfc)
		mdelay(1);
	else if (reg->addr == 0xfb)
		udelay(50);
	else if (reg->addr == 0xfa)
		udelay(5);
	else if (reg->addr == 0xf9)
		udelay(1);
	else
		rtw89_phy_write32(rtwdev, reg->addr, reg->data);
}

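/* In the BB gain table the "address" word is a packed descriptor rather
 * than a register address: cfg_type selects one of the parsers below,
 * and the remaining bytes carry the gain band, RF path, and either an
 * entry type or a bw/rxsc pair.
 */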
union rtw89_phy_bb_gain_arg {
	u32 addr;
	struct {
		union {
			u8 type;
			struct {
				u8 rxsc_start:4;
				u8 bw:4;
			};
		};
		u8 path;
		u8 gain_band;
		u8 cfg_type;
	};
} __packed;

static void
rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
			    union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain[gband][path][i] = data & 0xff;
		break;
	case 2:
		for (i = 0; i < 2; i++, data >>= 8)
			gain->tia_gain[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain error {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

enum rtw89_phy_bb_rxsc_start_idx {
	RTW89_BB_RXSC_START_IDX_FULL = 0,
	RTW89_BB_RXSC_START_IDX_20 = 1,
	RTW89_BB_RXSC_START_IDX_20_1 = 5,
	RTW89_BB_RXSC_START_IDX_40 = 9,
	RTW89_BB_RXSC_START_IDX_80 = 13,
};

static void
rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
			  union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
	u8 rxsc_start = arg.rxsc_start;
	u8 bw = arg.bw;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	u8 rxsc;
	s8 ofst;
	int i;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		gain->rpl_ofst_20[gband][path] = (s8)data;
		break;
	case RTW89_CHANNEL_WIDTH_40:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_40[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_40[gband][path][rxsc] = ofst;
			}
		}
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_80[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
			}
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_160[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_80 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		}
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n",
			   arg.addr, data, bw);
		break;
	}
}

static void
rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
			     union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain bypass {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

static void
rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
			    union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 2:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 3:
		for (i = 4; i < 8; i++, data >>= 8)
			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

static void rtw89_phy_config_bb_gain(struct rtw89_dev *rtwdev,
				     const struct rtw89_reg2_def *reg,
				     enum rtw89_rf_path rf_path,
				     void *extra_data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
		return;

	if (arg.path >= chip->rf_path_num)
		return;

	if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
		rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
		return;
	}

	switch (arg.cfg_type) {
	case 0:
		rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data);
		break;
	case 1:
		rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data);
		break;
	case 2:
		rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data);
		break;
	case 3:
		rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
		break;
	case 4:
		/* This cfg_type is only used by rfe_type >= 50 with eFEM */
		if (efuse->rfe_type < 50)
			break;
		fallthrough;
	default:
		rtw89_warn(rtwdev,
			   "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
			   arg.addr, reg->data, arg.cfg_type);
		break;
	}
}

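/* RF register writes that the firmware must replay are staged into
 * fixed-size H2C pages, each entry packing the register address above
 * the 20-bit data in a single 32-bit word.
 */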
static void
rtw89_phy_config_rf_reg_store(struct rtw89_dev *rtwdev,
			      const struct rtw89_reg2_def *reg,
			      enum rtw89_rf_path rf_path,
			      struct rtw89_fw_h2c_rf_reg_info *info)
{
	u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;
	u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;

	if (page >= RTW89_H2C_RF_PAGE_NUM) {
		rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d",
			   rf_path, info->curr_idx);
		return;
	}

	info->rtw89_phy_config_rf_h2c[page][idx] =
		cpu_to_le32((reg->addr << 20) | reg->data);
	info->curr_idx++;
}

static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
				      struct rtw89_fw_h2c_rf_reg_info *info)
{
	u16 remain = info->curr_idx;
	u16 len = 0;
	u8 i;
	int ret = 0;

	if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) {
		rtw89_warn(rtwdev,
			   "rf reg h2c total len %d larger than %d\n",
			   remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) {
		len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain;
		ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i);
		if (ret)
			goto out;
	}
out:
	info->curr_idx = 0;

	return ret;
}

static void rtw89_phy_config_rf_reg_noio(struct rtw89_dev *rtwdev,
					 const struct rtw89_reg2_def *reg,
					 enum rtw89_rf_path rf_path,
					 void *extra_data)
{
	u32 addr = reg->addr;

	if (addr == 0xfe || addr == 0xfd || addr == 0xfc || addr == 0xfb ||
	    addr == 0xfa || addr == 0xf9)
		return;

	if (rtw89_chip_rf_v1(rtwdev) && addr < 0x100)
		return;

	rtw89_phy_config_rf_reg_store(rtwdev, reg, rf_path,
				      (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
}

static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
				    const struct rtw89_reg2_def *reg,
				    enum rtw89_rf_path rf_path,
				    void *extra_data)
{
	if (reg->addr == 0xfe) {
		mdelay(50);
	} else if (reg->addr == 0xfd) {
		mdelay(5);
	} else if (reg->addr == 0xfc) {
		mdelay(1);
	} else if (reg->addr == 0xfb) {
		udelay(50);
	} else if (reg->addr == 0xfa) {
		udelay(5);
	} else if (reg->addr == 0xf9) {
		udelay(1);
	} else {
		rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data);
		rtw89_phy_config_rf_reg_store(rtwdev, reg, rf_path,
					      (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
	}
}

void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
				const struct rtw89_reg2_def *reg,
				enum rtw89_rf_path rf_path,
				void *extra_data)
{
	rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);

	if (reg->addr < 0x100)
		return;

	rtw89_phy_config_rf_reg_store(rtwdev, reg, rf_path,
				      (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
}
EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);

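/* A PHY table can begin with "headline" entries keyed on (RFE type,
 * chip CV). Pick the best match in decreasing strictness: exact RFE+CV,
 * RFE with don't-care CV, highest CV for this RFE, and finally highest
 * CV among don't-care RFE entries.
 */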
static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
				  const struct rtw89_phy_table *table,
				  u32 *headline_size, u32 *headline_idx,
				  u8 rfe, u8 cv)
{
	const struct rtw89_reg2_def *reg;
	u32 headline;
	u32 compare, target;
	u8 rfe_para, cv_para;
	u8 cv_max = 0;
	bool case_matched = false;
	u32 i;

	for (i = 0; i < table->n_regs; i++) {
		reg = &table->regs[i];
		headline = get_phy_headline(reg->addr);
		if (headline != PHY_HEADLINE_VALID)
			break;
	}
	*headline_size = i;
	if (*headline_size == 0)
		return 0;

	/* case 1: RFE match, CV match */
	compare = get_phy_compare(rfe, cv);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 2: RFE match, CV don't care */
	compare = get_phy_compare(rfe, PHY_COND_DONT_CARE);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 3: RFE match, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == rfe) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	/* case 4: RFE don't care, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == PHY_COND_DONT_CARE) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	return -EINVAL;
}

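/* Walk the table body as a small conditional script: BRANCH_IF/ELIF
 * carry a candidate target, CHECK compares it with the selected headline
 * and opens the matched region (at most once per chain), BRANCH_ELSE
 * without a prior match indicates a malformed table, and BRANCH_END
 * resets the state. Plain register entries are applied only while
 * inside a matched region.
 */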
static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev,
			       const struct rtw89_phy_table *table,
			       void (*config)(struct rtw89_dev *rtwdev,
					      const struct rtw89_reg2_def *reg,
					      enum rtw89_rf_path rf_path,
					      void *data),
			       void *extra_data)
{
	const struct rtw89_reg2_def *reg;
	enum rtw89_rf_path rf_path = table->rf_path;
	u8 rfe = rtwdev->efuse.rfe_type;
	u8 cv = rtwdev->hal.cv;
	u32 i;
	u32 headline_size = 0, headline_idx = 0;
	u32 target = 0, cfg_target;
	u8 cond;
	bool is_matched = true;
	bool target_found = false;
	int ret;

	ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size,
				     &headline_idx, rfe, cv);
	if (ret) {
		rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv);
		return;
	}

	cfg_target = get_phy_target(table->regs[headline_idx].addr);
	for (i = headline_size; i < table->n_regs; i++) {
		reg = &table->regs[i];
		cond = get_phy_cond(reg->addr);
		switch (cond) {
		case PHY_COND_BRANCH_IF:
		case PHY_COND_BRANCH_ELIF:
			target = get_phy_target(reg->addr);
			break;
		case PHY_COND_BRANCH_ELSE:
			is_matched = false;
			if (!target_found) {
				rtw89_warn(rtwdev, "failed to load CR %x/%x\n",
					   reg->addr, reg->data);
				return;
			}
			break;
		case PHY_COND_BRANCH_END:
			is_matched = true;
			target_found = false;
			break;
		case PHY_COND_CHECK:
			if (target_found) {
				is_matched = false;
				break;
			}

			if (target == cfg_target) {
				is_matched = true;
				target_found = true;
			} else {
				is_matched = false;
				target_found = false;
			}
			break;
		default:
			if (is_matched)
				config(rtwdev, reg, rf_path, extra_data);
			break;
		}
	}
}

void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *bb_table;
	const struct rtw89_phy_table *bb_gain_table;

	bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
	rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
	rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0);

	bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
	if (bb_gain_table)
		rtw89_phy_init_reg(rtwdev, bb_gain_table,
				   rtw89_phy_config_bb_gain, NULL);
	rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
}

static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32(rtwdev, 0x8080, 0x4);
	udelay(1);
	return rtw89_phy_read32(rtwdev, 0x8080);
}

void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
{
	void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
		       enum rtw89_rf_path rf_path, void *data);
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *rf_table;
	struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
	u8 path;

	rf_reg_info = kzalloc(sizeof(*rf_reg_info), GFP_KERNEL);
	if (!rf_reg_info)
		return;

	for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
		rf_table = elm_info->rf_radio[path] ?
			   elm_info->rf_radio[path] : chip->rf_table[path];
		rf_reg_info->rf_path = rf_table->rf_path;
		if (noio)
			config = rtw89_phy_config_rf_reg_noio;
		else
			config = rf_table->config ? rf_table->config :
				 rtw89_phy_config_rf_reg;
		rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info);
		if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
			rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
				   rf_reg_info->rf_path);
	}
	kfree(rf_reg_info);
}

static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *nctl_table;
	u32 val;
	int ret;

	/* IQK/DPK clock & reset */
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3);
	rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1);
	rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
	if (chip->chip_id != RTL8851B)
		rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
	if (chip->chip_id == RTL8852B)
		rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);

	/* check 0x8080 */
	rtw89_phy_write32(rtwdev, R_NCTL_CFG, 0x8);

	ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10,
				1000, false, rtwdev);
	if (ret)
		rtw89_err(rtwdev, "failed to poll nctl block\n");

	nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
	rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);

	if (chip->nctl_post_table)
		rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
}

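/* When DBCC is enabled, PHY1 registers mirror PHY0 at a fixed offset:
 * 0x2000 for the AX register pages handled below, 0x20000 on BE chips.
 */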
static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
{
	u32 phy_page = addr >> 8;
	u32 ofst = 0;

	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
		return addr < 0x10000 ? 0x20000 : 0;

	switch (phy_page) {
	case 0x6:
	case 0x7:
	case 0x8:
	case 0x9:
	case 0xa:
	case 0xb:
	case 0xc:
	case 0xd:
	case 0x19:
	case 0x1a:
	case 0x1b:
		ofst = 0x2000;
		break;
	default:
		/* warning case */
		ofst = 0;
		break;
	}

	if (phy_page >= 0x40 && phy_page <= 0x4f)
		ofst = 0x2000;

	return ofst;
}

void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			   u32 data, enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	rtw89_phy_write32_mask(rtwdev, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx);

u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			 enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	return rtw89_phy_read32_mask(rtwdev, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read32_idx);

void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			    u32 val)
{
	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);

	if (!rtwdev->dbcc_en)
		return;

	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
}

void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
			      const struct rtw89_phy_reg3_tbl *tbl)
{
	const struct rtw89_reg3_def *reg3;
	int i;

	for (i = 0; i < tbl->size; i++) {
		reg3 = &tbl->reg3[i];
		rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
	}
}
EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);

static const u8 rtw89_rs_idx_num[] = {
	[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
	[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
	[RTW89_RS_MCS] = RTW89_RATE_MCS_NUM,
	[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM,
	[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM,
};

static const u8 rtw89_rs_nss_num[] = {
	[RTW89_RS_CCK] = 1,
	[RTW89_RS_OFDM] = 1,
	[RTW89_RS_MCS] = RTW89_NSS_NUM,
	[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_NUM,
	[RTW89_RS_OFFSET] = 1,
};

static const u8 _byr_of_rs[] = {
	[RTW89_RS_CCK] = offsetof(struct rtw89_txpwr_byrate, cck),
	[RTW89_RS_OFDM] = offsetof(struct rtw89_txpwr_byrate, ofdm),
	[RTW89_RS_MCS] = offsetof(struct rtw89_txpwr_byrate, mcs),
	[RTW89_RS_HEDCM] = offsetof(struct rtw89_txpwr_byrate, hedcm),
	[RTW89_RS_OFFSET] = offsetof(struct rtw89_txpwr_byrate, offset),
};

#define _byr_seek(rs, raw) ((s8 *)(raw) + _byr_of_rs[rs])
#define _byr_idx(rs, nss, idx) ((nss) * rtw89_rs_idx_num[rs] + (idx))
#define _byr_chk(rs, nss, idx) \
	((nss) < rtw89_rs_nss_num[rs] && (idx) < rtw89_rs_idx_num[rs])

void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
				 const struct rtw89_txpwr_table *tbl)
{
	const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data;
	const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size;
	s8 *byr;
	u32 data;
	u8 i, idx;

	for (; cfg < end; cfg++) {
		byr = _byr_seek(cfg->rs, &rtwdev->byr[cfg->band]);
		data = cfg->data;

		for (i = 0; i < cfg->len; i++, data >>= 8) {
			idx = _byr_idx(cfg->rs, cfg->nss, (cfg->shf + i));
			byr[idx] = (s8)(data & 0xff);
		}
	}
}
EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);

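/* TX power tables are stored with the RF block's fractional precision
 * (txpwr_factor_rf fractional bits); the MAC consumes fewer fractional
 * bits, so scale down by the factor difference.
 */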
#define _phy_txpwr_rf_to_mac(rtwdev, txpwr_rf)				\
({									\
	const struct rtw89_chip_info *__c = (rtwdev)->chip;		\
	(txpwr_rf) >> (__c->txpwr_factor_rf - __c->txpwr_factor_mac);	\
})

static
s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band,
			       const struct rtw89_rate_desc *rate_desc)
{
	s8 *byr;
	u8 idx;

	if (rate_desc->rs == RTW89_RS_CCK)
		band = RTW89_BAND_2G;

	if (!_byr_chk(rate_desc->rs, rate_desc->nss, rate_desc->idx)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "[TXPWR] unknown byrate desc rs=%d nss=%d idx=%d\n",
			    rate_desc->rs, rate_desc->nss, rate_desc->idx);

		return 0;
	}

	byr = _byr_seek(rate_desc->rs, &rtwdev->byr[band]);
	idx = _byr_idx(rate_desc->rs, rate_desc->nss, rate_desc->idx);

	return _phy_txpwr_rf_to_mac(rtwdev, byr[idx]);
}

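/* The limit tables appear to keep one slot per two channel numbers so
 * that 20 MHz channels and wider-bandwidth center channels land on
 * distinct slots; each case folds one block of channel numbers onto a
 * contiguous index range.
 */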
static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
{
	switch (channel_6g) {
	case 1 ... 29:
		return (channel_6g - 1) / 2;
	case 33 ... 61:
		return (channel_6g - 3) / 2;
	case 65 ... 93:
		return (channel_6g - 5) / 2;
	case 97 ... 125:
		return (channel_6g - 7) / 2;
	case 129 ... 157:
		return (channel_6g - 9) / 2;
	case 161 ... 189:
		return (channel_6g - 11) / 2;
	case 193 ... 221:
		return (channel_6g - 13) / 2;
	case 225 ... 253:
		return (channel_6g - 15) / 2;
	default:
		rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
		return 0;
	}
}

static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
{
	if (band == RTW89_BAND_6G)
		return rtw89_channel_6g_to_idx(rtwdev, channel);

	switch (channel) {
	case 1 ... 14:
		return channel - 1;
	case 36 ... 64:
		return (channel - 36) / 2;
	case 100 ... 144:
		return ((channel - 100) / 2) + 15;
	case 149 ... 177:
		return ((channel - 149) / 2) + 38;
	default:
		rtw89_warn(rtwdev, "unknown channel: %d\n", channel);
		return 0;
	}
}

s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
			      u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	s8 lmt = 0, sar;

	switch (band) {
	case RTW89_BAND_2G:
		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][RTW89_WW]
				       [RTW89_REG_6GHZ_POWER_DFLT]
				       [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	lmt = _phy_txpwr_rf_to_mac(rtwdev, lmt);
	sar = rtw89_query_sar(rtwdev, freq);

	return min(lmt, sar);
}
EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);

#define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch)		\
	do {								\
		u8 __i;							\
		for (__i = 0; __i < RTW89_BF_NUM; __i++)		\
			ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev,	\
							      band,	\
							      bw, ntx,	\
							      rs, __i,	\
							      (ch));	\
	} while (0)

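/* For bandwidths above 20 MHz, the limit of every 20/40/80 MHz sub-block
 * is collected separately. Channel numbers advance by 4 per 20 MHz, so
 * sub-block centers sit at ch +/- 2, 4, 6, ... around the wide channel's
 * center.
 */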
1709static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev,
1710					   struct rtw89_txpwr_limit *lmt,
1711					   u8 band, u8 ntx, u8 ch)
1712{
1713	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
1714				    ntx, RTW89_RS_CCK, ch);
1715	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
1716				    ntx, RTW89_RS_CCK, ch);
1717	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
1718				    ntx, RTW89_RS_OFDM, ch);
1719	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
1720				    RTW89_CHANNEL_WIDTH_20,
1721				    ntx, RTW89_RS_MCS, ch);
1722}
1723
1724static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev,
1725					   struct rtw89_txpwr_limit *lmt,
1726					   u8 band, u8 ntx, u8 ch, u8 pri_ch)
1727{
1728	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
1729				    ntx, RTW89_RS_CCK, ch - 2);
1730	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
1731				    ntx, RTW89_RS_CCK, ch);
1732	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
1733				    ntx, RTW89_RS_OFDM, pri_ch);
1734	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
1735				    RTW89_CHANNEL_WIDTH_20,
1736				    ntx, RTW89_RS_MCS, ch - 2);
1737	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
1738				    RTW89_CHANNEL_WIDTH_20,
1739				    ntx, RTW89_RS_MCS, ch + 2);
1740	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
1741				    RTW89_CHANNEL_WIDTH_40,
1742				    ntx, RTW89_RS_MCS, ch);
1743}
1744
1745static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
1746					   struct rtw89_txpwr_limit *lmt,
1747					   u8 band, u8 ntx, u8 ch, u8 pri_ch)
1748{
1749	s8 val_0p5_n[RTW89_BF_NUM];
1750	s8 val_0p5_p[RTW89_BF_NUM];
1751	u8 i;
1752
1753	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
1754				    ntx, RTW89_RS_OFDM, pri_ch);
1755	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
1756				    RTW89_CHANNEL_WIDTH_20,
1757				    ntx, RTW89_RS_MCS, ch - 6);
1758	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
1759				    RTW89_CHANNEL_WIDTH_20,
1760				    ntx, RTW89_RS_MCS, ch - 2);
1761	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
1762				    RTW89_CHANNEL_WIDTH_20,
1763				    ntx, RTW89_RS_MCS, ch + 2);
1764	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
1765				    RTW89_CHANNEL_WIDTH_20,
1766				    ntx, RTW89_RS_MCS, ch + 6);
1767	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
1768				    RTW89_CHANNEL_WIDTH_40,
1769				    ntx, RTW89_RS_MCS, ch - 4);
1770	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
1771				    RTW89_CHANNEL_WIDTH_40,
1772				    ntx, RTW89_RS_MCS, ch + 4);
1773	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
1774				    RTW89_CHANNEL_WIDTH_80,
1775				    ntx, RTW89_RS_MCS, ch);
1776
1777	__fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
1778				    ntx, RTW89_RS_MCS, ch - 4);
1779	__fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
1780				    ntx, RTW89_RS_MCS, ch + 4);
1781
1782	for (i = 0; i < RTW89_BF_NUM; i++)
1783		lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
1784}
1785
static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
					    struct rtw89_txpwr_limit *lmt,
					    u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	s8 val_0p5_n[RTW89_BF_NUM];
	s8 val_0p5_p[RTW89_BF_NUM];
	s8 val_2p5_n[RTW89_BF_NUM];
	s8 val_2p5_p[RTW89_BF_NUM];
	u8 i;

	/* fill ofdm section */
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);

	/* fill mcs 20m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 14);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 10);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 10);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 14);

	/* fill mcs 40m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 12);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 12);

	/* fill mcs 80m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch + 8);

	/* fill mcs 160m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band,
				    RTW89_CHANNEL_WIDTH_160,
				    ntx, RTW89_RS_MCS, ch);

	/* fill mcs 40m 0p5 section */
	__fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);

	/* fill mcs 40m 2p5 section */
	__fill_txpwr_limit_nonbf_bf(val_2p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(val_2p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 8);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]);
}

static
void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
				const struct rtw89_chan *chan,
				struct rtw89_txpwr_limit *lmt,
				u8 ntx)
{
	u8 band = chan->band_type;
	u8 pri_ch = chan->primary_channel;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;

	memset(lmt, 0, sizeof(*lmt));

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, band, ntx, ch);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, band, ntx, ch,
					       pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, band, ntx, ch,
					       pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		rtw89_phy_fill_txpwr_limit_160m(rtwdev, lmt, band, ntx, ch,
						pri_ch);
		break;
	}
}

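/* Look up the RU TX power limit for the current regulatory domain, falling
 * back to the worldwide (RTW89_WW) entry when no domain-specific limit is
 * configured, then convert the value from RF to MAC scale and cap it by
 * the SAR constraint for this frequency.
 */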
static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
					u8 ru, u8 ntx, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	s8 lmt_ru = 0, sar;

	switch (band) {
	case RTW89_BAND_2G:
		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][RTW89_WW]
					     [RTW89_REG_6GHZ_POWER_DFLT]
					     [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	lmt_ru = _phy_txpwr_rf_to_mac(rtwdev, lmt_ru);
	sar = rtw89_query_sar(rtwdev, freq);

	return min(lmt_ru, sar);
}

static void
rtw89_phy_fill_txpwr_limit_ru_20m(struct rtw89_dev *rtwdev,
				  struct rtw89_txpwr_limit_ru *lmt_ru,
				  u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch);
	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch);
	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch);
}

static void
rtw89_phy_fill_txpwr_limit_ru_40m(struct rtw89_dev *rtwdev,
				  struct rtw89_txpwr_limit_ru *lmt_ru,
				  u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch - 2);
	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch + 2);
	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch - 2);
	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch + 2);
	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch - 2);
	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch + 2);
}

static void
rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev,
				  struct rtw89_txpwr_limit_ru *lmt_ru,
				  u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch - 6);
	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch - 2);
	lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch + 2);
	lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch + 6);
	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch - 6);
	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch - 2);
	lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch + 2);
	lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch + 6);
	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch - 6);
	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch - 2);
	lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch + 2);
	lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch + 6);
}

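/* The offsets below are the centers of the eight 20 MHz sub-channels
 * relative to the 160 MHz channel center; the table must stay in sync
 * with RTW89_RU_SEC_NUM (see the static_assert in the function).
 */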
static void
rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev,
				   struct rtw89_txpwr_limit_ru *lmt_ru,
				   u8 band, u8 ntx, u8 ch)
{
	static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
	int i;

	static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM);
	for (i = 0; i < RTW89_RU_SEC_NUM; i++) {
		lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								RTW89_RU26,
								ntx,
								ch + ofst[i]);
		lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								RTW89_RU52,
								ntx,
								ch + ofst[i]);
		lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								 RTW89_RU106,
								 ntx,
								 ch + ofst[i]);
	}
}

static
void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
				   const struct rtw89_chan *chan,
				   struct rtw89_txpwr_limit_ru *lmt_ru,
				   u8 ntx)
{
	u8 band = chan->band_type;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;

	memset(lmt_ru, 0, sizeof(*lmt_ru));

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		rtw89_phy_fill_txpwr_limit_ru_20m(rtwdev, lmt_ru, band, ntx,
						  ch);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rtw89_phy_fill_txpwr_limit_ru_40m(rtwdev, lmt_ru, band, ntx,
						  ch);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, band, ntx,
						  ch);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		rtw89_phy_fill_txpwr_limit_ru_160m(rtwdev, lmt_ru, band, ntx,
						   ch);
		break;
	}
}

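/* Program per-rate TX power: for each NSS and each rate section, read the
 * byrate power of four consecutive rate indexes, pack them as four s8
 * fields into one 32-bit word, and write the words to consecutive
 * registers starting at R_AX_PWR_BY_RATE. The BUILD_BUG_ONs below ensure
 * every section's index count is a multiple of four so the packing never
 * leaves a partial word.
 */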
void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev,
				const struct rtw89_chan *chan,
				enum rtw89_phy_idx phy_idx)
{
	u8 max_nss_num = rtwdev->chip->rf_path_num;
	static const u8 rs[] = {
		RTW89_RS_CCK,
		RTW89_RS_OFDM,
		RTW89_RS_MCS,
		RTW89_RS_HEDCM,
	};
	struct rtw89_rate_desc cur;
	u8 band = chan->band_type;
	u8 ch = chan->channel;
	u32 addr, val;
	s8 v[4] = {};
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr byrate with ch=%d\n", ch);

	BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_CCK] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_OFDM] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_MCS] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num[RTW89_RS_HEDCM] % 4);

	addr = R_AX_PWR_BY_RATE;
	for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) {
		for (i = 0; i < ARRAY_SIZE(rs); i++) {
			if (cur.nss >= rtw89_rs_nss_num[rs[i]])
				continue;

			cur.rs = rs[i];
			for (cur.idx = 0; cur.idx < rtw89_rs_idx_num[rs[i]];
			     cur.idx++) {
				v[cur.idx % 4] =
					rtw89_phy_read_txpwr_byrate(rtwdev,
								    band,
								    &cur);

				if ((cur.idx + 1) % 4)
					continue;

				val = FIELD_PREP(GENMASK(7, 0), v[0]) |
				      FIELD_PREP(GENMASK(15, 8), v[1]) |
				      FIELD_PREP(GENMASK(23, 16), v[2]) |
				      FIELD_PREP(GENMASK(31, 24), v[3]);

				rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr,
							val);
				addr += 4;
			}
		}
	}
}
EXPORT_SYMBOL(rtw89_phy_set_txpwr_byrate);

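/* Program the per-modulation TX power offsets: five 4-bit signed values
 * packed into the low 20 bits of R_AX_PWR_RATE_OFST_CTRL.
 */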
void rtw89_phy_set_txpwr_offset(struct rtw89_dev *rtwdev,
				const struct rtw89_chan *chan,
				enum rtw89_phy_idx phy_idx)
{
	struct rtw89_rate_desc desc = {
		.nss = RTW89_NSS_1,
		.rs = RTW89_RS_OFFSET,
	};
	u8 band = chan->band_type;
	s8 v[RTW89_RATE_OFFSET_NUM] = {};
	u32 val;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");

	for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM; desc.idx++)
		v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, &desc);

	BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM != 5);
	val = FIELD_PREP(GENMASK(3, 0), v[0]) |
	      FIELD_PREP(GENMASK(7, 4), v[1]) |
	      FIELD_PREP(GENMASK(11, 8), v[2]) |
	      FIELD_PREP(GENMASK(15, 12), v[3]) |
	      FIELD_PREP(GENMASK(19, 16), v[4]);

	rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
				     GENMASK(19, 0), val);
}
EXPORT_SYMBOL(rtw89_phy_set_txpwr_offset);

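/* rtw89_phy_set_txpwr_limit() and rtw89_phy_set_txpwr_limit_ru() below
 * share one scheme: fill a limit page per TX path, then stream the page
 * into hardware four s8 entries per 32-bit register write, starting at
 * R_AX_PWR_LMT and R_AX_PWR_RU_LMT respectively.
 */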
void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev,
			       const struct rtw89_chan *chan,
			       enum rtw89_phy_idx phy_idx)
{
	u8 max_ntx_num = rtwdev->chip->rf_path_num;
	struct rtw89_txpwr_limit lmt;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;
	const s8 *ptr;
	u32 addr, val;
	u8 i, j;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);

	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit) !=
		     RTW89_TXPWR_LMT_PAGE_SIZE);

	addr = R_AX_PWR_LMT;
	for (i = 0; i < max_ntx_num; i++) {
		rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt, i);

		ptr = (s8 *)&lmt;
		for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE;
		     j += 4, addr += 4, ptr += 4) {
			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
			      FIELD_PREP(GENMASK(31, 24), ptr[3]);

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
		}
	}
}
EXPORT_SYMBOL(rtw89_phy_set_txpwr_limit);

void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
				  const struct rtw89_chan *chan,
				  enum rtw89_phy_idx phy_idx)
{
	u8 max_ntx_num = rtwdev->chip->rf_path_num;
	struct rtw89_txpwr_limit_ru lmt_ru;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;
	const s8 *ptr;
	u32 addr, val;
	u8 i, j;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);

	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru) !=
		     RTW89_TXPWR_LMT_RU_PAGE_SIZE);

	addr = R_AX_PWR_RU_LMT;
	for (i = 0; i < max_ntx_num; i++) {
		rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru, i);

		ptr = (s8 *)&lmt_ru;
		for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE;
		     j += 4, addr += 4, ptr += 4) {
			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
			      FIELD_PREP(GENMASK(31, 24), ptr[3]);

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
		}
	}
}
EXPORT_SYMBOL(rtw89_phy_set_txpwr_limit_ru);

struct rtw89_phy_iter_ra_data {
	struct rtw89_dev *rtwdev;
	struct sk_buff *c2h;
};

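/* Station iterator for RA report C2H events: match the report's MAC ID
 * against this station, decode mode/rate/bandwidth/GI-LTF (with extension
 * bits on v1-format chips) into a rate_info, and refresh the per-station
 * RA report and A-MSDU length limit from the resulting bitrate.
 */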
static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data;
	struct rtw89_dev *rtwdev = ra_data->rtwdev;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	const struct rtw89_c2h_ra_rpt *c2h =
		(const struct rtw89_c2h_ra_rpt *)ra_data->c2h->data;
	struct rtw89_ra_report *ra_report = &rtwsta->ra_report;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	bool format_v1 = chip->chip_gen == RTW89_CHIP_BE;
	u8 mode, rate, bw, giltf, mac_id;
	u16 legacy_bitrate;
	bool valid;
	u8 mcs = 0;
	u8 t;

	mac_id = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MACID);
	if (mac_id != rtwsta->mac_id)
		return;

	rate = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MCSNSS);
	bw = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW);
	giltf = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_GILTF);
	mode = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL);

	if (format_v1) {
		t = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MCSNSS_B7);
		rate |= u8_encode_bits(t, BIT(7));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW_B2);
		bw |= u8_encode_bits(t, BIT(2));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL_B2);
		mode |= u8_encode_bits(t, BIT(2));
	}

	if (mode == RTW89_RA_RPT_MODE_LEGACY) {
		valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate);
		if (!valid)
			return;
	}

	memset(&ra_report->txrate, 0, sizeof(ra_report->txrate));

	switch (mode) {
	case RTW89_RA_RPT_MODE_LEGACY:
		ra_report->txrate.legacy = legacy_bitrate;
		break;
	case RTW89_RA_RPT_MODE_HT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS;
		if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw))
			rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate),
						FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate));
		else
			rate = FIELD_GET(RTW89_RA_RATE_MASK_HT_MCS, rate);
		ra_report->txrate.mcs = rate;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		mcs = ra_report->txrate.mcs & 0x07;
		break;
	case RTW89_RA_RPT_MODE_VHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_HE:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6;
		else
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	}

	ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw);
	ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
	ra_report->hw_rate = format_v1 ?
			     u16_encode_bits(mode, RTW89_HW_RATE_V1_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_V1_MASK_VAL) :
			     u16_encode_bits(mode, RTW89_HW_RATE_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_MASK_VAL);
	ra_report->might_fallback_legacy = mcs <= 2;
	sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
	rtwsta->max_agg_wait = sta->deflink.agg.max_rc_amsdu_len / 1500 - 1;
}

static void
rtw89_phy_c2h_ra_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	struct rtw89_phy_iter_ra_data ra_data;

	ra_data.rtwdev = rtwdev;
	ra_data.c2h = c2h;
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_c2h_ra_rpt_iter,
					  &ra_data);
}

static
void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
					  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt,
	[RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL,
	[RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
};

void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			  u32 len, u8 class, u8 func)
{
	void (*handler)(struct rtw89_dev *rtwdev,
			struct sk_buff *c2h, u32 len) = NULL;

	switch (class) {
	case RTW89_PHY_C2H_CLASS_RA:
		if (func < RTW89_PHY_C2H_FUNC_RA_MAX)
			handler = rtw89_phy_c2h_ra_handler[func];
		break;
	case RTW89_PHY_C2H_CLASS_DM:
		if (func == RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY)
			return;
		fallthrough;
	default:
		rtw89_info(rtwdev, "c2h class %d not supported\n", class);
		return;
	}
	if (!handler) {
		rtw89_info(rtwdev, "c2h class %d func %d not supported\n",
			   class, func);
		return;
	}
	handler(rtwdev, skb, len);
}

static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
{
	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
	u32 reg_mask;

	if (sc_xo)
		reg_mask = xtal->sc_xo_mask;
	else
		reg_mask = xtal->sc_xi_mask;

	return (u8)rtw89_read32_mask(rtwdev, xtal->xcap_reg, reg_mask);
}

static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo,
				       u8 val)
{
	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
	u32 reg_mask;

	if (sc_xo)
		reg_mask = xtal->sc_xo_mask;
	else
		reg_mask = xtal->sc_xi_mask;

	rtw89_write32_mask(rtwdev, xtal->xcap_reg, reg_mask, val);
}

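/* Write the crystal capacitance code to both SC_XO and SC_XI. RTL8852A and
 * RTL8851B expose the xcap field through a direct register; other chips go
 * through the XTAL_SI indirect interface. The applied value is read back
 * to track the offset from the default cap.
 */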
static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
					  u8 crystal_cap, bool force)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 sc_xi_val, sc_xo_val;

	if (!force && cfo->crystal_cap == crystal_cap)
		return;
	crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
		rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
		rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
		sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
		sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
	} else {
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO,
					crystal_cap, XTAL_SC_XO_MASK);
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI,
					crystal_cap, XTAL_SC_XI_MASK);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val);
	}
	cfo->crystal_cap = sc_xi_val;
	cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xi=0x%x\n", sc_xi_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xo=0x%x\n", sc_xo_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Get xcap_ofst=%d\n",
		    cfo->x_cap_ofst);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set xcap OK\n");
}

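/* Nudge the crystal cap one code toward its default value and clear the
 * adjust flag so fine-grained tracking restarts from a known state.
 */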
static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	u8 cap;

	cfo->def_x_cap = cfo->crystal_cap_default & B_AX_XTAL_SC_MASK;
	cfo->is_adjust = false;
	if (cfo->crystal_cap == cfo->def_x_cap)
		return;
	cap = cfo->crystal_cap;
	cap += (cap > cfo->def_x_cap ? -1 : 1);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, cap, false);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "(0x%x) approach to dflt_val=(0x%x)\n", cfo->crystal_cap,
		    cfo->def_x_cap);
}

static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
{
	const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	s32 cfo_avg_312;
	s32 dcfo_comp_val;
	int sign;

	if (!is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n",
			    is_linked);
		return;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
	if (curr_cfo == 0)
		return;
	dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
	sign = curr_cfo > 0 ? 1 : -1;
	cfo_avg_312 = curr_cfo / 625 + sign * dcfo_comp_val;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "avg_cfo_312=%d step\n", cfo_avg_312);
	if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
		cfo_avg_312 = -cfo_avg_312;
	rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask,
			       cfo_avg_312);
}

static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	rtw89_phy_set_phy_regs(rtwdev, R_DCFO_OPT, B_DCFO_OPT_EN, 1);
	rtw89_phy_set_phy_regs(rtwdev, R_DCFO_WEIGHT, B_DCFO_WEIGHT_MSK, 8);

	if (chip->cfo_hw_comp)
		rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2,
				   B_AX_PWR_UL_CFO_MASK, 0x6);
	else
		rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2, B_AX_PWR_UL_CFO_MASK);
}

static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
	cfo->crystal_cap = cfo->crystal_cap_default;
	cfo->def_x_cap = cfo->crystal_cap;
	cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f);
	cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1);
	cfo->is_adjust = false;
	cfo->divergence_lock_en = false;
	cfo->x_cap_ofst = 0;
	cfo->lock_cnt = 0;
	cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
	cfo->apply_compensation = false;
	cfo->residual_cfo_acc = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n",
		    cfo->crystal_cap_default);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true);
	rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1);
	rtw89_dcfo_comp_init(rtwdev);
	cfo->cfo_timer_ms = 2000;
	cfo->cfo_trig_by_timer_en = false;
	cfo->phy_cfo_trk_cnt = 0;
	cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
	cfo->cfo_ul_ofdma_acc_mode = RTW89_CFO_UL_OFDMA_ACC_ENABLE;
}

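/* Adjust the crystal cap with hysteresis: tracking starts once |CFO|
 * exceeds CFO_TRK_ENABLE_TH and stops below CFO_TRK_STOP_TH; the step
 * size (1/3/5/7 codes) grows with the magnitude of the residual CFO.
 */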
static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
					     s32 curr_cfo)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s8 crystal_cap = cfo->crystal_cap;
	s32 cfo_abs = abs(curr_cfo);
	int sign;

	if (!cfo->is_adjust) {
		if (cfo_abs > CFO_TRK_ENABLE_TH)
			cfo->is_adjust = true;
	} else {
		if (cfo_abs < CFO_TRK_STOP_TH)
			cfo->is_adjust = false;
	}
	if (!cfo->is_adjust) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Stop CFO tracking\n");
		return;
	}
	sign = curr_cfo > 0 ? 1 : -1;
	if (cfo_abs > CFO_TRK_STOP_TH_4)
		crystal_cap += 7 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_3)
		crystal_cap += 5 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_2)
		crystal_cap += 3 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_1)
		crystal_cap += 1 * sign;
	else
		return;
	rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "X_cap{Curr,Default}={0x%x,0x%x}\n",
		    cfo->crystal_cap, cfo->def_x_cap);
}

static s32 rtw89_phy_average_cfo_calc(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 cfo_khz_all = 0;
	s32 cfo_cnt_all = 0;
	s32 cfo_all_avg = 0;
	u8 i;

	if (rtwdev->total_sta_assoc != 1)
		return 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "one_entry_only\n");
	for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
		if (cfo->cfo_cnt[i] == 0)
			continue;
		cfo_khz_all += cfo->cfo_tail[i];
		cfo_cnt_all += cfo->cfo_cnt[i];
		cfo_all_avg = phy_div(cfo_khz_all, cfo_cnt_all);
		cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
		cfo->dcfo_avg = phy_div(cfo_khz_all << chip->dcfo_comp_sft,
					cfo_cnt_all);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "CFO track for macid = %d\n", i);
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "Total cfo=%dK, pkt_cnt=%d, avg_cfo=%dK\n",
		    cfo_khz_all, cfo_cnt_all, cfo_all_avg);
	return cfo_all_avg;
}

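/* Derive a single target CFO from multiple stations' statistics using the
 * configured averaging mode: packet-count weighted, per-entry average, or
 * throughput weighted with a per-station tolerance window.
 */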
static s32 rtw89_phy_multi_sta_cfo_calc(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	s32 target_cfo = 0;
	s32 cfo_khz_all = 0;
	s32 cfo_khz_all_tp_wgt = 0;
	s32 cfo_avg = 0;
	s32 max_cfo_lb = BIT(31);
	s32 min_cfo_ub = GENMASK(30, 0);
	u16 cfo_cnt_all = 0;
	u8 active_entry_cnt = 0;
	u8 sta_cnt = 0;
	u32 tp_all = 0;
	u8 i;
	u8 cfo_tol = 0;

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Multi entry cfo_trk\n");
	if (cfo->rtw89_multi_cfo_mode == RTW89_PKT_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo_khz_all += cfo->cfo_tail[i];
			cfo_cnt_all += cfo->cfo_cnt[i];
			cfo_avg = phy_div(cfo_khz_all, (s32)cfo_cnt_all);
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Msta cfo=%d, pkt_cnt=%d, avg_cfo=%d\n",
				    cfo_khz_all, cfo_cnt_all, cfo_avg);
			target_cfo = cfo_avg;
		}
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_ENTRY_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Entry based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
						  (s32)cfo->cfo_cnt[i]);
			cfo_khz_all += cfo->cfo_avg[i];
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Macid=%d, cfo_avg=%d\n", i,
				    cfo->cfo_avg[i]);
		}
		sta_cnt = rtwdev->total_sta_assoc;
		cfo_avg = phy_div(cfo_khz_all, (s32)sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo_acc=%d, ent_cnt=%d, avg_cfo=%d\n",
			    cfo_khz_all, sta_cnt, cfo_avg);
		target_cfo = cfo_avg;
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n");
		cfo_tol = cfo->sta_cfo_tolerance;
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			sta_cnt++;
			if (cfo->cfo_cnt[i] != 0) {
				cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
							  (s32)cfo->cfo_cnt[i]);
				active_entry_cnt++;
			} else {
				cfo->cfo_avg[i] = cfo->pre_cfo_avg[i];
			}
			max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb);
			min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub);
			cfo_khz_all += cfo->cfo_avg[i];
			/* need tp for each entry */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "[%d] cfo_avg=%d, tp=tbd\n",
				    i, cfo->cfo_avg[i]);
			if (sta_cnt >= rtwdev->total_sta_assoc)
				break;
		}
		tp_all = stats->rx_throughput; /* need tp for each entry */
		cfo_avg = phy_div(cfo_khz_all_tp_wgt, (s32)tp_all);

		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n",
			    sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n",
			    active_entry_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo with tp_wgt=%d, avg_cfo=%d\n",
			    cfo_khz_all_tp_wgt, cfo_avg);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n",
			    max_cfo_lb, min_cfo_ub);
		if (max_cfo_lb <= min_cfo_ub) {
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "cfo win_size=%d\n",
				    min_cfo_ub - max_cfo_lb);
			target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub);
		} else {
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "No intersection of cfo tolerance windows\n");
			target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt);
		}
		for (i = 0; i < CFO_TRACK_MAX_USER; i++)
			cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo);
	return target_cfo;
}

static void rtw89_phy_cfo_statistics_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	memset(&cfo->cfo_tail, 0, sizeof(cfo->cfo_tail));
	memset(&cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt));
	cfo->packet_count = 0;
	cfo->packet_count_pre = 0;
	cfo->cfo_avg_pre = 0;
}

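/* Periodic CFO decision making: compute the current CFO estimate, adjust
 * the crystal cap while it stays within (x_cap_lb, x_cap_ub), and enter a
 * divergence lock (reset toward the default cap for CFO_PERIOD_CNT rounds)
 * once the cap reaches either bound.
 */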
static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 new_cfo = 0;
	bool x_cap_update = false;
	u8 pre_x_cap = cfo->crystal_cap;
	u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft;

	cfo->dcfo_avg = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n",
		    rtwdev->total_sta_assoc);
	if (rtwdev->total_sta_assoc == 0) {
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}
	if (cfo->packet_count == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt = 0\n");
		return;
	}
	if (cfo->packet_count == cfo->packet_count_pre) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt doesn't change\n");
		return;
	}
	if (rtwdev->total_sta_assoc == 1)
		new_cfo = rtw89_phy_average_cfo_calc(rtwdev);
	else
		new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev);
	if (new_cfo == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
		return;
	}
	if (cfo->divergence_lock_en) {
		cfo->lock_cnt++;
		if (cfo->lock_cnt > CFO_PERIOD_CNT) {
			cfo->divergence_lock_en = false;
			cfo->lock_cnt = 0;
		} else {
			rtw89_phy_cfo_reset(rtwdev);
		}
		return;
	}
	if (cfo->crystal_cap >= cfo->x_cap_ub ||
	    cfo->crystal_cap <= cfo->x_cap_lb) {
		cfo->divergence_lock_en = true;
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}

	rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
	cfo->cfo_avg_pre = new_cfo;
	cfo->dcfo_avg_pre = cfo->dcfo_avg;
	x_cap_update = cfo->crystal_cap != pre_x_cap;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n",
		    cfo->def_x_cap, pre_x_cap, cfo->crystal_cap,
		    cfo->x_cap_ofst);
	if (x_cap_update) {
		if (cfo->dcfo_avg > 0)
			cfo->dcfo_avg -= CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
		else
			cfo->dcfo_avg += CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
	}
	rtw89_dcfo_comp(rtwdev, cfo->dcfo_avg);
	rtw89_phy_cfo_statistics_reset(rtwdev);
}

void rtw89_phy_cfo_track_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						cfo_track_work.work);
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	mutex_lock(&rtwdev->mutex);
	if (!cfo->cfo_trig_by_timer_en)
		goto out;
	rtw89_leave_ps_mode(rtwdev);
	rtw89_phy_cfo_dm(rtwdev);
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
				     msecs_to_jiffies(cfo->cfo_timer_ms));
out:
	mutex_unlock(&rtwdev->mutex);
}

static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
				     msecs_to_jiffies(cfo->cfo_timer_ms));
}

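/* CFO tracking state machine driven by TX throughput: NORMAL switches to
 * ENHANCE above CFO_TP_UPPER and arms the periodic timer; ENHANCE falls
 * back to NORMAL below CFO_TP_LOWER, or parks in HOLD when UL-OFDMA
 * accumulation has been active for CFO_PERIOD_CNT rounds; HOLD returns to
 * NORMAL once throughput drops again.
 */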
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	bool is_ul_ofdma = false, ofdma_acc_en = false;

	if (stats->rx_tf_periodic > CFO_TF_CNT_TH)
		is_ul_ofdma = true;
	if (cfo->cfo_ul_ofdma_acc_mode == RTW89_CFO_UL_OFDMA_ACC_ENABLE &&
	    is_ul_ofdma)
		ofdma_acc_en = true;

	switch (cfo->phy_cfo_status) {
	case RTW89_PHY_DCFO_STATE_NORMAL:
		if (stats->tx_throughput >= CFO_TP_UPPER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_ENHANCE;
			cfo->cfo_trig_by_timer_en = true;
			cfo->cfo_timer_ms = CFO_COMP_PERIOD;
			rtw89_phy_cfo_start_work(rtwdev);
		}
		break;
	case RTW89_PHY_DCFO_STATE_ENHANCE:
		if (stats->tx_throughput <= CFO_TP_LOWER)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		else if (ofdma_acc_en &&
			 cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_HOLD;
		else
			cfo->phy_cfo_trk_cnt++;

		if (cfo->phy_cfo_status == RTW89_PHY_DCFO_STATE_NORMAL) {
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		}
		break;
	case RTW89_PHY_DCFO_STATE_HOLD:
		if (stats->tx_throughput <= CFO_TP_LOWER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		} else {
			cfo->phy_cfo_trk_cnt++;
		}
		break;
	default:
		cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		cfo->phy_cfo_trk_cnt = 0;
		break;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "[CFO]WatchDog tp=%d,state=%d,timer_en=%d,trk_cnt=%d,thermal=%ld\n",
		    stats->tx_throughput, cfo->phy_cfo_status,
		    cfo->cfo_trig_by_timer_en, cfo->phy_cfo_trk_cnt,
		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[0]));
	if (cfo->cfo_trig_by_timer_en)
		return;
	rtw89_phy_cfo_dm(rtwdev);
}

void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
			 struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	u8 macid = phy_ppdu->mac_id;

	if (macid >= CFO_TRACK_MAX_USER) {
		rtw89_warn(rtwdev, "mac_id %d is out of range\n", macid);
		return;
	}

	cfo->cfo_tail[macid] += cfo_val;
	cfo->cfo_cnt[macid]++;
	cfo->packet_count++;
}

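/* On association, cache the default triangular TX shaping index and decide
 * whether dynamic band-edge control applies for this vif: enabled on
 * 5/6 GHz with bandwidth >= 40 MHz, except on RTL8852B beyond CBV.
 */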
void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->sub_entity_idx);
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->support_ul_tb_ctrl)
		return;

	rtwvif->def_tri_idx =
		rtw89_phy_read32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG);

	if (chip->chip_id == RTL8852B && rtwdev->hal.cv > CHIP_CBV)
		rtwvif->dyn_tb_bedge_en = false;
	else if (chan->band_type >= RTW89_BAND_5G &&
		 chan->band_width >= RTW89_CHANNEL_WIDTH_40)
		rtwvif->dyn_tb_bedge_en = true;
	else
		rtwvif->dyn_tb_bedge_en = false;

	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] def_if_bandedge=%d, def_tri_idx=%d\n",
		    ul_tb_info->def_if_bandedge, rtwvif->def_tri_idx);
	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] dyn_tb_bedge_en=%d, dyn_tb_tri_en=%d\n",
		    rtwvif->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en);
}

struct rtw89_phy_ul_tb_check_data {
	bool valid;
	bool high_tf_client;
	bool low_tf_client;
	bool dyn_tb_bedge_en;
	u8 def_tri_idx;
};

static
void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev,
				struct rtw89_vif *rtwvif,
				struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);

	if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
		return;

	if (!vif->cfg.assoc)
		return;

	if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH)
		ul_tb_data->high_tf_client = true;
	else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH)
		ul_tb_data->low_tf_client = true;

	ul_tb_data->valid = true;
	ul_tb_data->def_tri_idx = rtwvif->def_tri_idx;
	ul_tb_data->dyn_tb_bedge_en = rtwvif->dyn_tb_bedge_en;
}

void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;
	struct rtw89_phy_ul_tb_check_data ul_tb_data = {};
	struct rtw89_vif *rtwvif;

	if (!chip->support_ul_tb_ctrl)
		return;

	if (rtwdev->total_sta_assoc != 1)
		return;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif, &ul_tb_data);

	if (!ul_tb_data.valid)
		return;

	if (ul_tb_data.dyn_tb_bedge_en) {
		if (ul_tb_data.high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off if_bandedge\n");
		} else if (ul_tb_data.low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN,
					       ul_tb_info->def_if_bandedge);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default if_bandedge = %d\n",
				    ul_tb_info->def_if_bandedge);
		}
	}

	if (ul_tb_info->dyn_tb_tri_en) {
		if (ul_tb_data.high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off Tx triangle\n");
		} else if (ul_tb_data.low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG,
					       ul_tb_data.def_tri_idx);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default tx_shap_idx = %d\n",
				    ul_tb_data.def_tri_idx);
		}
	}
}

static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->support_ul_tb_ctrl)
		return;

	ul_tb_info->dyn_tb_tri_en = true;
	ul_tb_info->def_if_bandedge =
		rtw89_phy_read32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN);
}

static
void rtw89_phy_antdiv_sts_instance_reset(struct rtw89_antdiv_stats *antdiv_sts)
{
	ewma_rssi_init(&antdiv_sts->cck_rssi_avg);
	ewma_rssi_init(&antdiv_sts->ofdm_rssi_avg);
	ewma_rssi_init(&antdiv_sts->non_legacy_rssi_avg);
	antdiv_sts->pkt_cnt_cck = 0;
	antdiv_sts->pkt_cnt_ofdm = 0;
	antdiv_sts->pkt_cnt_non_legacy = 0;
	antdiv_sts->evm = 0;
}

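/* Accumulate per-antenna RSSI/EVM statistics, bucketed by PPDU type:
 * CCK, legacy OFDM, or non-legacy (HT and above).
 */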
static void rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev *rtwdev,
					      struct rtw89_rx_phy_ppdu *phy_ppdu,
					      struct rtw89_antdiv_stats *stats)
{
	if (rtw89_get_data_rate_mode(rtwdev, phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) {
		if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) {
			ewma_rssi_add(&stats->cck_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_cck++;
		} else {
			ewma_rssi_add(&stats->ofdm_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_ofdm++;
			stats->evm += phy_ppdu->ofdm.evm_min;
		}
	} else {
		ewma_rssi_add(&stats->non_legacy_rssi_avg, phy_ppdu->rssi_avg);
		stats->pkt_cnt_non_legacy++;
		stats->evm += phy_ppdu->ofdm.evm_min;
	}
}

static u8 rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats *stats)
{
	if (stats->pkt_cnt_non_legacy >= stats->pkt_cnt_cck &&
	    stats->pkt_cnt_non_legacy >= stats->pkt_cnt_ofdm)
		return ewma_rssi_read(&stats->non_legacy_rssi_avg);
	else if (stats->pkt_cnt_ofdm >= stats->pkt_cnt_cck &&
		 stats->pkt_cnt_ofdm >= stats->pkt_cnt_non_legacy)
		return ewma_rssi_read(&stats->ofdm_rssi_avg);
	else
		return ewma_rssi_read(&stats->cck_rssi_avg);
}

static u8 rtw89_phy_antdiv_sts_instance_get_evm(struct rtw89_antdiv_stats *stats)
{
	return phy_div(stats->evm, stats->pkt_cnt_non_legacy + stats->pkt_cnt_ofdm);
}

void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
			    struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->target_stats);

	if (!antdiv->get_stats)
		return;

	if (hal->antenna_rx == RF_A)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->main_stats);
	else if (hal->antenna_rx == RF_B)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->aux_stats);
}

static void rtw89_phy_antdiv_reg_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_ANT_TRAIN_EN,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_TX_ANT_SEL,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_TRSW_TX_EXTEND,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_HW_ANTSW_DIS_BY_GNT_BT,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_BT_FORCE_ANTIDX_EN,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_RFSW_CTRL_ANT0_BASE, B_RFSW_CTRL_ANT_MAPPING,
			      0x0100, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_BTG_TRX,
			      0x1, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_HW_CTRL,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_2G,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_5G,
			      0x0, RTW89_PHY_0);
}

static void rtw89_phy_antdiv_sts_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->main_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->aux_stats);
}

static void rtw89_phy_antdiv_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (!hal->ant_diversity)
		return;

	antdiv->get_stats = false;
	antdiv->rssi_pre = 0;
	rtw89_phy_antdiv_sts_reset(rtwdev);
	rtw89_phy_antdiv_reg_init(rtwdev);
}

static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	int i;
	u8 th;

	for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
		th = rtw89_chip_get_thermal(rtwdev, i);
		if (th)
			ewma_thermal_add(&phystat->avg_thermal[i], th);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "path(%d) thermal cur=%u avg=%ld\n", i, th,
			    ewma_thermal_read(&phystat->avg_thermal[i]));
	}
}

struct rtw89_phy_iter_rssi_data {
	struct rtw89_dev *rtwdev;
	struct rtw89_phy_ch_info *ch_info;
	bool rssi_changed;
};

static void rtw89_phy_stat_rssi_update_iter(void *data,
					    struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_phy_iter_rssi_data *rssi_data =
					(struct rtw89_phy_iter_rssi_data *)data;
	struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info;
	unsigned long rssi_curr;

	rssi_curr = ewma_rssi_read(&rtwsta->avg_rssi);

	if (rssi_curr < ch_info->rssi_min) {
		ch_info->rssi_min = rssi_curr;
		ch_info->rssi_min_macid = rtwsta->mac_id;
	}

	if (rtwsta->prev_rssi == 0) {
		rtwsta->prev_rssi = rssi_curr;
	} else if (abs((int)rtwsta->prev_rssi - (int)rssi_curr) > (3 << RSSI_FACTOR)) {
		rtwsta->prev_rssi = rssi_curr;
		rssi_data->rssi_changed = true;
	}
}

static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_iter_rssi_data rssi_data = {0};

	rssi_data.rtwdev = rtwdev;
	rssi_data.ch_info = &rtwdev->ch_info;
	rssi_data.ch_info->rssi_min = U8_MAX;
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_stat_rssi_update_iter,
					  &rssi_data);
	if (rssi_data.rssi_changed)
		rtw89_btc_ntfy_wl_sta(rtwdev);
}

static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	int i;

	for (i = 0; i < rtwdev->chip->rf_path_num; i++)
		ewma_thermal_init(&phystat->avg_thermal[i]);

	rtw89_phy_stat_thermal_update(rtwdev);

	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
	memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat));
}

void rtw89_phy_stat_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;

	rtw89_phy_stat_thermal_update(rtwdev);
	rtw89_phy_stat_rssi_update(rtwdev);

	phystat->last_pkt_stat = phystat->cur_pkt_stat;
	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
}

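/* CCX counters tick in units of (CCX_US_BASE_RATIO << ccx_unit_idx)
 * microseconds; the two helpers below convert between microseconds and
 * raw counter indexes.
 */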
static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, u32 time_us)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}

static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}

static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	env->ccx_manual_ctrl = false;
	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ccx_period = 0;
	env->ccx_unit_idx = RTW89_CCX_32_US;

	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->en_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask,
			       RTW89_CCX_EDCCA_BW20_0);
}

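/* Scale a raw CCX counter to the requested score base (e.g. PERCENT or
 * PERMIL) of the measurement period, rounding to nearest and saturating
 * at score - 1.
 */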
static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, u16 report,
				    u16 score)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	u32 numer = 0;
	u16 ret = 0;

	numer = report * score + (env->ccx_period >> 1);
	if (env->ccx_period)
		ret = numer / env->ccx_period;

	return ret >= score ? score - 1 : ret;
}

static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev,
					    u16 time_ms, u32 *period,
					    u32 *unit_idx)
{
	u32 idx;
	u8 quotient;

	if (time_ms >= CCX_MAX_PERIOD)
		time_ms = CCX_MAX_PERIOD;

	quotient = CCX_MAX_PERIOD_UNIT * time_ms / CCX_MAX_PERIOD;

	if (quotient < 4)
		idx = RTW89_CCX_4_US;
	else if (quotient < 8)
		idx = RTW89_CCX_8_US;
	else if (quotient < 16)
		idx = RTW89_CCX_16_US;
	else
		idx = RTW89_CCX_32_US;

	*unit_idx = idx;
	*period = (time_ms * MS_TO_4US_RATIO) >> idx;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "[Trigger Time] period:%d, unit_idx:%d\n",
		    *period, *unit_idx);
}

static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "lv:(%d)->(0)\n", env->ccx_rac_lv);

	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
}

static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
					      struct rtw89_ccx_para_info *para)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	bool is_update = env->ifs_clm_app != para->ifs_clm_app;
	u8 i = 0;
	u16 *ifs_th_l = env->ifs_clm_th_l;
	u16 *ifs_th_h = env->ifs_clm_th_h;
	u32 ifs_th0_us = 0, ifs_th_times = 0;
	u32 ifs_th_h_us[RTW89_IFS_CLM_NUM] = {0};

	if (!is_update)
		goto ifs_update_finished;

	switch (para->ifs_clm_app) {
	case RTW89_IFS_CLM_INIT:
	case RTW89_IFS_CLM_BACKGROUND:
	case RTW89_IFS_CLM_ACS:
	case RTW89_IFS_CLM_DBG:
	case RTW89_IFS_CLM_DIG:
	case RTW89_IFS_CLM_TDMA_DIG:
		ifs_th0_us = IFS_CLM_TH0_UPPER;
		ifs_th_times = IFS_CLM_TH_MUL;
		break;
	case RTW89_IFS_CLM_DBG_MANUAL:
		ifs_th0_us = para->ifs_clm_manual_th0;
		ifs_th_times = para->ifs_clm_manual_th_times;
		break;
	default:
		break;
	}

	/* Set sampling threshold for 4 different regions, unit in idx_cnt.
	 * low[i] = high[i-1] + 1
	 * high[i] = high[i-1] * ifs_th_times
	 */
	ifs_th_l[IFS_CLM_TH_START_IDX] = 0;
	ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us;
	ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev,
								 ifs_th0_us);
	for (i = 1; i < RTW89_IFS_CLM_NUM; i++) {
		ifs_th_l[i] = ifs_th_h[i - 1] + 1;
		ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times;
		ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, ifs_th_h_us[i]);
	}

ifs_update_finished:
	if (!is_update)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "No need to update IFS_TH\n");

	return is_update;
}

static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask,
			       env->ifs_clm_th_l[0]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask,
			       env->ifs_clm_th_l[1]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask,
			       env->ifs_clm_th_l[2]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask,
			       env->ifs_clm_th_l[3]);

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask,
			       env->ifs_clm_th_h[0]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask,
			       env->ifs_clm_th_h[1]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask,
			       env->ifs_clm_th_h[2]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask,
			       env->ifs_clm_th_h[3]);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS_T%d_th{low, high} : {%d, %d}\n",
			    i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}

static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	struct rtw89_ccx_para_info para = {0};

	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
	env->ifs_clm_mntr_time = 0;

	para.ifs_clm_app = RTW89_IFS_CLM_INIT;
	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, &para))
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true);
}

static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
				     enum rtw89_env_racing_lv level)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	int ret = 0;

	if (level >= RTW89_RAC_MAX_NUM) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARNING] Wrong LV=%d\n", level);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "ccx_ongoing=%d, level:(%d)->(%d)\n", env->ccx_ongoing,
		    env->ccx_rac_lv, level);

	if (env->ccx_ongoing) {
		if (level <= env->ccx_rac_lv)
			ret = -EINVAL;
		else
			env->ccx_ongoing = false;
	}

	if (ret == 0)
		env->ccx_rac_lv = level;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "ccx racing success=%d\n",
		    !ret);

	return ret;
}

static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);

	env->ccx_ongoing = true;
}

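/* Convert the raw IFS-CLM counters into ratios (per the requested score
 * base) and per-region average IFS/CCA times in microseconds.
 */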
3464static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev)
3465{
3466	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
3467	u8 i = 0;
3468	u32 res = 0;
3469
3470	env->ifs_clm_tx_ratio =
3471		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_tx, PERCENT);
3472	env->ifs_clm_edcca_excl_cca_ratio =
3473		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_edcca_excl_cca,
3474					 PERCENT);
3475	env->ifs_clm_cck_fa_ratio =
3476		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERCENT);
3477	env->ifs_clm_ofdm_fa_ratio =
3478		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERCENT);
3479	env->ifs_clm_cck_cca_excl_fa_ratio =
3480		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckcca_excl_fa,
3481					 PERCENT);
3482	env->ifs_clm_ofdm_cca_excl_fa_ratio =
3483		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmcca_excl_fa,
3484					 PERCENT);
3485	env->ifs_clm_cck_fa_permil =
3486		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERMIL);
3487	env->ifs_clm_ofdm_fa_permil =
3488		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERMIL);
3489
3490	for (i = 0; i < RTW89_IFS_CLM_NUM; i++) {
3491		if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) {
3492			env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD;
3493		} else {
3494			env->ifs_clm_ifs_avg[i] =
3495				rtw89_phy_ccx_idx_to_us(rtwdev,
3496							env->ifs_clm_avg[i]);
3497		}
3498
3499		res = rtw89_phy_ccx_idx_to_us(rtwdev, env->ifs_clm_cca[i]);
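		/* bias by half the sample count so the division below
		 * rounds to nearest instead of truncating
		 */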
		res += env->ifs_clm_his[i] >> 1;
		if (env->ifs_clm_his[i])
			res /= env->ifs_clm_his[i];
		else
			res = 0;
		env->ifs_clm_cca_avg[i] = res;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM ratio {Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx_ratio, env->ifs_clm_edcca_excl_cca_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_ratio, env->ifs_clm_ofdm_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA permil {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_cca_excl_fa_ratio,
		    env->ifs_clm_ofdm_cca_excl_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "Time:[his, ifs_avg(us), cca_avg(us)]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n",
			    i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i],
			    env->ifs_clm_cca_avg[i]);
}

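/* Latch one finished IFS-CLM measurement from hardware. Returns false if
 * the counter-done flag is not set yet, i.e. the current collection
 * window has not completed.
 */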
static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	if (rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
				  ccx->ifs_cnt_done_mask) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Get IFS_CLM report Fail\n");
		return false;
	}

	env->ifs_clm_tx =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				      ccx->ifs_clm_tx_cnt_msk);
	env->ifs_clm_edcca_excl_cca =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				      ccx->ifs_clm_edcca_excl_cca_fa_mask);
	env->ifs_clm_cckcca_excl_fa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
				      ccx->ifs_clm_cckcca_excl_fa_mask);
	env->ifs_clm_ofdmcca_excl_fa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
				      ccx->ifs_clm_ofdmcca_excl_fa_mask);
	env->ifs_clm_cckfa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
				      ccx->ifs_clm_cck_fa_mask);
	env->ifs_clm_ofdmfa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
				      ccx->ifs_clm_ofdm_fa_mask);

	env->ifs_clm_his[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t1_his_mask);
	env->ifs_clm_his[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t2_his_mask);
	env->ifs_clm_his[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t3_his_mask);
	env->ifs_clm_his[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t4_his_mask);

	env->ifs_clm_avg[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
				      ccx->ifs_t1_avg_mask);
	env->ifs_clm_avg[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
				      ccx->ifs_t2_avg_mask);
	env->ifs_clm_avg[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
				      ccx->ifs_t3_avg_mask);
	env->ifs_clm_avg[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
				      ccx->ifs_t4_avg_mask);

	env->ifs_clm_cca[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
				      ccx->ifs_t1_cca_mask);
	env->ifs_clm_cca[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
				      ccx->ifs_t2_cca_mask);
	env->ifs_clm_cca[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
				      ccx->ifs_t3_cca_mask);
	env->ifs_clm_cca[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
				      ccx->ifs_t4_cca_mask);

	env->ifs_clm_total_ifs =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
				      ccx->ifs_total_mask);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n",
		    env->ifs_clm_total_ifs);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "{Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx, env->ifs_clm_edcca_excl_cca);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckfa, env->ifs_clm_ofdmfa);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckcca_excl_fa, env->ifs_clm_ofdmcca_excl_fa);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Time:[his, avg, cca]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i],
			    env->ifs_clm_avg[i], env->ifs_clm_cca[i]);

	rtw89_phy_ifs_clm_get_utility(rtwdev);

	return true;
}

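/* Program an IFS-CLM measurement: convert the monitor time from ms into
 * the hardware period/unit pair, and refresh the IFS threshold registers
 * whenever rtw89_phy_ifs_clm_th_update_check() asks for it.
 */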
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
				 struct rtw89_ccx_para_info *para)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u32 period = 0;
	u32 unit_idx = 0;

	if (para->mntr_time == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARN] MNTR_TIME is 0\n");
		return -EINVAL;
	}

	if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv))
		return -EINVAL;

	if (para->mntr_time != env->ifs_clm_mntr_time) {
		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
						&period, &unit_idx);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_period_mask, period);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_cnt_unit_mask,
				       unit_idx);

		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS-CLM time ((%d)) -> ((%d))\n",
			    env->ifs_clm_mntr_time, para->mntr_time);

		env->ifs_clm_mntr_time = para->mntr_time;
		env->ccx_period = (u16)period;
		env->ccx_unit_idx = (u8)unit_idx;
	}

	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) {
		env->ifs_clm_app = para->ifs_clm_app;
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);
	}

	return 0;
}

void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	struct rtw89_ccx_para_info para = {0};
	u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL;

	env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL;
	if (env->ccx_manual_ctrl) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "CCX in manual ctrl\n");
		return;
	}

	/* only ifs_clm for now */
	if (rtw89_phy_ifs_clm_get_result(rtwdev))
		env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM;

	rtw89_phy_ccx_racing_release(rtwdev);
	para.mntr_time = 1900;
	para.rac_lv = RTW89_RAC_LV_1;
	para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;

	if (rtw89_phy_ifs_clm_set(rtwdev, &para) == 0)
		chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
	if (chk_result)
		rtw89_phy_ccx_trigger(rtwdev);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "get_result=0x%x, chk_result:0x%x\n",
		    env->ccx_watchdog_result, chk_result);
}

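/* RTW89_RSVD_9 is a hole in the PHY status bitmap enum; pages above it
 * are shifted down by one so that they index the contiguous register
 * block computed in rtw89_phy_get_ie_bitmap_addr().
 */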
static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
{
	if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
	    *ie_page == RTW89_RSVD_9)
		return false;
	else if (*ie_page > RTW89_RSVD_9)
		*ie_page -= 1;

	return true;
}

static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page)
{
	static const u8 ie_page_shift = 2;

	return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift);
}

static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_status_bitmap ie_page)
{
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return 0;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);

	return rtw89_phy_read32(rtwdev, addr);
}

static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
				       enum rtw89_phy_status_bitmap ie_page,
				       u32 val)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return;

	if (chip->chip_id == RTL8852A)
		val &= B_PHY_STS_BITMAP_MSK_52A;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
	rtw89_phy_write32(rtwdev, addr, val);
}

static void rtw89_physts_enable_ie_bitmap(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_status_bitmap bitmap,
					  enum rtw89_phy_status_ie_type ie,
					  bool enable)
{
	u32 val = rtw89_physts_get_ie_bitmap(rtwdev, bitmap);

	if (enable)
		val |= BIT(ie);
	else
		val &= ~BIT(ie);

	rtw89_physts_set_ie_bitmap(rtwdev, bitmap, val);
}

static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
					    bool enable,
					    enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	const struct rtw89_physts_regs *physts = phy->physts;

	if (enable) {
		rtw89_phy_write32_clr(rtwdev, physts->setting_addr,
				      physts->dis_trigger_fail_mask);
		rtw89_phy_write32_clr(rtwdev, physts->setting_addr,
				      physts->dis_trigger_brk_mask);
	} else {
		rtw89_phy_write32_set(rtwdev, physts->setting_addr,
				      physts->dis_trigger_fail_mask);
		rtw89_phy_write32_set(rtwdev, physts->setting_addr,
				      physts->dis_trigger_brk_mask);
	}
}

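/* Default PHY status parsing setup: disable the fail/break trigger
 * reports and enable the per-packet-type IE bitmaps the driver consumes
 * (IE09, IE24, the DL MU definition for VHT/HE, and IE01 on CCK for the
 * channel index noted below).
 */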
static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
{
	u8 i;

	rtw89_physts_enable_fail_report(rtwdev, false, RTW89_PHY_0);

	for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
		if (i >= RTW89_CCK_PKT)
			rtw89_physts_enable_ie_bitmap(rtwdev, i,
						      RTW89_PHYSTS_IE09_FTR_0,
						      true);
		if ((i >= RTW89_CCK_BRK && i <= RTW89_VHT_MU) ||
		    (i >= RTW89_RSVD_9 && i <= RTW89_CCK_PKT))
			continue;
		rtw89_physts_enable_ie_bitmap(rtwdev, i,
					      RTW89_PHYSTS_IE24_OFDM_TD_PATH_A,
					      true);
	}
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_VHT_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true);
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_HE_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true);

	/* force IE01 for channel index, only channel field is valid */
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_CCK_PKT,
				      RTW89_PHYSTS_IE01_CMN_OFDM, true);
}

static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_phy_dig_gain_cfg *cfg;
	const char *msg;
	u8 i;
	s8 gain_base;
	s8 *gain_arr;
	u32 tmp;

	switch (type) {
	case RTW89_DIG_GAIN_LNA_G:
		gain_arr = dig->lna_gain_g;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_g;
		msg = "lna_gain_g";
		break;
	case RTW89_DIG_GAIN_TIA_G:
		gain_arr = dig->tia_gain_g;
		gain_base = TIA0_GAIN_G;
		cfg = chip->dig_table->cfg_tia_g;
		msg = "tia_gain_g";
		break;
	case RTW89_DIG_GAIN_LNA_A:
		gain_arr = dig->lna_gain_a;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_a;
		msg = "lna_gain_a";
		break;
	case RTW89_DIG_GAIN_TIA_A:
		gain_arr = dig->tia_gain_a;
		gain_base = TIA0_GAIN_A;
		cfg = chip->dig_table->cfg_tia_a;
		msg = "tia_gain_a";
		break;
	default:
		return;
	}

	for (i = 0; i < cfg->size; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, cfg->table[i].addr,
					    cfg->table[i].mask);
		tmp >>= DIG_GAIN_SHIFT;
		gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base;
		gain_base += DIG_GAIN;

		rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s[%d]=%d\n",
			    msg, i, gain_arr[i]);
	}
}

static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u32 tmp;
	u8 i;

	if (!rtwdev->hal.support_igi)
		return;

	tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW,
				    B_PATH0_IB_PKPW_MSK);
	dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
	dig->ib_pbk = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PBK,
					    B_PATH0_IB_PBK_MSK);
	rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n",
		    dig->ib_pkpwr, dig->ib_pbk);

	for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++)
		rtw89_phy_dig_read_gain_table(rtwdev, i);
}

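/* The false-alarm thresholds below are compared against the summed
 * CCK + OFDM FA density in permil; separate ladders are used for 2 GHz,
 * 5 GHz, and the unlinked case.
 */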
static const u8 rssi_nolink = 22;
static const u8 igi_rssi_th[IGI_RSSI_TH_NUM] = {68, 84, 90, 98, 104};
static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88};
static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16};
static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528};

static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;

	if (is_linked) {
		dig->igi_rssi = ch_info->rssi_min >> 1;
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
		dig->igi_rssi = rssi_nolink;
	}
}

static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	bool is_linked = rtwdev->total_sta_assoc > 0;
	const u16 *fa_th_src = NULL;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		dig->lna_gain = dig->lna_gain_g;
		dig->tia_gain = dig->tia_gain_g;
		fa_th_src = is_linked ? fa_th_2g : fa_th_nolink;
		dig->force_gaincode_idx_en = false;
		dig->dyn_pd_th_en = true;
		break;
	case RTW89_BAND_5G:
	default:
		dig->lna_gain = dig->lna_gain_a;
		dig->tia_gain = dig->tia_gain_a;
		fa_th_src = is_linked ? fa_th_5g : fa_th_nolink;
		dig->force_gaincode_idx_en = true;
		dig->dyn_pd_th_en = true;
		break;
	}
	memcpy(dig->fa_th, fa_th_src, sizeof(dig->fa_th));
	memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
}

static const u8 pd_low_th_offset = 20, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;	/* implicitly zero */

static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->cur_gaincode.lna_idx = LNA_IDX_MAX;
	dig->cur_gaincode.tia_idx = TIA_IDX_MAX;
	dig->cur_gaincode.rxb_idx = RXB_IDX_MAX;
	dig->force_gaincode.lna_idx = LNA_IDX_MAX;
	dig->force_gaincode.tia_idx = TIA_IDX_MAX;
	dig->force_gaincode.rxb_idx = RXB_IDX_MAX;

	dig->dyn_igi_max = igi_max_performance_mode;
	dig->dyn_igi_min = dynamic_igi_min;
	dig->dyn_pd_th_max = dynamic_pd_threshold_max;
	dig->pd_low_th_ofst = pd_low_th_offset;
	dig->is_linked_pre = false;
}

static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_dig_update_gain_para(rtwdev);
	rtw89_phy_dig_reset(rtwdev);
}

static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 lna_idx;

	if (rssi < dig->igi_rssi_th[0])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX6;
	else if (rssi < dig->igi_rssi_th[1])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX5;
	else if (rssi < dig->igi_rssi_th[2])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX4;
	else if (rssi < dig->igi_rssi_th[3])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX3;
	else if (rssi < dig->igi_rssi_th[4])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX2;
	else
		lna_idx = RTW89_DIG_GAIN_LNA_IDX1;

	return lna_idx;
}

static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 tia_idx;

	if (rssi < dig->igi_rssi_th[0])
		tia_idx = RTW89_DIG_GAIN_TIA_IDX1;
	else
		tia_idx = RTW89_DIG_GAIN_TIA_IDX0;

	return tia_idx;
}

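/* Derive the RXB (baseband gain) index from the estimated wideband RSSI,
 * i.e. rssi plus the configured LNA/TIA gains, offset by the in-band
 * peak power and backoff values read at init, then clamped to the valid
 * index range.
 */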
#define IB_PBK_BASE 110
#define WB_RSSI_BASE 10
static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					struct rtw89_agc_gaincode_set *set)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	s8 lna_gain = dig->lna_gain[set->lna_idx];
	s8 tia_gain = dig->tia_gain[set->tia_idx];
	s32 wb_rssi = rssi + lna_gain + tia_gain;
	s32 rxb_idx_tmp = IB_PBK_BASE + WB_RSSI_BASE;
	u8 rxb_idx;

	rxb_idx_tmp += dig->ib_pkpwr - dig->ib_pbk - wb_rssi;
	rxb_idx = clamp_t(s32, rxb_idx_tmp, RXB_IDX_MIN, RXB_IDX_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "wb_rssi=%03d, rxb_idx_tmp=%03d\n",
		    wb_rssi, rxb_idx_tmp);

	return rxb_idx;
}

static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					   struct rtw89_agc_gaincode_set *set)
{
	set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, rssi);
	set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, rssi);
	set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, rssi, set);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "final_rssi=%03d, (lna,tia,rxb)=(%d,%d,%02d)\n",
		    rssi, set->lna_idx, set->tia_idx, set->rxb_idx);
}

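/* Map the measured false-alarm density onto a noisy level and grow the
 * IGI offset accordingly; the offset only decays back to zero once the
 * environment is quiet (level 0) and the previous offset is already
 * small.
 */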
#define IGI_OFFSET_MAX 25
#define IGI_OFFSET_MUL 2
static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	enum rtw89_dig_noisy_level noisy_lv;
	u8 igi_offset = dig->fa_rssi_ofst;
	u16 fa_ratio = 0;

	fa_ratio = env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil;

	if (fa_ratio < dig->fa_th[0])
		noisy_lv = RTW89_DIG_NOISY_LEVEL0;
	else if (fa_ratio < dig->fa_th[1])
		noisy_lv = RTW89_DIG_NOISY_LEVEL1;
	else if (fa_ratio < dig->fa_th[2])
		noisy_lv = RTW89_DIG_NOISY_LEVEL2;
	else if (fa_ratio < dig->fa_th[3])
		noisy_lv = RTW89_DIG_NOISY_LEVEL3;
	else
		noisy_lv = RTW89_DIG_NOISY_LEVEL_MAX;

	if (noisy_lv == RTW89_DIG_NOISY_LEVEL0 && igi_offset < 2)
		igi_offset = 0;
	else
		igi_offset += noisy_lv * IGI_OFFSET_MUL;

	igi_offset = min_t(u8, igi_offset, IGI_OFFSET_MAX);
	dig->fa_rssi_ofst = igi_offset;

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa_th: [+6 (%d) +4 (%d) +2 (%d) 0 (%d) -2]\n",
		    dig->fa_th[3], dig->fa_th[2], dig->fa_th[1], dig->fa_th[0]);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa(CCK,OFDM,ALL)=(%d,%d,%d) permil, noisy_lv=%d, ofst=%d\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil,
		    env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil,
		    noisy_lv, igi_offset);
}

static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr,
			       dig_regs->p0_lna_init.mask, lna_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr,
			       dig_regs->p1_lna_init.mask, lna_idx);
}

static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr,
			       dig_regs->p0_tia_init.mask, tia_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr,
			       dig_regs->p1_tia_init.mask, tia_idx);
}

static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr,
			       dig_regs->p0_rxb_init.mask, rxb_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr,
			       dig_regs->p1_rxb_init.mask, rxb_idx);
}

static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
				     const struct rtw89_agc_gaincode_set set)
{
	rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
	rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
	rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n",
		    set.lna_idx, set.tia_idx, set.rxb_idx);
}

static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
						   bool enable)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
			       dig_regs->p0_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
			       dig_regs->p0_s20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
			       dig_regs->p1_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
			       dig_regs->p1_s20_pagcugc_en.mask, enable);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}

static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	if (!rtwdev->hal.support_igi)
		return;

	if (dig->force_gaincode_idx_en) {
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Force gaincode index enabled.\n");
	} else {
		rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
					       &dig->cur_gaincode);
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
	}
}

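/* Dynamic packet-detect threshold: derive an OFDM CCA level from the
 * current RSSI with a bandwidth-dependent backoff, convert it into the
 * PD lower-bound register value, and program the CCK (b-mode) limit as
 * well on chips that support it.
 */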
static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
				    bool enable)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
	enum rtw89_bandwidth cbw = chan->band_width;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
	u8 ofdm_cca_th;
	s8 cck_cca_th;
	u32 pd_val = 0;

	under_region += PD_TH_SB_FLTR_CMP_VAL;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		under_region += PD_TH_BW40_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		under_region += PD_TH_BW80_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		under_region += PD_TH_BW160_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_20:
		fallthrough;
	default:
		under_region += PD_TH_BW20_CMP_VAL;
		break;
	}

	dig->dyn_pd_th_max = dig->igi_rssi;

	final_rssi = min_t(u8, rssi, dig->igi_rssi);
	ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
			      PD_TH_MAX_RSSI + under_region);

	if (enable) {
		pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
			    final_rssi, ofdm_cca_th, under_region, pd_val);
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Dynamic PD th disabled, Set PD_low_bd=0\n");
	}

	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_lower_bound_mask, pd_val);
	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_spatial_reuse_en, enable);

	if (!rtwdev->hal.support_cckpd)
		return;

	cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
	pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
		    final_rssi, cck_cca_th, under_region, pd_val);

	rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_reg,
			       dig_regs->bmode_cca_rssi_limit_en, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_lower_bound_reg,
			       dig_regs->bmode_rssi_nocca_low_th_mask, pd_val);
}

void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->bypass_dig = false;
	rtw89_phy_dig_para_reset(rtwdev);
	rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
	rtw89_phy_dig_dyn_pd_th(rtwdev, rssi_nolink, false);
	rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
	rtw89_phy_dig_update_para(rtwdev);
}

#define IGI_RSSI_MIN 10
void rtw89_phy_dig(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;

	if (unlikely(dig->bypass_dig)) {
		dig->bypass_dig = false;
		return;
	}

	if (!dig->is_linked_pre && is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
		rtw89_phy_dig_update_para(rtwdev);
	} else if (dig->is_linked_pre && !is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
		rtw89_phy_dig_update_para(rtwdev);
	}
	dig->is_linked_pre = is_linked;

	rtw89_phy_dig_igi_offset_by_env(rtwdev);
	rtw89_phy_dig_update_rssi_info(rtwdev);

	dig->dyn_igi_min = (dig->igi_rssi > IGI_RSSI_MIN) ?
			    dig->igi_rssi - IGI_RSSI_MIN : 0;
	dig->dyn_igi_max = dig->dyn_igi_min + IGI_OFFSET_MAX;
	dig->igi_fa_rssi = dig->dyn_igi_min + dig->fa_rssi_ofst;

	dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
				 dig->dyn_igi_max);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "rssi=%03d, dyn(max,min)=(%d,%d), final_rssi=%d\n",
		    dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
		    dig->igi_fa_rssi);

	rtw89_phy_dig_config_igi(rtwdev);

	rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en);

	if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max)
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, true);
	else
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
}

static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_hal *hal = &rtwdev->hal;
	bool *done = data;
	u8 rssi_a, rssi_b;
	u32 candidate;

	if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION || sta->tdls)
		return;

	if (*done)
		return;

	*done = true;

	rssi_a = ewma_rssi_read(&rtwsta->rssi[RF_PATH_A]);
	rssi_b = ewma_rssi_read(&rtwsta->rssi[RF_PATH_B]);

	if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		return;

	if (hal->antenna_tx == candidate)
		return;

	hal->antenna_tx = candidate;
	rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta);

	if (hal->antenna_tx == RF_A) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11);
	} else if (hal->antenna_tx == RF_B) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12);
	}
}

void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	bool done = false;

	if (!hal->tx_path_diversity)
		return;

	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_tx_path_div_sta_iter,
					  &done);
}

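/* Antenna diversity: ANTDIV_MAIN/ANTDIV_AUX select which physical antenna
 * the shared RF path uses; rtw89_phy_antdiv_set_ant() maps RF_A to the
 * main antenna and RF_B to the aux one.
 */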
#define ANTDIV_MAIN 0
#define ANTDIV_AUX 1

static void rtw89_phy_antdiv_set_ant(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 default_ant, optional_ant;

	if (!hal->ant_diversity || hal->antenna_tx == 0)
		return;

	if (hal->antenna_tx == RF_B) {
		default_ant = ANTDIV_AUX;
		optional_ant = ANTDIV_MAIN;
	} else {
		default_ant = ANTDIV_MAIN;
		optional_ant = ANTDIV_AUX;
	}

	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_CGCS_CTRL,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ORI,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ALT,
			      optional_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_TX_ORI,
			      default_ant, RTW89_PHY_0);
}

static void rtw89_phy_swap_hal_antenna(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;

	hal->antenna_rx = hal->antenna_rx == RF_A ? RF_B : RF_A;
	hal->antenna_tx = hal->antenna_rx;
}

static void rtw89_phy_antdiv_decision_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	bool no_change = false;
	u8 main_rssi, aux_rssi;
	u8 main_evm, aux_evm;
	u32 candidate;

	antdiv->get_stats = false;
	antdiv->training_count = 0;

	main_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->main_stats);
	main_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->main_stats);
	aux_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->aux_stats);
	aux_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->aux_stats);

	if (main_evm > aux_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_A;
	else if (aux_evm > main_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_B;
	else if (main_rssi > aux_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (aux_rssi > main_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		no_change = true;

	if (no_change) {
		/* swap back from training antenna to original */
		rtw89_phy_swap_hal_antenna(rtwdev);
		return;
	}

	hal->antenna_tx = candidate;
	hal->antenna_rx = candidate;
}

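/* Training alternates between collecting statistics on the current
 * antenna (even counts) and swapping to the other antenna (odd counts);
 * each step re-queues the delayed work so rtw89_phy_antdiv_work() drives
 * the state machine to the decision step.
 */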
static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	u64 state_period;

	if (antdiv->training_count % 2 == 0) {
		if (antdiv->training_count == 0)
			rtw89_phy_antdiv_sts_reset(rtwdev);

		antdiv->get_stats = true;
		state_period = msecs_to_jiffies(ANTDIV_TRAINNING_INTVL);
	} else {
		antdiv->get_stats = false;
		state_period = msecs_to_jiffies(ANTDIV_DELAY);

		rtw89_phy_swap_hal_antenna(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	antdiv->training_count++;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work,
				     state_period);
}

void rtw89_phy_antdiv_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						antdiv_work.work);
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	mutex_lock(&rtwdev->mutex);

	if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) {
		rtw89_phy_antdiv_training_state(rtwdev);
	} else {
		rtw89_phy_antdiv_decision_state(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	mutex_unlock(&rtwdev->mutex);
}

void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 rssi, rssi_pre;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->target_stats);
	rssi_pre = antdiv->rssi_pre;
	antdiv->rssi_pre = rssi;
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);

	if (abs((int)rssi - (int)rssi_pre) < ANTDIV_RSSI_DIFF_TH)
		return;

	antdiv->training_count = 0;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, 0);
}

static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_ccx_top_setting_init(rtwdev);
	rtw89_phy_ifs_clm_setting_init(rtwdev);
}

void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	rtw89_phy_stat_init(rtwdev);

	rtw89_chip_bb_sethw(rtwdev);

	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
	rtw89_phy_dig_init(rtwdev);
	rtw89_phy_cfo_init(rtwdev);
	rtw89_phy_ul_tb_info_init(rtwdev);
	rtw89_phy_antdiv_init(rtwdev);
	rtw89_chip_rfe_gpio(rtwdev);
	rtw89_phy_antdiv_set_ant(rtwdev);

	rtw89_phy_init_rf_nctl(rtwdev);
	rtw89_chip_rfk_init(rtwdev);
	rtw89_load_txpwr_table(rtwdev, chip->byr_table);
	rtw89_chip_set_txpwr_ctrl(rtwdev);
	rtw89_chip_power_trim(rtwdev);
	rtw89_chip_cfg_txrx_path(rtwdev);
}

void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
	u8 bss_color;

	if (!vif->bss_conf.he_support || !vif->cfg.assoc)
		return;

	bss_color = vif->bss_conf.he_bss_color.color;

	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_VLD0, 0x1,
			      phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT,
			      bss_color, phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_STAID,
			      vif->cfg.aid, phy_idx);
}

static void
_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
}

static void
_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}

static void
_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
}

static void
_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
}

static void
_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	udelay(def->data);
}

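/* Dispatch table for RF calibration (RFK) register sequences; each entry
 * of an rtw89_rfk_tbl is tagged with one of these operations.
 */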
static void
(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
	[RTW89_RFK_F_WRF] = _rfk_write_rf,
	[RTW89_RFK_F_WM] = _rfk_write32_mask,
	[RTW89_RFK_F_WS] = _rfk_write32_set,
	[RTW89_RFK_F_WC] = _rfk_write32_clr,
	[RTW89_RFK_F_DELAY] = _rfk_delay,
};

static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);

void
rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
{
	const struct rtw89_reg5_def *p = tbl->defs;
	const struct rtw89_reg5_def *end = tbl->defs + tbl->size;

	for (; p < end; p++)
		_rfk_handler[p->flag](rtwdev, p);
}
EXPORT_SYMBOL(rtw89_rfk_parser);

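/* The flat and level band-edge configurations use disjoint sets of 8-bit
 * fields in the per-MAC TSSI control registers listed below.
 */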
#define RTW89_TSSI_FAST_MODE_NUM 4

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD934, 0xff0000},
	{0xD934, 0xff000000},
	{0xD938, 0xff},
	{0xD934, 0xff00},
};

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD930, 0xff0000},
	{0xD930, 0xff000000},
	{0xD934, 0xff},
	{0xD930, 0xff00},
};

static
void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev,
					   enum rtw89_mac_idx mac_idx,
					   enum rtw89_tssi_bandedge_cfg bandedge_cfg,
					   u32 val)
{
	const struct rtw89_reg_def *regs;
	u32 reg;
	int i;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_fastmode_regs_flat;
	else
		regs = rtw89_tssi_fastmode_regs_level;

	for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, val);
	}
}

static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = {
	{0xD91C, 0xff000000},
	{0xD920, 0xff},
	{0xD920, 0xff00},
	{0xD920, 0xff0000},
	{0xD920, 0xff000000},
	{0xD924, 0xff},
	{0xD924, 0xff00},
	{0xD914, 0xff000000},
	{0xD918, 0xff},
	{0xD918, 0xff00},
	{0xD918, 0xff0000},
	{0xD918, 0xff000000},
	{0xD91C, 0xff},
	{0xD91C, 0xff00},
	{0xD91C, 0xff0000},
};

static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = {
	{0xD910, 0xff},
	{0xD910, 0xff00},
	{0xD910, 0xff0000},
	{0xD910, 0xff000000},
	{0xD914, 0xff},
	{0xD914, 0xff00},
	{0xD914, 0xff0000},
	{0xD908, 0xff},
	{0xD908, 0xff00},
	{0xD908, 0xff0000},
	{0xD908, 0xff000000},
	{0xD90C, 0xff},
	{0xD90C, 0xff00},
	{0xD90C, 0xff0000},
	{0xD90C, 0xff000000},
};

void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
					  enum rtw89_mac_idx mac_idx,
					  enum rtw89_tssi_bandedge_cfg bandedge_cfg)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *regs;
	const u32 *data;
	u32 reg;
	int i;

	if (bandedge_cfg >= RTW89_TSSI_CFG_NUM)
		return;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_bandedge_regs_flat;
	else
		regs = rtw89_tssi_bandedge_regs_level;

	data = chip->tssi_dbw_table->data[bandedge_cfg];

	for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]);
	}

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BANDEDGE_CFG, mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg);

	rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg,
					      data[RTW89_TSSI_SBW20]);
}
EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg);

static
const u8 rtw89_ch_base_table[16] = {1, 0xff,
				    36, 100, 132, 149, 0xff,
				    1, 33, 65, 97, 129, 161, 193, 225, 0xff};
#define RTW89_CH_BASE_IDX_2G		0
#define RTW89_CH_BASE_IDX_5G_FIRST	2
#define RTW89_CH_BASE_IDX_5G_LAST	5
#define RTW89_CH_BASE_IDX_6G_FIRST	7
#define RTW89_CH_BASE_IDX_6G_LAST	14

#define RTW89_CH_BASE_IDX_MASK		GENMASK(7, 4)
#define RTW89_CH_OFFSET_MASK		GENMASK(3, 0)

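/* A channel index packs a base-table entry in the high nibble and half
 * the distance to that base channel in the low nibble; 2 GHz channels
 * store the channel number itself as the offset. For example, 5 GHz
 * channel 157 uses base index 5 (channel 149), giving
 * 0x50 | ((157 - 149) >> 1) = 0x54.
 */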
u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band)
{
	u8 chan_idx;
	u8 last, first;
	u8 idx;

	switch (band) {
	case RTW89_BAND_2G:
		chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, RTW89_CH_BASE_IDX_2G) |
			   FIELD_PREP(RTW89_CH_OFFSET_MASK, central_ch);
		return chan_idx;
	case RTW89_BAND_5G:
		first = RTW89_CH_BASE_IDX_5G_FIRST;
		last = RTW89_CH_BASE_IDX_5G_LAST;
		break;
	case RTW89_BAND_6G:
		first = RTW89_CH_BASE_IDX_6G_FIRST;
		last = RTW89_CH_BASE_IDX_6G_LAST;
		break;
	default:
		rtw89_warn(rtwdev, "Unsupported band %d\n", band);
		return 0;
	}

	for (idx = last; idx >= first; idx--)
		if (central_ch >= rtw89_ch_base_table[idx])
			break;

	if (idx < first) {
		rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch);
		return 0;
	}

	chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, idx) |
		   FIELD_PREP(RTW89_CH_OFFSET_MASK,
			      (central_ch - rtw89_ch_base_table[idx]) >> 1);
	return chan_idx;
}
EXPORT_SYMBOL(rtw89_encode_chan_idx);

void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
			   u8 *ch, enum nl80211_band *band)
{
	u8 idx, offset;

	idx = FIELD_GET(RTW89_CH_BASE_IDX_MASK, chan_idx);
	offset = FIELD_GET(RTW89_CH_OFFSET_MASK, chan_idx);

	if (idx == RTW89_CH_BASE_IDX_2G) {
		*band = NL80211_BAND_2GHZ;
		*ch = offset;
		return;
	}

	*band = idx <= RTW89_CH_BASE_IDX_5G_LAST ? NL80211_BAND_5GHZ : NL80211_BAND_6GHZ;
	*ch = rtw89_ch_base_table[idx] + (offset << 1);
}
EXPORT_SYMBOL(rtw89_decode_chan_idx);

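/* While scanning, back up the EDCCA level registers and raise all three
 * thresholds to EDCCA_DEFAULT (249), presumably high enough that energy
 * detection never defers off-channel probes; the backup is restored when
 * the scan ends.
 */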
#define EDCCA_DEFAULT 249
void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan)
{
	u32 reg = rtwdev->chip->edcca_lvl_reg;
	struct rtw89_hal *hal = &rtwdev->hal;
	u32 val;

	if (scan) {
		hal->edcca_bak = rtw89_phy_read32(rtwdev, reg);
		val = hal->edcca_bak;
		u32p_replace_bits(&val, EDCCA_DEFAULT, B_SEG0R_EDCCA_LVL_A_MSK);
		u32p_replace_bits(&val, EDCCA_DEFAULT, B_SEG0R_EDCCA_LVL_P_MSK);
		u32p_replace_bits(&val, EDCCA_DEFAULT, B_SEG0R_PPDU_LVL_MSK);
		rtw89_phy_write32(rtwdev, reg, val);
	} else {
		rtw89_phy_write32(rtwdev, reg, hal->edcca_bak);
	}
}

static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
	.setting_addr = R_CCX,
	.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK,
	.measurement_trig_mask = B_MEASUREMENT_TRIG_MSK,
	.trig_opt_mask = B_CCX_TRIG_OPT_MSK,
	.en_mask = B_CCX_EN_MSK,
	.ifs_cnt_addr = R_IFS_COUNTER,
	.ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK,
	.ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK,
	.ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK,
	.ifs_collect_en_mask = B_IFS_COLLECT_EN,
	.ifs_t1_addr = R_IFS_T1,
	.ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK,
	.ifs_t1_en_mask = B_IFS_T1_EN_MSK,
	.ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK,
	.ifs_t2_addr = R_IFS_T2,
	.ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK,
	.ifs_t2_en_mask = B_IFS_T2_EN_MSK,
	.ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK,
	.ifs_t3_addr = R_IFS_T3,
	.ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK,
	.ifs_t3_en_mask = B_IFS_T3_EN_MSK,
	.ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK,
	.ifs_t4_addr = R_IFS_T4,
	.ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK,
	.ifs_t4_en_mask = B_IFS_T4_EN_MSK,
	.ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK,
	.ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT,
	.ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK,
	.ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK,
	.ifs_clm_cca_addr = R_IFS_CLM_CCA,
	.ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK,
	.ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK,
	.ifs_clm_fa_addr = R_IFS_CLM_FA,
	.ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
	.ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
	.ifs_his_addr = R_IFS_HIS,
	.ifs_t4_his_mask = B_IFS_T4_HIS_MSK,
	.ifs_t3_his_mask = B_IFS_T3_HIS_MSK,
	.ifs_t2_his_mask = B_IFS_T2_HIS_MSK,
	.ifs_t1_his_mask = B_IFS_T1_HIS_MSK,
	.ifs_avg_l_addr = R_IFS_AVG_L,
	.ifs_t2_avg_mask = B_IFS_T2_AVG_MSK,
	.ifs_t1_avg_mask = B_IFS_T1_AVG_MSK,
	.ifs_avg_h_addr = R_IFS_AVG_H,
	.ifs_t4_avg_mask = B_IFS_T4_AVG_MSK,
	.ifs_t3_avg_mask = B_IFS_T3_AVG_MSK,
	.ifs_cca_l_addr = R_IFS_CCA_L,
	.ifs_t2_cca_mask = B_IFS_T2_CCA_MSK,
	.ifs_t1_cca_mask = B_IFS_T1_CCA_MSK,
	.ifs_cca_h_addr = R_IFS_CCA_H,
	.ifs_t4_cca_mask = B_IFS_T4_CCA_MSK,
	.ifs_t3_cca_mask = B_IFS_T3_CCA_MSK,
	.ifs_total_addr = R_IFSCNT,
	.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
	.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
};

static const struct rtw89_physts_regs rtw89_physts_regs_ax = {
	.setting_addr = R_PLCP_HISTOGRAM,
	.dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL,
	.dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK,
};

const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
	.cr_base = 0x10000,
	.ccx = &rtw89_ccx_regs_ax,
	.physts = &rtw89_physts_regs_ax,
};
EXPORT_SYMBOL(rtw89_phy_gen_ax);