1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/* Copyright(c) 2019-2022  Realtek Corporation
3 */
4
5#include "coex.h"
6#include "debug.h"
7#include "phy.h"
8#include "reg.h"
9#include "rtw8852c.h"
10#include "rtw8852c_rfk.h"
11#include "rtw8852c_rfk_table.h"
12#include "rtw8852c_table.h"
13
14struct rxck_def {
15	u32 ctl;
16	u32 en;
17	u32 bw0;
18	u32 bw1;
19	u32 mul;
20	u32 lp;
21};
22
23#define _TSSI_DE_MASK GENMASK(21, 12)
24static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852C] = {0x5858, 0x7858};
25static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852C] = {0x5860, 0x7860};
26static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852C] = {0x5838, 0x7838};
27static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852C] = {0x5840, 0x7840};
28static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852C] = {0x5848, 0x7848};
29static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852C] = {0x5850, 0x7850};
30static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852C] = {0x5828, 0x7828};
31static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852C] = {0x5830, 0x7830};
32
33static const u32 rtw8852c_backup_bb_regs[] = {
34	0x8120, 0xc0d4, 0xc0d8, 0xc0e8, 0x8220, 0xc1d4, 0xc1d8, 0xc1e8
35};
36
37static const u32 rtw8852c_backup_rf_regs[] = {
38	0xdf, 0x5f, 0x8f, 0x97, 0xa3, 0x5, 0x10005
39};
40
41#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852c_backup_bb_regs)
42#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852c_backup_rf_regs)
43
44#define RXK_GROUP_NR 4
45static const u32 _rxk_a6_idxrxgain[RXK_GROUP_NR] = {0x190, 0x196, 0x290, 0x316};
46static const u32 _rxk_a6_idxattc2[RXK_GROUP_NR] = {0x00, 0x0, 0x00, 0x00};
47static const u32 _rxk_a_idxrxgain[RXK_GROUP_NR] = {0x190, 0x198, 0x310, 0x318};
48static const u32 _rxk_a_idxattc2[RXK_GROUP_NR] = {0x00, 0x00, 0x00, 0x00};
49static const u32 _rxk_g_idxrxgain[RXK_GROUP_NR] = {0x252, 0x26c, 0x350, 0x360};
50static const u32 _rxk_g_idxattc2[RXK_GROUP_NR] = {0x00, 0x07, 0x00, 0x3};
51
52#define TXK_GROUP_NR 3
53static const u32 _txk_a6_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
54static const u32 _txk_a6_track_range[TXK_GROUP_NR] = {0x6, 0x7, 0x7};
55static const u32 _txk_a6_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
56static const u32 _txk_a6_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
57static const u32 _txk_a_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
58static const u32 _txk_a_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x7};
59static const u32 _txk_a_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e};
60static const u32 _txk_a_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12};
61static const u32 _txk_g_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0};
62static const u32 _txk_g_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x6};
63static const u32 _txk_g_gain_bb[TXK_GROUP_NR] = {0x0e, 0x0a, 0x0e};
64static const u32 _txk_g_itqt[TXK_GROUP_NR] = { 0x12, 0x12, 0x12};
65
66static const u32 dpk_par_regs[RTW89_DPK_RF_PATH][4] = {
67	{0x8190, 0x8194, 0x8198, 0x81a4},
68	{0x81a8, 0x81c4, 0x81c8, 0x81e8},
69};
70
71static const u8 _dck_addr_bs[RF_PATH_NUM_8852C] = {0x0, 0x10};
72static const u8 _dck_addr[RF_PATH_NUM_8852C] = {0xc, 0x1c};
73
74static const struct rxck_def _ck480M = {0x8, 0x2, 0x3, 0xf, 0x0, 0x9};
75static const struct rxck_def _ck960M = {0x8, 0x2, 0x2, 0x8, 0x0, 0x9};
76static const struct rxck_def _ck1920M = {0x8, 0x0, 0x2, 0x4, 0x6, 0x9};
77
78static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
79{
80	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,  PHY%d\n",
81		    rtwdev->dbcc_en, phy_idx);
82
83	if (!rtwdev->dbcc_en)
84		return RF_AB;
85
86	if (phy_idx == RTW89_PHY_0)
87		return RF_A;
88	else
89		return RF_B;
90}
91
92static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
93{
94	u32 i;
95
96	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
97		backup_bb_reg_val[i] =
98			rtw89_phy_read32_mask(rtwdev, rtw8852c_backup_bb_regs[i],
99					      MASKDWORD);
100		rtw89_debug(rtwdev, RTW89_DBG_RFK,
101			    "[IQK]backup bb reg : %x, value =%x\n",
102			    rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]);
103	}
104}
105
106static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
107			       u8 rf_path)
108{
109	u32 i;
110
111	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
112		backup_rf_reg_val[i] =
113			rtw89_read_rf(rtwdev, rf_path,
114				      rtw8852c_backup_rf_regs[i], RFREG_MASK);
115		rtw89_debug(rtwdev, RTW89_DBG_RFK,
116			    "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
117			    rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]);
118	}
119}
120
121static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
122{
123	u32 i;
124
125	for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
126		rtw89_phy_write32_mask(rtwdev, rtw8852c_backup_bb_regs[i],
127				       MASKDWORD, backup_bb_reg_val[i]);
128		rtw89_debug(rtwdev, RTW89_DBG_RFK,
129			    "[IQK]restore bb reg : %x, value =%x\n",
130			    rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]);
131	}
132}
133
134static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
135				u8 rf_path)
136{
137	u32 i;
138
139	for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
140		rtw89_write_rf(rtwdev, rf_path, rtw8852c_backup_rf_regs[i],
141			       RFREG_MASK, backup_rf_reg_val[i]);
142
143		rtw89_debug(rtwdev, RTW89_DBG_RFK,
144			    "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
145			    rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]);
146	}
147}
148
149static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
150{
151	u8 path;
152	u32 rf_mode;
153	int ret;
154
155	for (path = 0; path < RF_PATH_MAX; path++) {
156		if (!(kpath & BIT(path)))
157			continue;
158
159		ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
160					       2, 5000, false, rtwdev, path, 0x00,
161					       RR_MOD_MASK);
162		rtw89_debug(rtwdev, RTW89_DBG_RFK,
163			    "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
164			    path, ret);
165	}
166}
167
168static void _dack_dump(struct rtw89_dev *rtwdev)
169{
170	struct rtw89_dack_info *dack = &rtwdev->dack;
171	u8 i;
172	u8 t;
173
174	rtw89_debug(rtwdev, RTW89_DBG_RFK,
175		    "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
176		    dack->addck_d[0][0], dack->addck_d[0][1]);
177	rtw89_debug(rtwdev, RTW89_DBG_RFK,
178		    "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
179		    dack->addck_d[1][0], dack->addck_d[1][1]);
180	rtw89_debug(rtwdev, RTW89_DBG_RFK,
181		    "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
182		    dack->dadck_d[0][0], dack->dadck_d[0][1]);
183	rtw89_debug(rtwdev, RTW89_DBG_RFK,
184		    "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
185		    dack->dadck_d[1][0], dack->dadck_d[1][1]);
186
187	rtw89_debug(rtwdev, RTW89_DBG_RFK,
188		    "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
189		    dack->biask_d[0][0], dack->biask_d[0][1]);
190	rtw89_debug(rtwdev, RTW89_DBG_RFK,
191		    "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
192		    dack->biask_d[1][0], dack->biask_d[1][1]);
193
194	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
195	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
196		t = dack->msbk_d[0][0][i];
197		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
198	}
199	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
200	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
201		t = dack->msbk_d[0][1][i];
202		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
203	}
204	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
205	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
206		t = dack->msbk_d[1][0][i];
207		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
208	}
209	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
210	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
211		t = dack->msbk_d[1][1][i];
212		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
213	}
214}
215
216static void _addck_backup(struct rtw89_dev *rtwdev)
217{
218	struct rtw89_dack_info *dack = &rtwdev->dack;
219
220	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
221	dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
222						    B_ADDCKR0_A0);
223	dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0,
224						    B_ADDCKR0_A1);
225
226	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
227	dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
228						    B_ADDCKR1_A0);
229	dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1,
230						    B_ADDCKR1_A1);
231}
232
233static void _addck_reload(struct rtw89_dev *rtwdev)
234{
235	struct rtw89_dack_info *dack = &rtwdev->dack;
236
237	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1,
238			       dack->addck_d[0][0]);
239	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0,
240			       dack->addck_d[0][1]);
241	rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);
242	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1,
243			       dack->addck_d[1][0]);
244	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0,
245			       dack->addck_d[1][1]);
246	rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3);
247}
248
249static void _dack_backup_s0(struct rtw89_dev *rtwdev)
250{
251	struct rtw89_dack_info *dack = &rtwdev->dack;
252	u8 i;
253
254	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
255	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
256		rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
257		dack->msbk_d[0][0][i] = rtw89_phy_read32_mask(rtwdev,
258							      R_DACK_S0P2,
259							      B_DACK_S0M0);
260		rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
261		dack->msbk_d[0][1][i] = rtw89_phy_read32_mask(rtwdev,
262							      R_DACK_S0P3,
263							      B_DACK_S0M1);
264	}
265	dack->biask_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00,
266						    B_DACK_BIAS00);
267	dack->biask_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01,
268						    B_DACK_BIAS01);
269	dack->dadck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00,
270						    B_DACK_DADCK00);
271	dack->dadck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01,
272						    B_DACK_DADCK01);
273}
274
275static void _dack_backup_s1(struct rtw89_dev *rtwdev)
276{
277	struct rtw89_dack_info *dack = &rtwdev->dack;
278	u8 i;
279
280	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
281	for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
282		rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
283		dack->msbk_d[1][0][i] = rtw89_phy_read32_mask(rtwdev,
284							      R_DACK10S,
285							      B_DACK10S);
286		rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
287		dack->msbk_d[1][1][i] = rtw89_phy_read32_mask(rtwdev,
288							      R_DACK11S,
289							      B_DACK11S);
290	}
291	dack->biask_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10,
292						    B_DACK_BIAS10);
293	dack->biask_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11,
294						    B_DACK_BIAS11);
295	dack->dadck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10,
296						    B_DACK_DADCK10);
297	dack->dadck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11,
298						    B_DACK_DADCK11);
299}
300
301static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
302				 enum rtw89_rf_path path, u8 index)
303{
304	struct rtw89_dack_info *dack = &rtwdev->dack;
305	u32 idx_offset, path_offset;
306	u32 val32, offset, addr;
307	u8 i;
308
309	idx_offset = (index == 0 ? 0 : 0x14);
310	path_offset = (path == RF_PATH_A ? 0 : 0x28);
311	offset = idx_offset + path_offset;
312
313	rtw89_rfk_parser(rtwdev, &rtw8852c_dack_reload_defs_tbl);
314
315	/* msbk_d: 15/14/13/12 */
316	val32 = 0x0;
317	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
318		val32 |= dack->msbk_d[path][index][i + 12] << (i * 8);
319	addr = 0xc200 + offset;
320	rtw89_phy_write32(rtwdev, addr, val32);
321	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
322		    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
323
324	/* msbk_d: 11/10/9/8 */
325	val32 = 0x0;
326	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
327		val32 |= dack->msbk_d[path][index][i + 8] << (i * 8);
328	addr = 0xc204 + offset;
329	rtw89_phy_write32(rtwdev, addr, val32);
330	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
331		    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
332
333	/* msbk_d: 7/6/5/4 */
334	val32 = 0x0;
335	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
336		val32 |= dack->msbk_d[path][index][i + 4] << (i * 8);
337	addr = 0xc208 + offset;
338	rtw89_phy_write32(rtwdev, addr, val32);
339	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
340		    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
341
342	/* msbk_d: 3/2/1/0 */
343	val32 = 0x0;
344	for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
345		val32 |= dack->msbk_d[path][index][i] << (i * 8);
346	addr = 0xc20c + offset;
347	rtw89_phy_write32(rtwdev, addr, val32);
348	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr,
349		    rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
350
351	/* dadak_d/biask_d */
352	val32 = (dack->biask_d[path][index] << 22) |
353		(dack->dadck_d[path][index] << 14);
354	addr = 0xc210 + offset;
355	rtw89_phy_write32(rtwdev, addr, val32);
356	rtw89_phy_write32_set(rtwdev, addr, BIT(0));
357}
358
359static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
360{
361	u8 i;
362
363	for (i = 0; i < 2; i++)
364		_dack_reload_by_path(rtwdev, path, i);
365}
366
367static void _addck(struct rtw89_dev *rtwdev)
368{
369	struct rtw89_dack_info *dack = &rtwdev->dack;
370	u32 val;
371	int ret;
372
373	/* S0 */
374	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1);
375	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1);
376	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0);
377	fsleep(1);
378	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);
379
380	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
381				       1, 10000, false, rtwdev, 0xc0fc, BIT(0));
382	if (ret) {
383		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
384		dack->addck_timeout[0] = true;
385	}
386
387	rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0);
388
389	/* S1 */
390	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x1);
391	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x1);
392	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x0);
393	udelay(1);
394	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);
395
396	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
397				       1, 10000, false, rtwdev, 0xc1fc, BIT(0));
398	if (ret) {
399		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
400		dack->addck_timeout[0] = true;
401	}
402	rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x0);
403}
404
405static void _dack_reset(struct rtw89_dev *rtwdev, u8 path)
406{
407	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
408				 &rtw8852c_dack_reset_defs_a_tbl,
409				 &rtw8852c_dack_reset_defs_b_tbl);
410}
411
412enum adc_ck {
413	ADC_NA = 0,
414	ADC_480M = 1,
415	ADC_960M = 2,
416	ADC_1920M = 3,
417};
418
419enum dac_ck {
420	DAC_40M = 0,
421	DAC_80M = 1,
422	DAC_120M = 2,
423	DAC_160M = 3,
424	DAC_240M = 4,
425	DAC_320M = 5,
426	DAC_480M = 6,
427	DAC_960M = 7,
428};
429
430enum rf_mode {
431	RF_SHUT_DOWN = 0x0,
432	RF_STANDBY = 0x1,
433	RF_TX = 0x2,
434	RF_RX = 0x3,
435	RF_TXIQK = 0x4,
436	RF_DPK = 0x5,
437	RF_RXK1 = 0x6,
438	RF_RXK2 = 0x7,
439};
440
441static void rtw8852c_txck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
442				enum dac_ck ck)
443{
444	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);
445
446	if (!force)
447		return;
448
449	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
450	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
451}
452
453static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
454				enum adc_ck ck)
455{
456	const struct rxck_def *def;
457
458	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);
459
460	if (!force)
461		return;
462
463	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
464	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);
465
466	switch (ck) {
467	case ADC_480M:
468		def = &_ck480M;
469		break;
470	case ADC_960M:
471		def = &_ck960M;
472		break;
473	case ADC_1920M:
474	default:
475		def = &_ck1920M;
476		break;
477	}
478
479	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_CTL, def->ctl);
480	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_EN, def->en);
481	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, def->bw0);
482	rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, def->bw1);
483	rtw89_phy_write32_mask(rtwdev, R_DRCK | (path << 8), B_DRCK_MUL, def->mul);
484	rtw89_phy_write32_mask(rtwdev, R_ADCMOD | (path << 8), B_ADCMOD_LP, def->lp);
485}
486
487static bool _check_dack_done(struct rtw89_dev *rtwdev, bool s0)
488{
489	if (s0) {
490		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
491		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
492		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
493		    rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
494			return false;
495	} else {
496		if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
497		    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 ||
498		    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 ||
499		    rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0)
500			return false;
501	}
502
503	return true;
504}
505
506static void _dack_s0(struct rtw89_dev *rtwdev)
507{
508	struct rtw89_dack_info *dack = &rtwdev->dack;
509	bool done;
510	int ret;
511
512	rtw8852c_txck_force(rtwdev, RF_PATH_A, true, DAC_160M);
513	rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s0_tbl);
514
515	_dack_reset(rtwdev, RF_PATH_A);
516
517	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
518	ret = read_poll_timeout_atomic(_check_dack_done, done, done,
519				       1, 10000, false, rtwdev, true);
520	if (ret) {
521		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
522		dack->msbk_timeout[0] = true;
523	}
524	rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0);
525	rtw8852c_txck_force(rtwdev, RF_PATH_A, false, DAC_960M);
526	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
527
528	_dack_backup_s0(rtwdev);
529	_dack_reload(rtwdev, RF_PATH_A);
530	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
531}
532
533static void _dack_s1(struct rtw89_dev *rtwdev)
534{
535	struct rtw89_dack_info *dack = &rtwdev->dack;
536	bool done;
537	int ret;
538
539	rtw8852c_txck_force(rtwdev, RF_PATH_B, true, DAC_160M);
540	rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s1_tbl);
541
542	_dack_reset(rtwdev, RF_PATH_B);
543
544	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1);
545	ret = read_poll_timeout_atomic(_check_dack_done, done, done,
546				       1, 10000, false, rtwdev, false);
547	if (ret) {
548		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n");
549		dack->msbk_timeout[0] = true;
550	}
551	rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0);
552	rtw8852c_txck_force(rtwdev, RF_PATH_B, false, DAC_960M);
553	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
554
555	_dack_backup_s1(rtwdev);
556	_dack_reload(rtwdev, RF_PATH_B);
557	rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
558}
559
560static void _dack(struct rtw89_dev *rtwdev)
561{
562	_dack_s0(rtwdev);
563	_dack_s1(rtwdev);
564}
565
566static void _drck(struct rtw89_dev *rtwdev)
567{
568	u32 val;
569	int ret;
570
571	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);
572	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
573				       1, 10000, false, rtwdev, 0xc0c8, BIT(3));
574	if (ret)
575		rtw89_debug(rtwdev, RTW89_DBG_RFK,  "[DACK]DRCK timeout\n");
576
577	rtw89_rfk_parser(rtwdev, &rtw8852c_drck_defs_tbl);
578
579	val = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, B_DRCK_RES);
580	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
581	rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, val);
582	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
583		    rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
584}
585
586static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
587{
588	struct rtw89_dack_info *dack = &rtwdev->dack;
589	u32 rf0_0, rf1_0;
590	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);
591
592	dack->dack_done = false;
593	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
594	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
595	rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
596	rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
597	_drck(rtwdev);
598
599	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
600	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
601	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
602	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
603	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
604	_addck(rtwdev);
605	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
606
607	_addck_backup(rtwdev);
608	_addck_reload(rtwdev);
609	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
610	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
611	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
612	_dack(rtwdev);
613	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
614
615	_dack_dump(rtwdev);
616	dack->dack_done = true;
617	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
618	rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
619	rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
620	rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
621	dack->dack_cnt++;
622	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
623}
624
625#define RTW8852C_NCTL_VER 0xd
626#define RTW8852C_IQK_VER 0x2a
627#define RTW8852C_IQK_SS 2
628#define RTW8852C_IQK_THR_REK 8
629#define RTW8852C_IQK_CFIR_GROUP_NR 4
630
631enum rtw8852c_iqk_type {
632	ID_TXAGC,
633	ID_G_FLOK_COARSE,
634	ID_A_FLOK_COARSE,
635	ID_G_FLOK_FINE,
636	ID_A_FLOK_FINE,
637	ID_FLOK_VBUFFER,
638	ID_TXK,
639	ID_RXAGC,
640	ID_RXK,
641	ID_NBTXK,
642	ID_NBRXK,
643};
644
645static void rtw8852c_disable_rxagc(struct rtw89_dev *rtwdev, u8 path, u8 en_rxgac)
646{
647	if (path == RF_PATH_A)
648		rtw89_phy_write32_mask(rtwdev, R_P0_AGC_CTL, B_P0_AGC_EN, en_rxgac);
649	else
650		rtw89_phy_write32_mask(rtwdev, R_P1_AGC_CTL, B_P1_AGC_EN, en_rxgac);
651}
652
653static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
654{
655	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
656
657	if (path == RF_PATH_A)
658		rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101);
659	else
660		rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0202);
661
662	switch (iqk_info->iqk_bw[path]) {
663	case RTW89_CHANNEL_WIDTH_20:
664	case RTW89_CHANNEL_WIDTH_40:
665		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
666		rtw8852c_rxck_force(rtwdev, path, true, ADC_480M);
667		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x0);
668		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
669		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
670		break;
671	case RTW89_CHANNEL_WIDTH_80:
672		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
673		rtw8852c_rxck_force(rtwdev, path, true, ADC_960M);
674		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x1);
675		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
676		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
677	break;
678	case RTW89_CHANNEL_WIDTH_160:
679		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
680		rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
681		rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x2);
682		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
683		rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
684		break;
685	default:
686		break;
687	}
688
689	rtw89_rfk_parser(rtwdev, &rtw8852c_iqk_rxk_cfg_defs_tbl);
690
691	if (path == RF_PATH_A)
692		rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1101);
693	else
694		rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x2202);
695}
696
697static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
698{
699	u32 tmp;
700	u32 val;
701	int ret;
702
703	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
704				       1, 8200, false, rtwdev, 0xbff8, MASKBYTE0);
705	if (ret)
706		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");
707
708	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
709	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
710	tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
711	rtw89_debug(rtwdev, RTW89_DBG_RFK,
712		    "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);
713
714	return false;
715}
716
717static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
718			  enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
719{
720	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
721	u32 addr_rfc_ctl = R_UPD_CLK + (path << 13);
722	u32 iqk_cmd;
723	bool fail;
724
725	switch (ktype) {
726	case ID_TXAGC:
727		iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
728		break;
729	case ID_A_FLOK_COARSE:
730		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
731		iqk_cmd = 0x008 | (1 << (4 + path));
732		break;
733	case ID_G_FLOK_COARSE:
734		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
735		iqk_cmd = 0x108 | (1 << (4 + path));
736		break;
737	case ID_A_FLOK_FINE:
738		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
739		iqk_cmd = 0x508 | (1 << (4 + path));
740		break;
741	case ID_G_FLOK_FINE:
742		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
743		iqk_cmd = 0x208 | (1 << (4 + path));
744		break;
745	case ID_FLOK_VBUFFER:
746		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
747		iqk_cmd = 0x308 | (1 << (4 + path));
748		break;
749	case ID_TXK:
750		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
751		iqk_cmd = 0x008 | (1 << (4 + path)) | ((0x8 + iqk_info->iqk_bw[path]) << 8);
752		break;
753	case ID_RXAGC:
754		iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
755		break;
756	case ID_RXK:
757		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
758		iqk_cmd = 0x008 | (1 << (4 + path)) | ((0xc + iqk_info->iqk_bw[path]) << 8);
759		break;
760	case ID_NBTXK:
761		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
762		iqk_cmd = 0x408 | (1 << (4 + path));
763		break;
764	case ID_NBRXK:
765		rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1);
766		iqk_cmd = 0x608 | (1 << (4 + path));
767		break;
768	default:
769		return false;
770	}
771
772	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
773	fsleep(15);
774	fail = _iqk_check_cal(rtwdev, path, ktype);
775	rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0);
776
777	return fail;
778}
779
780static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
781			   enum rtw89_phy_idx phy_idx, u8 path)
782{
783	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
784	bool fail;
785	u32 tmp;
786	u32 bkrf0;
787	u8 gp;
788
789	bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
790	if (path == RF_PATH_B) {
791		rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
792		tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
793		rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
794		tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
795		rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
796	}
797
798	switch (iqk_info->iqk_band[path]) {
799	case RTW89_BAND_2G:
800	default:
801		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
802		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
803		rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
804		break;
805	case RTW89_BAND_5G:
806		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
807		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
808		rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
809		break;
810	case RTW89_BAND_6G:
811		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
812		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
813		rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
814		break;
815	}
816
817	fsleep(10);
818
819	for (gp = 0; gp < RXK_GROUP_NR; gp++) {
820		switch (iqk_info->iqk_band[path]) {
821		case RTW89_BAND_2G:
822		default:
823			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
824				       _rxk_g_idxrxgain[gp]);
825			rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF,
826				       _rxk_g_idxattc2[gp]);
827			break;
828		case RTW89_BAND_5G:
829			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
830				       _rxk_a_idxrxgain[gp]);
831			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
832				       _rxk_a_idxattc2[gp]);
833			break;
834		case RTW89_BAND_6G:
835			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG,
836				       _rxk_a6_idxrxgain[gp]);
837			rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT,
838				       _rxk_a6_idxattc2[gp]);
839			break;
840		}
841		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
842				       B_CFIR_LUT_SEL, 0x1);
843		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
844				       B_CFIR_LUT_SET, 0x0);
845		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
846				       B_CFIR_LUT_GP_V1, gp);
847		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
848	}
849
850	if (path == RF_PATH_B)
851		rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);
852	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);
853
854	if (fail) {
855		iqk_info->nb_rxcfir[path] = 0x40000002;
856		iqk_info->is_wb_rxiqk[path] = false;
857	} else {
858		iqk_info->nb_rxcfir[path] = 0x40000000;
859		iqk_info->is_wb_rxiqk[path] = true;
860	}
861
862	return false;
863}
864
865static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
866		       enum rtw89_phy_idx phy_idx, u8 path)
867{
868	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
869	bool fail;
870	u32 tmp;
871	u32 bkrf0;
872	u8 gp = 0x2;
873
874	bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW);
875	if (path == RF_PATH_B) {
876		rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3);
877		tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD);
878		rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp);
879		tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX);
880		rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp);
881	}
882
883	switch (iqk_info->iqk_band[path]) {
884	case RTW89_BAND_2G:
885	default:
886		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
887		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
888		rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9);
889		break;
890	case RTW89_BAND_5G:
891		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
892		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
893		rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8);
894		break;
895	case RTW89_BAND_6G:
896		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
897		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0);
898		rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9);
899		break;
900	}
901
902	fsleep(10);
903
904	switch (iqk_info->iqk_band[path]) {
905	case RTW89_BAND_2G:
906	default:
907		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_g_idxrxgain[gp]);
908		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF, _rxk_g_idxattc2[gp]);
909		break;
910	case RTW89_BAND_5G:
911		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a_idxrxgain[gp]);
912		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a_idxattc2[gp]);
913		break;
914	case RTW89_BAND_6G:
915		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a6_idxrxgain[gp]);
916		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a6_idxattc2[gp]);
917		break;
918	}
919
920	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
921	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0);
922	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp);
923	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
924
925	if (path == RF_PATH_B)
926		rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0);
927
928	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0);
929
930	if (fail)
931		iqk_info->nb_rxcfir[path] =
932			rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
933					      MASKDWORD) | 0x2;
934	else
935		iqk_info->nb_rxcfir[path] = 0x40000002;
936
937	iqk_info->is_wb_rxiqk[path] = false;
938	return fail;
939}
940
941static bool _txk_group_sel(struct rtw89_dev *rtwdev,
942			   enum rtw89_phy_idx phy_idx, u8 path)
943{
944	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
945	bool fail;
946	u8 gp;
947
948	for (gp = 0; gp < TXK_GROUP_NR; gp++) {
949		switch (iqk_info->iqk_band[path]) {
950		case RTW89_BAND_2G:
951			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
952				       _txk_g_power_range[gp]);
953			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
954				       _txk_g_track_range[gp]);
955			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
956				       _txk_g_gain_bb[gp]);
957			rtw89_phy_write32_mask(rtwdev,
958					       R_KIP_IQP + (path << 8),
959					       MASKDWORD, _txk_g_itqt[gp]);
960			break;
961		case RTW89_BAND_5G:
962			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
963				       _txk_a_power_range[gp]);
964			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
965				       _txk_a_track_range[gp]);
966			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
967				       _txk_a_gain_bb[gp]);
968			rtw89_phy_write32_mask(rtwdev,
969					       R_KIP_IQP + (path << 8),
970					       MASKDWORD, _txk_a_itqt[gp]);
971			break;
972		case RTW89_BAND_6G:
973			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
974				       _txk_a6_power_range[gp]);
975			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
976				       _txk_a6_track_range[gp]);
977			rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
978				       _txk_a6_gain_bb[gp]);
979			rtw89_phy_write32_mask(rtwdev,
980					       R_KIP_IQP + (path << 8),
981					       MASKDWORD, _txk_a6_itqt[gp]);
982			break;
983		default:
984			break;
985		}
986		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
987				       B_CFIR_LUT_SEL, 0x1);
988		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
989				       B_CFIR_LUT_SET, 0x1);
990		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
991				       B_CFIR_LUT_G2, 0x0);
992		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
993				       B_CFIR_LUT_GP, gp + 1);
994		rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
995		rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
996		fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
997	}
998
999	if (fail) {
1000		iqk_info->nb_txcfir[path] = 0x40000002;
1001		iqk_info->is_wb_txiqk[path] = false;
1002	} else {
1003		iqk_info->nb_txcfir[path] = 0x40000000;
1004		iqk_info->is_wb_txiqk[path] = true;
1005	}
1006
1007	return fail;
1008}
1009
1010static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
1011		       enum rtw89_phy_idx phy_idx, u8 path)
1012{
1013	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1014	bool fail;
1015	u8 gp = 0x2;
1016
1017	switch (iqk_info->iqk_band[path]) {
1018	case RTW89_BAND_2G:
1019		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_g_power_range[gp]);
1020		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_g_track_range[gp]);
1021		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_g_gain_bb[gp]);
1022		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1023				       MASKDWORD, _txk_g_itqt[gp]);
1024		break;
1025	case RTW89_BAND_5G:
1026		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a_power_range[gp]);
1027		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a_track_range[gp]);
1028		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a_gain_bb[gp]);
1029		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1030				       MASKDWORD, _txk_a_itqt[gp]);
1031		break;
1032	case RTW89_BAND_6G:
1033		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a6_power_range[gp]);
1034		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a6_track_range[gp]);
1035		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a6_gain_bb[gp]);
1036		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1037				       MASKDWORD, _txk_a6_itqt[gp]);
1038	break;
1039	default:
1040		break;
1041	}
1042
1043	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
1044	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1);
1045	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0);
1046	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp + 1);
1047	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b);
1048	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
1049	fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
1050
1051	if (!fail)
1052		iqk_info->nb_txcfir[path] =
1053			rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
1054					      MASKDWORD) | 0x2;
1055	else
1056		iqk_info->nb_txcfir[path] = 0x40000002;
1057
1058	iqk_info->is_wb_txiqk[path] = false;
1059
1060	return fail;
1061}
1062
1063static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
1064{
1065	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
1066	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1067	u8 idx = rfk_mcc->table_idx;
1068	bool is_fail1,  is_fail2;
1069	u32 val;
1070	u32 core_i;
1071	u32 core_q;
1072	u32 vbuff_i;
1073	u32 vbuff_q;
1074
1075	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1076	val = rtw89_read_rf(rtwdev,  path, RR_TXMO, RFREG_MASK);
1077	core_i = FIELD_GET(RR_TXMO_COI, val);
1078	core_q = FIELD_GET(RR_TXMO_COQ, val);
1079
1080	if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
1081		is_fail1 = true;
1082	else
1083		is_fail1 = false;
1084
1085	iqk_info->lok_idac[idx][path] = val;
1086
1087	val = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK);
1088	vbuff_i = FIELD_GET(RR_LOKVB_COI, val);
1089	vbuff_q = FIELD_GET(RR_LOKVB_COQ, val);
1090
1091	if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d)
1092		is_fail2 = true;
1093	else
1094		is_fail2 = false;
1095
1096	iqk_info->lok_vbuf[idx][path] = val;
1097
1098	return is_fail1 || is_fail2;
1099}
1100
1101static bool _iqk_lok(struct rtw89_dev *rtwdev,
1102		     enum rtw89_phy_idx phy_idx, u8 path)
1103{
1104	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1105	u8 tmp_id = 0x0;
1106	bool fail = false;
1107	bool tmp = false;
1108
1109	/* Step 0: Init RF gain & tone idx= 8.25Mhz */
1110	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, IQK_DF4_TXT_8_25MHZ);
1111
1112	/* Step 1  START: _lok_coarse_fine_wi_swap */
1113	switch (iqk_info->iqk_band[path]) {
1114	case RTW89_BAND_2G:
1115		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
1116		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1117				       B_KIP_IQP_IQSW, 0x9);
1118		tmp_id = ID_G_FLOK_COARSE;
1119		break;
1120	case RTW89_BAND_5G:
1121		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
1122		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1123				       B_KIP_IQP_IQSW, 0x9);
1124		tmp_id = ID_A_FLOK_COARSE;
1125		break;
1126	case RTW89_BAND_6G:
1127		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
1128		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1129				       B_KIP_IQP_IQSW, 0x9);
1130		tmp_id = ID_A_FLOK_COARSE;
1131		break;
1132	default:
1133		break;
1134	}
1135	tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
1136	iqk_info->lok_cor_fail[0][path] = tmp;
1137
1138	/* Step 2 */
1139	switch (iqk_info->iqk_band[path]) {
1140	case RTW89_BAND_2G:
1141		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
1142		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1143				       B_KIP_IQP_IQSW, 0x1b);
1144		break;
1145	case RTW89_BAND_5G:
1146		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
1147		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1148				       B_KIP_IQP_IQSW, 0x1b);
1149		break;
1150	case RTW89_BAND_6G:
1151		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
1152		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1153				       B_KIP_IQP_IQSW, 0x1b);
1154		break;
1155	default:
1156		break;
1157	}
1158	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);
1159
1160	/* Step 3 */
1161	switch (iqk_info->iqk_band[path]) {
1162	case RTW89_BAND_2G:
1163		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
1164		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1165				       B_KIP_IQP_IQSW, 0x9);
1166		tmp_id = ID_G_FLOK_FINE;
1167		break;
1168	case RTW89_BAND_5G:
1169		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
1170		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1171				       B_KIP_IQP_IQSW, 0x9);
1172		tmp_id = ID_A_FLOK_FINE;
1173		break;
1174	case RTW89_BAND_6G:
1175		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6);
1176		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1177				       B_KIP_IQP_IQSW, 0x9);
1178		tmp_id = ID_A_FLOK_FINE;
1179		break;
1180	default:
1181		break;
1182	}
1183	tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id);
1184	iqk_info->lok_fin_fail[0][path] = tmp;
1185
1186	/* Step 4 large rf gain */
1187	switch (iqk_info->iqk_band[path]) {
1188	case RTW89_BAND_2G:
1189	default:
1190		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
1191		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1192				       B_KIP_IQP_IQSW, 0x1b);
1193		break;
1194	case RTW89_BAND_5G:
1195		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
1196		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1197				       B_KIP_IQP_IQSW, 0x1b);
1198		break;
1199	case RTW89_BAND_6G:
1200		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
1201		rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
1202				       B_KIP_IQP_IQSW, 0x1b);
1203		break;
1204	}
1205	tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER);
1206	fail = _lok_finetune_check(rtwdev, path);
1207
1208	return fail;
1209}
1210
1211static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
1212{
1213	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1214
1215	switch (iqk_info->iqk_band[path]) {
1216	case RTW89_BAND_2G:
1217	default:
1218		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
1219		rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
1220		rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
1221		rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
1222		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
1223		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
1224		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
1225			       0x403e0 | iqk_info->syn1to2);
1226		fsleep(10);
1227		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
1228		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
1229		break;
1230	case RTW89_BAND_5G:
1231		rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
1232		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
1233		rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
1234		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
1235		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
1236		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
1237			       0x403e0 | iqk_info->syn1to2);
1238		fsleep(10);
1239		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
1240		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
1241		break;
1242	case RTW89_BAND_6G:
1243		rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0);
1244		rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1);
1245		rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf);
1246		rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
1247		rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
1248		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
1249			       0x403e0  | iqk_info->syn1to2);
1250		fsleep(10);
1251		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
1252		rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6);
1253		break;
1254	}
1255}
1256
1257static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
1258			  u8 path)
1259{
1260	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1261	u32 tmp;
1262	bool flag;
1263
1264	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %lu\n", path,
1265		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]));
1266	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path,
1267		    iqk_info->lok_cor_fail[0][path]);
1268	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path,
1269		    iqk_info->lok_fin_fail[0][path]);
1270	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
1271		    iqk_info->iqk_tx_fail[0][path]);
1272	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path,
1273		    iqk_info->iqk_rx_fail[0][path]);
1274
1275	flag = iqk_info->lok_cor_fail[0][path];
1276	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag);
1277	flag = iqk_info->lok_fin_fail[0][path];
1278	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag);
1279	flag = iqk_info->iqk_tx_fail[0][path];
1280	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag);
1281	flag = iqk_info->iqk_rx_fail[0][path];
1282	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag);
1283
1284	tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
1285	iqk_info->bp_iqkenable[path] = tmp;
1286	tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
1287	iqk_info->bp_txkresult[path] = tmp;
1288	tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
1289	iqk_info->bp_rxkresult[path] = tmp;
1290
1291	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
1292			       iqk_info->iqk_times);
1293
1294	tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4));
1295	if (tmp != 0x0)
1296		iqk_info->iqk_fail_cnt++;
1297	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4),
1298			       iqk_info->iqk_fail_cnt);
1299}
1300
1301static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
1302{
1303	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1304
1305	_iqk_txk_setting(rtwdev, path);
1306	iqk_info->lok_fail[path] = _iqk_lok(rtwdev, phy_idx, path);
1307
1308	if (iqk_info->is_nbiqk)
1309		iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
1310	else
1311		iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);
1312
1313	_iqk_rxk_setting(rtwdev, path);
1314	if (iqk_info->is_nbiqk)
1315		iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
1316	else
1317		iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);
1318
1319	_iqk_info_iqk(rtwdev, phy_idx, path);
1320}
1321
1322static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
1323			     enum rtw89_phy_idx phy, u8 path)
1324{
1325	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
1326	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1327
1328	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1329
1330	iqk_info->iqk_band[path] = chan->band_type;
1331	iqk_info->iqk_bw[path] = chan->band_width;
1332	iqk_info->iqk_ch[path] = chan->channel;
1333
1334	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1335		    "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
1336		    iqk_info->iqk_band[path]);
1337	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
1338		    path, iqk_info->iqk_bw[path]);
1339	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
1340		    path, iqk_info->iqk_ch[path]);
1341	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1342		    "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
1343		    rtwdev->dbcc_en ? "on" : "off",
1344		    iqk_info->iqk_band[path] == 0 ? "2G" :
1345		    iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
1346		    iqk_info->iqk_ch[path],
1347		    iqk_info->iqk_bw[path] == 0 ? "20M" :
1348		    iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
1349	if (!rtwdev->dbcc_en)
1350		iqk_info->syn1to2 = 0x1;
1351	else
1352		iqk_info->syn1to2 = 0x3;
1353
1354	rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852C_IQK_VER);
1355	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16),
1356			       iqk_info->iqk_band[path]);
1357	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16),
1358			       iqk_info->iqk_bw[path]);
1359	rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16),
1360			       iqk_info->iqk_ch[path]);
1361
1362	rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_NCTLV, RTW8852C_NCTL_VER);
1363}
1364
1365static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
1366			   u8 path)
1367{
1368	_iqk_by_path(rtwdev, phy_idx, path);
1369}
1370
1371static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
1372{
1373	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1374	bool fail;
1375
1376	rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
1377			       iqk_info->nb_txcfir[path]);
1378	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
1379			       iqk_info->nb_rxcfir[path]);
1380	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
1381			       0x00001219 + (path << 4));
1382	fsleep(200);
1383	fail = _iqk_check_cal(rtwdev, path, 0x12);
1384	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail  = %x\n", fail);
1385
1386	rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
1387	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
1388	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
1389
1390	rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
1391	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
1392	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
1393}
1394
1395static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
1396			       enum rtw89_phy_idx phy_idx, u8 path)
1397{
1398	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
1399				 &rtw8852c_iqk_afebb_restore_defs_a_tbl,
1400				 &rtw8852c_iqk_afebb_restore_defs_b_tbl);
1401
1402	rtw8852c_disable_rxagc(rtwdev, path, 0x1);
1403}
1404
1405static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
1406{
1407	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
1408	u8 idx = 0;
1409
1410	idx = rfk_mcc->table_idx;
1411	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
1412	rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);
1413	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1414	rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
1415	rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
1416}
1417
1418static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
1419			       enum rtw89_phy_idx phy_idx, u8 path)
1420{
1421	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);
1422
1423	/* 01_BB_AFE_for DPK_S0_20210820 */
1424	rtw89_write_rf(rtwdev,  path, RR_BBDC, RR_BBDC_SEL, 0x0);
1425	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
1426	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
1427	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
1428	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
1429
1430	/* disable rxgac */
1431	rtw8852c_disable_rxagc(rtwdev, path, 0x0);
1432	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), MASKDWORD, 0xf801fffd);
1433	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_DIS, 0x1);
1434	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DAC_VAL, 0x1);
1435
1436	rtw8852c_txck_force(rtwdev, path, true, DAC_960M);
1437	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_GDIS, 0x1);
1438
1439	rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
1440	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_ACK_VAL, 0x2);
1441
1442	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW | (path << 13), B_P0_NRBW_DBG, 0x1);
1443	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
1444	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
1445	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
1446	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
1447	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
1448	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);
1449}
1450
1451static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
1452{
1453	u32 rf_reg5, rck_val = 0;
1454	u32 val;
1455	int ret;
1456
1457	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);
1458
1459	rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
1460
1461	rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1462	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
1463
1464	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
1465		    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
1466
1467	/* RCK trigger */
1468	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);
1469
1470	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
1471				       false, rtwdev, path, 0x1c, BIT(3));
1472	if (ret)
1473		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");
1474
1475	rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
1476	rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
1477
1478	rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
1479
1480	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1481		    "[RCK] RF 0x1b / 0x1c = 0x%x / 0x%x\n",
1482		    rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
1483		    rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK));
1484}
1485
1486static void _iqk_init(struct rtw89_dev *rtwdev)
1487{
1488	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1489	u8 ch, path;
1490
1491	rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
1492	if (iqk_info->is_iqk_init)
1493		return;
1494
1495	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
1496	iqk_info->is_iqk_init = true;
1497	iqk_info->is_nbiqk = false;
1498	iqk_info->iqk_fft_en = false;
1499	iqk_info->iqk_sram_en = false;
1500	iqk_info->iqk_cfir_en = false;
1501	iqk_info->iqk_xym_en = false;
1502	iqk_info->iqk_times = 0x0;
1503
1504	for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
1505		iqk_info->iqk_channel[ch] = 0x0;
1506		for (path = 0; path < RTW8852C_IQK_SS; path++) {
1507			iqk_info->lok_cor_fail[ch][path] = false;
1508			iqk_info->lok_fin_fail[ch][path] = false;
1509			iqk_info->iqk_tx_fail[ch][path] = false;
1510			iqk_info->iqk_rx_fail[ch][path] = false;
1511			iqk_info->iqk_mcc_ch[ch][path] = 0x0;
1512			iqk_info->iqk_table_idx[path] = 0x0;
1513		}
1514	}
1515}
1516
1517static void _doiqk(struct rtw89_dev *rtwdev, bool force,
1518		   enum rtw89_phy_idx phy_idx, u8 path)
1519{
1520	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
1521	u32 backup_bb_val[BACKUP_BB_REGS_NR];
1522	u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR];
1523	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
1524
1525	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
1526
1527	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1528		    "[IQK]==========IQK start!!!!!==========\n");
1529	iqk_info->iqk_times++;
1530	iqk_info->version = RTW8852C_IQK_VER;
1531
1532	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
1533	_iqk_get_ch_info(rtwdev, phy_idx, path);
1534	_rfk_backup_bb_reg(rtwdev, backup_bb_val);
1535	_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
1536	_iqk_macbb_setting(rtwdev, phy_idx, path);
1537	_iqk_preset(rtwdev, path);
1538	_iqk_start_iqk(rtwdev, phy_idx, path);
1539	_iqk_restore(rtwdev, path);
1540	_iqk_afebb_restore(rtwdev, phy_idx, path);
1541	_rfk_restore_bb_reg(rtwdev, backup_bb_val);
1542	_rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
1543	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
1544}
1545
1546static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
1547{
1548	switch (_kpath(rtwdev, phy_idx)) {
1549	case RF_A:
1550		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1551		break;
1552	case RF_B:
1553		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1554		break;
1555	case RF_AB:
1556		_doiqk(rtwdev, force, phy_idx, RF_PATH_A);
1557		_doiqk(rtwdev, force, phy_idx, RF_PATH_B);
1558		break;
1559	default:
1560		break;
1561	}
1562}
1563
1564static void _rx_dck_value_rewrite(struct rtw89_dev *rtwdev, u8 path, u8 addr,
1565				  u8 val_i, u8 val_q)
1566{
1567	u32 ofst_val;
1568
1569	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1570		    "[RX_DCK] rewrite val_i = 0x%x, val_q = 0x%x\n", val_i, val_q);
1571
1572	/* val_i and val_q are 7 bits, and target is 6 bits. */
1573	ofst_val = u32_encode_bits(val_q >> 1, RR_LUTWD0_MB) |
1574		   u32_encode_bits(val_i >> 1, RR_LUTWD0_LB);
1575
1576	rtw89_write_rf(rtwdev, path, RR_LUTPLL, RR_CAL_RW, 0x1);
1577	rtw89_write_rf(rtwdev, path, RR_RFC, RR_WCAL, 0x1);
1578	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x1);
1579	rtw89_write_rf(rtwdev, path, RR_LUTWA, MASKBYTE0, addr);
1580	rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ofst_val);
1581	rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ofst_val);
1582	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
1583	rtw89_write_rf(rtwdev, path, RR_RFC, RR_WCAL, 0x0);
1584	rtw89_write_rf(rtwdev, path, RR_LUTPLL, RR_CAL_RW, 0x0);
1585
1586	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] Final val_i = 0x%x, val_q = 0x%x\n",
1587		    u32_get_bits(ofst_val, RR_LUTWD0_LB) << 1,
1588		    u32_get_bits(ofst_val, RR_LUTWD0_MB) << 1);
1589}
1590
1591static bool _rx_dck_rek_check(struct rtw89_dev *rtwdev, u8 path)
1592{
1593	u8 i_even_bs, q_even_bs;
1594	u8 i_odd_bs, q_odd_bs;
1595	u8 i_even, q_even;
1596	u8 i_odd, q_odd;
1597	const u8 th = 10;
1598	u8 i;
1599
1600	for (i = 0; i < RF_PATH_NUM_8852C; i++) {
1601		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i]);
1602		i_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1603		q_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1604		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1605			    "[RX_DCK] Gain[0x%x] i_even_bs/ q_even_bs = 0x%x/ 0x%x\n",
1606			    _dck_addr_bs[i], i_even_bs, q_even_bs);
1607
1608		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i]);
1609		i_even = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1610		q_even = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1611		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1612			    "[RX_DCK] Gain[0x%x] i_even/ q_even = 0x%x/ 0x%x\n",
1613			    _dck_addr[i], i_even, q_even);
1614
1615		if (abs(i_even_bs - i_even) > th || abs(q_even_bs - q_even) > th)
1616			return true;
1617
1618		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i] + 1);
1619		i_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1620		q_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1621		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1622			    "[RX_DCK] Gain[0x%x] i_odd_bs/ q_odd_bs = 0x%x/ 0x%x\n",
1623			    _dck_addr_bs[i] + 1, i_odd_bs, q_odd_bs);
1624
1625		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i] + 1);
1626		i_odd = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1627		q_odd = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1628		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1629			    "[RX_DCK] Gain[0x%x] i_odd/ q_odd = 0x%x/ 0x%x\n",
1630			    _dck_addr[i] + 1, i_odd, q_odd);
1631
1632		if (abs(i_odd_bs - i_odd) > th || abs(q_odd_bs - q_odd) > th)
1633			return true;
1634	}
1635
1636	return false;
1637}
1638
1639static void _rx_dck_fix_if_need(struct rtw89_dev *rtwdev, u8 path, u8 addr,
1640				u8 val_i_bs, u8 val_q_bs, u8 val_i, u8 val_q)
1641{
1642	const u8 th = 10;
1643
1644	if ((abs(val_i_bs - val_i) < th) && (abs(val_q_bs - val_q) <= th)) {
1645		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] offset check PASS!!\n");
1646		return;
1647	}
1648
1649	if (abs(val_i_bs - val_i) > th) {
1650		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1651			    "[RX_DCK] val_i over TH (0x%x / 0x%x)\n", val_i_bs, val_i);
1652		val_i = val_i_bs;
1653	}
1654
1655	if (abs(val_q_bs - val_q) > th) {
1656		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1657			    "[RX_DCK] val_q over TH (0x%x / 0x%x)\n", val_q_bs, val_q);
1658		val_q = val_q_bs;
1659	}
1660
1661	_rx_dck_value_rewrite(rtwdev, path, addr, val_i, val_q);
1662}
1663
1664static void _rx_dck_recover(struct rtw89_dev *rtwdev, u8 path)
1665{
1666	u8 i_even_bs, q_even_bs;
1667	u8 i_odd_bs, q_odd_bs;
1668	u8 i_even, q_even;
1669	u8 i_odd, q_odd;
1670	u8 i;
1671
1672	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] ===> recovery\n");
1673
1674	for (i = 0; i < RF_PATH_NUM_8852C; i++) {
1675		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i]);
1676		i_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1677		q_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1678
1679		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i] + 1);
1680		i_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1681		q_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1682
1683		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1684			    "[RX_DCK] Gain[0x%x] i_even_bs/ q_even_bs = 0x%x/ 0x%x\n",
1685			    _dck_addr_bs[i], i_even_bs, q_even_bs);
1686
1687		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i]);
1688		i_even = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1689		q_even = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1690
1691		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1692			    "[RX_DCK] Gain[0x%x] i_even/ q_even = 0x%x/ 0x%x\n",
1693			    _dck_addr[i], i_even, q_even);
1694		_rx_dck_fix_if_need(rtwdev, path, _dck_addr[i],
1695				    i_even_bs, q_even_bs, i_even, q_even);
1696
1697		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1698			    "[RX_DCK] Gain[0x%x] i_odd_bs/ q_odd_bs = 0x%x/ 0x%x\n",
1699			    _dck_addr_bs[i] + 1, i_odd_bs, q_odd_bs);
1700
1701		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i] + 1);
1702		i_odd = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
1703		q_odd = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
1704
1705		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1706			    "[RX_DCK] Gain[0x%x] i_odd/ q_odd = 0x%x/ 0x%x\n",
1707			    _dck_addr[i] + 1, i_odd, q_odd);
1708		_rx_dck_fix_if_need(rtwdev, path, _dck_addr[i] + 1,
1709				    i_odd_bs, q_odd_bs, i_odd, q_odd);
1710	}
1711}
1712
1713static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path)
1714{
1715	int ret;
1716	u32 val;
1717
1718	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
1719	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
1720
1721	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val,
1722				       2, 2000, false, rtwdev, path,
1723				       RR_DCK1, RR_DCK1_DONE);
1724	if (ret)
1725		rtw89_warn(rtwdev, "[RX_DCK] S%d RXDCK timeout\n", path);
1726	else
1727		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] S%d RXDCK finish\n", path);
1728
1729	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
1730}
1731
1732static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
1733			bool is_afe)
1734{
1735	u8 res;
1736
1737	rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
1738
1739	_rx_dck_toggle(rtwdev, path);
1740	if (rtw89_read_rf(rtwdev, path, RR_DCKC, RR_DCKC_CHK) == 0)
1741		return;
1742	res = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_DONE);
1743	if (res > 1) {
1744		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, res);
1745		_rx_dck_toggle(rtwdev, path);
1746		rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, 0x1);
1747	}
1748}
1749
1750static
1751u8 _rx_dck_channel_calc(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan)
1752{
1753	u8 target_ch = 0;
1754
1755	if (chan->band_type == RTW89_BAND_5G) {
1756		if (chan->channel >= 36 && chan->channel <= 64) {
1757			target_ch = 100;
1758		} else if (chan->channel >= 100 && chan->channel <= 144) {
1759			target_ch = chan->channel + 32;
1760			if (target_ch > 144)
1761				target_ch = chan->channel + 33;
1762		} else if (chan->channel >= 149 && chan->channel <= 177) {
1763			target_ch = chan->channel - 33;
1764		}
1765	} else if (chan->band_type == RTW89_BAND_6G) {
1766		if (chan->channel >= 1 && chan->channel <= 125)
1767			target_ch = chan->channel + 32;
1768		else
1769			target_ch = chan->channel - 32;
1770	} else {
1771		target_ch = chan->channel;
1772	}
1773
1774	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1775		    "[RX_DCK] cur_ch / target_ch = %d / %d\n",
1776		    chan->channel, target_ch);
1777
1778	return target_ch;
1779}
1780
1781#define RTW8852C_RF_REL_VERSION 34
1782#define RTW8852C_DPK_VER 0xf
1783#define RTW8852C_DPK_TH_AVG_NUM 4
1784#define RTW8852C_DPK_RF_PATH 2
1785#define RTW8852C_DPK_KIP_REG_NUM 7
1786#define RTW8852C_DPK_RXSRAM_DBG 0
1787
1788enum rtw8852c_dpk_id {
1789	LBK_RXIQK	= 0x06,
1790	SYNC		= 0x10,
1791	MDPK_IDL	= 0x11,
1792	MDPK_MPA	= 0x12,
1793	GAIN_LOSS	= 0x13,
1794	GAIN_CAL	= 0x14,
1795	DPK_RXAGC	= 0x15,
1796	KIP_PRESET	= 0x16,
1797	KIP_RESTORE	= 0x17,
1798	DPK_TXAGC	= 0x19,
1799	D_KIP_PRESET	= 0x28,
1800	D_TXAGC		= 0x29,
1801	D_RXAGC		= 0x2a,
1802	D_SYNC		= 0x2b,
1803	D_GAIN_LOSS	= 0x2c,
1804	D_MDPK_IDL	= 0x2d,
1805	D_GAIN_NORM	= 0x2f,
1806	D_KIP_THERMAL	= 0x30,
1807	D_KIP_RESTORE	= 0x31
1808};
1809
1810#define DPK_TXAGC_LOWER 0x2e
1811#define DPK_TXAGC_UPPER 0x3f
1812#define DPK_TXAGC_INVAL 0xff
1813
1814enum dpk_agc_step {
1815	DPK_AGC_STEP_SYNC_DGAIN,
1816	DPK_AGC_STEP_GAIN_LOSS_IDX,
1817	DPK_AGC_STEP_GL_GT_CRITERION,
1818	DPK_AGC_STEP_GL_LT_CRITERION,
1819	DPK_AGC_STEP_SET_TX_GAIN,
1820};
1821
1822enum dpk_pas_result {
1823	DPK_PAS_NOR,
1824	DPK_PAS_GT,
1825	DPK_PAS_LT,
1826};
1827
1828static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
1829			     enum rtw89_rf_path path, bool is_bybb)
1830{
1831	if (is_bybb)
1832		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
1833	else
1834		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
1835}
1836
1837static void _dpk_onoff(struct rtw89_dev *rtwdev,
1838		       enum rtw89_rf_path path, bool off);
1839
1840static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
1841			  u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
1842{
1843	u8 i;
1844
1845	for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) {
1846		reg_bkup[path][i] =
1847			rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD);
1848
1849		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
1850			    reg[i] + (path << 8), reg_bkup[path][i]);
1851	}
1852}
1853
1854static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
1855			    u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path)
1856{
1857	u8 i;
1858
1859	for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) {
1860		rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8),
1861				       MASKDWORD, reg_bkup[path][i]);
1862		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
1863			    reg[i] + (path << 8), reg_bkup[path][i]);
1864	}
1865}
1866
1867static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
1868			enum rtw89_rf_path path, enum rtw8852c_dpk_id id)
1869{
1870	u16 dpk_cmd;
1871	u32 val;
1872	int ret;
1873
1874	dpk_cmd = (u16)((id << 8) | (0x19 + path * 0x12));
1875
1876	rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
1877
1878	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
1879				       10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);
1880	udelay(10);
1881	rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
1882
1883	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1884		    "[DPK] one-shot for %s = 0x%x (ret=%d)\n",
1885		    id == 0x06 ? "LBK_RXIQK" :
1886		    id == 0x10 ? "SYNC" :
1887		    id == 0x11 ? "MDPK_IDL" :
1888		    id == 0x12 ? "MDPK_MPA" :
1889		    id == 0x13 ? "GAIN_LOSS" : "PWR_CAL",
1890		    dpk_cmd, ret);
1891
1892	if (ret) {
1893		rtw89_debug(rtwdev, RTW89_DBG_RFK,
1894			    "[DPK] one-shot over 20ms!!!!\n");
1895		return 1;
1896	}
1897
1898	return 0;
1899}
1900
1901static void _dpk_information(struct rtw89_dev *rtwdev,
1902			     enum rtw89_phy_idx phy,
1903			     enum rtw89_rf_path path)
1904{
1905	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
1906	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
1907
1908	u8 kidx = dpk->cur_idx[path];
1909
1910	dpk->bp[path][kidx].band = chan->band_type;
1911	dpk->bp[path][kidx].ch = chan->channel;
1912	dpk->bp[path][kidx].bw = chan->band_width;
1913
1914	rtw89_debug(rtwdev, RTW89_DBG_RFK,
1915		    "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
1916		    path, dpk->cur_idx[path], phy,
1917		    rtwdev->is_tssi_mode[path] ? "on" : "off",
1918		    rtwdev->dbcc_en ? "on" : "off",
1919		    dpk->bp[path][kidx].band == 0 ? "2G" :
1920		    dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
1921		    dpk->bp[path][kidx].ch,
1922		    dpk->bp[path][kidx].bw == 0 ? "20M" :
1923		    dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
1924}
1925
1926static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
1927				enum rtw89_phy_idx phy,
1928				enum rtw89_rf_path path, u8 kpath)
1929{
1930	/*1. Keep ADC_fifo reset*/
1931	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
1932	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
1933	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
1934	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
1935
1936	/*2. BB for IQK DBG mode*/
1937	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd);
1938
1939	/*3.Set DAC clk*/
1940	rtw8852c_txck_force(rtwdev, path, true, DAC_960M);
1941
1942	/*4. Set ADC clk*/
1943	rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
1944	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
1945			       B_P0_NRBW_DBG, 0x1);
1946	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
1947	rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13);
1948	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001);
1949	rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041);
1950
1951	/*5. ADDA fifo rst*/
1952	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1);
1953	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1);
1954
1955	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE setting\n", path);
1956}
1957
1958static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, u8 path)
1959{
1960	rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
1961			       B_P0_NRBW_DBG, 0x0);
1962	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1);
1963	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0);
1964	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1);
1965	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0);
1966	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0x00000000);
1967	rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, 0x00);
1968	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x0);
1969	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x0);
1970
1971	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE restore\n", path);
1972}
1973
1974static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
1975			    enum rtw89_rf_path path, bool is_pause)
1976{
1977	rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
1978			       B_P0_TSSI_TRK_EN, is_pause);
1979
1980	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
1981		    is_pause ? "pause" : "resume");
1982}
1983
1984static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, u8 path, bool ctrl_by_kip)
1985{
1986	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_IQK_RFC_ON, ctrl_by_kip);
1987	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RFC is controlled by %s\n",
1988		    ctrl_by_kip ? "KIP" : "BB");
1989}
1990
1991static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, u8 path, bool force)
1992{
1993	rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_ON, force);
1994	rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + (path << 13), B_TXPWRB_RDY, force);
1995
1996	rtw89_debug(rtwdev, RTW89_DBG_RFK,  "[DPK] S%d txpwr_bb_force %s\n",
1997		    path, force ? "on" : "off");
1998}
1999
2000static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2001			     enum rtw89_rf_path path)
2002{
2003	_dpk_one_shot(rtwdev, phy, path, D_KIP_RESTORE);
2004	_dpk_kip_control_rfc(rtwdev, path, false);
2005	_dpk_txpwr_bb_force(rtwdev, path, false);
2006	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
2007}
2008
2009static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
2010			   enum rtw89_phy_idx phy,
2011			   enum rtw89_rf_path path)
2012{
2013#define RX_TONE_IDX 0x00250025 /* Q.2 9.25MHz */
2014	u8 cur_rxbb;
2015	u32 rf_11, reg_81cc;
2016
2017	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
2018	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
2019
2020	_dpk_kip_control_rfc(rtwdev, path, false);
2021
2022	cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
2023	rf_11 = rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK);
2024	reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8),
2025					 B_KIP_IQP_SW);
2026
2027	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
2028	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x3);
2029	rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0xd);
2030	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, 0x1f);
2031
2032	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x12);
2033	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, 0x3);
2034
2035	_dpk_kip_control_rfc(rtwdev, path, true);
2036
2037	rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, RX_TONE_IDX);
2038
2039	_dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
2040
2041	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
2042		    rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD));
2043
2044	_dpk_kip_control_rfc(rtwdev, path, false);
2045
2046	rtw89_write_rf(rtwdev, path, RR_TXIG, RFREG_MASK, rf_11);
2047	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, cur_rxbb);
2048	rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, reg_81cc);
2049
2050	rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
2051	rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
2052	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
2053
2054	_dpk_kip_control_rfc(rtwdev, path, true);
2055}
2056
2057static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
2058			    enum rtw89_rf_path path, u8 kidx)
2059{
2060	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2061
2062	if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
2063		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
2064			       0x50121 | BIT(rtwdev->dbcc_en));
2065		rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
2066		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x2);
2067		rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4);
2068		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
2069		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
2070
2071		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2072			    "[DPK] RF 0x0/0x83/0x9e/0x1a/0xdf/0x1001a = 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x\n",
2073			    rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
2074			    rtw89_read_rf(rtwdev, path, RR_RXBB, RFREG_MASK),
2075			    rtw89_read_rf(rtwdev, path, RR_TIA, RFREG_MASK),
2076			    rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK),
2077			    rtw89_read_rf(rtwdev, path, RR_LUTDBG, RFREG_MASK),
2078			    rtw89_read_rf(rtwdev, path, 0x1001a, RFREG_MASK));
2079	} else {
2080		rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
2081			       0x50101 | BIT(rtwdev->dbcc_en));
2082		rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
2083
2084		if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161)
2085			rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8);
2086
2087		rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
2088		rtw89_write_rf(rtwdev, path, RR_TXAC, RR_TXAC_IQG, 0x8);
2089
2090		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0);
2091		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3);
2092		rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
2093		rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
2094
2095		if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160)
2096			rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_EBW, 0x0);
2097	}
2098}
2099
2100static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2101{
2102	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2103
2104	if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160) {
2105		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x3);
2106		rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0x0180ff30);
2107	} else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) {
2108		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0);
2109		rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xffe0fa00);
2110	} else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) {
2111		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
2112		rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xff4009e0);
2113	} else {
2114		rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
2115		rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xf9f007d0);
2116	}
2117	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
2118		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160 ? "160M" :
2119		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
2120		    dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
2121}
2122
2123static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2124{
2125#define DPK_SYNC_TH_DC_I 200
2126#define DPK_SYNC_TH_DC_Q 200
2127#define DPK_SYNC_TH_CORR 170
2128	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2129	u16 dc_i, dc_q;
2130	u8 corr_val, corr_idx, rxbb;
2131	u8 rxbb_ov;
2132
2133	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
2134
2135	corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
2136	corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);
2137
2138	dpk->corr_idx[path][kidx] = corr_idx;
2139	dpk->corr_val[path][kidx] = corr_val;
2140
2141	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
2142
2143	dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
2144	dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
2145
2146	dc_i = abs(sign_extend32(dc_i, 11));
2147	dc_q = abs(sign_extend32(dc_q, 11));
2148
2149	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2150		    "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n",
2151		    path, corr_idx, corr_val, dc_i, dc_q);
2152
2153	dpk->dc_i[path][kidx] = dc_i;
2154	dpk->dc_q[path][kidx] = dc_q;
2155
2156	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x8);
2157	rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB);
2158
2159	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x31);
2160	rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV);
2161
2162	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2163		    "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n",
2164		    path, rxbb,
2165		    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE),
2166		    rxbb_ov);
2167
2168	if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
2169	    corr_val < DPK_SYNC_TH_CORR)
2170		return true;
2171	else
2172		return false;
2173}
2174
2175static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
2176{
2177	u16 dgain = 0x0;
2178
2179	rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
2180
2181	dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
2182
2183	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain, dgain);
2184
2185	return dgain;
2186}
2187
2188static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
2189{
2190	u8 result;
2191
2192	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
2193	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
2194
2195	result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
2196
2197	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp GL = %d\n", result);
2198
2199	return result;
2200}
2201
2202static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
2203{
2204	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2205
2206	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0x10);
2207	dpk->cur_k_set =
2208		rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 0xE0000000) - 1;
2209}
2210
2211static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2212			       enum rtw89_rf_path path, u8 dbm, bool set_from_bb)
2213{
2214	if (set_from_bb) {
2215		dbm = clamp_t(u8, dbm, 7, 24);
2216		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set S%d txagc to %ddBm\n", path, dbm);
2217		rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_VAL, dbm << 2);
2218	}
2219	_dpk_one_shot(rtwdev, phy, path, D_TXAGC);
2220	_dpk_kset_query(rtwdev, path);
2221}
2222
2223static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2224			enum rtw89_rf_path path, u8 kidx)
2225{
2226	_dpk_one_shot(rtwdev, phy, path, D_GAIN_LOSS);
2227	_dpk_kip_set_txagc(rtwdev, phy, path, 0xff, false);
2228
2229	rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, 0x0);
2230	rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, 0x0);
2231
2232	return _dpk_gainloss_read(rtwdev);
2233}
2234
2235static enum dpk_pas_result _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
2236{
2237	u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
2238	u32 val1_sqrt_sum, val2_sqrt_sum;
2239	u8 i;
2240
2241	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
2242	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
2243	rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);
2244
2245	if (is_check) {
2246		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
2247		val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
2248		val1_i = abs(sign_extend32(val1_i, 11));
2249		val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
2250		val1_q = abs(sign_extend32(val1_q, 11));
2251
2252		rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
2253		val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
2254		val2_i = abs(sign_extend32(val2_i, 11));
2255		val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
2256		val2_q = abs(sign_extend32(val2_q, 11));
2257
2258		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
2259			    phy_div(val1_i * val1_i + val1_q * val1_q,
2260				    val2_i * val2_i + val2_q * val2_q));
2261	} else {
2262		for (i = 0; i < 32; i++) {
2263			rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
2264			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
2265				    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
2266		}
2267	}
2268
2269	val1_sqrt_sum = val1_i * val1_i + val1_q * val1_q;
2270	val2_sqrt_sum = val2_i * val2_i + val2_q * val2_q;
2271
2272	if (val1_sqrt_sum < val2_sqrt_sum)
2273		return DPK_PAS_LT;
2274	else if (val1_sqrt_sum >= val2_sqrt_sum * 8 / 5)
2275		return DPK_PAS_GT;
2276	else
2277		return DPK_PAS_NOR;
2278}
2279
2280static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2281			       enum rtw89_rf_path path, u8 kidx)
2282{
2283	_dpk_kip_control_rfc(rtwdev, path, false);
2284	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
2285			       rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
2286	_dpk_kip_control_rfc(rtwdev, path, true);
2287
2288	_dpk_one_shot(rtwdev, phy, path, D_RXAGC);
2289
2290	return _dpk_sync_check(rtwdev, path, kidx);
2291}
2292
2293static void _dpk_read_rxsram(struct rtw89_dev *rtwdev)
2294{
2295	u32 addr;
2296
2297	rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_pre_defs_tbl);
2298
2299	for (addr = 0; addr < 0x200; addr++) {
2300		rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 | addr);
2301
2302		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RXSRAM[%03d] = 0x%07x\n", addr,
2303			    rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
2304	}
2305
2306	rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_post_defs_tbl);
2307}
2308
2309static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
2310{
2311	rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1);
2312	rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000002);
2313
2314	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC\n");
2315}
2316
2317static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2318		   enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only)
2319{
2320	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2321	u8 step = DPK_AGC_STEP_SYNC_DGAIN;
2322	u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0;
2323	u8 tmp_rxbb;
2324	u8 goout = 0, agc_cnt = 0;
2325	enum dpk_pas_result pas;
2326	u16 dgain = 0;
2327	bool is_fail = false;
2328	int limit = 200;
2329
2330	do {
2331		switch (step) {
2332		case DPK_AGC_STEP_SYNC_DGAIN:
2333			is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx);
2334
2335			if (RTW8852C_DPK_RXSRAM_DBG)
2336				_dpk_read_rxsram(rtwdev);
2337
2338			if (is_fail) {
2339				goout = 1;
2340				break;
2341			}
2342
2343			dgain = _dpk_dgain_read(rtwdev);
2344
2345			if (dgain > 0x5fc || dgain < 0x556) {
2346				_dpk_one_shot(rtwdev, phy, path, D_SYNC);
2347				dgain = _dpk_dgain_read(rtwdev);
2348			}
2349
2350			if (agc_cnt == 0) {
2351				if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
2352					_dpk_bypass_rxiqc(rtwdev, path);
2353				else
2354					_dpk_lbk_rxiqk(rtwdev, phy, path);
2355			}
2356			step = DPK_AGC_STEP_GAIN_LOSS_IDX;
2357			break;
2358
2359		case DPK_AGC_STEP_GAIN_LOSS_IDX:
2360			tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx);
2361			pas = _dpk_pas_read(rtwdev, true);
2362
2363			if (pas == DPK_PAS_LT && tmp_gl_idx > 0)
2364				step = DPK_AGC_STEP_GL_LT_CRITERION;
2365			else if (pas == DPK_PAS_GT && tmp_gl_idx == 0)
2366				step = DPK_AGC_STEP_GL_GT_CRITERION;
2367			else if (tmp_gl_idx >= 7)
2368				step = DPK_AGC_STEP_GL_GT_CRITERION;
2369			else if (tmp_gl_idx == 0)
2370				step = DPK_AGC_STEP_GL_LT_CRITERION;
2371			else
2372				step = DPK_AGC_STEP_SET_TX_GAIN;
2373			break;
2374
2375		case DPK_AGC_STEP_GL_GT_CRITERION:
2376			if (tmp_dbm <= 7) {
2377				goout = 1;
2378				rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@lower bound!!\n");
2379			} else {
2380				tmp_dbm = max_t(u8, tmp_dbm - 3, 7);
2381				_dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
2382			}
2383			step = DPK_AGC_STEP_SYNC_DGAIN;
2384			agc_cnt++;
2385			break;
2386
2387		case DPK_AGC_STEP_GL_LT_CRITERION:
2388			if (tmp_dbm >= 24) {
2389				goout = 1;
2390				rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@upper bound!!\n");
2391			} else {
2392				tmp_dbm = min_t(u8, tmp_dbm + 2, 24);
2393				_dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true);
2394			}
2395			step = DPK_AGC_STEP_SYNC_DGAIN;
2396			agc_cnt++;
2397			break;
2398
2399		case DPK_AGC_STEP_SET_TX_GAIN:
2400			_dpk_kip_control_rfc(rtwdev, path, false);
2401			tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
2402			if (tmp_rxbb + tmp_gl_idx > 0x1f)
2403				tmp_rxbb = 0x1f;
2404			else
2405				tmp_rxbb = tmp_rxbb + tmp_gl_idx;
2406
2407			rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb);
2408			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust RXBB (%+d) = 0x%x\n",
2409				    tmp_gl_idx, tmp_rxbb);
2410			_dpk_kip_control_rfc(rtwdev, path, true);
2411			goout = 1;
2412			break;
2413		default:
2414			goout = 1;
2415			break;
2416		}
2417	} while (!goout && agc_cnt < 6 && --limit > 0);
2418
2419	if (limit <= 0)
2420		rtw89_warn(rtwdev, "[DPK] exceed loop limit\n");
2421
2422	return is_fail;
2423}
2424
2425static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
2426{
2427	static const struct rtw89_rfk_tbl *order_tbls[] = {
2428		&rtw8852c_dpk_mdpd_order0_defs_tbl,
2429		&rtw8852c_dpk_mdpd_order1_defs_tbl,
2430		&rtw8852c_dpk_mdpd_order2_defs_tbl,
2431		&rtw8852c_dpk_mdpd_order3_defs_tbl,
2432	};
2433
2434	if (order >= ARRAY_SIZE(order_tbls)) {
2435		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Wrong MDPD order!!(0x%x)\n", order);
2436		return;
2437	}
2438
2439	rtw89_rfk_parser(rtwdev, order_tbls[order]);
2440
2441	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n",
2442		    order == 0x0 ? "(5,3,1)" :
2443		    order == 0x1 ? "(5,3,0)" :
2444		    order == 0x2 ? "(5,0,0)" : "(7,3,1)");
2445}
2446
2447static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2448			 enum rtw89_rf_path path, u8 kidx)
2449{
2450	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2451	u8 cnt;
2452	u8 ov_flag;
2453	u32 dpk_sync;
2454
2455	rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, 0x1);
2456
2457	if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T2) == 0x1)
2458		_dpk_set_mdpd_para(rtwdev, 0x2);
2459	else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T1) == 0x1)
2460		_dpk_set_mdpd_para(rtwdev, 0x1);
2461	else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T0) == 0x1)
2462		_dpk_set_mdpd_para(rtwdev, 0x0);
2463	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_5 ||
2464		 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_10 ||
2465		 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_20)
2466		_dpk_set_mdpd_para(rtwdev, 0x2);
2467	else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ||
2468		 dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
2469		_dpk_set_mdpd_para(rtwdev, 0x1);
2470	else
2471		_dpk_set_mdpd_para(rtwdev, 0x0);
2472
2473	rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, 0x0);
2474	fsleep(1000);
2475
2476	_dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
2477	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
2478	dpk_sync = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
2479	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] dpk_sync = 0x%x\n", dpk_sync);
2480
2481	rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
2482	ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
2483	for (cnt = 0; cnt < 5 && ov_flag == 0x1; cnt++) {
2484		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ReK due to MDPK ov!!!\n");
2485		_dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
2486		rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf);
2487		ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
2488	}
2489
2490	if (ov_flag) {
2491		_dpk_set_mdpd_para(rtwdev, 0x2);
2492		_dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL);
2493	}
2494}
2495
2496static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2497			      enum rtw89_rf_path path)
2498{
2499	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2500	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2501	bool is_reload = false;
2502	u8 idx, cur_band, cur_ch;
2503
2504	cur_band = chan->band_type;
2505	cur_ch = chan->channel;
2506
2507	for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
2508		if (cur_band != dpk->bp[path][idx].band ||
2509		    cur_ch != dpk->bp[path][idx].ch)
2510			continue;
2511
2512		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
2513				       B_COEF_SEL_MDPD, idx);
2514		dpk->cur_idx[path] = idx;
2515		is_reload = true;
2516		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2517			    "[DPK] reload S%d[%d] success\n", path, idx);
2518	}
2519
2520	return is_reload;
2521}
2522
2523static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on)
2524{
2525	rtw89_rfk_parser(rtwdev, turn_on ? &rtw8852c_dpk_kip_pwr_clk_on_defs_tbl :
2526					   &rtw8852c_dpk_kip_pwr_clk_off_defs_tbl);
2527}
2528
2529static void _dpk_kip_preset_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2530				  enum rtw89_rf_path path, u8 kidx)
2531{
2532	rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
2533			       rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
2534
2535	if (rtwdev->hal.cv == CHIP_CAV)
2536		rtw89_phy_write32_mask(rtwdev,
2537				       R_DPD_CH0A + (path << 8) + (kidx << 2),
2538				       B_DPD_SEL, 0x01);
2539	else
2540		rtw89_phy_write32_mask(rtwdev,
2541				       R_DPD_CH0A + (path << 8) + (kidx << 2),
2542				       B_DPD_SEL, 0x0c);
2543
2544	_dpk_kip_control_rfc(rtwdev, path, true);
2545	rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);
2546
2547	_dpk_one_shot(rtwdev, phy, path, D_KIP_PRESET);
2548}
2549
2550static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
2551{
2552#define _DPK_PARA_TXAGC GENMASK(15, 10)
2553#define _DPK_PARA_THER GENMASK(31, 26)
2554	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2555	u32 para;
2556
2557	para = rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
2558				     MASKDWORD);
2559
2560	dpk->bp[path][kidx].txagc_dpk = FIELD_GET(_DPK_PARA_TXAGC, para);
2561	dpk->bp[path][kidx].ther_dpk = FIELD_GET(_DPK_PARA_THER, para);
2562
2563	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n",
2564		    dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, dpk->bp[path][kidx].txagc_dpk);
2565}
2566
2567static void _dpk_gain_normalize_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2568				      enum rtw89_rf_path path, u8 kidx, bool is_execute)
2569{
2570	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2571
2572	if (is_execute) {
2573		rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_AG, 0x200);
2574		rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_EN, 0x3);
2575
2576		_dpk_one_shot(rtwdev, phy, path, D_GAIN_NORM);
2577	} else {
2578		rtw89_phy_write32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
2579				       0x0000007F, 0x5b);
2580	}
2581	dpk->bp[path][kidx].gs =
2582		rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8),
2583				      0x0000007F);
2584}
2585
2586static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
2587{
2588	u32 val32 = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
2589	u8 val;
2590
2591	switch (val32) {
2592	case 0:
2593		val = 0x6;
2594		break;
2595	case 1:
2596		val = 0x2;
2597		break;
2598	case 2:
2599		val = 0x0;
2600		break;
2601	case 3:
2602		val = 0x7;
2603		break;
2604	default:
2605		val = 0xff;
2606		break;
2607	}
2608
2609	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);
2610
2611	return val;
2612}
2613
2614static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2615		    enum rtw89_rf_path path, u8 kidx)
2616{
2617	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2618
2619	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
2620	rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);
2621	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
2622			       B_DPD_ORDER, _dpk_order_convert(rtwdev));
2623
2624	dpk->bp[path][kidx].mdpd_en = BIT(dpk->cur_k_set);
2625	dpk->bp[path][kidx].path_ok = true;
2626
2627	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] path_ok = 0x%x\n",
2628		    path, kidx, dpk->bp[path][kidx].mdpd_en);
2629
2630	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
2631			       B_DPD_MEN, dpk->bp[path][kidx].mdpd_en);
2632
2633	_dpk_gain_normalize_8852c(rtwdev, phy, path, kidx, false);
2634}
2635
2636static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2637		      enum rtw89_rf_path path, u8 gain)
2638{
2639	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2640	u8 kidx = dpk->cur_idx[path];
2641	u8 init_xdbm = 15;
2642	bool is_fail;
2643
2644	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2645		    "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
2646	_dpk_kip_control_rfc(rtwdev, path, false);
2647	_rf_direct_cntrl(rtwdev, path, false);
2648	rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd);
2649	_dpk_rf_setting(rtwdev, gain, path, kidx);
2650	_set_rx_dck(rtwdev, phy, path, false);
2651	_dpk_kip_pwr_clk_onoff(rtwdev, true);
2652	_dpk_kip_preset_8852c(rtwdev, phy, path, kidx);
2653	_dpk_txpwr_bb_force(rtwdev, path, true);
2654	_dpk_kip_set_txagc(rtwdev, phy, path, init_xdbm, true);
2655	_dpk_tpg_sel(rtwdev, path, kidx);
2656
2657	is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, false);
2658	if (is_fail)
2659		goto _error;
2660
2661	_dpk_idl_mpa(rtwdev, phy, path, kidx);
2662	_dpk_para_query(rtwdev, path, kidx);
2663	_dpk_on(rtwdev, phy, path, kidx);
2664
2665_error:
2666	_dpk_kip_control_rfc(rtwdev, path, false);
2667	rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX);
2668	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d]_K%d %s\n", path, kidx,
2669		    dpk->cur_k_set, is_fail ? "need Check" : "is Success");
2670
2671	return is_fail;
2672}
2673
2674static void _dpk_init(struct rtw89_dev *rtwdev, u8 path)
2675{
2676	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2677	u8 kidx = dpk->cur_idx[path];
2678
2679	dpk->bp[path][kidx].path_ok = false;
2680}
2681
2682static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_bybb)
2683{
2684	if (is_bybb)
2685		rtw89_write_rf(rtwdev,  path, RR_BBDC, RR_BBDC_SEL, 0x1);
2686	else
2687		rtw89_write_rf(rtwdev,  path, RR_BBDC, RR_BBDC_SEL, 0x0);
2688}
2689
2690static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
2691			    enum rtw89_phy_idx phy, u8 kpath)
2692{
2693	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2694	static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0c4, 0xc0e8, 0xc0d4, 0xc0d8};
2695	u32 backup_rf_val[RTW8852C_DPK_RF_PATH][BACKUP_RF_REGS_NR];
2696	u32 kip_bkup[RTW8852C_DPK_RF_PATH][RTW8852C_DPK_KIP_REG_NUM] = {};
2697	u8 path;
2698	bool is_fail = true, reloaded[RTW8852C_DPK_RF_PATH] = {false};
2699
2700	static_assert(ARRAY_SIZE(kip_reg) == RTW8852C_DPK_KIP_REG_NUM);
2701
2702	if (dpk->is_dpk_reload_en) {
2703		for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
2704			if (!(kpath & BIT(path)))
2705				continue;
2706
2707			reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
2708			if (!reloaded[path] && dpk->bp[path][0].ch != 0)
2709				dpk->cur_idx[path] = !dpk->cur_idx[path];
2710			else
2711				_dpk_onoff(rtwdev, path, false);
2712		}
2713	} else {
2714		for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
2715			dpk->cur_idx[path] = 0;
2716	}
2717
2718	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
2719		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2720			    "[DPK] ========= S%d[%d] DPK Init =========\n",
2721			    path, dpk->cur_idx[path]);
2722		_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
2723		_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
2724		_dpk_information(rtwdev, phy, path);
2725		_dpk_init(rtwdev, path);
2726		if (rtwdev->is_tssi_mode[path])
2727			_dpk_tssi_pause(rtwdev, path, true);
2728	}
2729
2730	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
2731		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2732			    "[DPK] ========= S%d[%d] DPK Start =========\n",
2733			    path, dpk->cur_idx[path]);
2734		rtw8852c_disable_rxagc(rtwdev, path, 0x0);
2735		_dpk_drf_direct_cntrl(rtwdev, path, false);
2736		_dpk_bb_afe_setting(rtwdev, phy, path, kpath);
2737		is_fail = _dpk_main(rtwdev, phy, path, 1);
2738		_dpk_onoff(rtwdev, path, is_fail);
2739	}
2740
2741	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
2742		rtw89_debug(rtwdev, RTW89_DBG_RFK,
2743			    "[DPK] ========= S%d[%d] DPK Restore =========\n",
2744			    path, dpk->cur_idx[path]);
2745		_dpk_kip_restore(rtwdev, phy, path);
2746		_dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
2747		_rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path);
2748		_dpk_bb_afe_restore(rtwdev, path);
2749		rtw8852c_disable_rxagc(rtwdev, path, 0x1);
2750		if (rtwdev->is_tssi_mode[path])
2751			_dpk_tssi_pause(rtwdev, path, false);
2752	}
2753
2754	_dpk_kip_pwr_clk_onoff(rtwdev, false);
2755}
2756
2757static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2758{
2759	struct rtw89_fem_info *fem = &rtwdev->fem;
2760	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2761	u8 band = chan->band_type;
2762
2763	if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) {
2764		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to CAV & not 2G!!\n");
2765		return true;
2766	} else if (fem->epa_2g && band == RTW89_BAND_2G) {
2767		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
2768		return true;
2769	} else if (fem->epa_5g && band == RTW89_BAND_5G) {
2770		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
2771		return true;
2772	} else if (fem->epa_6g && band == RTW89_BAND_6G) {
2773		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
2774		return true;
2775	}
2776
2777	return false;
2778}
2779
2780static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
2781{
2782	u8 path, kpath;
2783
2784	kpath = _kpath(rtwdev, phy);
2785
2786	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
2787		if (kpath & BIT(path))
2788			_dpk_onoff(rtwdev, path, true);
2789	}
2790}
2791
2792static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
2793{
2794	rtw89_debug(rtwdev, RTW89_DBG_RFK,
2795		    "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
2796		    RTW8852C_DPK_VER, rtwdev->hal.cv,
2797		    RTW8852C_RF_REL_VERSION);
2798
2799	if (_dpk_bypass_check(rtwdev, phy))
2800		_dpk_force_bypass(rtwdev, phy);
2801	else
2802		_dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
2803
2804	if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1)
2805		rtw8852c_rx_dck(rtwdev, phy, false);
2806}
2807
2808static void _dpk_onoff(struct rtw89_dev *rtwdev,
2809		       enum rtw89_rf_path path, bool off)
2810{
2811	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2812	u8 val, kidx = dpk->cur_idx[path];
2813
2814	val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok ?
2815	      dpk->bp[path][kidx].mdpd_en : 0;
2816
2817	rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
2818			       B_DPD_MEN, val);
2819
2820	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
2821		    kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
2822}
2823
2824static void _dpk_track(struct rtw89_dev *rtwdev)
2825{
2826	struct rtw89_dpk_info *dpk = &rtwdev->dpk;
2827	u8 path, kidx;
2828	u8 txagc_rf = 0;
2829	s8 txagc_bb = 0, txagc_bb_tp = 0, txagc_ofst = 0;
2830	u8 cur_ther;
2831	s8 delta_ther = 0;
2832	s16 pwsf_tssi_ofst;
2833
2834	for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
2835		kidx = dpk->cur_idx[path];
2836		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2837			    "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
2838			    path, kidx, dpk->bp[path][kidx].ch);
2839
2840		txagc_rf =
2841			rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 0x0000003f);
2842		txagc_bb =
2843			rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), MASKBYTE2);
2844		txagc_bb_tp =
2845			rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), B_TXAGC_BTP);
2846
2847		/* report from KIP */
2848		rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xf);
2849		cur_ther =
2850			rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TH);
2851		txagc_ofst =
2852			rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_OF);
2853		pwsf_tssi_ofst =
2854			rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TSSI);
2855		pwsf_tssi_ofst = sign_extend32(pwsf_tssi_ofst, 12);
2856
2857		cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
2858
2859		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2860			    "[DPK_TRK] thermal now = %d\n", cur_ther);
2861
2862		if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
2863			delta_ther = dpk->bp[path][kidx].ther_dpk - cur_ther;
2864
2865		delta_ther = delta_ther * 1 / 2;
2866
2867		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2868			    "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n",
2869			    delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk);
2870		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2871			    "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n",
2872			    txagc_rf - dpk->bp[path][kidx].txagc_dpk, txagc_rf,
2873			    dpk->bp[path][kidx].txagc_dpk);
2874		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2875			    "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n",
2876			    txagc_ofst, pwsf_tssi_ofst);
2877		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2878			    "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
2879			    txagc_bb_tp, txagc_bb);
2880
2881		if (rtw89_phy_read32_mask(rtwdev, R_DPK_WR, B_DPK_WR_ST) == 0x0 &&
2882		    txagc_rf != 0 && rtwdev->hal.cv == CHIP_CAV) {
2883			rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
2884				    "[DPK_TRK] New pwsf = 0x%x\n", 0x78 - delta_ther);
2885
2886			rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
2887					       0x07FC0000, 0x78 - delta_ther);
2888		}
2889	}
2890}
2891
2892static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2893			  enum rtw89_rf_path path)
2894{
2895	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2896	enum rtw89_band band = chan->band_type;
2897
2898	rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl);
2899
2900	if (path == RF_PATH_A)
2901		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2902					 &rtw8852c_tssi_sys_defs_2g_a_tbl,
2903					 &rtw8852c_tssi_sys_defs_5g_a_tbl);
2904	else
2905		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2906					 &rtw8852c_tssi_sys_defs_2g_b_tbl,
2907					 &rtw8852c_tssi_sys_defs_5g_b_tbl);
2908}
2909
2910static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2911				    enum rtw89_rf_path path)
2912{
2913	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2914				 &rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl,
2915				 &rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl);
2916}
2917
2918static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
2919					  enum rtw89_phy_idx phy,
2920					  enum rtw89_rf_path path)
2921{
2922	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2923				 &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
2924				 &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
2925}
2926
2927static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2928			  enum rtw89_rf_path path)
2929{
2930	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2931	enum rtw89_band band = chan->band_type;
2932
2933	if (path == RF_PATH_A) {
2934		rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_a_tbl);
2935		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2936					 &rtw8852c_tssi_dck_defs_2g_a_tbl,
2937					 &rtw8852c_tssi_dck_defs_5g_a_tbl);
2938	} else {
2939		rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_b_tbl);
2940		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
2941					 &rtw8852c_tssi_dck_defs_2g_b_tbl,
2942					 &rtw8852c_tssi_dck_defs_5g_b_tbl);
2943	}
2944}
2945
2946static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2947				   enum rtw89_rf_path path)
2948{
2949	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
2950				 &rtw8852c_tssi_set_bbgain_split_a_tbl,
2951				 &rtw8852c_tssi_set_bbgain_split_b_tbl);
2952}
2953
2954static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
2955				 enum rtw89_rf_path path)
2956{
2957#define RTW8852C_TSSI_GET_VAL(ptr, idx)			\
2958({							\
2959	s8 *__ptr = (ptr);				\
2960	u8 __idx = (idx), __i, __v;			\
2961	u32 __val = 0;					\
2962	for (__i = 0; __i < 4; __i++) {			\
2963		__v = (__ptr[__idx + __i]);		\
2964		__val |= (__v << (8 * __i));		\
2965	}						\
2966	__val;						\
2967})
2968	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
2969	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2970	u8 ch = chan->channel;
2971	u8 subband = chan->subband_type;
2972	const s8 *thm_up_a = NULL;
2973	const s8 *thm_down_a = NULL;
2974	const s8 *thm_up_b = NULL;
2975	const s8 *thm_down_b = NULL;
2976	u8 thermal = 0xff;
2977	s8 thm_ofst[64] = {0};
2978	u32 tmp = 0;
2979	u8 i, j;
2980
2981	switch (subband) {
2982	default:
2983	case RTW89_CH_2G:
2984		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
2985		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
2986		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
2987		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
2988		break;
2989	case RTW89_CH_5G_BAND_1:
2990		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
2991		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
2992		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
2993		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
2994		break;
2995	case RTW89_CH_5G_BAND_3:
2996		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
2997		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
2998		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
2999		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
3000		break;
3001	case RTW89_CH_5G_BAND_4:
3002		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
3003		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
3004		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
3005		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
3006		break;
3007	case RTW89_CH_6G_BAND_IDX0:
3008	case RTW89_CH_6G_BAND_IDX1:
3009		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
3010		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
3011		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
3012		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
3013		break;
3014	case RTW89_CH_6G_BAND_IDX2:
3015	case RTW89_CH_6G_BAND_IDX3:
3016		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
3017		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
3018		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
3019		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
3020		break;
3021	case RTW89_CH_6G_BAND_IDX4:
3022	case RTW89_CH_6G_BAND_IDX5:
3023		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
3024		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
3025		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
3026		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
3027		break;
3028	case RTW89_CH_6G_BAND_IDX6:
3029	case RTW89_CH_6G_BAND_IDX7:
3030		thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
3031		thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
3032		thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
3033		thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
3034		break;
3035	}
3036
3037	if (path == RF_PATH_A) {
3038		thermal = tssi_info->thermal[RF_PATH_A];
3039
3040		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3041			    "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);
3042
3043		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
3044		rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);
3045
3046		if (thermal == 0xff) {
3047			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
3048			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);
3049
3050			for (i = 0; i < 64; i += 4) {
3051				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
3052
3053				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3054					    "[TSSI] write 0x%x val=0x%08x\n",
3055					    0x5c00 + i, 0x0);
3056			}
3057
3058		} else {
3059			rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
3060			rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
3061					       thermal);
3062
3063			i = 0;
3064			for (j = 0; j < 32; j++)
3065				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
3066					      -thm_down_a[i++] :
3067					      -thm_down_a[DELTA_SWINGIDX_SIZE - 1];
3068
3069			i = 1;
3070			for (j = 63; j >= 32; j--)
3071				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
3072					      thm_up_a[i++] :
3073					      thm_up_a[DELTA_SWINGIDX_SIZE - 1];
3074
3075			for (i = 0; i < 64; i += 4) {
3076				tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
3077				rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);
3078
3079				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3080					    "[TSSI] write 0x%x val=0x%08x\n",
3081					    0x5c00 + i, tmp);
3082			}
3083		}
3084		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
3085		rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
3086
3087	} else {
3088		thermal = tssi_info->thermal[RF_PATH_B];
3089
3090		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3091			    "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);
3092
3093		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
3094		rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);
3095
3096		if (thermal == 0xff) {
3097			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
3098			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);
3099
3100			for (i = 0; i < 64; i += 4) {
3101				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
3102
3103				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3104					    "[TSSI] write 0x%x val=0x%08x\n",
3105					    0x7c00 + i, 0x0);
3106			}
3107
3108		} else {
3109			rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
3110			rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
3111					       thermal);
3112
3113			i = 0;
3114			for (j = 0; j < 32; j++)
3115				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
3116					      -thm_down_b[i++] :
3117					      -thm_down_b[DELTA_SWINGIDX_SIZE - 1];
3118
3119			i = 1;
3120			for (j = 63; j >= 32; j--)
3121				thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
3122					      thm_up_b[i++] :
3123					      thm_up_b[DELTA_SWINGIDX_SIZE - 1];
3124
3125			for (i = 0; i < 64; i += 4) {
3126				tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i);
3127				rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);
3128
3129				rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3130					    "[TSSI] write 0x%x val=0x%08x\n",
3131					    0x7c00 + i, tmp);
3132			}
3133		}
3134		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
3135		rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
3136	}
3137#undef RTW8852C_TSSI_GET_VAL
3138}
3139
3140static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3141				enum rtw89_rf_path path)
3142{
3143	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3144	enum rtw89_band band = chan->band_type;
3145
3146	if (path == RF_PATH_A) {
3147		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
3148					 &rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl,
3149					 &rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl);
3150	} else {
3151		rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
3152					 &rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl,
3153					 &rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl);
3154	}
3155}
3156
3157static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3158				    enum rtw89_rf_path path)
3159{
3160	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3161	enum rtw89_band band = chan->band_type;
3162	const struct rtw89_rfk_tbl *tbl;
3163
3164	if (path == RF_PATH_A) {
3165		if (band == RTW89_BAND_2G)
3166			tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl;
3167		else if (band == RTW89_BAND_6G)
3168			tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl;
3169		else
3170			tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl;
3171	} else {
3172		if (band == RTW89_BAND_2G)
3173			tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl;
3174		else if (band == RTW89_BAND_6G)
3175			tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl;
3176		else
3177			tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl;
3178	}
3179
3180	rtw89_rfk_parser(rtwdev, tbl);
3181}
3182
3183static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3184			    enum rtw89_rf_path path)
3185{
3186	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3187				 &rtw8852c_tssi_slope_defs_a_tbl,
3188				 &rtw8852c_tssi_slope_defs_b_tbl);
3189}
3190
3191static void _tssi_run_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3192			    enum rtw89_rf_path path)
3193{
3194	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3195				 &rtw8852c_tssi_run_slope_defs_a_tbl,
3196				 &rtw8852c_tssi_run_slope_defs_b_tbl);
3197}
3198
3199static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3200			    enum rtw89_rf_path path)
3201{
3202	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3203				 &rtw8852c_tssi_track_defs_a_tbl,
3204				 &rtw8852c_tssi_track_defs_b_tbl);
3205}
3206
3207static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
3208					  enum rtw89_phy_idx phy,
3209					  enum rtw89_rf_path path)
3210{
3211	rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
3212				 &rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl,
3213				 &rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl);
3214}
3215
3216static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3217{
3218	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3219	u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
3220
3221	if (rtwdev->dbcc_en) {
3222		if (phy == RTW89_PHY_0) {
3223			path = RF_PATH_A;
3224			path_max = RF_PATH_B;
3225		} else if (phy == RTW89_PHY_1) {
3226			path = RF_PATH_B;
3227			path_max = RF_PATH_NUM_8852C;
3228		}
3229	}
3230
3231	for (i = path; i < path_max; i++) {
3232		_tssi_set_track(rtwdev, phy, i);
3233		_tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
3234
3235		rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
3236					 &rtw8852c_tssi_enable_defs_a_tbl,
3237					 &rtw8852c_tssi_enable_defs_b_tbl);
3238
3239		tssi_info->base_thermal[i] =
3240			ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
3241		rtwdev->is_tssi_mode[i] = true;
3242	}
3243}
3244
3245static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
3246{
3247	u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
3248
3249	if (rtwdev->dbcc_en) {
3250		if (phy == RTW89_PHY_0) {
3251			path = RF_PATH_A;
3252			path_max = RF_PATH_B;
3253		} else if (phy == RTW89_PHY_1) {
3254			path = RF_PATH_B;
3255			path_max = RF_PATH_NUM_8852C;
3256		}
3257	}
3258
3259	for (i = path; i < path_max; i++) {
3260		if (i == RF_PATH_A) {
3261			rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_a_tbl);
3262			rtwdev->is_tssi_mode[RF_PATH_A] = false;
3263		}  else if (i == RF_PATH_B) {
3264			rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_b_tbl);
3265			rtwdev->is_tssi_mode[RF_PATH_B] = false;
3266		}
3267	}
3268}
3269
3270static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
3271{
3272	switch (ch) {
3273	case 1 ... 2:
3274		return 0;
3275	case 3 ... 5:
3276		return 1;
3277	case 6 ... 8:
3278		return 2;
3279	case 9 ... 11:
3280		return 3;
3281	case 12 ... 13:
3282		return 4;
3283	case 14:
3284		return 5;
3285	}
3286
3287	return 0;
3288}
3289
3290#define TSSI_EXTRA_GROUP_BIT (BIT(31))
3291#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
3292#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
3293#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
3294#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
3295
3296static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
3297{
3298	switch (ch) {
3299	case 1 ... 2:
3300		return 0;
3301	case 3 ... 5:
3302		return 1;
3303	case 6 ... 8:
3304		return 2;
3305	case 9 ... 11:
3306		return 3;
3307	case 12 ... 14:
3308		return 4;
3309	case 36 ... 40:
3310		return 5;
3311	case 41 ... 43:
3312		return TSSI_EXTRA_GROUP(5);
3313	case 44 ... 48:
3314		return 6;
3315	case 49 ... 51:
3316		return TSSI_EXTRA_GROUP(6);
3317	case 52 ... 56:
3318		return 7;
3319	case 57 ... 59:
3320		return TSSI_EXTRA_GROUP(7);
3321	case 60 ... 64:
3322		return 8;
3323	case 100 ... 104:
3324		return 9;
3325	case 105 ... 107:
3326		return TSSI_EXTRA_GROUP(9);
3327	case 108 ... 112:
3328		return 10;
3329	case 113 ... 115:
3330		return TSSI_EXTRA_GROUP(10);
3331	case 116 ... 120:
3332		return 11;
3333	case 121 ... 123:
3334		return TSSI_EXTRA_GROUP(11);
3335	case 124 ... 128:
3336		return 12;
3337	case 129 ... 131:
3338		return TSSI_EXTRA_GROUP(12);
3339	case 132 ... 136:
3340		return 13;
3341	case 137 ... 139:
3342		return TSSI_EXTRA_GROUP(13);
3343	case 140 ... 144:
3344		return 14;
3345	case 149 ... 153:
3346		return 15;
3347	case 154 ... 156:
3348		return TSSI_EXTRA_GROUP(15);
3349	case 157 ... 161:
3350		return 16;
3351	case 162 ... 164:
3352		return TSSI_EXTRA_GROUP(16);
3353	case 165 ... 169:
3354		return 17;
3355	case 170 ... 172:
3356		return TSSI_EXTRA_GROUP(17);
3357	case 173 ... 177:
3358		return 18;
3359	}
3360
3361	return 0;
3362}
3363
3364static u32 _tssi_get_6g_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
3365{
3366	switch (ch) {
3367	case 1 ... 5:
3368		return 0;
3369	case 6 ... 8:
3370		return TSSI_EXTRA_GROUP(0);
3371	case 9 ... 13:
3372		return 1;
3373	case 14 ... 16:
3374		return TSSI_EXTRA_GROUP(1);
3375	case 17 ... 21:
3376		return 2;
3377	case 22 ... 24:
3378		return TSSI_EXTRA_GROUP(2);
3379	case 25 ... 29:
3380		return 3;
3381	case 33 ... 37:
3382		return 4;
3383	case 38 ... 40:
3384		return TSSI_EXTRA_GROUP(4);
3385	case 41 ... 45:
3386		return 5;
3387	case 46 ... 48:
3388		return TSSI_EXTRA_GROUP(5);
3389	case 49 ... 53:
3390		return 6;
3391	case 54 ... 56:
3392		return TSSI_EXTRA_GROUP(6);
3393	case 57 ... 61:
3394		return 7;
3395	case 65 ... 69:
3396		return 8;
3397	case 70 ... 72:
3398		return TSSI_EXTRA_GROUP(8);
3399	case 73 ... 77:
3400		return 9;
3401	case 78 ... 80:
3402		return TSSI_EXTRA_GROUP(9);
3403	case 81 ... 85:
3404		return 10;
3405	case 86 ... 88:
3406		return TSSI_EXTRA_GROUP(10);
3407	case 89 ... 93:
3408		return 11;
3409	case 97 ... 101:
3410		return 12;
3411	case 102 ... 104:
3412		return TSSI_EXTRA_GROUP(12);
3413	case 105 ... 109:
3414		return 13;
3415	case 110 ... 112:
3416		return TSSI_EXTRA_GROUP(13);
3417	case 113 ... 117:
3418		return 14;
3419	case 118 ... 120:
3420		return TSSI_EXTRA_GROUP(14);
3421	case 121 ... 125:
3422		return 15;
3423	case 129 ... 133:
3424		return 16;
3425	case 134 ... 136:
3426		return TSSI_EXTRA_GROUP(16);
3427	case 137 ... 141:
3428		return 17;
3429	case 142 ... 144:
3430		return TSSI_EXTRA_GROUP(17);
3431	case 145 ... 149:
3432		return 18;
3433	case 150 ... 152:
3434		return TSSI_EXTRA_GROUP(18);
3435	case 153 ... 157:
3436		return 19;
3437	case 161 ... 165:
3438		return 20;
3439	case 166 ... 168:
3440		return TSSI_EXTRA_GROUP(20);
3441	case 169 ... 173:
3442		return 21;
3443	case 174 ... 176:
3444		return TSSI_EXTRA_GROUP(21);
3445	case 177 ... 181:
3446		return 22;
3447	case 182 ... 184:
3448		return TSSI_EXTRA_GROUP(22);
3449	case 185 ... 189:
3450		return 23;
3451	case 193 ... 197:
3452		return 24;
3453	case 198 ... 200:
3454		return TSSI_EXTRA_GROUP(24);
3455	case 201 ... 205:
3456		return 25;
3457	case 206 ... 208:
3458		return TSSI_EXTRA_GROUP(25);
3459	case 209 ... 213:
3460		return 26;
3461	case 214 ... 216:
3462		return TSSI_EXTRA_GROUP(26);
3463	case 217 ... 221:
3464		return 27;
3465	case 225 ... 229:
3466		return 28;
3467	case 230 ... 232:
3468		return TSSI_EXTRA_GROUP(28);
3469	case 233 ... 237:
3470		return 29;
3471	case 238 ... 240:
3472		return TSSI_EXTRA_GROUP(29);
3473	case 241 ... 245:
3474		return 30;
3475	case 246 ... 248:
3476		return TSSI_EXTRA_GROUP(30);
3477	case 249 ... 253:
3478		return 31;
3479	}
3480
3481	return 0;
3482}
3483
3484static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3485{
3486	switch (ch) {
3487	case 1 ... 8:
3488		return 0;
3489	case 9 ... 14:
3490		return 1;
3491	case 36 ... 48:
3492		return 2;
3493	case 49 ... 51:
3494		return TSSI_EXTRA_GROUP(2);
3495	case 52 ... 64:
3496		return 3;
3497	case 100 ... 112:
3498		return 4;
3499	case 113 ... 115:
3500		return TSSI_EXTRA_GROUP(4);
3501	case 116 ... 128:
3502		return 5;
3503	case 132 ... 144:
3504		return 6;
3505	case 149 ... 177:
3506		return 7;
3507	}
3508
3509	return 0;
3510}
3511
3512static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch)
3513{
3514	switch (ch) {
3515	case 1 ... 13:
3516		return 0;
3517	case 14 ... 16:
3518		return TSSI_EXTRA_GROUP(0);
3519	case 17 ... 29:
3520		return 1;
3521	case 33 ... 45:
3522		return 2;
3523	case 46 ... 48:
3524		return TSSI_EXTRA_GROUP(2);
3525	case 49 ... 61:
3526		return 3;
3527	case 65 ... 77:
3528		return 4;
3529	case 78 ... 80:
3530		return TSSI_EXTRA_GROUP(4);
3531	case 81 ... 93:
3532		return 5;
3533	case 97 ... 109:
3534		return 6;
3535	case 110 ... 112:
3536		return TSSI_EXTRA_GROUP(6);
3537	case 113 ... 125:
3538		return 7;
3539	case 129 ... 141:
3540		return 8;
3541	case 142 ... 144:
3542		return TSSI_EXTRA_GROUP(8);
3543	case 145 ... 157:
3544		return 9;
3545	case 161 ... 173:
3546		return 10;
3547	case 174 ... 176:
3548		return TSSI_EXTRA_GROUP(10);
3549	case 177 ... 189:
3550		return 11;
3551	case 193 ... 205:
3552		return 12;
3553	case 206 ... 208:
3554		return TSSI_EXTRA_GROUP(12);
3555	case 209 ... 221:
3556		return 13;
3557	case 225 ... 237:
3558		return 14;
3559	case 238 ... 240:
3560		return TSSI_EXTRA_GROUP(14);
3561	case 241 ... 253:
3562		return 15;
3563	}
3564
3565	return 0;
3566}
3567
3568static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3569			    enum rtw89_rf_path path)
3570{
3571	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3572	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3573	enum rtw89_band band = chan->band_type;
3574	u8 ch = chan->channel;
3575	u32 gidx, gidx_1st, gidx_2nd;
3576	s8 de_1st;
3577	s8 de_2nd;
3578	s8 val;
3579
3580	if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
3581		gidx = _tssi_get_ofdm_group(rtwdev, ch);
3582
3583		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3584			    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
3585			    path, gidx);
3586
3587		if (IS_TSSI_EXTRA_GROUP(gidx)) {
3588			gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
3589			gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
3590			de_1st = tssi_info->tssi_mcs[path][gidx_1st];
3591			de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
3592			val = (de_1st + de_2nd) / 2;
3593
3594			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3595				    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
3596				    path, val, de_1st, de_2nd);
3597		} else {
3598			val = tssi_info->tssi_mcs[path][gidx];
3599
3600			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3601				    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
3602		}
3603	} else {
3604		gidx = _tssi_get_6g_ofdm_group(rtwdev, ch);
3605
3606		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3607			    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
3608			    path, gidx);
3609
3610		if (IS_TSSI_EXTRA_GROUP(gidx)) {
3611			gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
3612			gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
3613			de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
3614			de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
3615			val = (de_1st + de_2nd) / 2;
3616
3617			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3618				    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
3619				    path, val, de_1st, de_2nd);
3620		} else {
3621			val = tssi_info->tssi_6g_mcs[path][gidx];
3622
3623			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3624				    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
3625		}
3626	}
3627
3628	return val;
3629}
3630
3631static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
3632				 enum rtw89_phy_idx phy,
3633				 enum rtw89_rf_path path)
3634{
3635	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3636	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3637	enum rtw89_band band = chan->band_type;
3638	u8 ch = chan->channel;
3639	u32 tgidx, tgidx_1st, tgidx_2nd;
3640	s8 tde_1st = 0;
3641	s8 tde_2nd = 0;
3642	s8 val;
3643
3644	if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) {
3645		tgidx = _tssi_get_trim_group(rtwdev, ch);
3646
3647		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3648			    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3649			    path, tgidx);
3650
3651		if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3652			tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3653			tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3654			tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
3655			tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
3656			val = (tde_1st + tde_2nd) / 2;
3657
3658			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3659				    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3660				    path, val, tde_1st, tde_2nd);
3661		} else {
3662			val = tssi_info->tssi_trim[path][tgidx];
3663
3664			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3665				    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3666				    path, val);
3667		}
3668	} else {
3669		tgidx = _tssi_get_6g_trim_group(rtwdev, ch);
3670
3671		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3672			    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
3673			    path, tgidx);
3674
3675		if (IS_TSSI_EXTRA_GROUP(tgidx)) {
3676			tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
3677			tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
3678			tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
3679			tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
3680			val = (tde_1st + tde_2nd) / 2;
3681
3682			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3683				    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
3684				    path, val, tde_1st, tde_2nd);
3685		} else {
3686			val = tssi_info->tssi_trim_6g[path][tgidx];
3687
3688			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3689				    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
3690				    path, val);
3691		}
3692	}
3693
3694	return val;
3695}
3696
3697static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
3698				  enum rtw89_phy_idx phy)
3699{
3700	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
3701	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3702	u8 ch = chan->channel;
3703	u8 gidx;
3704	s8 ofdm_de;
3705	s8 trim_de;
3706	s32 val;
3707	u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
3708
3709	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
3710		    phy, ch);
3711
3712	if (rtwdev->dbcc_en) {
3713		if (phy == RTW89_PHY_0) {
3714			path = RF_PATH_A;
3715			path_max = RF_PATH_B;
3716		} else if (phy == RTW89_PHY_1) {
3717			path = RF_PATH_B;
3718			path_max = RF_PATH_NUM_8852C;
3719		}
3720	}
3721
3722	for (i = path; i < path_max; i++) {
3723		gidx = _tssi_get_cck_group(rtwdev, ch);
3724		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
3725		val = tssi_info->tssi_cck[i][gidx] + trim_de;
3726
3727		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3728			    "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
3729			    i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);
3730
3731		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
3732		rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);
3733
3734		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3735			    "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
3736			    _tssi_de_cck_long[i],
3737			    rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
3738						  _TSSI_DE_MASK));
3739
3740		ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
3741		trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
3742		val = ofdm_de + trim_de;
3743
3744		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3745			    "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
3746			    i, ofdm_de, trim_de);
3747
3748		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
3749		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
3750		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
3751		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val);
3752		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
3753		rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);
3754
3755		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
3756			    "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
3757			    _tssi_de_mcs_20m[i],
3758			    rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
3759						  _TSSI_DE_MASK));
3760	}
3761}
3762
3763static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
3764				  enum rtw89_rf_path path)
3765{
3766	static const u32 tssi_trk[2] = {0x5818, 0x7818};
3767	static const u32 tssi_en[2] = {0x5820, 0x7820};
3768
3769	if (en) {
3770		rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0);
3771		rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0);
3772		if (rtwdev->dbcc_en && path == RF_PATH_B)
3773			_tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1);
3774		else
3775			_tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0);
3776	} else {
3777		rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1);
3778		rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1);
3779	}
3780}
3781
3782void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
3783{
3784	if (!rtwdev->dbcc_en) {
3785		rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
3786		rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
3787	} else {
3788		if (phy_idx == RTW89_PHY_0)
3789			rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
3790		else
3791			rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
3792	}
3793}
3794
3795static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
3796			enum rtw89_bandwidth bw, bool is_dav)
3797{
3798	u32 rf_reg18;
3799	u32 reg_reg18_addr;
3800
3801	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
3802	if (is_dav)
3803		reg_reg18_addr = RR_CFGCH;
3804	else
3805		reg_reg18_addr = RR_CFGCH_V1;
3806
3807	rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
3808	rf_reg18 &= ~RR_CFGCH_BW;
3809
3810	switch (bw) {
3811	case RTW89_CHANNEL_WIDTH_5:
3812	case RTW89_CHANNEL_WIDTH_10:
3813	case RTW89_CHANNEL_WIDTH_20:
3814		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
3815		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
3816		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
3817		break;
3818	case RTW89_CHANNEL_WIDTH_40:
3819		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
3820		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3);
3821		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf);
3822		break;
3823	case RTW89_CHANNEL_WIDTH_80:
3824		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
3825		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x2);
3826		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xd);
3827		break;
3828	case RTW89_CHANNEL_WIDTH_160:
3829		rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_160M);
3830		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
3831		rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
3832		break;
3833	default:
3834		break;
3835	}
3836
3837	rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
3838}
3839
3840static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3841		     enum rtw89_bandwidth bw)
3842{
3843	bool is_dav;
3844	u8 kpath, path;
3845	u32 tmp = 0;
3846
3847	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
3848	kpath = _kpath(rtwdev, phy);
3849
3850	for (path = 0; path < 2; path++) {
3851		if (!(kpath & BIT(path)))
3852			continue;
3853
3854		is_dav = true;
3855		_bw_setting(rtwdev, path, bw, is_dav);
3856		is_dav = false;
3857		_bw_setting(rtwdev, path, bw, is_dav);
3858		if (rtwdev->dbcc_en)
3859			continue;
3860
3861		if (path == RF_PATH_B && rtwdev->hal.cv == CHIP_CAV) {
3862			rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
3863			tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
3864			rtw89_write_rf(rtwdev, RF_PATH_B, RR_APK, RR_APK_MOD, 0x3);
3865			rtw89_write_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK, tmp);
3866			fsleep(100);
3867			rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
3868		}
3869	}
3870}
3871
3872static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
3873			u8 central_ch, enum rtw89_band band, bool is_dav)
3874{
3875	u32 rf_reg18;
3876	u32 reg_reg18_addr;
3877
3878	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
3879	if (is_dav)
3880		reg_reg18_addr = 0x18;
3881	else
3882		reg_reg18_addr = 0x10018;
3883
3884	rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK);
3885	rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BAND0 | RR_CFGCH_CH);
3886	rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);
3887
3888	switch (band) {
3889	case RTW89_BAND_2G:
3890		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_2G);
3891		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_2G);
3892		break;
3893	case RTW89_BAND_5G:
3894		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G);
3895		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
3896		break;
3897	case RTW89_BAND_6G:
3898		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_6G);
3899		rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_6G);
3900		break;
3901	default:
3902		break;
3903	}
3904	rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18);
3905	fsleep(100);
3906}
3907
3908static void _ctrl_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3909		     u8 central_ch, enum rtw89_band band)
3910{
3911	u8 kpath, path;
3912
3913	rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__);
3914	if (band != RTW89_BAND_6G) {
3915		if ((central_ch > 14 && central_ch < 36) ||
3916		    (central_ch > 64 && central_ch < 100) ||
3917		    (central_ch > 144 && central_ch < 149) || central_ch > 177)
3918			return;
3919	} else {
3920		if (central_ch > 253 || central_ch  == 2)
3921			return;
3922	}
3923
3924	kpath = _kpath(rtwdev, phy);
3925
3926	for (path = 0; path < 2; path++) {
3927		if (kpath & BIT(path)) {
3928			_ch_setting(rtwdev, path, central_ch, band, true);
3929			_ch_setting(rtwdev, path, central_ch, band, false);
3930		}
3931	}
3932}
3933
3934static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
3935		     enum rtw89_bandwidth bw)
3936{
3937	u8 kpath;
3938	u8 path;
3939	u32 val;
3940
3941	kpath = _kpath(rtwdev, phy);
3942	for (path = 0; path < 2; path++) {
3943		if (!(kpath & BIT(path)))
3944			continue;
3945
3946		rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
3947		rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0xa);
3948		switch (bw) {
3949		case RTW89_CHANNEL_WIDTH_20:
3950			val = 0x1b;
3951			break;
3952		case RTW89_CHANNEL_WIDTH_40:
3953			val = 0x13;
3954			break;
3955		case RTW89_CHANNEL_WIDTH_80:
3956			val = 0xb;
3957			break;
3958		case RTW89_CHANNEL_WIDTH_160:
3959		default:
3960			val = 0x3;
3961			break;
3962		}
3963		rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, val);
3964		rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
3965	}
3966}
3967
3968static void _lck_keep_thermal(struct rtw89_dev *rtwdev)
3969{
3970	struct rtw89_lck_info *lck = &rtwdev->lck;
3971	int path;
3972
3973	for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
3974		lck->thermal[path] =
3975			ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
3976		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
3977			    "[LCK] path=%d thermal=0x%x", path, lck->thermal[path]);
3978	}
3979}
3980
3981static void _lck(struct rtw89_dev *rtwdev)
3982{
3983	u32 tmp18[2];
3984	int path = rtwdev->dbcc_en ? 2 : 1;
3985	int i;
3986
3987	rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "[LCK] DO LCK\n");
3988
3989	tmp18[0] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
3990	tmp18[1] = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
3991
3992	for (i = 0; i < path; i++) {
3993		rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
3994		rtw89_write_rf(rtwdev, i, RR_CFGCH, RFREG_MASK, tmp18[i]);
3995		rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
3996	}
3997
3998	_lck_keep_thermal(rtwdev);
3999}
4000
4001#define RTW8852C_LCK_TH 8
4002
4003void rtw8852c_lck_track(struct rtw89_dev *rtwdev)
4004{
4005	struct rtw89_lck_info *lck = &rtwdev->lck;
4006	u8 cur_thermal;
4007	int delta;
4008	int path;
4009
4010	for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
4011		cur_thermal =
4012			ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
4013		delta = abs((int)cur_thermal - lck->thermal[path]);
4014
4015		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
4016			    "[LCK] path=%d current thermal=0x%x delta=0x%x\n",
4017			    path, cur_thermal, delta);
4018
4019		if (delta >= RTW8852C_LCK_TH) {
4020			_lck(rtwdev);
4021			return;
4022		}
4023	}
4024}
4025
4026void rtw8852c_lck_init(struct rtw89_dev *rtwdev)
4027{
4028	_lck_keep_thermal(rtwdev);
4029}
4030
4031static
4032void rtw8852c_ctrl_bw_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4033			 u8 central_ch, enum rtw89_band band,
4034			 enum rtw89_bandwidth bw)
4035{
4036	_ctrl_ch(rtwdev, phy, central_ch, band);
4037	_ctrl_bw(rtwdev, phy, bw);
4038	_rxbb_bw(rtwdev, phy, bw);
4039}
4040
4041void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
4042			     const struct rtw89_chan *chan,
4043			     enum rtw89_phy_idx phy_idx)
4044{
4045	rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, chan->channel,
4046			    chan->band_type,
4047			    chan->band_width);
4048}
4049
4050void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
4051{
4052	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
4053	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
4054	u8 idx = rfk_mcc->table_idx;
4055	int i;
4056
4057	for (i = 0; i < RTW89_IQK_CHS_NR; i++) {
4058		if (rfk_mcc->ch[idx] == 0)
4059			break;
4060		if (++idx >= RTW89_IQK_CHS_NR)
4061			idx = 0;
4062	}
4063
4064	rfk_mcc->table_idx = idx;
4065	rfk_mcc->ch[idx] = chan->channel;
4066	rfk_mcc->band[idx] = chan->band_type;
4067}
4068
4069void rtw8852c_rck(struct rtw89_dev *rtwdev)
4070{
4071	u8 path;
4072
4073	for (path = 0; path < 2; path++)
4074		_rck(rtwdev, path);
4075}
4076
4077void rtw8852c_dack(struct rtw89_dev *rtwdev)
4078{
4079	u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
4080
4081	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
4082	_dac_cal(rtwdev, false);
4083	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
4084}
4085
4086void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
4087{
4088	u32 tx_en;
4089	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
4090
4091	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
4092	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
4093	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
4094
4095	_iqk_init(rtwdev);
4096	_iqk(rtwdev, phy_idx, false);
4097
4098	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
4099	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
4100}
4101
4102#define RXDCK_VER_8852C 0xe
4103
4104static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
4105		    bool is_afe, u8 retry_limit)
4106{
4107	struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
4108	u8 path, kpath;
4109	u32 rf_reg5;
4110	bool is_fail;
4111	u8 rek_cnt;
4112
4113	kpath = _kpath(rtwdev, phy);
4114	rtw89_debug(rtwdev, RTW89_DBG_RFK,
4115		    "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
4116		    RXDCK_VER_8852C, rtwdev->hal.cv);
4117
4118	for (path = 0; path < 2; path++) {
4119		rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
4120		if (!(kpath & BIT(path)))
4121			continue;
4122
4123		if (rtwdev->is_tssi_mode[path])
4124			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
4125					       B_P0_TSSI_TRK_EN, 0x1);
4126		rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
4127		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
4128		rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_LO_SEL, rtwdev->dbcc_en);
4129
4130		for (rek_cnt = 0; rek_cnt < retry_limit; rek_cnt++) {
4131			_set_rx_dck(rtwdev, phy, path, is_afe);
4132
4133			/* To reduce IO of dck_rek_check(), the last try is seen
4134			 * as failure always, and then do recovery procedure.
4135			 */
4136			if (rek_cnt == retry_limit - 1) {
4137				_rx_dck_recover(rtwdev, path);
4138				break;
4139			}
4140
4141			is_fail = _rx_dck_rek_check(rtwdev, path);
4142			if (!is_fail)
4143				break;
4144		}
4145
4146		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] rek_cnt[%d]=%d",
4147			    path, rek_cnt);
4148
4149		rx_dck->thermal[path] = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
4150		rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
4151
4152		if (rtwdev->is_tssi_mode[path])
4153			rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
4154					       B_P0_TSSI_TRK_EN, 0x0);
4155	}
4156}
4157
4158void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
4159{
4160	_rx_dck(rtwdev, phy, is_afe, 1);
4161}
4162
4163#define RTW8852C_RX_DCK_TH 12
4164
4165void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
4166{
4167	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
4168	struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
4169	enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
4170	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
4171	u8 dck_channel;
4172	u8 cur_thermal;
4173	u32 tx_en;
4174	int delta;
4175	int path;
4176
4177	if (chan->band_type == RTW89_BAND_2G)
4178		return;
4179
4180	if (rtwdev->scanning)
4181		return;
4182
4183	for (path = 0; path < RF_PATH_NUM_8852C; path++) {
4184		cur_thermal =
4185			ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
4186		delta = abs((int)cur_thermal - rx_dck->thermal[path]);
4187
4188		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
4189			    "[RX_DCK] path=%d current thermal=0x%x delta=0x%x\n",
4190			    path, cur_thermal, delta);
4191
4192		if (delta >= RTW8852C_RX_DCK_TH)
4193			goto trigger_rx_dck;
4194	}
4195
4196	return;
4197
4198trigger_rx_dck:
4199	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
4200	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
4201
4202	for (path = 0; path < RF_PATH_NUM_8852C; path++) {
4203		dck_channel = _rx_dck_channel_calc(rtwdev, chan);
4204		_ctrl_ch(rtwdev, RTW89_PHY_0, dck_channel, chan->band_type);
4205	}
4206
4207	_rx_dck(rtwdev, RTW89_PHY_0, false, 20);
4208
4209	for (path = 0; path < RF_PATH_NUM_8852C; path++)
4210		_ctrl_ch(rtwdev, RTW89_PHY_0, chan->channel, chan->band_type);
4211
4212	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
4213	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
4214}
4215
4216void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
4217{
4218	u32 tx_en;
4219	u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
4220
4221	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
4222	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
4223	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
4224
4225	rtwdev->dpk.is_dpk_enable = true;
4226	rtwdev->dpk.is_dpk_reload_en = false;
4227	_dpk(rtwdev, phy_idx, false);
4228
4229	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
4230	rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
4231}
4232
4233void rtw8852c_dpk_track(struct rtw89_dev *rtwdev)
4234{
4235	_dpk_track(rtwdev);
4236}
4237
4238void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
4239{
4240	u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
4241
4242	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
4243
4244	if (rtwdev->dbcc_en) {
4245		if (phy == RTW89_PHY_0) {
4246			path = RF_PATH_A;
4247			path_max = RF_PATH_B;
4248		} else if (phy == RTW89_PHY_1) {
4249			path = RF_PATH_B;
4250			path_max = RF_PATH_NUM_8852C;
4251		}
4252	}
4253
4254	_tssi_disable(rtwdev, phy);
4255
4256	for (i = path; i < path_max; i++) {
4257		_tssi_set_sys(rtwdev, phy, i);
4258		_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
4259		_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
4260		_tssi_set_dck(rtwdev, phy, i);
4261		_tssi_set_bbgain_split(rtwdev, phy, i);
4262		_tssi_set_tmeter_tbl(rtwdev, phy, i);
4263		_tssi_slope_cal_org(rtwdev, phy, i);
4264		_tssi_set_aligk_default(rtwdev, phy, i);
4265		_tssi_set_slope(rtwdev, phy, i);
4266		_tssi_run_slope(rtwdev, phy, i);
4267	}
4268
4269	_tssi_enable(rtwdev, phy);
4270	_tssi_set_efuse_to_de(rtwdev, phy);
4271}
4272
4273void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
4274{
4275	u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
4276
4277	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
4278		    __func__, phy);
4279
4280	if (!rtwdev->is_tssi_mode[RF_PATH_A])
4281		return;
4282	if (!rtwdev->is_tssi_mode[RF_PATH_B])
4283		return;
4284
4285	if (rtwdev->dbcc_en) {
4286		if (phy == RTW89_PHY_0) {
4287			path = RF_PATH_A;
4288			path_max = RF_PATH_B;
4289		} else if (phy == RTW89_PHY_1) {
4290			path = RF_PATH_B;
4291			path_max = RF_PATH_NUM_8852C;
4292		}
4293	}
4294
4295	_tssi_disable(rtwdev, phy);
4296
4297	for (i = path; i < path_max; i++) {
4298		_tssi_set_sys(rtwdev, phy, i);
4299		_tssi_set_dck(rtwdev, phy, i);
4300		_tssi_set_tmeter_tbl(rtwdev, phy, i);
4301		_tssi_slope_cal_org(rtwdev, phy, i);
4302		_tssi_set_aligk_default(rtwdev, phy, i);
4303	}
4304
4305	_tssi_enable(rtwdev, phy);
4306	_tssi_set_efuse_to_de(rtwdev, phy);
4307}
4308
4309static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev,
4310					enum rtw89_phy_idx phy, bool enable)
4311{
4312	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
4313	u8 i;
4314
4315	if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
4316		return;
4317
4318	if (enable) {
4319		/* SCAN_START */
4320		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 &&
4321		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) {
4322			for (i = 0; i < 6; i++) {
4323				tssi_info->default_txagc_offset[RF_PATH_A] =
4324					rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
4325							      B_TXAGC_BB);
4326				if (tssi_info->default_txagc_offset[RF_PATH_A])
4327					break;
4328			}
4329		}
4330
4331		if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 &&
4332		    rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) {
4333			for (i = 0; i < 6; i++) {
4334				tssi_info->default_txagc_offset[RF_PATH_B] =
4335					rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
4336							      B_TXAGC_BB_S1);
4337				if (tssi_info->default_txagc_offset[RF_PATH_B])
4338					break;
4339			}
4340		}
4341	} else {
4342		/* SCAN_END */
4343		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
4344				       tssi_info->default_txagc_offset[RF_PATH_A]);
4345		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
4346				       tssi_info->default_txagc_offset[RF_PATH_B]);
4347
4348		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
4349		rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
4350
4351		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
4352		rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
4353	}
4354}
4355
4356void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev,
4357			       bool scan_start, enum rtw89_phy_idx phy_idx)
4358{
4359	if (scan_start)
4360		rtw8852c_tssi_default_txagc(rtwdev, phy_idx, true);
4361	else
4362		rtw8852c_tssi_default_txagc(rtwdev, phy_idx, false);
4363}
4364