// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/iopoll.h>

#include "main.h"
#include "coex.h"
#include "fw.h"
#include "tx.h"
#include "reg.h"
#include "sec.h"
#include "debug.h"
#include "util.h"
#include "wow.h"

static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
				      struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u8 sub_cmd_id;

	c2h = get_c2h_from_skb(skb);
	sub_cmd_id = c2h->payload[0];

	switch (sub_cmd_id) {
	case C2H_CCX_RPT:
		rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT);
		break;
	default:
		break;
	}
}

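/* Map the current bit rate into an A-MSDU length limit for rate control.
 * @bit_rate comes from cfg80211_calculate_bitrate(), i.e. in units of
 * 100 kbps, so 550 below means 55 Mbit/s. Returning 1 effectively
 * disables A-MSDU aggregation, while 0 leaves the length unlimited.
 */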
static u16 get_max_amsdu_len(u32 bit_rate)
{
	/* lower than ofdm, do not aggregate */
	if (bit_rate < 550)
		return 1;

	/* lower than 20M 2ss mcs8, make it small */
	if (bit_rate < 1800)
		return 1200;

	/* lower than 40M 2ss mcs9, make it medium */
	if (bit_rate < 4000)
		return 2600;

	/* not yet 80M 2ss mcs8/9, make it twice regular packet size */
	if (bit_rate < 7000)
		return 3500;

	/* unlimited */
	return 0;
}

struct rtw_fw_iter_ra_data {
	struct rtw_dev *rtwdev;
	u8 *payload;
};

static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_fw_iter_ra_data *ra_data = data;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
	u8 mac_id, rate, sgi, bw;
	u8 mcs, nss;
	u32 bit_rate;

	mac_id = GET_RA_REPORT_MACID(ra_data->payload);
	if (si->mac_id != mac_id)
		return;

	si->ra_report.txrate.flags = 0;

	rate = GET_RA_REPORT_RATE(ra_data->payload);
	sgi = GET_RA_REPORT_SGI(ra_data->payload);
	bw = GET_RA_REPORT_BW(ra_data->payload);

	if (rate < DESC_RATEMCS0) {
		si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate);
		goto legacy;
	}

	rtw_desc_to_mcsrate(rate, &mcs, &nss);
	if (rate >= DESC_RATEVHT1SS_MCS0)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
	else if (rate >= DESC_RATEMCS0)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS;

	if (rate >= DESC_RATEMCS0) {
		si->ra_report.txrate.mcs = mcs;
		si->ra_report.txrate.nss = nss;
	}

	if (sgi)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	if (bw == RTW_CHANNEL_WIDTH_80)
		si->ra_report.txrate.bw = RATE_INFO_BW_80;
	else if (bw == RTW_CHANNEL_WIDTH_40)
		si->ra_report.txrate.bw = RATE_INFO_BW_40;
	else
		si->ra_report.txrate.bw = RATE_INFO_BW_20;

legacy:
	bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate);

	si->ra_report.desc_rate = rate;
	si->ra_report.bit_rate = bit_rate;

	sta->max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
}

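/* Handle a rate adaptation (RA) report from firmware and update the
 * matching station. The payload must cover all fields read by the
 * GET_RA_REPORT_* accessors, hence the minimum length check below.
 */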
static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
				    u8 length)
{
	struct rtw_fw_iter_ra_data ra_data;

	if (WARN(length < 7, "invalid ra report c2h length\n"))
		return;

	rtwdev->dm_info.tx_rate = GET_RA_REPORT_RATE(payload);
	ra_data.rtwdev = rtwdev;
	ra_data.payload = payload;
	rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
}

void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u32 pkt_offset;
	u8 len;

	pkt_offset = *((u32 *)skb->cb);
	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
	len = skb->len - pkt_offset - 2;

	mutex_lock(&rtwdev->mutex);

	if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
		goto unlock;

	switch (c2h->id) {
	case C2H_CCX_TX_RPT:
		rtw_tx_report_handle(rtwdev, skb, C2H_CCX_TX_RPT);
		break;
	case C2H_BT_INFO:
		rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_WLAN_INFO:
		rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_HALMAC:
		rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
		break;
	case C2H_RA_RPT:
		rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
		break;
	default:
		rtw_dbg(rtwdev, RTW_DBG_FW, "C2H 0x%x isn't handled\n", c2h->id);
		break;
	}

unlock:
	mutex_unlock(&rtwdev->mutex);
}

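/* Receive a C2H event in IRQ-safe context. Most events are deferred to
 * c2h_work so that rtw_fw_c2h_cmd_handle() can take rtwdev->mutex; only
 * BT MP info responses, which a coex requester is typically waiting on,
 * are handed over immediately.
 */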
void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
			       struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u8 len;

	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
	len = skb->len - pkt_offset - 2;
	*((u32 *)skb->cb) = pkt_offset;

	rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
		c2h->id, c2h->seq, len);

	switch (c2h->id) {
	case C2H_BT_MP_INFO:
		rtw_coex_info_response(rtwdev, skb);
		break;
	default:
		/* pass offset for further operation */
		*((u32 *)skb->cb) = pkt_offset;
		skb_queue_tail(&rtwdev->c2h_queue, skb);
		ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
		break;
	}
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe);

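/* Some C2H events are signalled through an interrupt instead of an RX
 * packet; REG_MCU_TST_CFG reading VAL_FW_TRIGGER means the firmware is
 * asking the driver to run its recovery flow.
 */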
void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev)
{
	if (rtw_read8(rtwdev, REG_MCU_TST_CFG) == VAL_FW_TRIGGER)
		rtw_fw_recovery(rtwdev);
	else
		rtw_warn(rtwdev, "unhandled firmware c2h interrupt\n");
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_isr);

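/* Send one 8-byte H2C command through the mailbox interface. The four
 * mailboxes are used round-robin under h2c.lock; the per-box bit in
 * REG_HMETFR stays set while the firmware still owns that box, so poll
 * (for up to 3 ms) until it clears before writing the command bytes and
 * their extension.
 */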
static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
				    u8 *h2c)
{
	u8 box;
	u8 box_state;
	u32 box_reg, box_ex_reg;
	int idx;
	int ret;

	rtw_dbg(rtwdev, RTW_DBG_FW,
		"send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
		h2c[3], h2c[2], h2c[1], h2c[0],
		h2c[7], h2c[6], h2c[5], h2c[4]);

	spin_lock(&rtwdev->h2c.lock);

	box = rtwdev->h2c.last_box_num;
	switch (box) {
	case 0:
		box_reg = REG_HMEBOX0;
		box_ex_reg = REG_HMEBOX0_EX;
		break;
	case 1:
		box_reg = REG_HMEBOX1;
		box_ex_reg = REG_HMEBOX1_EX;
		break;
	case 2:
		box_reg = REG_HMEBOX2;
		box_ex_reg = REG_HMEBOX2_EX;
		break;
	case 3:
		box_reg = REG_HMEBOX3;
		box_ex_reg = REG_HMEBOX3_EX;
		break;
	default:
		WARN(1, "invalid h2c mail box number\n");
		goto out;
	}

	ret = read_poll_timeout_atomic(rtw_read8, box_state,
				       !((box_state >> box) & 0x1), 100, 3000,
				       false, rtwdev, REG_HMETFR);

	if (ret) {
		rtw_err(rtwdev, "failed to send h2c command\n");
		goto out;
	}

	for (idx = 0; idx < 4; idx++)
		rtw_write8(rtwdev, box_reg + idx, h2c[idx]);
	for (idx = 0; idx < 4; idx++)
		rtw_write8(rtwdev, box_ex_reg + idx, h2c[idx + 4]);

	if (++rtwdev->h2c.last_box_num >= 4)
		rtwdev->h2c.last_box_num = 0;

out:
	spin_unlock(&rtwdev->h2c.lock);
}

void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c)
{
	rtw_fw_send_h2c_command(rtwdev, h2c);
}

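/* Larger "offload" H2C packets carry their own header and sequence
 * number and are sent through the HCI data path rather than through the
 * mailbox registers.
 */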
static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt)
{
	int ret;

	spin_lock(&rtwdev->h2c.lock);

	FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq);
	ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE);
	if (ret)
		rtw_err(rtwdev, "failed to send h2c packet\n");
	rtwdev->h2c.seq++;

	spin_unlock(&rtwdev->h2c.lock);
}

void
rtw_fw_send_general_info(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 4;

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

	GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt,
					fifo->rsvd_fw_txbuf_addr -
					fifo->rsvd_boundary);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void
rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 8;
	u8 fw_rf_type = 0;

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	if (hal->rf_type == RF_1T1R)
		fw_rf_type = FW_RF_1T1R;
	else if (hal->rf_type == RF_2T2R)
		fw_rf_type = FW_RF_2T2R;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option);
	PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type);
	PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version);
	PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx);
	PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 1;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	IQK_SET_CLEAR(h2c_pkt, para->clear);
	IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
EXPORT_SYMBOL(rtw_fw_do_iqk);

void rtw_fw_query_bt_info(struct rtw_dev *rtwdev)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO);

	SET_QUERY_BT_INFO(h2c_pkt, true);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO);

	SET_WL_CH_INFO_LINK(h2c_pkt, link);
	SET_WL_CH_INFO_CHNL(h2c_pkt, ch);
	SET_WL_CH_INFO_BW(h2c_pkt, bw);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
			     struct rtw_coex_info_req *req)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO);

	SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq);
	SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code);
	SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1);
	SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2);
	SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 index = 0 - bt_pwr_dec_lvl;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER);

	SET_BT_TX_POWER_INDEX(h2c_pkt, index);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION);

	SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
			   u8 para1, u8 para2, u8 para3, u8 para4, u8 para5)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE);

	SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1);
	SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2);
	SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3);
	SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4);
	SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL);

	SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code);

	SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data);
	SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1));
	SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2));
	SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3));
	SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4));

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 rssi = ewma_rssi_read(&si->avg_rssi);
	bool stbc_en = si->stbc_en ? true : false;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR);

	SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id);
	SET_RSSI_INFO_RSSI(h2c_pkt, rssi);
	SET_RSSI_INFO_STBC(h2c_pkt, stbc_en);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	bool no_update = si->updated;
	bool disable_pt = true;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);

	SET_RA_INFO_MACID(h2c_pkt, si->mac_id);
	SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id);
	SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
	SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
	SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
	SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en);
	SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update);
	SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
	SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
	SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
	SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8);
	SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16);
	SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);

	si->init_ra_lv = 0;
	si->updated = true;

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT);
	MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect);
	MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
{
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE);

	SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode);
	SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm);
	SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps);
	SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval);
	SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id);
	SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	struct rtw_fw_wow_keep_alive_para mode = {
		.adopt = true,
		.pkt_type = KEEP_ALIVE_NULL_PKT,
		.period = 5,
	};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_KEEP_ALIVE);
	SET_KEEP_ALIVE_ENABLE(h2c_pkt, enable);
	SET_KEEP_ALIVE_ADOPT(h2c_pkt, mode.adopt);
	SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, mode.pkt_type);
	SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, mode.period);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	struct rtw_fw_wow_disconnect_para mode = {
		.adopt = true,
		.period = 30,
		.retry_count = 5,
	};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_DISCONNECT_DECISION);

	if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
		SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, enable);
		SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, mode.adopt);
		SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, mode.period);
		SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, mode.retry_count);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WOWLAN);

	SET_WOWLAN_FUNC_ENABLE(h2c_pkt, enable);
	if (rtw_wow_mgd_linked(rtwdev)) {
		if (test_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
			SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, enable);
		if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
			SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, enable);
		if (test_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags))
			SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, enable);
		if (rtw_wow->pattern_cnt)
			SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, enable);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
				     u8 pairwise_key_enc,
				     u8 group_key_enc)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_AOAC_GLOBAL_INFO);

	SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, pairwise_key_enc);
	SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, group_key_enc);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_REMOTE_WAKE_CTRL);

	SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, enable);

	if (rtw_wow_no_link(rtwdev))
		SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, enable);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

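/* Look up the page offset of a reserved packet of @type within the
 * reserved page section. Page 0 always holds the beacon, so a return
 * value of 0 doubles as "not found".
 */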
static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
				     enum rtw_rsvd_packet_type type)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u8 location = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (type == rsvd_pkt->type)
			location = rsvd_pkt->page;
	}

	return location;
}

void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 loc_nlo;

	loc_nlo = rtw_get_rsvd_page_location(rtwdev, RSVD_NLO_INFO);

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_NLO_INFO);

	SET_NLO_FUN_EN(h2c_pkt, enable);
	if (enable) {
		if (rtw_fw_lps_deep_mode)
			SET_NLO_PS_32K(h2c_pkt, enable);
		SET_NLO_IGNORE_SECURITY(h2c_pkt, enable);
		SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
{
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 loc_pg, loc_dpk;

	loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO);
	loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK);

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO);

	LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
	LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
	LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
	LPS_PG_PATTERN_CAM_EN(h2c_pkt, conf->pattern_cam_backup);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

static u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
					       struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u8 location = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (rsvd_pkt->type != RSVD_PROBE_REQ)
			continue;
		if ((!ssid && !rsvd_pkt->ssid) ||
		    rtw_ssid_equal(rsvd_pkt->ssid, ssid))
			location = rsvd_pkt->page;
	}

	return location;
}

static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
					    struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u16 size = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (rsvd_pkt->type != RSVD_PROBE_REQ)
			continue;
		if ((!ssid && !rsvd_pkt->ssid) ||
		    rtw_ssid_equal(rsvd_pkt->ssid, ssid))
			size = rsvd_pkt->probe_req_size;
	}

	return size;
}

void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 location = 0;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP);
	*(h2c_pkt + 1) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL);
	*(h2c_pkt + 2) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL);
	*(h2c_pkt + 3) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL);
	*(h2c_pkt + 4) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", location);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

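/* Build the NLO (network list offload) info block for WoWLAN net-detect:
 * a header with the match-set count, per-SSID lengths and probe request
 * locations (the 0xA5 pattern bytes appear to serve as a sanity check
 * for the firmware), followed by one IEEE80211_MAX_SSID_LEN slot per
 * match set.
 */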
static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
	struct rtw_nlo_info_hdr *nlo_hdr;
	struct cfg80211_ssid *ssid;
	struct sk_buff *skb;
	u8 *pos, loc;
	u32 size;
	int i;

	if (!pno_req->inited || !pno_req->match_set_cnt)
		return NULL;

	size = sizeof(struct rtw_nlo_info_hdr) + pno_req->match_set_cnt *
		      IEEE80211_MAX_SSID_LEN + chip->tx_pkt_desc_sz;

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);

	nlo_hdr = skb_put_zero(skb, sizeof(struct rtw_nlo_info_hdr));

	nlo_hdr->nlo_count = pno_req->match_set_cnt;
	nlo_hdr->hidden_ap_count = pno_req->match_set_cnt;

	/* pattern check for firmware */
	memset(nlo_hdr->pattern_check, 0xA5, FW_NLO_INFO_CHECK_SIZE);

	for (i = 0; i < pno_req->match_set_cnt; i++)
		nlo_hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len;

	for (i = 0; i < pno_req->match_set_cnt; i++) {
		ssid = &pno_req->match_sets[i].ssid;
		loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
		if (!loc) {
			rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
			kfree_skb(skb);
			return NULL;
		}
		nlo_hdr->location[i] = loc;
	}

	for (i = 0; i < pno_req->match_set_cnt; i++) {
		pos = skb_put_zero(skb, IEEE80211_MAX_SSID_LEN);
		memcpy(pos, pno_req->match_sets[i].ssid.ssid,
		       pno_req->match_sets[i].ssid.ssid_len);
	}

	return skb;
}

static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
	struct ieee80211_channel *channels = pno_req->channels;
	struct sk_buff *skb;
	int count = pno_req->channel_cnt;
	u8 *pos;
	int i = 0;

	skb = alloc_skb(4 * count + chip->tx_pkt_desc_sz, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);

	for (i = 0; i < count; i++) {
		pos = skb_put_zero(skb, 4);

		CHSW_INFO_SET_CH(pos, channels[i].hw_value);

		if (channels[i].flags & IEEE80211_CHAN_RADAR)
			CHSW_INFO_SET_ACTION_ID(pos, 0);
		else
			CHSW_INFO_SET_ACTION_ID(pos, 1);
		CHSW_INFO_SET_TIMEOUT(pos, 1);
		CHSW_INFO_SET_PRI_CH_IDX(pos, 1);
		CHSW_INFO_SET_BW(pos, 0);
	}

	return skb;
}

static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
	struct rtw_lps_pg_dpk_hdr *dpk_hdr;
	struct sk_buff *skb;
	u32 size;

	size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr);
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr));
	dpk_hdr->dpk_ch = dpk_info->dpk_ch;
	dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0];
	memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 2);
	memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4);
	memcpy(dpk_hdr->coef, dpk_info->coef, 160);

	return skb;
}

static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	struct rtw_lps_pg_info_hdr *pg_info_hdr;
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u32 size;

	size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr);
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr));
	pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num;
	pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
	pg_info_hdr->sec_cam_count =
		rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
	pg_info_hdr->pattern_count = rtw_wow->pattern_cnt;

	conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
	conf->pattern_cam_backup = rtw_wow->pattern_cnt != 0;

	return skb;
}

static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
					     struct rtw_rsvd_page *rsvd_pkt)
{
	struct ieee80211_vif *vif;
	struct rtw_vif *rtwvif;
	struct sk_buff *skb_new;
	struct cfg80211_ssid *ssid;

	if (rsvd_pkt->type == RSVD_DUMMY) {
		skb_new = alloc_skb(1, GFP_KERNEL);
		if (!skb_new)
			return NULL;

		skb_put(skb_new, 1);
		return skb_new;
	}

	rtwvif = rsvd_pkt->rtwvif;
	if (!rtwvif)
		return NULL;

	vif = rtwvif_to_vif(rtwvif);

	switch (rsvd_pkt->type) {
	case RSVD_BEACON:
		skb_new = ieee80211_beacon_get(hw, vif);
		break;
	case RSVD_PS_POLL:
		skb_new = ieee80211_pspoll_get(hw, vif);
		break;
	case RSVD_PROBE_RESP:
		skb_new = ieee80211_proberesp_get(hw, vif);
		break;
	case RSVD_NULL:
		skb_new = ieee80211_nullfunc_get(hw, vif, false);
		break;
	case RSVD_QOS_NULL:
		skb_new = ieee80211_nullfunc_get(hw, vif, true);
		break;
	case RSVD_LPS_PG_DPK:
		skb_new = rtw_lps_pg_dpk_get(hw);
		break;
	case RSVD_LPS_PG_INFO:
		skb_new = rtw_lps_pg_info_get(hw);
		break;
	case RSVD_PROBE_REQ:
		ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid;
		if (ssid)
			skb_new = ieee80211_probereq_get(hw, vif->addr,
							 ssid->ssid,
							 ssid->ssid_len, 0);
		else
			skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
		if (skb_new)
			rsvd_pkt->probe_req_size = (u16)skb_new->len;
		break;
	case RSVD_NLO_INFO:
		skb_new = rtw_nlo_info_get(hw);
		break;
	case RSVD_CH_INFO:
		skb_new = rtw_cs_channel_info_get(hw);
		break;
	default:
		return NULL;
	}

	if (!skb_new)
		return NULL;

	return skb_new;
}

static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				    enum rtw_rsvd_packet_type type)
{
	struct rtw_tx_pkt_info pkt_info = {0};
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 *pkt_desc;

	rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type);
	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
	rtw_tx_fill_tx_desc(&pkt_info, skb);
}

static inline u8 rtw_len_to_page(unsigned int len, u8 page_size)
{
	return DIV_ROUND_UP(len, page_size);
}

static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
				      u8 page_margin, u32 page, u8 *buf,
				      struct rtw_rsvd_page *rsvd_pkt)
{
	struct sk_buff *skb = rsvd_pkt->skb;

	if (page >= 1)
		memcpy(buf + page_margin + page_size * (page - 1),
		       skb->data, skb->len);
	else
		memcpy(buf, skb->data, skb->len);
}

static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
						 enum rtw_rsvd_packet_type type,
						 bool txdesc)
{
	struct rtw_rsvd_page *rsvd_pkt = NULL;

	rsvd_pkt = kzalloc(sizeof(*rsvd_pkt), GFP_KERNEL);

	if (!rsvd_pkt)
		return NULL;

	INIT_LIST_HEAD(&rsvd_pkt->vif_list);
	INIT_LIST_HEAD(&rsvd_pkt->build_list);
	rsvd_pkt->type = type;
	rsvd_pkt->add_txdesc = txdesc;

	return rsvd_pkt;
}

static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev,
				 struct rtw_vif *rtwvif,
				 struct rtw_rsvd_page *rsvd_pkt)
{
	lockdep_assert_held(&rtwdev->mutex);

	list_add_tail(&rsvd_pkt->vif_list, &rtwvif->rsvd_page_list);
}

static void rtw_add_rsvd_page(struct rtw_dev *rtwdev,
			      struct rtw_vif *rtwvif,
			      enum rtw_rsvd_packet_type type,
			      bool txdesc)
{
	struct rtw_rsvd_page *rsvd_pkt;

	rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to alloc rsvd page %d\n", type);
		return;
	}

	rsvd_pkt->rtwvif = rtwvif;
	rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}

static void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
					struct rtw_vif *rtwvif,
					struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;

	rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to alloc probe req rsvd page\n");
		return;
	}

	rsvd_pkt->rtwvif = rtwvif;
	rsvd_pkt->ssid = ssid;
	rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}

void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
			  struct rtw_vif *rtwvif)
{
	struct rtw_rsvd_page *rsvd_pkt, *tmp;

	lockdep_assert_held(&rtwdev->mutex);

	/* remove all of the rsvd pages for vif */
	list_for_each_entry_safe(rsvd_pkt, tmp, &rtwvif->rsvd_page_list,
				 vif_list) {
		list_del(&rsvd_pkt->vif_list);
		if (!list_empty(&rsvd_pkt->build_list))
			list_del(&rsvd_pkt->build_list);
		kfree(rsvd_pkt);
	}
}

void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);

	if (vif->type != NL80211_IFTYPE_AP &&
	    vif->type != NL80211_IFTYPE_ADHOC &&
	    vif->type != NL80211_IFTYPE_MESH_POINT) {
		rtw_warn(rtwdev, "Cannot add beacon rsvd page for %d\n",
			 vif->type);
		return;
	}

	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_BEACON, false);
}

void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
	struct cfg80211_ssid *ssid;
	int i;

	if (vif->type != NL80211_IFTYPE_STATION) {
		rtw_warn(rtwdev, "Cannot add PNO rsvd page for %d\n",
			 vif->type);
		return;
	}

	for (i = 0; i < rtw_pno_req->match_set_cnt; i++) {
		ssid = &rtw_pno_req->match_sets[i].ssid;
		rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, ssid);
	}

	rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, NULL);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NLO_INFO, false);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_CH_INFO, true);
}

void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);

	if (vif->type != NL80211_IFTYPE_STATION) {
		rtw_warn(rtwdev, "Cannot add sta rsvd page for %d\n",
			 vif->type);
		return;
	}

	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_PS_POLL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_QOS_NULL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NULL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_DPK, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_INFO, true);
}

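/* Download @buf into the reserved page area. The data is fed through
 * the beacon queue, so SW beacon mode (BIT_ENSWBCN) is enabled and the
 * beacon queue download bit is masked while the transfer runs; both
 * registers are restored afterwards and the BCN_VALID bit is polled to
 * confirm the hardware accepted the data.
 */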
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				u8 *buf, u32 size)
{
	u8 bckp[2];
	u8 val;
	u16 rsvd_pg_head;
	u32 bcn_valid_addr;
	u32 bcn_valid_mask;
	int ret;

	lockdep_assert_held(&rtwdev->mutex);

	if (!size)
		return -EINVAL;

	if (rtw_chip_wcpu_11n(rtwdev)) {
		rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID);
	} else {
		pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
		pg_addr |= BIT_BCN_VALID_V1;
		rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr);
	}

	val = rtw_read8(rtwdev, REG_CR + 1);
	bckp[0] = val;
	val |= BIT_ENSWBCN >> 8;
	rtw_write8(rtwdev, REG_CR + 1, val);

	val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
	bckp[1] = val;
	val &= ~(BIT_EN_BCNQ_DL >> 16);
	rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);

	ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
	if (ret) {
		rtw_err(rtwdev, "failed to write data to rsvd page\n");
		goto restore;
	}

	if (rtw_chip_wcpu_11n(rtwdev)) {
		bcn_valid_addr = REG_DWBCN0_CTRL;
		bcn_valid_mask = BIT_BCN_VALID;
	} else {
		bcn_valid_addr = REG_FIFOPAGE_CTRL_2;
		bcn_valid_mask = BIT_BCN_VALID_V1;
	}
	if (!check_hw_ready(rtwdev, bcn_valid_addr, bcn_valid_mask, 1)) {
		rtw_err(rtwdev, "timed out waiting for beacon valid bit\n");
		ret = -EBUSY;
	}

restore:
	rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
		    rsvd_pg_head | BIT_BCN_VALID_V1);
	rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
	rtw_write8(rtwdev, REG_CR + 1, bckp[0]);

	return ret;
}

static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	u32 pg_size;
	u32 pg_num = 0;
	u16 pg_addr = 0;

	pg_size = rtwdev->chip->page_size;
	pg_num = size / pg_size + ((size & (pg_size - 1)) ? 1 : 0);
	if (pg_num > rtwdev->fifo.rsvd_drv_pg_num)
		return -ENOMEM;

	pg_addr = rtwdev->fifo.rsvd_drv_addr;

	return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
}

static void __rtw_build_rsvd_page_reset(struct rtw_dev *rtwdev)
{
	struct rtw_rsvd_page *rsvd_pkt, *tmp;

	list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
				 build_list) {
		list_del_init(&rsvd_pkt->build_list);

		/* Only free the dummy rsvd page here; the others are
		 * freed when their vif is removed.
		 */
		if (rsvd_pkt->type == RSVD_DUMMY)
			kfree(rsvd_pkt);
	}
}

static void rtw_build_rsvd_page_iter(void *data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct rtw_dev *rtwdev = data;
	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
	struct rtw_rsvd_page *rsvd_pkt;

	list_for_each_entry(rsvd_pkt, &rtwvif->rsvd_page_list, vif_list) {
		if (rsvd_pkt->type == RSVD_BEACON)
			list_add(&rsvd_pkt->build_list,
				 &rtwdev->rsvd_page_list);
		else
			list_add_tail(&rsvd_pkt->build_list,
				      &rtwdev->rsvd_page_list);
	}
}

static int __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev)
{
	struct rtw_rsvd_page *rsvd_pkt;

	__rtw_build_rsvd_page_reset(rtwdev);

	/* gather rsvd page from vifs */
	rtw_iterate_vifs_atomic(rtwdev, rtw_build_rsvd_page_iter, rtwdev);

	rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
					    struct rtw_rsvd_page, build_list);
	if (!rsvd_pkt) {
		WARN(1, "Should not have an empty reserved page\n");
		return -EINVAL;
	}

	/* the first rsvd should be beacon, otherwise add a dummy one */
	if (rsvd_pkt->type != RSVD_BEACON) {
		struct rtw_rsvd_page *dummy_pkt;

		dummy_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_DUMMY, false);
		if (!dummy_pkt) {
			rtw_err(rtwdev, "failed to alloc dummy rsvd page\n");
			return -ENOMEM;
		}

		list_add(&dummy_pkt->build_list, &rtwdev->rsvd_page_list);
	}

	return 0;
}

static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *iter;
	struct rtw_rsvd_page *rsvd_pkt;
	u32 page = 0;
	u8 total_page = 0;
	u8 page_size, page_margin, tx_desc_sz;
	u8 *buf;
	int ret;

	page_size = chip->page_size;
	tx_desc_sz = chip->tx_pkt_desc_sz;
	page_margin = page_size - tx_desc_sz;

	ret = __rtw_build_rsvd_page_from_vifs(rtwdev);
	if (ret) {
		rtw_err(rtwdev,
			"failed to build rsvd page from vifs, ret %d\n", ret);
		return NULL;
	}

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		iter = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
		if (!iter) {
			rtw_err(rtwdev, "failed to build rsvd packet\n");
			goto release_skb;
		}

		/* Fill the tx_desc for rsvd pkts that require one; note
		 * that this grows iter->len by tx_desc_sz.
		 */
		if (rsvd_pkt->add_txdesc)
			rtw_fill_rsvd_page_desc(rtwdev, iter, rsvd_pkt->type);

		rsvd_pkt->skb = iter;
		rsvd_pkt->page = total_page;

		/* Reserved pages are downloaded through the TX path, which
		 * prepends a tx_desc describing the buffer length. The
		 * firmware treats the first page as the beacon content, so
		 * unless the tx_desc size is counted into the first
		 * rsvd_pkt (usually the beacon), the buffer generated here
		 * would be smaller than the actual reserved page area.
		 */
		if (total_page == 0) {
			if (rsvd_pkt->type != RSVD_BEACON &&
			    rsvd_pkt->type != RSVD_DUMMY) {
				rtw_err(rtwdev, "first page should be a beacon\n");
				goto release_skb;
			}
			total_page += rtw_len_to_page(iter->len + tx_desc_sz,
						      page_size);
		} else {
			total_page += rtw_len_to_page(iter->len, page_size);
		}
	}

	if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
		rtw_err(rtwdev, "rsvd page over size: %d\n", total_page);
		goto release_skb;
	}

	*size = (total_page - 1) * page_size + page_margin;
	buf = kzalloc(*size, GFP_KERNEL);
	if (!buf)
		goto release_skb;

	/* Copy the content of each rsvd_pkt into buf, aligned to pages.
	 *
	 * Note that the first rsvd_pkt is a beacon no matter what
	 * vif->type is, and it does not need a tx_desc here because the
	 * TX path generates one for it when the page is downloaded.
	 */
	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
					  page, buf, rsvd_pkt);
		if (page == 0)
			page += rtw_len_to_page(rsvd_pkt->skb->len +
						tx_desc_sz, page_size);
		else
			page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);

		kfree_skb(rsvd_pkt->skb);
		rsvd_pkt->skb = NULL;
	}

	return buf;

release_skb:
	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		kfree_skb(rsvd_pkt->skb);
		rsvd_pkt->skb = NULL;
	}

	return NULL;
}

static int rtw_download_beacon(struct rtw_dev *rtwdev)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw_rsvd_page *rsvd_pkt;
	struct sk_buff *skb;
	int ret = 0;

	rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
					    struct rtw_rsvd_page, build_list);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to get rsvd page from build list\n");
		return -ENOENT;
	}

	if (rsvd_pkt->type != RSVD_BEACON &&
	    rsvd_pkt->type != RSVD_DUMMY) {
		rtw_err(rtwdev, "invalid rsvd page type %d, should be beacon or dummy\n",
			rsvd_pkt->type);
		return -EINVAL;
	}

	skb = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
	if (!skb) {
		rtw_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
	if (ret)
		rtw_err(rtwdev, "failed to download drv rsvd page\n");

	dev_kfree_skb(skb);

	return ret;
}

int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev)
{
	u8 *buf;
	u32 size;
	int ret;

	buf = rtw_build_rsvd_page(rtwdev, &size);
	if (!buf) {
		rtw_err(rtwdev, "failed to build rsvd page pkt\n");
		return -ENOMEM;
	}

	ret = rtw_download_drv_rsvd_page(rtwdev, buf, size);
	if (ret) {
		rtw_err(rtwdev, "failed to download drv rsvd page\n");
		goto free;
	}

	/* Finally, download *only* the beacon again: the tx_desc written
	 * above describes the whole rsvd page buffer, so the beacon must
	 * be downloaded once more to replace that header with a tx_desc
	 * that is correct for the beacon itself.
	 */
	ret = rtw_download_beacon(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to download beacon\n");
		goto free;
	}

free:
	kfree(buf);

	return ret;
}

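/* Read FIFO contents through the packet buffer debug port: select a
 * page via REG_PKTBUF_DBG_CTRL, then read 32-bit words out of the
 * FIFO_DUMP_ADDR window. RX clock gating is disabled while reading so
 * the buffer stays accessible.
 */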
static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
				  u32 *buf, u32 residue, u16 start_pg)
{
	u32 i;
	u16 idx = 0;
	u16 ctl;

	ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
	/* disable rx clock gate */
	rtw_write32_set(rtwdev, REG_RCR, BIT_DISGCLK);

	do {
		rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);

		for (i = FIFO_DUMP_ADDR + residue;
		     i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) {
			buf[idx++] = rtw_read32(rtwdev, i);
			size -= 4;
			if (size == 0)
				goto out;
		}

		residue = 0;
		start_pg++;
	} while (size);

out:
	rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
	/* restore rx clock gate */
	rtw_write32_clr(rtwdev, REG_RCR, BIT_DISGCLK);
}

static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
			     u32 offset, u32 size, u32 *buf)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 start_pg, residue;

	if (sel >= RTW_FW_FIFO_MAX) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "wrong fw fifo sel\n");
		return;
	}
	if (sel == RTW_FW_FIFO_SEL_RSVD_PAGE)
		offset += rtwdev->fifo.rsvd_boundary << TX_PAGE_SIZE_SHIFT;
	residue = offset & (FIFO_PAGE_SIZE - 1);
	start_pg = (offset >> FIFO_PAGE_SIZE_SHIFT) + chip->fw_fifo_addr[sel];

	rtw_fw_read_fifo_page(rtwdev, offset, size, buf, residue, start_pg);
}

static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev,
				   enum rtw_fw_fifo_sel sel,
				   u32 start_addr, u32 size)
{
	switch (sel) {
	case RTW_FW_FIFO_SEL_TX:
	case RTW_FW_FIFO_SEL_RX:
		if ((start_addr + size) > rtwdev->chip->fw_fifo_addr[sel])
			return false;
		fallthrough;
	default:
		return true;
	}
}

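/* Dump @size bytes (4-byte aligned) at @addr of the selected firmware
 * FIFO into @buffer. A minimal, hypothetical caller sketch:
 *
 *	u32 buf[16];
 *
 *	if (!rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RX, 0,
 *			      sizeof(buf), buf))
 *		print_hex_dump_bytes("rxfifo: ", DUMP_PREFIX_OFFSET,
 *				     buf, sizeof(buf));
 */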
int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
		     u32 *buffer)
{
	if (!rtwdev->chip->fw_fifo_addr[0]) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "chip does not support fw fifo dump\n");
		return -ENOTSUPP;
	}

	if (size == 0 || !buffer)
		return -EINVAL;

	if (size & 0x3) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "size is not 4-byte aligned\n");
		return -EINVAL;
	}

	if (!rtw_fw_dump_check_size(rtwdev, fifo_sel, addr, size)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "fw fifo dump size overflow\n");
		return -EINVAL;
	}

	rtw_fw_read_fifo(rtwdev, fifo_sel, addr, size, buffer);

	return 0;
}

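/* Tell the firmware where an offloaded packet sits in the reserved
 * pages and how large it is (tx descriptor included), so the firmware
 * can transmit it on its own, e.g. probe requests during a PNO scan.
 */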
static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
				u8 location)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_UPDATE_PKT);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	UPDATE_PKT_SET_PKT_ID(h2c_pkt, pkt_id);
	UPDATE_PKT_SET_LOCATION(h2c_pkt, location);

	/* include txdesc size */
	size += chip->tx_pkt_desc_sz;
	UPDATE_PKT_SET_SIZE(h2c_pkt, size);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
				 struct cfg80211_ssid *ssid)
{
	u8 loc;
	u16 size;

	loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
	if (!loc) {
		rtw_err(rtwdev, "failed to get probe_req rsvd loc\n");
		return;
	}

	size = rtw_get_rsvd_page_probe_req_size(rtwdev, ssid);
	if (!size) {
		rtw_err(rtwdev, "failed to get probe_req rsvd size\n");
		return;
	}

	__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, size, loc);
}

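/* Start or stop the firmware channel-switch offload used for net-detect
 * scanning in WoWLAN. The cs_option values below look like fixed scan
 * timing parameters; the channel list itself was downloaded earlier as
 * the RSVD_CH_INFO reserved page.
 */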
void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_pno_request *rtw_pno_req = &rtwdev->wow.pno_req;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_CH_SWITCH_LEN;
	u8 loc_ch_info;
	const struct rtw_ch_switch_option cs_option = {
		.dest_ch_en = 1,
		.dest_ch = 1,
		.periodic_option = 2,
		.normal_period = 5,
		.normal_period_sel = 0,
		.normal_cycle = 10,
		.slow_period = 1,
		.slow_period_sel = 1,
	};

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_CH_SWITCH);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

	CH_SWITCH_SET_START(h2c_pkt, enable);
	CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, cs_option.dest_ch_en);
	CH_SWITCH_SET_DEST_CH(h2c_pkt, cs_option.dest_ch);
	CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, cs_option.normal_period);
	CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, cs_option.normal_period_sel);
	CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, cs_option.slow_period);
	CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, cs_option.slow_period_sel);
	CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, cs_option.normal_cycle);
	CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, cs_option.periodic_option);

	CH_SWITCH_SET_CH_NUM(h2c_pkt, rtw_pno_req->channel_cnt);
	CH_SWITCH_SET_INFO_SIZE(h2c_pkt, rtw_pno_req->channel_cnt * 4);

	loc_ch_info = rtw_get_rsvd_page_location(rtwdev, RSVD_CH_INFO);
	CH_SWITCH_SET_INFO_LOC(h2c_pkt, loc_ch_info);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}