1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/* Copyright(c) 2019-2020  Realtek Corporation
3 */
4
5#include "cam.h"
6#include "chan.h"
7#include "coex.h"
8#include "debug.h"
9#include "fw.h"
10#include "mac.h"
11#include "phy.h"
12#include "ps.h"
13#include "reg.h"
14#include "util.h"
15
16static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
17				    struct sk_buff *skb);
18static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
19				 struct rtw89_wait_info *wait, unsigned int cond);
20
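/* Allocate an skb for an H2C command. Headroom is reserved for the chip's
 * H2C descriptor and, when @header is true, also for the H2C command header
 * that rtw89_h2c_pkt_set_hdr() pushes later; the payload area is zeroed.
 */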
21static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
22					      bool header)
23{
24	struct sk_buff *skb;
25	u32 header_len = 0;
26	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;
27
28	if (header)
29		header_len = H2C_HEADER_LEN;
30
31	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
32	if (!skb)
33		return NULL;
34	skb_reserve(skb, header_len + h2c_desc_size);
35	memset(skb->data, 0, len);
36
37	return skb;
38}
39
40struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
41{
42	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
43}
44
45struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
46{
47	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
48}
49
50static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
51{
52	u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);
53
54	return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
55}
56
57#define FWDL_WAIT_CNT 400000
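/* Poll the WCPU firmware-download status until the firmware reports
 * INIT_RDY, or translate the reported failure code into an errno.
 */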
58int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
59{
60	u8 val;
61	int ret;
62
63	ret = read_poll_timeout_atomic(_fw_get_rdy, val,
64				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
65				       1, FWDL_WAIT_CNT, false, rtwdev);
66	if (ret) {
67		switch (val) {
68		case RTW89_FWDL_CHECKSUM_FAIL:
69			rtw89_err(rtwdev, "fw checksum fail\n");
70			return -EINVAL;
71
72		case RTW89_FWDL_SECURITY_FAIL:
73			rtw89_err(rtwdev, "fw security fail\n");
74			return -EINVAL;
75
76		case RTW89_FWDL_CV_NOT_MATCH:
77			rtw89_err(rtwdev, "fw cv not match\n");
78			return -EINVAL;
79
80		default:
81			return -EBUSY;
82		}
83	}
84
85	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
86
87	return 0;
88}
89
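/* Parse a v0 firmware header: walk the section table (and the optional
 * dynamic header), record each section's download address, length and
 * attributes in @info, and check that the accumulated section lengths plus
 * the security signatures match the firmware size.
 */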
90static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
91				  struct rtw89_fw_bin_info *info)
92{
93	const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
94	struct rtw89_fw_hdr_section_info *section_info;
95	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
96	const struct rtw89_fw_hdr_section *section;
97	const u8 *fw_end = fw + len;
98	const u8 *bin;
99	u32 base_hdr_len;
100	u32 mssc_len = 0;
101	u32 i;
102
103	if (!info)
104		return -EINVAL;
105
106	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
107	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
108	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);
109
110	if (info->dynamic_hdr_en) {
111		info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
112		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
113		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
114		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
115			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
116			return -EINVAL;
117		}
118	} else {
119		info->hdr_len = base_hdr_len;
120		info->dynamic_hdr_len = 0;
121	}
122
123	bin = fw + info->hdr_len;
124
125	/* jump to section header */
126	section_info = info->section_info;
127	for (i = 0; i < info->section_num; i++) {
128		section = &fw_hdr->sections[i];
129		section_info->type =
130			le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
131		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
132			section_info->mssc =
133				le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
134			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
135		} else {
136			section_info->mssc = 0;
137		}
138
139		section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
140		if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
141			section_info->len += FWDL_SECTION_CHKSUM_LEN;
142		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
143		section_info->dladdr =
144			le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
145		section_info->addr = bin;
146		bin += section_info->len;
147		section_info++;
148	}
149
150	if (fw_end != bin + mssc_len) {
151		rtw89_err(rtwdev, "[ERR]fw bin size\n");
152		return -EINVAL;
153	}
154
155	return 0;
156}
157
158static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
159				  struct rtw89_fw_bin_info *info)
160{
161	const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
162	struct rtw89_fw_hdr_section_info *section_info;
163	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
164	const struct rtw89_fw_hdr_section_v1 *section;
165	const u8 *fw_end = fw + len;
166	const u8 *bin;
167	u32 base_hdr_len;
168	u32 mssc_len = 0;
169	u32 i;
170
171	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
172	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
173	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
174
175	if (info->dynamic_hdr_en) {
176		info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
177		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
178		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
179		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
180			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
181			return -EINVAL;
182		}
183	} else {
184		info->hdr_len = base_hdr_len;
185		info->dynamic_hdr_len = 0;
186	}
187
188	bin = fw + info->hdr_len;
189
190	/* jump to section header */
191	section_info = info->section_info;
192	for (i = 0; i < info->section_num; i++) {
193		section = &fw_hdr->sections[i];
194		section_info->type =
195			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
196		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
197			section_info->mssc =
198				le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
199			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
200		} else {
201			section_info->mssc = 0;
202		}
203
204		section_info->len =
205			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
206		if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
207			section_info->len += FWDL_SECTION_CHKSUM_LEN;
208		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
209		section_info->dladdr =
210			le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
211		section_info->addr = bin;
212		bin += section_info->len;
213		section_info++;
214	}
215
216	if (fw_end != bin + mssc_len) {
217		rtw89_err(rtwdev, "[ERR]fw bin size\n");
218		return -EINVAL;
219	}
220
221	return 0;
222}
223
224static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
225			       const struct rtw89_fw_suit *fw_suit,
226			       struct rtw89_fw_bin_info *info)
227{
228	const u8 *fw = fw_suit->data;
229	u32 len = fw_suit->size;
230
231	if (!fw || !len) {
232		rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
233		return -ENOENT;
234	}
235
236	switch (fw_suit->hdr_ver) {
237	case 0:
238		return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
239	case 1:
240		return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
241	default:
242		return -ENOENT;
243	}
244}
245
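/* Locate a firmware of @type inside the multi-firmware (MFW) container and
 * point @fw_suit at it. Non-MP entries are matched against the chip cut
 * version; the log-format blob is matched by type alone. An image without
 * the MFW signature is treated as a legacy single firmware, which only
 * provides the normal type.
 */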
246static
247int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
248			struct rtw89_fw_suit *fw_suit, bool nowarn)
249{
250	struct rtw89_fw_info *fw_info = &rtwdev->fw;
251	const struct firmware *firmware = fw_info->req.firmware;
252	const u8 *mfw = firmware->data;
253	u32 mfw_len = firmware->size;
254	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
255	const struct rtw89_mfw_info *mfw_info;
256	int i;
257
258	if (mfw_hdr->sig != RTW89_MFW_SIG) {
259		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware supports the normal type only */
261		if (type != RTW89_FW_NORMAL)
262			return -EINVAL;
263		fw_suit->data = mfw;
264		fw_suit->size = mfw_len;
265		return 0;
266	}
267
268	for (i = 0; i < mfw_hdr->fw_nr; i++) {
269		mfw_info = &mfw_hdr->info[i];
270		if (mfw_info->type == type) {
271			if (mfw_info->cv == rtwdev->hal.cv && !mfw_info->mp)
272				goto found;
273			if (type == RTW89_FW_LOGFMT)
274				goto found;
275		}
276	}
277
278	if (!nowarn)
279		rtw89_err(rtwdev, "no suitable firmware found\n");
280	return -ENOENT;
281
282found:
283	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
284	fw_suit->size = le32_to_cpu(mfw_info->size);
285	return 0;
286}
287
288static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
289{
290	struct rtw89_fw_info *fw_info = &rtwdev->fw;
291	const struct firmware *firmware = fw_info->req.firmware;
292	const struct rtw89_mfw_hdr *mfw_hdr =
293		(const struct rtw89_mfw_hdr *)firmware->data;
294	const struct rtw89_mfw_info *mfw_info;
295	u32 size;
296
297	if (mfw_hdr->sig != RTW89_MFW_SIG) {
298		rtw89_warn(rtwdev, "not mfw format\n");
299		return 0;
300	}
301
302	mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
303	size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
304
305	return size;
306}
307
308static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
309				   struct rtw89_fw_suit *fw_suit,
310				   const struct rtw89_fw_hdr *hdr)
311{
312	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
313	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
314	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
315	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
316	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
317	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
318	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
319	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
320	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
321	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
322	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
323}
324
325static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
326				   struct rtw89_fw_suit *fw_suit,
327				   const struct rtw89_fw_hdr_v1 *hdr)
328{
329	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
330	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
331	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
332	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
333	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
334	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
335	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
336	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
337	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
338	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
339	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
340}
341
342static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
343			       enum rtw89_fw_type type,
344			       struct rtw89_fw_suit *fw_suit)
345{
346	const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
347	const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;
348
349	if (type == RTW89_FW_LOGFMT)
350		return 0;
351
352	fw_suit->type = type;
353	fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);
354
355	switch (fw_suit->hdr_ver) {
356	case 0:
357		rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
358		break;
359	case 1:
360		rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
361		break;
362	default:
363		rtw89_err(rtwdev, "Unknown firmware header version %u\n",
364			  fw_suit->hdr_ver);
365		return -ENOENT;
366	}
367
368	rtw89_info(rtwdev,
369		   "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
370		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
371		   fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);
372
373	return 0;
374}
375
376static
377int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
378			 bool nowarn)
379{
380	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
381	int ret;
382
383	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
384	if (ret)
385		return ret;
386
387	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
388}
389
390static
391int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
392				  const struct rtw89_fw_element_hdr *elm,
393				  const void *data)
394{
395	enum rtw89_fw_type type = (enum rtw89_fw_type)data;
396	struct rtw89_fw_suit *fw_suit;
397
398	fw_suit = rtw89_fw_suit_get(rtwdev, type);
399	fw_suit->data = elm->u.common.contents;
400	fw_suit->size = le32_to_cpu(elm->size);
401
402	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
403}
404
405#define __DEF_FW_FEAT_COND(__cond, __op) \
406static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
407{ \
408	return suit_ver_code __op comp_ver_code; \
409}
410
411__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
412__DEF_FW_FEAT_COND(le, <=); /* less or equal */
413__DEF_FW_FEAT_COND(lt, <); /* less than */
414
415struct __fw_feat_cfg {
416	enum rtw89_core_chip_id chip_id;
417	enum rtw89_fw_feature feature;
418	u32 ver_code;
419	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
420};
421
422#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
423	{ \
424		.chip_id = _chip, \
425		.feature = RTW89_FW_FEATURE_ ## _feat, \
426		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
427		.cond = __fw_feat_cond_ ## _cond, \
428	}
429
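/* Per-chip firmware feature table: an entry sets its feature flag when the
 * loaded firmware version satisfies the comparison against the listed
 * version code.
 */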
430static const struct __fw_feat_cfg fw_feat_tbl[] = {
431	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
432	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
433	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
434	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
435	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
436	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
437	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
438	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
439	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
440	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
441	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
442	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
443	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
444	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
445	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
446	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
447	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
448};
449
450static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
451					 const struct rtw89_chip_info *chip,
452					 u32 ver_code)
453{
454	int i;
455
456	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
457		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];
458
459		if (chip->chip_id != ent->chip_id)
460			continue;
461
462		if (ent->cond(ver_code, ent->ver_code))
463			RTW89_SET_FW_FEATURE(ent->feature, fw);
464	}
465}
466
467static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
468{
469	const struct rtw89_chip_info *chip = rtwdev->chip;
470	const struct rtw89_fw_suit *fw_suit;
471	u32 suit_ver_code;
472
473	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
474	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
475
476	rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
477}
478
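/* Request the firmware file early, trying the newest supported filename
 * format first, and pre-compute the firmware feature flags from its header.
 */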
479const struct firmware *
480rtw89_early_fw_feature_recognize(struct device *device,
481				 const struct rtw89_chip_info *chip,
482				 struct rtw89_fw_info *early_fw,
483				 int *used_fw_format)
484{
485	const struct firmware *firmware;
486	char fw_name[64];
487	int fw_format;
488	u32 ver_code;
489	int ret;
490
491	for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
492		rtw89_fw_get_filename(fw_name, sizeof(fw_name),
493				      chip->fw_basename, fw_format);
494
495		ret = request_firmware(&firmware, fw_name, device);
496		if (!ret) {
497			dev_info(device, "loaded firmware %s\n", fw_name);
498			*used_fw_format = fw_format;
499			break;
500		}
501	}
502
503	if (ret) {
504		dev_err(device, "failed to early request firmware: %d\n", ret);
505		return NULL;
506	}
507
508	ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
509
510	if (!ver_code)
511		goto out;
512
513	rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);
514
515out:
516	return firmware;
517}
518
519int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
520{
521	const struct rtw89_chip_info *chip = rtwdev->chip;
522	int ret;
523
524	if (chip->try_ce_fw) {
525		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
526		if (!ret)
527			goto normal_done;
528	}
529
530	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
531	if (ret)
532		return ret;
533
534normal_done:
	/* It still works if the WoWLAN firmware doesn't exist. */
536	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
537
	/* It still works if the log format file doesn't exist. */
539	__rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);
540
541	rtw89_fw_recognize_features(rtwdev);
542
543	rtw89_coex_recognize_ver(rtwdev);
544
545	return 0;
546}
547
548static
549int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
550				 const struct rtw89_fw_element_hdr *elm,
551				 const void *data)
552{
553	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
554	struct rtw89_phy_table *tbl;
555	struct rtw89_reg2_def *regs;
556	enum rtw89_rf_path rf_path;
557	u32 n_regs, i;
558	u8 idx;
559
560	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
561	if (!tbl)
562		return -ENOMEM;
563
564	switch (le32_to_cpu(elm->id)) {
565	case RTW89_FW_ELEMENT_ID_BB_REG:
566		elm_info->bb_tbl = tbl;
567		break;
568	case RTW89_FW_ELEMENT_ID_BB_GAIN:
569		elm_info->bb_gain = tbl;
570		break;
571	case RTW89_FW_ELEMENT_ID_RADIO_A:
572	case RTW89_FW_ELEMENT_ID_RADIO_B:
573	case RTW89_FW_ELEMENT_ID_RADIO_C:
574	case RTW89_FW_ELEMENT_ID_RADIO_D:
575		rf_path = (enum rtw89_rf_path)data;
576		idx = elm->u.reg2.idx;
577
578		elm_info->rf_radio[idx] = tbl;
579		tbl->rf_path = rf_path;
580		tbl->config = rtw89_phy_config_rf_reg_v1;
581		break;
582	case RTW89_FW_ELEMENT_ID_RF_NCTL:
583		elm_info->rf_nctl = tbl;
584		break;
585	default:
586		kfree(tbl);
587		return -ENOENT;
588	}
589
590	n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
591	regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
592	if (!regs)
593		goto out;
594
595	for (i = 0; i < n_regs; i++) {
596		regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
597		regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
598	}
599
600	tbl->n_regs = n_regs;
601	tbl->regs = regs;
602
603	return 0;
604
605out:
606	kfree(tbl);
607	return -ENOMEM;
608}
609
610struct rtw89_fw_element_handler {
611	int (*fn)(struct rtw89_dev *rtwdev,
612		  const struct rtw89_fw_element_hdr *elm, const void *data);
613	const void *data;
614	const char *name;
615};
616
617static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
618	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
619					(const void *)RTW89_FW_BBMCU0, NULL},
620	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
621					(const void *)RTW89_FW_BBMCU1, NULL},
622	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, NULL, "BB"},
623	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, NULL, NULL},
624	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
625					 (const void *)RF_PATH_A, "radio A"},
626	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
627					 (const void *)RF_PATH_B, NULL},
628	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
629					 (const void *)RF_PATH_C, NULL},
630	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
631					 (const void *)RF_PATH_D, NULL},
632	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, NULL, "NCTL"},
633};
634
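/* Walk the element blocks appended after the MFW container and dispatch each
 * one to its handler (additional firmware suits, PHY/RF register tables).
 * Fail if any element the chip marks as required was not recognized.
 */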
635int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
636{
637	struct rtw89_fw_info *fw_info = &rtwdev->fw;
638	const struct firmware *firmware = fw_info->req.firmware;
639	const struct rtw89_chip_info *chip = rtwdev->chip;
640	u32 unrecognized_elements = chip->needed_fw_elms;
641	const struct rtw89_fw_element_handler *handler;
642	const struct rtw89_fw_element_hdr *hdr;
643	u32 elm_size;
644	u32 elem_id;
645	u32 offset;
646	int ret;
647
648	BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);
649
650	offset = rtw89_mfw_get_size(rtwdev);
651	offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
652	if (offset == 0)
653		return -EINVAL;
654
655	while (offset + sizeof(*hdr) < firmware->size) {
656		hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);
657
658		elm_size = le32_to_cpu(hdr->size);
659		if (offset + elm_size >= firmware->size) {
			rtw89_warn(rtwdev, "firmware element size exceeds firmware file size\n");
661			break;
662		}
663
664		elem_id = le32_to_cpu(hdr->id);
665		if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
666			goto next;
667
668		handler = &__fw_element_handlers[elem_id];
669		if (!handler->fn)
670			goto next;
671
672		ret = handler->fn(rtwdev, hdr, handler->data);
673		if (ret)
674			return ret;
675
676		if (handler->name)
677			rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
678				   handler->name, hdr->ver);
679
680		unrecognized_elements &= ~BIT(elem_id);
681next:
682		offset += sizeof(*hdr) + elm_size;
683		offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
684	}
685
686	if (unrecognized_elements) {
687		rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
688			  unrecognized_elements);
689		return -ENOENT;
690	}
691
692	return 0;
693}
694
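/* Push the 8-byte H2C command header in front of the payload. A receive-ack
 * is requested at least once every four sequence numbers.
 */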
695void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
696			   u8 type, u8 cat, u8 class, u8 func,
697			   bool rack, bool dack, u32 len)
698{
699	struct fwcmd_hdr *hdr;
700
701	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
702
703	if (!(rtwdev->fw.h2c_seq % 4))
704		rack = true;
705	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
706				FIELD_PREP(H2C_HDR_CAT, cat) |
707				FIELD_PREP(H2C_HDR_CLASS, class) |
708				FIELD_PREP(H2C_HDR_FUNC, func) |
709				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
710
711	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
712					   len + H2C_HEADER_LEN) |
713				(rack ? H2C_HDR_REC_ACK : 0) |
714				(dack ? H2C_HDR_DONE_ACK : 0));
715
716	rtwdev->fw.h2c_seq++;
717}
718
719static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
720				       struct sk_buff *skb,
721				       u8 type, u8 cat, u8 class, u8 func,
722				       u32 len)
723{
724	struct fwcmd_hdr *hdr;
725
726	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
727
728	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
729				FIELD_PREP(H2C_HDR_CAT, cat) |
730				FIELD_PREP(H2C_HDR_CLASS, class) |
731				FIELD_PREP(H2C_HDR_FUNC, func) |
732				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
733
734	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
735					   len + H2C_HEADER_LEN));
736}
737
738static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
739{
740	struct sk_buff *skb;
741	u32 ret = 0;
742
743	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
744	if (!skb) {
745		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
746		return -ENOMEM;
747	}
748
749	skb_put_data(skb, fw, len);
750	SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
751	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
752				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
753				   H2C_FUNC_MAC_FWHDR_DL, len);
754
755	ret = rtw89_h2c_tx(rtwdev, skb, false);
756	if (ret) {
757		rtw89_err(rtwdev, "failed to send h2c\n");
758		ret = -1;
759		goto fail;
760	}
761
762	return 0;
763fail:
764	dev_kfree_skb_any(skb);
765
766	return ret;
767}
768
769static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
770{
771	u8 val;
772	int ret;
773
774	ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
775	if (ret) {
776		rtw89_err(rtwdev, "[ERR]FW header download\n");
777		return ret;
778	}
779
780	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY,
781				       1, FWDL_WAIT_CNT, false,
782				       rtwdev, R_AX_WCPU_FW_CTRL);
783	if (ret) {
784		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
785		return ret;
786	}
787
788	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
789	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
790
791	return 0;
792}
793
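/* Download one firmware section to the WCPU, splitting it into H2C packets
 * of at most FWDL_SECTION_PER_PKT_LEN bytes.
 */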
794static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
795				    struct rtw89_fw_hdr_section_info *info)
796{
797	struct sk_buff *skb;
798	const u8 *section = info->addr;
799	u32 residue_len = info->len;
800	u32 pkt_len;
801	int ret;
802
803	while (residue_len) {
804		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
805			pkt_len = FWDL_SECTION_PER_PKT_LEN;
806		else
807			pkt_len = residue_len;
808
809		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
810		if (!skb) {
811			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
812			return -ENOMEM;
813		}
814		skb_put_data(skb, section, pkt_len);
815
816		ret = rtw89_h2c_tx(rtwdev, skb, true);
817		if (ret) {
818			rtw89_err(rtwdev, "failed to send h2c\n");
819			ret = -1;
820			goto fail;
821		}
822
823		section += pkt_len;
824		residue_len -= pkt_len;
825	}
826
827	return 0;
828fail:
829	dev_kfree_skb_any(skb);
830
831	return ret;
832}
833
834static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw,
835				  struct rtw89_fw_bin_info *info)
836{
837	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
838	u8 section_num = info->section_num;
839	int ret;
840
841	while (section_num--) {
842		ret = __rtw89_fw_download_main(rtwdev, section_info);
843		if (ret)
844			return ret;
845		section_info++;
846	}
847
848	mdelay(5);
849
850	ret = rtw89_fw_check_rdy(rtwdev);
851	if (ret) {
		rtw89_warn(rtwdev, "download firmware failed\n");
853		return ret;
854	}
855
856	return 0;
857}
858
859static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
860{
861	u32 val32;
862	u16 index;
863
864	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
865		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
866		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
867	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);
868
869	for (index = 0; index < 15; index++) {
870		val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
871		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
872		fsleep(10);
873	}
874}
875
876static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
877{
878	u32 val32;
879	u16 val16;
880
881	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
882	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
883
884	val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
885	rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);
886
887	rtw89_fw_prog_cnt_dump(rtwdev);
888}
889
890int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
891{
892	struct rtw89_fw_info *fw_info = &rtwdev->fw;
893	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
894	struct rtw89_fw_bin_info info;
895	u8 val;
896	int ret;
897
898	rtw89_mac_disable_cpu(rtwdev);
899	ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
900	if (ret)
901		return ret;
902
903	ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
904	if (ret) {
		rtw89_err(rtwdev, "failed to parse fw header\n");
906		goto fwdl_err;
907	}
908
909	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY,
910				       1, FWDL_WAIT_CNT, false,
911				       rtwdev, R_AX_WCPU_FW_CTRL);
912	if (ret) {
913		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
914		goto fwdl_err;
915	}
916
917	ret = rtw89_fw_download_hdr(rtwdev, fw_suit->data, info.hdr_len -
918							   info.dynamic_hdr_len);
919	if (ret) {
920		ret = -EBUSY;
921		goto fwdl_err;
922	}
923
924	ret = rtw89_fw_download_main(rtwdev, fw_suit->data, &info);
925	if (ret) {
926		ret = -EBUSY;
927		goto fwdl_err;
928	}
929
930	fw_info->h2c_seq = 0;
931	fw_info->rec_seq = 0;
932	fw_info->h2c_counter = 0;
933	fw_info->c2h_counter = 0;
934	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
935	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;
936
937	return ret;
938
939fwdl_err:
940	rtw89_fw_dl_fail_dump(rtwdev);
941	return ret;
942}
943
944int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
945{
946	struct rtw89_fw_info *fw = &rtwdev->fw;
947
948	wait_for_completion(&fw->req.completion);
949	if (!fw->req.firmware)
950		return -EINVAL;
951
952	return 0;
953}
954
955static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
956				   struct rtw89_fw_req_info *req,
957				   const char *fw_name, bool nowarn)
958{
959	int ret;
960
961	if (req->firmware) {
962		rtw89_debug(rtwdev, RTW89_DBG_FW,
963			    "full firmware has been early requested\n");
964		complete_all(&req->completion);
965		return 0;
966	}
967
968	if (nowarn)
969		ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
970	else
971		ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);
972
973	complete_all(&req->completion);
974
975	return ret;
976}
977
978void rtw89_load_firmware_work(struct work_struct *work)
979{
980	struct rtw89_dev *rtwdev =
981		container_of(work, struct rtw89_dev, load_firmware_work);
982	const struct rtw89_chip_info *chip = rtwdev->chip;
983	char fw_name[64];
984
985	rtw89_fw_get_filename(fw_name, sizeof(fw_name),
986			      chip->fw_basename, rtwdev->fw.fw_format);
987
988	rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
989}
990
991static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
992{
993	if (!tbl)
994		return;
995
996	kfree(tbl->regs);
997	kfree(tbl);
998}
999
1000static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
1001{
1002	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1003	int i;
1004
1005	rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
1006	rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
1007	for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
1008		rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
1009	rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);
1010}
1011
1012void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
1013{
1014	struct rtw89_fw_info *fw = &rtwdev->fw;
1015
1016	cancel_work_sync(&rtwdev->load_firmware_work);
1017
1018	if (fw->req.firmware) {
1019		release_firmware(fw->req.firmware);
1020
1021		/* assign NULL back in case rtw89_free_ieee80211_hw()
		 * tries to release the same one again.
1023		 */
1024		fw->req.firmware = NULL;
1025	}
1026
1027	kfree(fw->log.fmts);
1028	rtw89_unload_firmware_elements(rtwdev);
1029}
1030
1031static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
1032{
1033	struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
1034	u32 i;
1035
1036	if (fmt_id > fw_log->last_fmt_id)
1037		return 0;
1038
1039	for (i = 0; i < fw_log->fmt_count; i++) {
1040		if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id)
1041			return i;
1042	}
1043	return 0;
1044}
1045
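/* Build the firmware log format dictionary from the log-format suit: keep a
 * pointer to the format ID array and collect the NUL-terminated format
 * strings that follow it, so C2H log messages can be expanded by format ID.
 */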
1046static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
1047{
1048	struct rtw89_fw_log *log = &rtwdev->fw.log;
1049	const struct rtw89_fw_logsuit_hdr *suit_hdr;
1050	struct rtw89_fw_suit *suit = &log->suit;
1051	const void *fmts_ptr, *fmts_end_ptr;
1052	u32 fmt_count;
1053	int i;
1054
1055	suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
1056	fmt_count = le32_to_cpu(suit_hdr->count);
1057	log->fmt_ids = suit_hdr->ids;
1058	fmts_ptr = &suit_hdr->ids[fmt_count];
1059	fmts_end_ptr = suit->data + suit->size;
1060	log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
1061	if (!log->fmts)
1062		return -ENOMEM;
1063
1064	for (i = 0; i < fmt_count; i++) {
1065		fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
1066		if (!fmts_ptr)
1067			break;
1068
1069		(*log->fmts)[i] = fmts_ptr;
1070		log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
1071		log->fmt_count++;
1072		fmts_ptr += strlen(fmts_ptr);
1073	}
1074
1075	return 0;
1076}
1077
1078int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
1079{
1080	struct rtw89_fw_log *log = &rtwdev->fw.log;
1081	struct rtw89_fw_suit *suit = &log->suit;
1082
1083	if (!suit || !suit->data) {
1084		rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
1085		return -EINVAL;
1086	}
1087	if (log->fmts)
1088		return 0;
1089
1090	return rtw89_fw_log_create_fmts_dict(rtwdev);
1091}
1092
1093static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
1094				   const struct rtw89_fw_c2h_log_fmt *log_fmt,
1095				   u32 fmt_idx, u8 para_int, bool raw_data)
1096{
1097	const char *(*fmts)[] = rtwdev->fw.log.fmts;
1098	char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
1099	u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
1100	int i;
1101
1102	if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
		rtw89_warn(rtwdev, "C2H log: unexpected argument count %d\n",
1104			   log_fmt->argc);
1105		return;
1106	}
1107
1108	if (para_int)
1109		for (i = 0 ; i < log_fmt->argc; i++)
1110			args[i] = le32_to_cpu(log_fmt->u.argv[i]);
1111
1112	if (raw_data) {
1113		if (para_int)
1114			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
1115				 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
1116				 para_int, log_fmt->argc, (int)sizeof(args), args);
1117		else
1118			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
1119				 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
1120				 para_int, log_fmt->argc, log_fmt->u.raw);
1121	} else {
1122		snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
1123			 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
1124			 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
1125			 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
1126			 args[0xf]);
1127	}
1128
1129	rtw89_info(rtwdev, "C2H log: %s", str_buf);
1130}
1131
1132void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
1133{
1134	const struct rtw89_fw_c2h_log_fmt *log_fmt;
1135	u8 para_int;
1136	u32 fmt_idx;
1137
1138	if (len < RTW89_C2H_HEADER_LEN) {
		rtw89_err(rtwdev, "c2h log length is too short\n");
1140		return;
1141	}
1142
1143	buf += RTW89_C2H_HEADER_LEN;
1144	len -= RTW89_C2H_HEADER_LEN;
1145	log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;
1146
1147	if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
1148		goto plain_log;
1149
1150	if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
1151		goto plain_log;
1152
1153	if (!rtwdev->fw.log.fmts)
1154		return;
1155
1156	para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
1157	fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));
1158
1159	if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
1160		rtw89_info(rtwdev, "C2H log: %s%s",
1161			   (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
1162	else if (fmt_idx != 0 && para_int)
1163		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
1164	else
1165		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
1166	return;
1167
1168plain_log:
1169	rtw89_info(rtwdev, "C2H log: %.*s", len, buf);
}
1172
1173#define H2C_CAM_LEN 60
1174int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
1175		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
1176{
1177	struct sk_buff *skb;
1178	int ret;
1179
1180	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
1181	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cam\n");
1183		return -ENOMEM;
1184	}
1185	skb_put(skb, H2C_CAM_LEN);
1186	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
1187	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);
1188
1189	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1190			      H2C_CAT_MAC,
1191			      H2C_CL_MAC_ADDR_CAM_UPDATE,
1192			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
1193			      H2C_CAM_LEN);
1194
1195	ret = rtw89_h2c_tx(rtwdev, skb, false);
1196	if (ret) {
1197		rtw89_err(rtwdev, "failed to send h2c\n");
1198		goto fail;
1199	}
1200
1201	return 0;
1202fail:
1203	dev_kfree_skb_any(skb);
1204
1205	return ret;
1206}
1207
1208#define H2C_DCTL_SEC_CAM_LEN 68
1209int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
1210				 struct rtw89_vif *rtwvif,
1211				 struct rtw89_sta *rtwsta)
1212{
1213	struct sk_buff *skb;
1214	int ret;
1215
1216	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
1217	if (!skb) {
1218		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
1219		return -ENOMEM;
1220	}
1221	skb_put(skb, H2C_DCTL_SEC_CAM_LEN);
1222
1223	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);
1224
1225	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1226			      H2C_CAT_MAC,
1227			      H2C_CL_MAC_FR_EXCHG,
1228			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
1229			      H2C_DCTL_SEC_CAM_LEN);
1230
1231	ret = rtw89_h2c_tx(rtwdev, skb, false);
1232	if (ret) {
1233		rtw89_err(rtwdev, "failed to send h2c\n");
1234		goto fail;
1235	}
1236
1237	return 0;
1238fail:
1239	dev_kfree_skb_any(skb);
1240
1241	return ret;
1242}
1243EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
1244
1245#define H2C_BA_CAM_LEN 8
1246int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
1247			bool valid, struct ieee80211_ampdu_params *params)
1248{
1249	const struct rtw89_chip_info *chip = rtwdev->chip;
1250	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
1251	u8 macid = rtwsta->mac_id;
1252	struct sk_buff *skb;
1253	u8 entry_idx;
1254	int ret;
1255
1256	ret = valid ?
1257	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
1258	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
1259	if (ret) {
1260		/* it still works even if we don't have static BA CAM, because
1261		 * hardware can create dynamic BA CAM automatically.
1262		 */
1263		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
1264			    "failed to %s entry tid=%d for h2c ba cam\n",
1265			    valid ? "alloc" : "free", params->tid);
1266		return 0;
1267	}
1268
1269	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
1270	if (!skb) {
1271		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
1272		return -ENOMEM;
1273	}
1274	skb_put(skb, H2C_BA_CAM_LEN);
1275	SET_BA_CAM_MACID(skb->data, macid);
1276	if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
1277		SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
1278	else
1279		SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
1280	if (!valid)
1281		goto end;
1282	SET_BA_CAM_VALID(skb->data, valid);
1283	SET_BA_CAM_TID(skb->data, params->tid);
1284	if (params->buf_size > 64)
1285		SET_BA_CAM_BMAP_SIZE(skb->data, 4);
1286	else
1287		SET_BA_CAM_BMAP_SIZE(skb->data, 0);
1288	/* If init req is set, hw will set the ssn */
1289	SET_BA_CAM_INIT_REQ(skb->data, 1);
1290	SET_BA_CAM_SSN(skb->data, params->ssn);
1291
1292	if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
1293		SET_BA_CAM_STD_EN(skb->data, 1);
1294		SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
1295	}
1296
1297end:
1298	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1299			      H2C_CAT_MAC,
1300			      H2C_CL_BA_CAM,
1301			      H2C_FUNC_MAC_BA_CAM, 0, 1,
1302			      H2C_BA_CAM_LEN);
1303
1304	ret = rtw89_h2c_tx(rtwdev, skb, false);
1305	if (ret) {
1306		rtw89_err(rtwdev, "failed to send h2c\n");
1307		goto fail;
1308	}
1309
1310	return 0;
1311fail:
1312	dev_kfree_skb_any(skb);
1313
1314	return ret;
1315}
1316
1317static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
1318					   u8 entry_idx, u8 uid)
1319{
1320	struct sk_buff *skb;
1321	int ret;
1322
1323	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
1324	if (!skb) {
1325		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
1326		return -ENOMEM;
1327	}
1328	skb_put(skb, H2C_BA_CAM_LEN);
1329
1330	SET_BA_CAM_VALID(skb->data, 1);
1331	SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
1332	SET_BA_CAM_UID(skb->data, uid);
1333	SET_BA_CAM_BAND(skb->data, 0);
1334	SET_BA_CAM_STD_EN(skb->data, 0);
1335
1336	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1337			      H2C_CAT_MAC,
1338			      H2C_CL_BA_CAM,
1339			      H2C_FUNC_MAC_BA_CAM, 0, 1,
1340			      H2C_BA_CAM_LEN);
1341
1342	ret = rtw89_h2c_tx(rtwdev, skb, false);
1343	if (ret) {
1344		rtw89_err(rtwdev, "failed to send h2c\n");
1345		goto fail;
1346	}
1347
1348	return 0;
1349fail:
1350	dev_kfree_skb_any(skb);
1351
1352	return ret;
1353}
1354
1355void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
1356{
1357	const struct rtw89_chip_info *chip = rtwdev->chip;
1358	u8 entry_idx = chip->bacam_num;
1359	u8 uid = 0;
1360	int i;
1361
1362	for (i = 0; i < chip->bacam_dynamic_num; i++) {
1363		rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
1364		entry_idx++;
1365		uid++;
1366	}
1367}
1368
1369#define H2C_LOG_CFG_LEN 12
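/* Enable or disable firmware logging over C2H. When enabled, the init, task,
 * power-save and error components are logged at the loud level.
 */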
1370int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
1371{
1372	struct sk_buff *skb;
1373	u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
1374			    BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
1375	int ret;
1376
1377	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
1378	if (!skb) {
1379		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
1380		return -ENOMEM;
1381	}
1382
1383	skb_put(skb, H2C_LOG_CFG_LEN);
1384	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD);
1385	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
1386	SET_LOG_CFG_COMP(skb->data, comp);
1387	SET_LOG_CFG_COMP_EXT(skb->data, 0);
1388
1389	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1390			      H2C_CAT_MAC,
1391			      H2C_CL_FW_INFO,
1392			      H2C_FUNC_LOG_CFG, 0, 0,
1393			      H2C_LOG_CFG_LEN);
1394
1395	ret = rtw89_h2c_tx(rtwdev, skb, false);
1396	if (ret) {
1397		rtw89_err(rtwdev, "failed to send h2c\n");
1398		goto fail;
1399	}
1400
1401	return 0;
1402fail:
1403	dev_kfree_skb_any(skb);
1404
1405	return ret;
1406}
1407
1408static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
1409					struct rtw89_vif *rtwvif,
1410					enum rtw89_fw_pkt_ofld_type type,
1411					u8 *id)
1412{
1413	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
1414	struct rtw89_pktofld_info *info;
1415	struct sk_buff *skb;
1416	int ret;
1417
1418	info = kzalloc(sizeof(*info), GFP_KERNEL);
1419	if (!info)
1420		return -ENOMEM;
1421
1422	switch (type) {
1423	case RTW89_PKT_OFLD_TYPE_PS_POLL:
1424		skb = ieee80211_pspoll_get(rtwdev->hw, vif);
1425		break;
1426	case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
1427		skb = ieee80211_proberesp_get(rtwdev->hw, vif);
1428		break;
1429	case RTW89_PKT_OFLD_TYPE_NULL_DATA:
1430		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
1431		break;
1432	case RTW89_PKT_OFLD_TYPE_QOS_NULL:
1433		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
1434		break;
1435	default:
1436		goto err;
1437	}
1438
1439	if (!skb)
1440		goto err;
1441
1442	ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
1443	kfree_skb(skb);
1444
1445	if (ret)
1446		goto err;
1447
1448	list_add_tail(&info->list, &rtwvif->general_pkt_list);
1449	*id = info->id;
1450	return 0;
1451
1452err:
1453	kfree(info);
1454	return -ENOMEM;
1455}
1456
1457void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
1458					   struct rtw89_vif *rtwvif, bool notify_fw)
1459{
1460	struct list_head *pkt_list = &rtwvif->general_pkt_list;
1461	struct rtw89_pktofld_info *info, *tmp;
1462
1463	list_for_each_entry_safe(info, tmp, pkt_list, list) {
1464		if (notify_fw)
1465			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
1466		else
1467			rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id);
1468		list_del(&info->list);
1469		kfree(info);
1470	}
1471}
1472
1473void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
1474{
1475	struct rtw89_vif *rtwvif;
1476
1477	rtw89_for_each_rtwvif(rtwdev, rtwvif)
1478		rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
1479}
1480
1481#define H2C_GENERAL_PKT_LEN 6
1482#define H2C_GENERAL_PKT_ID_UND 0xff
1483int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
1484			     struct rtw89_vif *rtwvif, u8 macid)
1485{
1486	u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
1487	u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
1488	u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
1489	struct sk_buff *skb;
1490	int ret;
1491
1492	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1493				     RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
1494	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1495				     RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
1496	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1497				     RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);
1498
1499	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
1500	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c general packet\n");
1502		return -ENOMEM;
1503	}
1504	skb_put(skb, H2C_GENERAL_PKT_LEN);
1505	SET_GENERAL_PKT_MACID(skb->data, macid);
1506	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
1507	SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
1508	SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
1509	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
1510	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
1511
1512	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1513			      H2C_CAT_MAC,
1514			      H2C_CL_FW_INFO,
1515			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
1516			      H2C_GENERAL_PKT_LEN);
1517
1518	ret = rtw89_h2c_tx(rtwdev, skb, false);
1519	if (ret) {
1520		rtw89_err(rtwdev, "failed to send h2c\n");
1521		goto fail;
1522	}
1523
1524	return 0;
1525fail:
1526	dev_kfree_skb_any(skb);
1527
1528	return ret;
1529}
1530
1531#define H2C_LPS_PARM_LEN 8
1532int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
1533			  struct rtw89_lps_parm *lps_param)
1534{
1535	struct sk_buff *skb;
1536	int ret;
1537
1538	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
1539	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c lps parm\n");
1541		return -ENOMEM;
1542	}
1543	skb_put(skb, H2C_LPS_PARM_LEN);
1544
1545	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
1546	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
1547	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
1548	SET_LPS_PARM_RLBM(skb->data, 1);
1549	SET_LPS_PARM_SMARTPS(skb->data, 1);
1550	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
1551	SET_LPS_PARM_VOUAPSD(skb->data, 0);
1552	SET_LPS_PARM_VIUAPSD(skb->data, 0);
1553	SET_LPS_PARM_BEUAPSD(skb->data, 0);
1554	SET_LPS_PARM_BKUAPSD(skb->data, 0);
1555
1556	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1557			      H2C_CAT_MAC,
1558			      H2C_CL_MAC_PS,
1559			      H2C_FUNC_MAC_LPS_PARM, 0, 1,
1560			      H2C_LPS_PARM_LEN);
1561
1562	ret = rtw89_h2c_tx(rtwdev, skb, false);
1563	if (ret) {
1564		rtw89_err(rtwdev, "failed to send h2c\n");
1565		goto fail;
1566	}
1567
1568	return 0;
1569fail:
1570	dev_kfree_skb_any(skb);
1571
1572	return ret;
1573}
1574
1575#define H2C_P2P_ACT_LEN 20
1576int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
1577			 struct ieee80211_p2p_noa_desc *desc,
1578			 u8 act, u8 noa_id)
1579{
1580	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
1581	bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
1582	u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
1583	struct sk_buff *skb;
1584	u8 *cmd;
1585	int ret;
1586
1587	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
1588	if (!skb) {
1589		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
1590		return -ENOMEM;
1591	}
1592	skb_put(skb, H2C_P2P_ACT_LEN);
1593	cmd = skb->data;
1594
1595	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
1596	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
1597	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
1598	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
1599	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
1600	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
1601	if (desc) {
1602		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
1603		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
1604		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
1605		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
1606		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
1607	}
1608
1609	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1610			      H2C_CAT_MAC, H2C_CL_MAC_PS,
1611			      H2C_FUNC_P2P_ACT, 0, 0,
1612			      H2C_P2P_ACT_LEN);
1613
1614	ret = rtw89_h2c_tx(rtwdev, skb, false);
1615	if (ret) {
1616		rtw89_err(rtwdev, "failed to send h2c\n");
1617		goto fail;
1618	}
1619
1620	return 0;
1621fail:
1622	dev_kfree_skb_any(skb);
1623
1624	return ret;
1625}
1626
1627static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
1628				       struct sk_buff *skb)
1629{
1630	const struct rtw89_chip_info *chip = rtwdev->chip;
1631	struct rtw89_hal *hal = &rtwdev->hal;
1632	u8 ntx_path;
1633	u8 map_b;
1634
1635	if (chip->rf_path_num == 1) {
1636		ntx_path = RF_A;
1637		map_b = 0;
1638	} else {
1639		ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
1640		map_b = hal->antenna_tx == RF_AB ? 1 : 0;
1641	}
1642
1643	SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
1644	SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
1645	SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
1646	SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
1647	SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
1648}
1649
1650#define H2C_CMC_TBL_LEN 68
1651int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
1652				  struct rtw89_vif *rtwvif)
1653{
1654	const struct rtw89_chip_info *chip = rtwdev->chip;
1655	struct sk_buff *skb;
1656	u8 macid = rtwvif->mac_id;
1657	int ret;
1658
1659	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1660	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac tbl\n");
1662		return -ENOMEM;
1663	}
1664	skb_put(skb, H2C_CMC_TBL_LEN);
1665	SET_CTRL_INFO_MACID(skb->data, macid);
1666	SET_CTRL_INFO_OPERATION(skb->data, 1);
1667	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
1668		SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
1669		__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
1670		SET_CMC_TBL_ANTSEL_A(skb->data, 0);
1671		SET_CMC_TBL_ANTSEL_B(skb->data, 0);
1672		SET_CMC_TBL_ANTSEL_C(skb->data, 0);
1673		SET_CMC_TBL_ANTSEL_D(skb->data, 0);
1674	}
1675	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
1676	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
1677	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
1678		SET_CMC_TBL_DATA_DCM(skb->data, 0);
1679
1680	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1681			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1682			      chip->h2c_cctl_func_id, 0, 1,
1683			      H2C_CMC_TBL_LEN);
1684
1685	ret = rtw89_h2c_tx(rtwdev, skb, false);
1686	if (ret) {
1687		rtw89_err(rtwdev, "failed to send h2c\n");
1688		goto fail;
1689	}
1690
1691	return 0;
1692fail:
1693	dev_kfree_skb_any(skb);
1694
1695	return ret;
1696}
1697
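/* Derive the nominal packet padding per bandwidth from the station's HE PPE
 * thresholds, or from the nominal-padding capability field when no PPE
 * thresholds are present.
 */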
1698static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
1699				     struct ieee80211_sta *sta, u8 *pads)
1700{
1701	bool ppe_th;
1702	u8 ppe16, ppe8;
1703	u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
1704	u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
1705	u8 ru_bitmap;
1706	u8 n, idx, sh;
1707	u16 ppe;
1708	int i;
1709
1710	if (!sta->deflink.he_cap.has_he)
1711		return;
1712
1713	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
1714			   sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
1715	if (!ppe_th) {
1716		u8 pad;
1717
1718		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
1719				sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);
1720
1721		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
1722			pads[i] = pad;
1723
1724		return;
1725	}
1726
1727	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
1728	n = hweight8(ru_bitmap);
1729	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
1730
1731	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
1732		if (!(ru_bitmap & BIT(i))) {
1733			pads[i] = 1;
1734			continue;
1735		}
1736
1737		idx = n >> 3;
1738		sh = n & 7;
1739		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
1740
1741		ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
1742		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
1743		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
1744		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
1745
1746		if (ppe16 != 7 && ppe8 == 7)
1747			pads[i] = 2;
1748		else if (ppe8 != 7)
1749			pads[i] = 1;
1750		else
1751			pads[i] = 0;
1752	}
1753}
1754
1755int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
1756				struct ieee80211_vif *vif,
1757				struct ieee80211_sta *sta)
1758{
1759	const struct rtw89_chip_info *chip = rtwdev->chip;
1760	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
1761	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
1762	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
1763						       rtwvif->sub_entity_idx);
1764	struct sk_buff *skb;
1765	u8 pads[RTW89_PPE_BW_NUM];
1766	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
1767	u16 lowest_rate;
1768	int ret;
1769
1770	memset(pads, 0, sizeof(pads));
1771	if (sta)
1772		__get_sta_he_pkt_padding(rtwdev, sta, pads);
1773
1774	if (vif->p2p)
1775		lowest_rate = RTW89_HW_RATE_OFDM6;
1776	else if (chan->band_type == RTW89_BAND_2G)
1777		lowest_rate = RTW89_HW_RATE_CCK1;
1778	else
1779		lowest_rate = RTW89_HW_RATE_OFDM6;
1780
1781	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1782	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac tbl\n");
1784		return -ENOMEM;
1785	}
1786	skb_put(skb, H2C_CMC_TBL_LEN);
1787	SET_CTRL_INFO_MACID(skb->data, mac_id);
1788	SET_CTRL_INFO_OPERATION(skb->data, 1);
1789	SET_CMC_TBL_DISRTSFB(skb->data, 1);
1790	SET_CMC_TBL_DISDATAFB(skb->data, 1);
1791	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
1792	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
1793	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
1794	if (vif->type == NL80211_IFTYPE_STATION)
1795		SET_CMC_TBL_ULDL(skb->data, 1);
1796	else
1797		SET_CMC_TBL_ULDL(skb->data, 0);
1798	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
1799	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
1800		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
1801		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
1802		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
1803		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
1804	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
1805		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
1806		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
1807		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
1808		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
1809	}
1810	if (sta)
1811		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
1812						  sta->deflink.he_cap.has_he);
1813	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
1814		SET_CMC_TBL_DATA_DCM(skb->data, 0);
1815
1816	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1817			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1818			      chip->h2c_cctl_func_id, 0, 1,
1819			      H2C_CMC_TBL_LEN);
1820
1821	ret = rtw89_h2c_tx(rtwdev, skb, false);
1822	if (ret) {
1823		rtw89_err(rtwdev, "failed to send h2c\n");
1824		goto fail;
1825	}
1826
1827	return 0;
1828fail:
1829	dev_kfree_skb_any(skb);
1830
1831	return ret;
1832}
1833
1834int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
1835				 struct rtw89_sta *rtwsta)
1836{
1837	const struct rtw89_chip_info *chip = rtwdev->chip;
1838	struct sk_buff *skb;
1839	int ret;
1840
1841	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1842	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac tbl\n");
1844		return -ENOMEM;
1845	}
1846	skb_put(skb, H2C_CMC_TBL_LEN);
1847	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
1848	SET_CTRL_INFO_OPERATION(skb->data, 1);
1849	if (rtwsta->cctl_tx_time) {
1850		SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
1851		SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
1852	}
1853	if (rtwsta->cctl_tx_retry_limit) {
1854		SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
1855		SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
1856	}
1857
1858	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1859			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1860			      chip->h2c_cctl_func_id, 0, 1,
1861			      H2C_CMC_TBL_LEN);
1862
1863	ret = rtw89_h2c_tx(rtwdev, skb, false);
1864	if (ret) {
1865		rtw89_err(rtwdev, "failed to send h2c\n");
1866		goto fail;
1867	}
1868
1869	return 0;
1870fail:
1871	dev_kfree_skb_any(skb);
1872
1873	return ret;
1874}
1875
1876int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
1877				 struct rtw89_sta *rtwsta)
1878{
1879	const struct rtw89_chip_info *chip = rtwdev->chip;
1880	struct sk_buff *skb;
1881	int ret;
1882
1883	if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
1884		return 0;
1885
1886	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1887	if (!skb) {
1888		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac table\n");
1889		return -ENOMEM;
1890	}
1891	skb_put(skb, H2C_CMC_TBL_LEN);
1892	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
1893	SET_CTRL_INFO_OPERATION(skb->data, 1);
1894
1895	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
1896
1897	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1898			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1899			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
1900			      H2C_CMC_TBL_LEN);
1901
1902	ret = rtw89_h2c_tx(rtwdev, skb, false);
1903	if (ret) {
1904		rtw89_err(rtwdev, "failed to send h2c\n");
1905		goto fail;
1906	}
1907
1908	return 0;
1909fail:
1910	dev_kfree_skb_any(skb);
1911
1912	return ret;
1913}
1914
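/* Download an updated beacon template to the firmware: fetch the beacon
 * from mac80211, append a P2P NoA attribute if present, and send it
 * together with port, TIM offset, HW SSN and rate settings.
 */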
1915#define H2C_BCN_BASE_LEN 12
1916int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
1917			       struct rtw89_vif *rtwvif)
1918{
1919	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
1920	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
1921						       rtwvif->sub_entity_idx);
1922	struct sk_buff *skb;
1923	struct sk_buff *skb_beacon;
1924	u16 tim_offset;
1925	int bcn_total_len;
1926	u16 beacon_rate;
1927	void *noa_data;
1928	u8 noa_len;
1929	int ret;
1930
1931	if (vif->p2p)
1932		beacon_rate = RTW89_HW_RATE_OFDM6;
1933	else if (chan->band_type == RTW89_BAND_2G)
1934		beacon_rate = RTW89_HW_RATE_CCK1;
1935	else
1936		beacon_rate = RTW89_HW_RATE_OFDM6;
1937
1938	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
1939					      NULL, 0);
1940	if (!skb_beacon) {
1941		rtw89_err(rtwdev, "failed to get beacon skb\n");
1942		return -ENOMEM;
1943	}
1944
1945	noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
1946	if (noa_len &&
1947	    (noa_len <= skb_tailroom(skb_beacon) ||
1948	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
1949		skb_put_data(skb_beacon, noa_data, noa_len);
1950	}
1951
1952	bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
1953	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
1954	if (!skb) {
1955		rtw89_err(rtwdev, "failed to alloc skb for h2c beacon update\n");
1956		dev_kfree_skb_any(skb_beacon);
1957		return -ENOMEM;
1958	}
1959	skb_put(skb, H2C_BCN_BASE_LEN);
1960
1961	SET_BCN_UPD_PORT(skb->data, rtwvif->port);
1962	SET_BCN_UPD_MBSSID(skb->data, 0);
1963	SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
1964	SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
1965	SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
1966	SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
1967	SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
1968	SET_BCN_UPD_RATE(skb->data, beacon_rate);
1969
1970	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
1971	dev_kfree_skb_any(skb_beacon);
1972
1973	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1974			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1975			      H2C_FUNC_MAC_BCN_UPD, 0, 1,
1976			      bcn_total_len);
1977
1978	ret = rtw89_h2c_tx(rtwdev, skb, false);
1979	if (ret) {
1980		rtw89_err(rtwdev, "failed to send h2c\n");
1981		dev_kfree_skb_any(skb);
1982		return ret;
1983	}
1984
1985	return 0;
1986}
1987
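/* Tell the firmware to update its role record for a MACID. A station
 * entry under an AP interface is reported as RTW89_SELF_ROLE_AP_CLIENT;
 * otherwise the vif's own self role is used.
 */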
1988#define H2C_ROLE_MAINTAIN_LEN 4
1989int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
1990			       struct rtw89_vif *rtwvif,
1991			       struct rtw89_sta *rtwsta,
1992			       enum rtw89_upd_mode upd_mode)
1993{
1994	struct sk_buff *skb;
1995	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
1996	u8 self_role;
1997	int ret;
1998
1999	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
2000		if (rtwsta)
2001			self_role = RTW89_SELF_ROLE_AP_CLIENT;
2002		else
2003			self_role = rtwvif->self_role;
2004	} else {
2005		self_role = rtwvif->self_role;
2006	}
2007
2008	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
2009	if (!skb) {
2010		rtw89_err(rtwdev, "failed to alloc skb for h2c role maintain\n");
2011		return -ENOMEM;
2012	}
2013	skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
2014	SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
2015	SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
2016	SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
2017	SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);
2018
2019	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2020			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
2021			      H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
2022			      H2C_ROLE_MAINTAIN_LEN);
2023
2024	ret = rtw89_h2c_tx(rtwdev, skb, false);
2025	if (ret) {
2026		rtw89_err(rtwdev, "failed to send h2c\n");
2027		goto fail;
2028	}
2029
2030	return 0;
2031fail:
2032	dev_kfree_skb_any(skb);
2033
2034	return ret;
2035}
2036
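/* Report a (dis)connect event for a MACID along with the band, port,
 * WMM, trigger and role parameters the firmware needs for the link.
 */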
2037#define H2C_JOIN_INFO_LEN 4
2038int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
2039			   struct rtw89_sta *rtwsta, bool dis_conn)
2040{
2041	struct sk_buff *skb;
2042	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
2043	u8 self_role = rtwvif->self_role;
2044	u8 net_type = rtwvif->net_type;
2045	int ret;
2046
2047	if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
2048		self_role = RTW89_SELF_ROLE_AP_CLIENT;
2049		net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
2050	}
2051
2052	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
2053	if (!skb) {
2054		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
2055		return -ENOMEM;
2056	}
2057	skb_put(skb, H2C_JOIN_INFO_LEN);
2058	SET_JOININFO_MACID(skb->data, mac_id);
2059	SET_JOININFO_OP(skb->data, dis_conn);
2060	SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
2061	SET_JOININFO_WMM(skb->data, rtwvif->wmm);
2062	SET_JOININFO_TGR(skb->data, rtwvif->trigger);
2063	SET_JOININFO_ISHESTA(skb->data, 0);
2064	SET_JOININFO_DLBW(skb->data, 0);
2065	SET_JOININFO_TF_MAC_PAD(skb->data, 0);
2066	SET_JOININFO_DL_T_PE(skb->data, 0);
2067	SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
2068	SET_JOININFO_NET_TYPE(skb->data, net_type);
2069	SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
2070	SET_JOININFO_SELF_ROLE(skb->data, self_role);
2071
2072	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2073			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
2074			      H2C_FUNC_MAC_JOININFO, 0, 1,
2075			      H2C_JOIN_INFO_LEN);
2076
2077	ret = rtw89_h2c_tx(rtwdev, skb, false);
2078	if (ret) {
2079		rtw89_err(rtwdev, "failed to send h2c\n");
2080		goto fail;
2081	}
2082
2083	return 0;
2084fail:
2085	dev_kfree_skb_any(skb);
2086
2087	return ret;
2088}
2089
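/* Pause or resume TX for a single MACID: @grp selects the 32-bit MACID
 * group and @sh the bit within that group.
 */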
2090int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
2091			     bool pause)
2092{
2093	struct rtw89_fw_macid_pause_grp h2c = {{0}};
2094	u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
2095	struct sk_buff *skb;
2096	int ret;
2097
2098	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2099	if (!skb) {
2100		rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
2101		return -ENOMEM;
2102	}
2103	h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
2104	if (pause)
2105		h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
2106	skb_put_data(skb, &h2c, len);
2107
2108	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2109			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2110			      H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
2111			      len);
2112
2113	ret = rtw89_h2c_tx(rtwdev, skb, false);
2114	if (ret) {
2115		rtw89_err(rtwdev, "failed to send h2c\n");
2116		goto fail;
2117	}
2118
2119	return 0;
2120fail:
2121	dev_kfree_skb_any(skb);
2122
2123	return ret;
2124}
2125
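/* Program the EDCA parameters (@val) of one access category for the
 * band the vif operates on.
 */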
2126#define H2C_EDCA_LEN 12
2127int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
2128			  u8 ac, u32 val)
2129{
2130	struct sk_buff *skb;
2131	int ret;
2132
2133	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
2134	if (!skb) {
2135		rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
2136		return -ENOMEM;
2137	}
2138	skb_put(skb, H2C_EDCA_LEN);
2139	RTW89_SET_EDCA_SEL(skb->data, 0);
2140	RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
2141	RTW89_SET_EDCA_WMM(skb->data, 0);
2142	RTW89_SET_EDCA_AC(skb->data, ac);
2143	RTW89_SET_EDCA_PARAM(skb->data, val);
2144
2145	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2146			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2147			      H2C_FUNC_USR_EDCA, 0, 1,
2148			      H2C_EDCA_LEN);
2149
2150	ret = rtw89_h2c_tx(rtwdev, skb, false);
2151	if (ret) {
2152		rtw89_err(rtwdev, "failed to send h2c\n");
2153		goto fail;
2154	}
2155
2156	return 0;
2157fail:
2158	dev_kfree_skb_any(skb);
2159
2160	return ret;
2161}
2162
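/* Enable or disable the TSF32 toggle event for a port. When enabling,
 * the early time is set to 2000 us.
 */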
2163#define H2C_TSF32_TOGL_LEN 4
2164int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
2165			      bool en)
2166{
2167	struct sk_buff *skb;
2168	u16 early_us = en ? 2000 : 0;
2169	u8 *cmd;
2170	int ret;
2171
2172	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
2173	if (!skb) {
2174		rtw89_err(rtwdev, "failed to alloc skb for h2c tsf32 toggle\n");
2175		return -ENOMEM;
2176	}
2177	skb_put(skb, H2C_TSF32_TOGL_LEN);
2178	cmd = skb->data;
2179
2180	RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
2181	RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
2182	RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
2183	RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
2184
2185	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2186			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2187			      H2C_FUNC_TSF32_TOGL, 0, 0,
2188			      H2C_TSF32_TOGL_LEN);
2189
2190	ret = rtw89_h2c_tx(rtwdev, skb, false);
2191	if (ret) {
2192		rtw89_err(rtwdev, "failed to send h2c\n");
2193		goto fail;
2194	}
2195
2196	return 0;
2197fail:
2198	dev_kfree_skb_any(skb);
2199
2200	return ret;
2201}
2202
2203#define H2C_OFLD_CFG_LEN 8
2204int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
2205{
2206	static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
2207	struct sk_buff *skb;
2208	int ret;
2209
2210	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
2211	if (!skb) {
2212		rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
2213		return -ENOMEM;
2214	}
2215	skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);
2216
2217	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2218			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2219			      H2C_FUNC_OFLD_CFG, 0, 1,
2220			      H2C_OFLD_CFG_LEN);
2221
2222	ret = rtw89_h2c_tx(rtwdev, skb, false);
2223	if (ret) {
2224		rtw89_err(rtwdev, "failed to send h2c\n");
2225		goto fail;
2226	}
2227
2228	return 0;
2229fail:
2230	dev_kfree_skb_any(skb);
2231
2232	return ret;
2233}
2234
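/* Configure firmware beacon filtering and RSSI/beacon-loss monitoring
 * for a connected station interface, using the CQM threshold and
 * hysteresis from mac80211. Requires the BEACON_FILTER FW feature.
 */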
2235int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
2236				  struct ieee80211_vif *vif,
2237				  bool connect)
2238{
2239	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
2240	struct ieee80211_bss_conf *bss_conf = vif ? &vif->bss_conf : NULL;
2241	struct rtw89_h2c_bcnfltr *h2c;
2242	u32 len = sizeof(*h2c);
2243	struct sk_buff *skb;
2244	int ret;
2245
2246	if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
2247		return -EINVAL;
2248
2249	if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA)
2250		return -EINVAL;
2251
2252	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2253	if (!skb) {
2254		rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n");
2255		return -ENOMEM;
2256	}
2257
2258	skb_put(skb, len);
2259	h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
2260
2261	h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
2262		  le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
2263		  le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
2264		  le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
2265				   RTW89_H2C_BCNFLTR_W0_MODE) |
2266		  le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) |
2267		  le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
2268		  le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI,
2269				   RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
2270		  le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
2271
2272	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2273			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2274			      H2C_FUNC_CFG_BCNFLTR, 0, 1, len);
2275
2276	ret = rtw89_h2c_tx(rtwdev, skb, false);
2277	if (ret) {
2278		rtw89_err(rtwdev, "failed to send h2c\n");
2279		goto fail;
2280	}
2281
2282	return 0;
2283fail:
2284	dev_kfree_skb_any(skb);
2285
2286	return ret;
2287}
2288
2289int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
2290			      struct rtw89_rx_phy_ppdu *phy_ppdu)
2291{
2292	struct rtw89_h2c_ofld_rssi *h2c;
2293	u32 len = sizeof(*h2c);
2294	struct sk_buff *skb;
2295	s8 rssi;
2296	int ret;
2297
2298	if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
2299		return -EINVAL;
2300
2301	if (!phy_ppdu)
2302		return -EINVAL;
2303
2304	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2305	if (!skb) {
2306		rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n");
2307		return -ENOMEM;
2308	}
2309
2310	rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR;
2311	skb_put(skb, len);
2312	h2c = (struct rtw89_h2c_ofld_rssi *)skb->data;
2313
2314	h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) |
2315		  le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM);
2316	h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL);
2317
2318	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2319			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2320			      H2C_FUNC_OFLD_RSSI, 0, 1, len);
2321
2322	ret = rtw89_h2c_tx(rtwdev, skb, false);
2323	if (ret) {
2324		rtw89_err(rtwdev, "failed to send h2c\n");
2325		goto fail;
2326	}
2327
2328	return 0;
2329fail:
2330	dev_kfree_skb_any(skb);
2331
2332	return ret;
2333}
2334
2335int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
2336{
2337	struct rtw89_traffic_stats *stats = &rtwvif->stats;
2338	struct rtw89_h2c_ofld *h2c;
2339	u32 len = sizeof(*h2c);
2340	struct sk_buff *skb;
2341	int ret;
2342
2343	if (rtwvif->net_type != RTW89_NET_TYPE_INFRA)
2344		return -EINVAL;
2345
2346	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2347	if (!skb) {
2348		rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n");
2349		return -ENOMEM;
2350	}
2351
2352	skb_put(skb, len);
2353	h2c = (struct rtw89_h2c_ofld *)skb->data;
2354
2355	h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) |
2356		  le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) |
2357		  le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP);
2358
2359	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2360			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2361			      H2C_FUNC_OFLD_TP, 0, 1, len);
2362
2363	ret = rtw89_h2c_tx(rtwdev, skb, false);
2364	if (ret) {
2365		rtw89_err(rtwdev, "failed to send h2c\n");
2366		goto fail;
2367	}
2368
2369	return 0;
2370fail:
2371	dev_kfree_skb_any(skb);
2372
2373	return ret;
2374}
2375
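/* Send a rate adaptation (RA) configuration for one MACID. When @csi is
 * true, the CSI fixed-rate fields are filled in as well. BE-generation
 * chips use the longer V1 layout carrying extra EHT mode/BW fields.
 */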
2376int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
2377{
2378	const struct rtw89_chip_info *chip = rtwdev->chip;
2379	struct rtw89_h2c_ra_v1 *h2c_v1;
2380	struct rtw89_h2c_ra *h2c;
2381	u32 len = sizeof(*h2c);
2382	bool format_v1 = false;
2383	struct sk_buff *skb;
2384	int ret;
2385
2386	if (chip->chip_gen == RTW89_CHIP_BE) {
2387		len = sizeof(*h2c_v1);
2388		format_v1 = true;
2389	}
2390
2391	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2392	if (!skb) {
2393		rtw89_err(rtwdev, "failed to alloc skb for h2c ra\n");
2394		return -ENOMEM;
2395	}
2396	skb_put(skb, len);
2397	h2c = (struct rtw89_h2c_ra *)skb->data;
2398	rtw89_debug(rtwdev, RTW89_DBG_RA,
2399		    "ra cmd msk: %llx\n", ra->ra_mask);
2400
2401	h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) |
2402		  le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) |
2403		  le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) |
2404		  le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) |
2405		  le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) |
2406		  le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) |
2407		  le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) |
2408		  le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) |
2409		  le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) |
2410		  le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) |
2411		  le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) |
2412		  le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) |
2413		  le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) |
2414		  le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK);
2415	h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32);
2416	h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32);
2417	h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) |
2418		  le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF);
2419
2420	if (!format_v1)
2421		goto csi;
2422
2423	h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c;
2424	h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) |
2425		     le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT);
2426
2427csi:
2428	if (!csi)
2429		goto done;
2430
2431	h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL);
2432	h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) |
2433		   le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) |
2434		   le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) |
2435		   le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) |
2436		   le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) |
2437		   le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) |
2438		   le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) |
2439		   le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW);
2440
2441done:
2442	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2443			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
2444			      H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
2445			      len);
2446
2447	ret = rtw89_h2c_tx(rtwdev, skb, false);
2448	if (ret) {
2449		rtw89_err(rtwdev, "failed to send h2c\n");
2450		goto fail;
2451	}
2452
2453	return 0;
2454fail:
2455	dev_kfree_skb_any(skb);
2456
2457	return ret;
2458}
2459
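/* Report BT-coexistence driver init info (antenna, module and WLAN
 * state) to the firmware as a CXDRVINFO_INIT record.
 */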
2460int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
2461{
2462	struct rtw89_btc *btc = &rtwdev->btc;
2463	struct rtw89_btc_dm *dm = &btc->dm;
2464	struct rtw89_btc_init_info *init_info = &dm->init_info;
2465	struct rtw89_btc_module *module = &init_info->module;
2466	struct rtw89_btc_ant_info *ant = &module->ant;
2467	struct rtw89_h2c_cxinit *h2c;
2468	u32 len = sizeof(*h2c);
2469	struct sk_buff *skb;
2470	int ret;
2471
2472	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2473	if (!skb) {
2474		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
2475		return -ENOMEM;
2476	}
2477	skb_put(skb, len);
2478	h2c = (struct rtw89_h2c_cxinit *)skb->data;
2479
2480	h2c->hdr.type = CXDRVINFO_INIT;
2481	h2c->hdr.len = len - H2C_LEN_CXDRVHDR;
2482
2483	h2c->ant_type = ant->type;
2484	h2c->ant_num = ant->num;
2485	h2c->ant_iso = ant->isolation;
2486	h2c->ant_info =
2487		u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) |
2488		u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) |
2489		u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) |
2490		u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT);
2491
2492	h2c->mod_rfe = module->rfe_type;
2493	h2c->mod_cv = module->cv;
2494	h2c->mod_info =
2495		u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) |
2496		u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) |
2497		u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) |
2498		u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE);
2499	h2c->mod_adie_kt = module->kt_ver_adie;
2500	h2c->wl_gch = init_info->wl_guard_ch;
2501
2502	h2c->info =
2503		u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) |
2504		u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) |
2505		u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) |
2506		u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) |
2507		u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY);
2508
2509	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2510			      H2C_CAT_OUTSRC, BTFC_SET,
2511			      SET_DRV_INFO, 0, 0,
2512			      len);
2513
2514	ret = rtw89_h2c_tx(rtwdev, skb, false);
2515	if (ret) {
2516		rtw89_err(rtwdev, "failed to send h2c\n");
2517		goto fail;
2518	}
2519
2520	return 0;
2521fail:
2522	dev_kfree_skb_any(skb);
2523
2524	return ret;
2525}
2526
2527#define PORT_DATA_OFFSET 4
2528#define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
2529#define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
2530	(4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
2531
2532int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
2533{
2534	struct rtw89_btc *btc = &rtwdev->btc;
2535	const struct rtw89_btc_ver *ver = btc->ver;
2536	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2537	struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
2538	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
2539	struct rtw89_btc_wl_active_role *active = role_info->active_role;
2540	struct sk_buff *skb;
2541	u32 len;
2542	u8 offset = 0;
2543	u8 *cmd;
2544	int ret;
2545	int i;
2546
2547	len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);
2548
2549	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2550	if (!skb) {
2551		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
2552		return -ENOMEM;
2553	}
2554	skb_put(skb, len);
2555	cmd = skb->data;
2556
2557	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
2558	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
2559
2560	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
2561	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
2562
2563	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
2564	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
2565	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
2566	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
2567	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
2568	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
2569	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
2570	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
2571	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
2572	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
2573	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
2574	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
2575
2576	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
2577		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
2578		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
2579		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
2580		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
2581		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
2582		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
2583		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
2584		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
2585		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
2586		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
2587		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
2588		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
2589		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
2590	}
2591
2592	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2593			      H2C_CAT_OUTSRC, BTFC_SET,
2594			      SET_DRV_INFO, 0, 0,
2595			      len);
2596
2597	ret = rtw89_h2c_tx(rtwdev, skb, false);
2598	if (ret) {
2599		rtw89_err(rtwdev, "failed to send h2c\n");
2600		goto fail;
2601	}
2602
2603	return 0;
2604fail:
2605	dev_kfree_skb_any(skb);
2606
2607	return ret;
2608}
2609
2610#define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
2611	(4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
2612
2613int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
2614{
2615	struct rtw89_btc *btc = &rtwdev->btc;
2616	const struct rtw89_btc_ver *ver = btc->ver;
2617	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2618	struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
2619	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
2620	struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
2621	struct sk_buff *skb;
2622	u32 len;
2623	u8 *cmd, offset;
2624	int ret;
2625	int i;
2626
2627	len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);
2628
2629	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2630	if (!skb) {
2631		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
2632		return -ENOMEM;
2633	}
2634	skb_put(skb, len);
2635	cmd = skb->data;
2636
2637	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
2638	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
2639
2640	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
2641	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
2642
2643	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
2644	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
2645	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
2646	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
2647	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
2648	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
2649	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
2650	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
2651	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
2652	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
2653	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
2654	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
2655
2656	offset = PORT_DATA_OFFSET;
2657	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
2658		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
2659		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
2660		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
2661		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
2662		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
2663		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
2664		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
2665		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
2666		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
2667		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
2668		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
2669		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
2670		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
2671		RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
2672	}
2673
2674	offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
2675	RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
2676	RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
2677	RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
2678	RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
2679	RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
2680	RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
2681
2682	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2683			      H2C_CAT_OUTSRC, BTFC_SET,
2684			      SET_DRV_INFO, 0, 0,
2685			      len);
2686
2687	ret = rtw89_h2c_tx(rtwdev, skb, false);
2688	if (ret) {
2689		rtw89_err(rtwdev, "failed to send h2c\n");
2690		goto fail;
2691	}
2692
2693	return 0;
2694fail:
2695	dev_kfree_skb_any(skb);
2696
2697	return ret;
2698}
2699
2700#define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \
2701	(4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
2702
2703int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev)
2704{
2705	struct rtw89_btc *btc = &rtwdev->btc;
2706	const struct rtw89_btc_ver *ver = btc->ver;
2707	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2708	struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2;
2709	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
2710	struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2;
2711	struct sk_buff *skb;
2712	u32 len;
2713	u8 *cmd, offset;
2714	int ret;
2715	int i;
2716
2717	len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num);
2718
2719	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2720	if (!skb) {
2721		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
2722		return -ENOMEM;
2723	}
2724	skb_put(skb, len);
2725	cmd = skb->data;
2726
2727	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
2728	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
2729
2730	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
2731	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
2732
2733	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
2734	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
2735	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
2736	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
2737	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
2738	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
2739	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
2740	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
2741	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
2742	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
2743	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
2744	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
2745
2746	offset = PORT_DATA_OFFSET;
2747	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
2748		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset);
2749		RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset);
2750		RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset);
2751		RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset);
2752		RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset);
2753		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset);
2754		RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset);
2755		RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset);
2756		RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset);
2757		RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset);
2758	}
2759
2760	offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
2761	RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
2762	RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
2763	RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
2764	RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
2765	RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
2766	RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
2767
2768	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2769			      H2C_CAT_OUTSRC, BTFC_SET,
2770			      SET_DRV_INFO, 0, 0,
2771			      len);
2772
2773	ret = rtw89_h2c_tx(rtwdev, skb, false);
2774	if (ret) {
2775		rtw89_err(rtwdev, "failed to send h2c\n");
2776		goto fail;
2777	}
2778
2779	return 0;
2780fail:
2781	dev_kfree_skb_any(skb);
2782
2783	return ret;
2784}
2785
2786#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
2787int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
2788{
2789	struct rtw89_btc *btc = &rtwdev->btc;
2790	const struct rtw89_btc_ver *ver = btc->ver;
2791	struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
2792	struct sk_buff *skb;
2793	u8 *cmd;
2794	int ret;
2795
2796	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
2797	if (!skb) {
2798		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
2799		return -ENOMEM;
2800	}
2801	skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
2802	cmd = skb->data;
2803
2804	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
2805	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
2806
2807	RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
2808	RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
2809	RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
2810	if (ver->fcxctrl == 0)
2811		RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
2812
2813	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2814			      H2C_CAT_OUTSRC, BTFC_SET,
2815			      SET_DRV_INFO, 0, 0,
2816			      H2C_LEN_CXDRVINFO_CTRL);
2817
2818	ret = rtw89_h2c_tx(rtwdev, skb, false);
2819	if (ret) {
2820		rtw89_err(rtwdev, "failed to send h2c\n");
2821		goto fail;
2822	}
2823
2824	return 0;
2825fail:
2826	dev_kfree_skb_any(skb);
2827
2828	return ret;
2829}
2830
2831#define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
2832int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev)
2833{
2834	struct rtw89_btc *btc = &rtwdev->btc;
2835	struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
2836	struct sk_buff *skb;
2837	u8 *cmd;
2838	int ret;
2839
2840	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX);
2841	if (!skb) {
2842		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n");
2843		return -ENOMEM;
2844	}
2845	skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
2846	cmd = skb->data;
2847
2848	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX);
2849	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);
2850
2851	RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
2852	RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl);
2853	RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi);
2854	RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi);
2855	RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power);
2856	RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain);
2857	RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power);
2858	RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain);
2859	RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn);
2860	RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm);
2861	RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile);
2862	RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2);
2863	RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate);
2864	RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate);
2865	RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp);
2866	RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp);
2867	RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio);
2868
2869	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2870			      H2C_CAT_OUTSRC, BTFC_SET,
2871			      SET_DRV_INFO, 0, 0,
2872			      H2C_LEN_CXDRVINFO_TRX);
2873
2874	ret = rtw89_h2c_tx(rtwdev, skb, false);
2875	if (ret) {
2876		rtw89_err(rtwdev, "failed to send h2c\n");
2877		goto fail;
2878	}
2879
2880	return 0;
2881fail:
2882	dev_kfree_skb_any(skb);
2883
2884	return ret;
2885}
2886
2887#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
2888int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
2889{
2890	struct rtw89_btc *btc = &rtwdev->btc;
2891	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2892	struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
2893	struct sk_buff *skb;
2894	u8 *cmd;
2895	int ret;
2896
2897	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
2898	if (!skb) {
2899		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
2900		return -ENOMEM;
2901	}
2902	skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
2903	cmd = skb->data;
2904
2905	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
2906	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
2907
2908	RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
2909	RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
2910	RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
2911	RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
2912	RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);
2913
2914	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2915			      H2C_CAT_OUTSRC, BTFC_SET,
2916			      SET_DRV_INFO, 0, 0,
2917			      H2C_LEN_CXDRVINFO_RFK);
2918
2919	ret = rtw89_h2c_tx(rtwdev, skb, false);
2920	if (ret) {
2921		rtw89_err(rtwdev, "failed to send h2c\n");
2922		goto fail;
2923	}
2924
2925	return 0;
2926fail:
2927	dev_kfree_skb_any(skb);
2928
2929	return ret;
2930}
2931
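/* Packet offload: template packets (e.g. probe requests for scanning)
 * are stored in firmware and tracked by id in the pkt_offload bitmap.
 * Both the add and the del command are completed asynchronously, so the
 * helpers below wait for the matching firmware acknowledgement before
 * returning.
 */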
2932#define H2C_LEN_PKT_OFLD 4
2933int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
2934{
2935	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
2936	struct sk_buff *skb;
2937	unsigned int cond;
2938	u8 *cmd;
2939	int ret;
2940
2941	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
2942	if (!skb) {
2943		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
2944		return -ENOMEM;
2945	}
2946	skb_put(skb, H2C_LEN_PKT_OFLD);
2947	cmd = skb->data;
2948
2949	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
2950	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
2951
2952	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2953			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2954			      H2C_FUNC_PACKET_OFLD, 1, 1,
2955			      H2C_LEN_PKT_OFLD);
2956
2957	cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL);
2958
2959	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
2960	if (ret < 0) {
2961		rtw89_debug(rtwdev, RTW89_DBG_FW,
2962			    "failed to del pkt ofld: id %d, ret %d\n",
2963			    id, ret);
2964		return ret;
2965	}
2966
2967	rtw89_core_release_bit_map(rtwdev->pkt_offload, id);
2968	return 0;
2969}
2970
2971int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
2972				 struct sk_buff *skb_ofld)
2973{
2974	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
2975	struct sk_buff *skb;
2976	unsigned int cond;
2977	u8 *cmd;
2978	u8 alloc_id;
2979	int ret;
2980
2981	alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
2982					      RTW89_MAX_PKT_OFLD_NUM);
2983	if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
2984		return -ENOSPC;
2985
2986	*id = alloc_id;
2987
2988	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
2989	if (!skb) {
2990		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
2991		rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
2992		return -ENOMEM;
2993	}
2994	skb_put(skb, H2C_LEN_PKT_OFLD);
2995	cmd = skb->data;
2996
2997	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
2998	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
2999	RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
3000	skb_put_data(skb, skb_ofld->data, skb_ofld->len);
3001
3002	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3003			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3004			      H2C_FUNC_PACKET_OFLD, 1, 1,
3005			      H2C_LEN_PKT_OFLD + skb_ofld->len);
3006
3007	cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD);
3008
3009	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3010	if (ret < 0) {
3011		rtw89_debug(rtwdev, RTW89_DBG_FW,
3012			    "failed to add pkt ofld: id %d, ret %d\n",
3013			    alloc_id, ret);
3014		rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
3015		return ret;
3016	}
3017
3018	return 0;
3019}
3020
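/* Download the scan channel list: a short header carrying the channel
 * count, followed by one RTW89_MAC_CHINFO_SIZE record per channel.
 */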
3021#define H2C_LEN_SCAN_LIST_OFFLOAD 4
3022int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
3023				   struct list_head *chan_list)
3024{
3025	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
3026	struct rtw89_mac_chinfo *ch_info;
3027	struct sk_buff *skb;
3028	int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
3029	unsigned int cond;
3030	u8 *cmd;
3031	int ret;
3032
3033	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
3034	if (!skb) {
3035		rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
3036		return -ENOMEM;
3037	}
3038	skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
3039	cmd = skb->data;
3040
3041	RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
3042	/* in unit of 4 bytes */
3043	RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
3044
3045	list_for_each_entry(ch_info, chan_list, list) {
3046		cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
3047
3048		RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
3049		RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
3050		RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
3051		RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
3052		RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
3053		RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
3054		RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
3055		RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
3056		RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
3057		RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
3058		RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
3059		RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
3060		RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
3061		RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
3062		RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
3063		RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
3064		RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
3065		RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
3066		RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
3067		RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
3068		RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
3069		RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
3070	}
3071
3072	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3073			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3074			      H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
3075
3076	cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH);
3077
3078	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3079	if (ret) {
3080		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
3081		return ret;
3082	}
3083
3084	return 0;
3085}
3086
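/* Start or stop firmware scan offload. In target-channel mode the
 * current operating channel (band, bandwidth, primary and center
 * channel) is passed along as well.
 */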
3087int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
3088			      struct rtw89_scan_option *option,
3089			      struct rtw89_vif *rtwvif)
3090{
3091	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
3092	struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
3093	struct rtw89_h2c_scanofld *h2c;
3094	u32 len = sizeof(*h2c);
3095	struct sk_buff *skb;
3096	unsigned int cond;
3097	int ret;
3098
3099	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3100	if (!skb) {
3101		rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
3102		return -ENOMEM;
3103	}
3104	skb_put(skb, len);
3105	h2c = (struct rtw89_h2c_scanofld *)skb->data;
3106
3107	h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
3108		  le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
3109		  le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) |
3110		  le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION);
3111
3112	h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) |
3113		  le32_encode_bits(option->target_ch_mode,
3114				   RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) |
3115		  le32_encode_bits(RTW89_SCAN_IMMEDIATE,
3116				   RTW89_H2C_SCANOFLD_W1_START_MODE) |
3117		  le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);
3118
3119	if (option->target_ch_mode) {
3120		h2c->w1 |= le32_encode_bits(op->band_width,
3121					    RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) |
3122			   le32_encode_bits(op->primary_channel,
3123					    RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) |
3124			   le32_encode_bits(op->channel,
3125					    RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH);
3126		h2c->w0 |= le32_encode_bits(op->band_type,
3127					    RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND);
3128	}
3129
3130	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3131			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3132			      H2C_FUNC_SCANOFLD, 1, 1,
3133			      len);
3134
3135	cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD);
3136
3137	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3138	if (ret) {
3139		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n");
3140		return ret;
3141	}
3142
3143	return 0;
3144}
3145
3146int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
3147			struct rtw89_fw_h2c_rf_reg_info *info,
3148			u16 len, u8 page)
3149{
3150	struct sk_buff *skb;
3151	u8 class = info->rf_path == RF_PATH_A ?
3152		   H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
3153	int ret;
3154
3155	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3156	if (!skb) {
3157		rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
3158		return -ENOMEM;
3159	}
3160	skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);
3161
3162	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3163			      H2C_CAT_OUTSRC, class, page, 0, 0,
3164			      len);
3165
3166	ret = rtw89_h2c_tx(rtwdev, skb, false);
3167	if (ret) {
3168		rtw89_err(rtwdev, "failed to send h2c\n");
3169		goto fail;
3170	}
3171
3172	return 0;
3173fail:
3174	dev_kfree_skb_any(skb);
3175
3176	return ret;
3177}
3178
3179int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
3180{
3181	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3182	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
3183	struct rtw89_fw_h2c_rf_get_mccch *mccch;
3184	struct sk_buff *skb;
3185	int ret;
3186
3187	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
3188	if (!skb) {
3189		rtw89_err(rtwdev, "failed to alloc skb for h2c rf mcc\n");
3190		return -ENOMEM;
3191	}
3192	skb_put(skb, sizeof(*mccch));
3193	mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
3194
3195	mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
3196	mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
3197	mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
3198	mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
3199	mccch->current_channel = cpu_to_le32(chan->channel);
3200	mccch->current_band_type = cpu_to_le32(chan->band_type);
3201
3202	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3203			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
3204			      H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
3205			      sizeof(*mccch));
3206
3207	ret = rtw89_h2c_tx(rtwdev, skb, false);
3208	if (ret) {
3209		rtw89_err(rtwdev, "failed to send h2c\n");
3210		goto fail;
3211	}
3212
3213	return 0;
3214fail:
3215	dev_kfree_skb_any(skb);
3216
3217	return ret;
3218}
3219EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
3220
3221int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
3222			      u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
3223			      bool rack, bool dack)
3224{
3225	struct sk_buff *skb;
3226	int ret;
3227
3228	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3229	if (!skb) {
3230		rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
3231		return -ENOMEM;
3232	}
3233	skb_put_data(skb, buf, len);
3234
3235	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3236			      H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
3237			      len);
3238
3239	ret = rtw89_h2c_tx(rtwdev, skb, false);
3240	if (ret) {
3241		rtw89_err(rtwdev, "failed to send h2c\n");
3242		goto fail;
3243	}
3244
3245	return 0;
3246fail:
3247	dev_kfree_skb_any(skb);
3248
3249	return ret;
3250}
3251
3252int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
3253{
3254	struct sk_buff *skb;
3255	int ret;
3256
3257	skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
3258	if (!skb) {
3259		rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
3260		return -ENOMEM;
3261	}
3262	skb_put_data(skb, buf, len);
3263
3264	ret = rtw89_h2c_tx(rtwdev, skb, false);
3265	if (ret) {
3266		rtw89_err(rtwdev, "failed to send h2c\n");
3267		goto fail;
3268	}
3269
3270	return 0;
3271fail:
3272	dev_kfree_skb_any(skb);
3273
3274	return ret;
3275}
3276
3277void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
3278{
3279	struct rtw89_early_h2c *early_h2c;
3280
3281	lockdep_assert_held(&rtwdev->mutex);
3282
3283	list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
3284		rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
3285	}
3286}
3287
3288void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
3289{
3290	struct rtw89_early_h2c *early_h2c, *tmp;
3291
3292	mutex_lock(&rtwdev->mutex);
3293	list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
3294		list_del(&early_h2c->list);
3295		kfree(early_h2c->h2c);
3296		kfree(early_h2c);
3297	}
3298	mutex_unlock(&rtwdev->mutex);
3299}
3300
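/* C2H handling: the header attributes are parsed first, then events
 * that are safe to handle atomically are processed directly in
 * rtw89_fw_c2h_irqsafe(); everything else is queued to c2h_work and
 * handled under rtwdev->mutex in process context.
 */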
3301static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
3302{
3303	const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data;
3304	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
3305
3306	attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY);
3307	attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS);
3308	attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC);
3309	attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN);
3310}
3311
3312static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
3313				    struct sk_buff *c2h)
3314{
3315	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
3316	u8 category = attr->category;
3317	u8 class = attr->class;
3318	u8 func = attr->func;
3319
3320	switch (category) {
3321	default:
3322		return false;
3323	case RTW89_C2H_CAT_MAC:
3324		return rtw89_mac_c2h_chk_atomic(rtwdev, class, func);
3325	}
3326}
3327
3328void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
3329{
3330	rtw89_fw_c2h_parse_attr(c2h);
3331	if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
3332		goto enqueue;
3333
3334	rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
3335	dev_kfree_skb_any(c2h);
3336	return;
3337
3338enqueue:
3339	skb_queue_tail(&rtwdev->c2h_queue, c2h);
3340	ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
3341}
3342
3343static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
3344				    struct sk_buff *skb)
3345{
3346	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
3347	u8 category = attr->category;
3348	u8 class = attr->class;
3349	u8 func = attr->func;
3350	u16 len = attr->len;
3351	bool dump = true;
3352
3353	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
3354		return;
3355
3356	switch (category) {
3357	case RTW89_C2H_CAT_TEST:
3358		break;
3359	case RTW89_C2H_CAT_MAC:
3360		rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
3361		if (class == RTW89_MAC_C2H_CLASS_INFO &&
3362		    func == RTW89_MAC_C2H_FUNC_C2H_LOG)
3363			dump = false;
3364		break;
3365	case RTW89_C2H_CAT_OUTSRC:
3366		if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
3367		    class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
3368			rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
3369		else
3370			rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
3371		break;
3372	}
3373
3374	if (dump)
3375		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
3376}
3377
3378void rtw89_fw_c2h_work(struct work_struct *work)
3379{
3380	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
3381						c2h_work);
3382	struct sk_buff *skb, *tmp;
3383
3384	skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
3385		skb_unlink(skb, &rtwdev->c2h_queue);
3386		mutex_lock(&rtwdev->mutex);
3387		rtw89_fw_c2h_cmd_handle(rtwdev, skb);
3388		mutex_unlock(&rtwdev->mutex);
3389		dev_kfree_skb_any(skb);
3390	}
3391}
3392
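/* Register-based H2C/C2H path: small messages are exchanged directly
 * through the h2c/c2h register sets instead of the normal H2C queue,
 * with a per-direction counter written back after each transfer.
 */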
3393static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
3394				  struct rtw89_mac_h2c_info *info)
3395{
3396	const struct rtw89_chip_info *chip = rtwdev->chip;
3397	struct rtw89_fw_info *fw_info = &rtwdev->fw;
3398	const u32 *h2c_reg = chip->h2c_regs;
3399	u8 i, val, len;
3400	int ret;
3401
3402	ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
3403				rtwdev, chip->h2c_ctrl_reg);
3404	if (ret) {
3405		rtw89_warn(rtwdev, "FW does not process h2c registers\n");
3406		return ret;
3407	}
3408
3409	len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
3410			   sizeof(info->u.h2creg[0]));
3411
3412	u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK);
3413	u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK);
3414
3415	for (i = 0; i < RTW89_H2CREG_MAX; i++)
3416		rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]);
3417
3418	fw_info->h2c_counter++;
3419	rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr,
3420			  chip->h2c_counter_reg.mask, fw_info->h2c_counter);
3421	rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
3422
3423	return 0;
3424}
3425
3426static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
3427				 struct rtw89_mac_c2h_info *info)
3428{
3429	const struct rtw89_chip_info *chip = rtwdev->chip;
3430	struct rtw89_fw_info *fw_info = &rtwdev->fw;
3431	const u32 *c2h_reg = chip->c2h_regs;
3432	int ret;
3433	u8 i, val;
3434
3435	info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
3436
3437	ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
3438				       RTW89_C2H_TIMEOUT, false, rtwdev,
3439				       chip->c2h_ctrl_reg);
3440	if (ret) {
3441		rtw89_warn(rtwdev, "c2h reg timeout\n");
3442		return ret;
3443	}
3444
3445	for (i = 0; i < RTW89_C2HREG_MAX; i++)
3446		info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
3447
3448	rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
3449
3450	info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK);
3451	info->content_len =
3452		(u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) -
3453		RTW89_C2HREG_HDR_LEN;
3454
3455	fw_info->c2h_counter++;
3456	rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr,
3457			  chip->c2h_counter_reg.mask, fw_info->c2h_counter);
3458
3459	return 0;
3460}
3461
3462int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
3463		     struct rtw89_mac_h2c_info *h2c_info,
3464		     struct rtw89_mac_c2h_info *c2h_info)
3465{
3466	int ret;
3467
3468	if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
3469		lockdep_assert_held(&rtwdev->mutex);
3470
3471	if (!h2c_info && !c2h_info)
3472		return -EINVAL;
3473
3474	if (!h2c_info)
3475		goto recv_c2h;
3476
3477	ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
3478	if (ret)
3479		return ret;
3480
3481recv_c2h:
3482	if (!c2h_info)
3483		return 0;
3484
3485	ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
3486	if (ret)
3487		return ret;
3488
3489	return 0;
3490}
3491
3492void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
3493{
3494	if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
3495		rtw89_err(rtwdev, "[ERR]pwr is off\n");
3496		return;
3497	}
3498
3499	rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
3500	rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
3501	rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
3502	rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
3503	rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
3504		   rtw89_read32(rtwdev, R_AX_HALT_C2H));
3505	rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
3506		   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
3507
3508	rtw89_fw_prog_cnt_dump(rtwdev);
3509}
3510
3511static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
3512{
3513	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
3514	struct rtw89_pktofld_info *info, *tmp;
3515	u8 idx;
3516
3517	for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
3518		if (!(rtwdev->chip->support_bands & BIT(idx)))
3519			continue;
3520
3521		list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
3522			if (test_bit(info->id, rtwdev->pkt_offload))
3523				rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
3524			list_del(&info->list);
3525			kfree(info);
3526		}
3527	}
3528}
3529
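/* On 6 GHz, a probe request for an empty SSID is treated as a wildcard
 * probe and skipped by the offload path; otherwise the SSID is copied
 * into the pktofld info.
 */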
3530static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
3531					     struct rtw89_vif *rtwvif,
3532					     struct rtw89_pktofld_info *info,
3533					     enum nl80211_band band, u8 ssid_idx)
3534{
3535	struct cfg80211_scan_request *req = rtwvif->scan_req;
3536
3537	if (band != NL80211_BAND_6GHZ)
3538		return false;
3539
3540	if (req->ssids[ssid_idx].ssid_len) {
3541		memcpy(info->ssid, req->ssids[ssid_idx].ssid,
3542		       req->ssids[ssid_idx].ssid_len);
3543		info->ssid_len = req->ssids[ssid_idx].ssid_len;
3544		return false;
3545	} else {
3546		return true;
3547	}
3548}
3549
3550static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
3551				     struct rtw89_vif *rtwvif,
3552				     struct sk_buff *skb, u8 ssid_idx)
3553{
3554	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
3555	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
3556	struct rtw89_pktofld_info *info;
3557	struct sk_buff *new;
3558	int ret = 0;
3559	u8 band;
3560
3561	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
3562		if (!(rtwdev->chip->support_bands & BIT(band)))
3563			continue;
3564
3565		new = skb_copy(skb, GFP_KERNEL);
3566		if (!new) {
3567			ret = -ENOMEM;
3568			goto out;
3569		}
3570		skb_put_data(new, ies->ies[band], ies->len[band]);
3571		skb_put_data(new, ies->common_ies, ies->common_ie_len);
3572
3573		info = kzalloc(sizeof(*info), GFP_KERNEL);
3574		if (!info) {
3575			ret = -ENOMEM;
3576			kfree_skb(new);
3577			goto out;
3578		}
3579
3580		if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band,
3581						     ssid_idx)) {
3582			kfree_skb(new);
3583			kfree(info);
3584			goto out;
3585		}
3586
3587		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
3588		if (ret) {
3589			kfree_skb(new);
3590			kfree(info);
3591			goto out;
3592		}
3593
3594		list_add_tail(&info->list, &scan_info->pkt_list[band]);
3595		kfree_skb(new);
3596	}
3597out:
3598	return ret;
3599}
3600
3601static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
3602					  struct rtw89_vif *rtwvif)
3603{
3604	struct cfg80211_scan_request *req = rtwvif->scan_req;
3605	struct sk_buff *skb;
3606	u8 num = req->n_ssids, i;
3607	int ret;
3608
3609	for (i = 0; i < num; i++) {
3610		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
3611					     req->ssids[i].ssid,
3612					     req->ssids[i].ssid_len,
3613					     req->ie_len);
3614		if (!skb)
3615			return -ENOMEM;
3616
3617		ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i);
3618		kfree_skb(skb);
3619
3620		if (ret)
3621			return ret;
3622	}
3623
3624	return 0;
3625}
3626
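/* For a 6 GHz scan, build directed probe requests toward the BSSIDs reported
 * via RNR (req->scan_6ghz_params) that match the channel being programmed,
 * offload them to FW, and extend the channel period so the responses can be
 * received.
 */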
3627static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev,
3628				      struct cfg80211_scan_request *req,
3629				      struct rtw89_mac_chinfo *ch_info)
3630{
3631	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
3632	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
3633	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
3634	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
3635	struct cfg80211_scan_6ghz_params *params;
3636	struct rtw89_pktofld_info *info, *tmp;
3637	struct ieee80211_hdr *hdr;
3638	struct sk_buff *skb;
3639	bool found;
3640	int ret = 0;
3641	u8 i;
3642
3643	if (!req->n_6ghz_params)
3644		return 0;
3645
3646	for (i = 0; i < req->n_6ghz_params; i++) {
3647		params = &req->scan_6ghz_params[i];
3648
3649		if (req->channels[params->channel_idx]->hw_value !=
3650		    ch_info->pri_ch)
3651			continue;
3652
3653		found = false;
3654		list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) {
3655			if (ether_addr_equal(tmp->bssid, params->bssid)) {
3656				found = true;
3657				break;
3658			}
3659		}
3660		if (found)
3661			continue;
3662
		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
					     NULL, 0, req->ie_len);
		if (!skb) {
			ret = -ENOMEM;
			goto out;
		}
		skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
3666		skb_put_data(skb, ies->common_ies, ies->common_ie_len);
3667		hdr = (struct ieee80211_hdr *)skb->data;
3668		ether_addr_copy(hdr->addr3, params->bssid);
3669
3670		info = kzalloc(sizeof(*info), GFP_KERNEL);
3671		if (!info) {
3672			ret = -ENOMEM;
3673			kfree_skb(skb);
3674			goto out;
3675		}
3676
3677		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
3678		if (ret) {
3679			kfree_skb(skb);
3680			kfree(info);
3681			goto out;
3682		}
3683
3684		ether_addr_copy(info->bssid, params->bssid);
3685		info->channel_6ghz = req->channels[params->channel_idx]->hw_value;
3686		list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]);
3687
3688		ch_info->tx_pkt = true;
3689		ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
3690
3691		kfree_skb(skb);
3692	}
3693
3694out:
3695	return ret;
3696}
3697
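/* Fill one scan-offload channel descriptor: default probing parameters first,
 * then 6 GHz PSC/wildcard adjustments and RNR handling, the list of offloaded
 * probe packet IDs, and finally per-type overrides for operating, DFS and
 * active channels.
 */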
3698static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
3699				   int ssid_num,
3700				   struct rtw89_mac_chinfo *ch_info)
3701{
3702	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
3703	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
3704	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
3705	struct cfg80211_scan_request *req = rtwvif->scan_req;
3706	struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
3707	struct rtw89_pktofld_info *info;
3708	u8 band, probe_count = 0;
3709	int ret;
3710
3711	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
3712	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
3713	ch_info->bw = RTW89_SCAN_WIDTH;
3714	ch_info->tx_pkt = true;
3715	ch_info->cfg_tx_pwr = false;
3716	ch_info->tx_pwr_idx = 0;
3717	ch_info->tx_null = false;
3718	ch_info->pause_data = false;
3719	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
3720
3721	if (ch_info->ch_band == RTW89_BAND_6G) {
3722		if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
3723		    !ch_info->is_psc) {
3724			ch_info->tx_pkt = false;
3725			if (!req->duration_mandatory)
3726				ch_info->period -= RTW89_DWELL_TIME_6G;
3727		}
3728	}
3729
3730	ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info);
3731	if (ret)
		rtw89_warn(rtwdev, "failed to update 6 GHz RNR channel: %d\n", ret);
3733
3734	if (ssid_num) {
3735		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
3736
3737		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
3738			if (info->channel_6ghz &&
3739			    ch_info->pri_ch != info->channel_6ghz)
3740				continue;
3741			ch_info->pkt_id[probe_count++] = info->id;
3742			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
3743				break;
3744		}
3745		ch_info->num_pkt = probe_count;
3746	}
3747
3748	switch (chan_type) {
3749	case RTW89_CHAN_OPERATE:
3750		ch_info->central_ch = op->channel;
3751		ch_info->pri_ch = op->primary_channel;
3752		ch_info->ch_band = op->band_type;
3753		ch_info->bw = op->band_width;
3754		ch_info->tx_null = true;
3755		ch_info->num_pkt = 0;
3756		break;
3757	case RTW89_CHAN_DFS:
3758		if (ch_info->ch_band != RTW89_BAND_6G)
3759			ch_info->period = max_t(u8, ch_info->period,
3760						RTW89_DFS_CHAN_TIME);
3761		ch_info->dwell_time = RTW89_DWELL_TIME;
3762		break;
3763	case RTW89_CHAN_ACTIVE:
3764		break;
3765	default:
		rtw89_err(rtwdev, "Channel type out of bounds\n");
3767	}
3768}
3769
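/* Build up to RTW89_SCAN_LIST_LIMIT channel entries starting from the last
 * programmed index, inserting a return to the operating channel whenever the
 * accumulated off-channel time would exceed RTW89_OFF_CHAN_TIME while
 * connected, then hand the list to FW via the scan list offload H2C.
 */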
3770static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
3771				       struct rtw89_vif *rtwvif, bool connected)
3772{
3773	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_mac_chinfo *ch_info, *tmp;
3775	struct ieee80211_channel *channel;
3776	struct list_head chan_list;
3777	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
3778	int list_len, off_chan_time = 0;
3779	enum rtw89_chan_type type;
3780	int ret = 0;
3781	u32 idx;
3782
3783	INIT_LIST_HEAD(&chan_list);
3784	for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
3785	     idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
3786	     idx++, list_len++) {
3787		channel = req->channels[idx];
3788		ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
3789		if (!ch_info) {
3790			ret = -ENOMEM;
3791			goto out;
3792		}
3793
3794		if (req->duration_mandatory)
3795			ch_info->period = req->duration;
3796		else if (channel->band == NL80211_BAND_6GHZ)
3797			ch_info->period = RTW89_CHANNEL_TIME_6G +
3798					  RTW89_DWELL_TIME_6G;
3799		else
3800			ch_info->period = RTW89_CHANNEL_TIME;
3801
3802		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
3803		ch_info->central_ch = channel->hw_value;
3804		ch_info->pri_ch = channel->hw_value;
3805		ch_info->rand_seq_num = random_seq;
3806		ch_info->is_psc = cfg80211_channel_is_psc(channel);
3807
3808		if (channel->flags &
3809		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
3810			type = RTW89_CHAN_DFS;
3811		else
3812			type = RTW89_CHAN_ACTIVE;
3813		rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
3814
3815		if (connected &&
3816		    off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
3817			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
3818			if (!tmp) {
3819				ret = -ENOMEM;
3820				kfree(ch_info);
3821				goto out;
3822			}
3823
3824			type = RTW89_CHAN_OPERATE;
3825			tmp->period = req->duration_mandatory ?
3826				      req->duration : RTW89_CHANNEL_TIME;
3827			rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
3828			list_add_tail(&tmp->list, &chan_list);
3829			off_chan_time = 0;
3830			list_len++;
3831		}
3832		list_add_tail(&ch_info->list, &chan_list);
3833		off_chan_time += ch_info->period;
3834	}
3835	rtwdev->scan_info.last_chan_idx = idx;
3836	ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
3837
3838out:
3839	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
3840		list_del(&ch_info->list);
3841		kfree(ch_info);
3842	}
3843
3844	return ret;
3845}
3846
3847static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
3848				   struct rtw89_vif *rtwvif, bool connected)
3849{
3850	int ret;
3851
3852	ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
3853	if (ret) {
3854		rtw89_err(rtwdev, "Update probe request failed\n");
3855		goto out;
3856	}
3857	ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected);
3858out:
3859	return ret;
3860}
3861
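/* Prepare for an offloaded HW scan: remember the operating channel and scan
 * request, stop TX queues, optionally randomize the source MAC address, and
 * relax the RX filter so beacons and probe responses from any BSS are
 * accepted while scanning.
 */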
3862void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
3863			 struct ieee80211_scan_request *scan_req)
3864{
3865	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
3866	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
3867	struct cfg80211_scan_request *req = &scan_req->req;
3868	u32 rx_fltr = rtwdev->hal.rx_fltr;
3869	u8 mac_addr[ETH_ALEN];
3870
3871	rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan);
3872	rtwdev->scan_info.scanning_vif = vif;
3873	rtwdev->scan_info.last_chan_idx = 0;
3874	rtwvif->scan_ies = &scan_req->ies;
3875	rtwvif->scan_req = req;
3876	ieee80211_stop_queues(rtwdev->hw);
3877
3878	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
3879		get_random_mask_addr(mac_addr, req->mac_addr,
3880				     req->mac_addr_mask);
3881	else
3882		ether_addr_copy(mac_addr, vif->addr);
3883	rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);
3884
3885	rx_fltr &= ~B_AX_A_BCN_CHK_EN;
3886	rx_fltr &= ~B_AX_A_BC;
3887	rx_fltr &= ~B_AX_A_A1_MATCH;
3888	rtw89_write32_mask(rtwdev,
3889			   rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
3890			   B_AX_RX_FLTR_CFG_MASK,
3891			   rx_fltr);
3892}
3893
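/* Tear down after an offloaded HW scan: restore the RX filter, report
 * completion to mac80211, wake TX queues, re-enable AP-mode beacons, release
 * the offloaded probe requests and reset the scan bookkeeping before
 * returning to the proper channel.
 */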
3894void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
3895			    bool aborted)
3896{
3897	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
3898	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
3899	struct cfg80211_scan_info info = {
3900		.aborted = aborted,
3901	};
3902	struct rtw89_vif *rtwvif;
3903
3904	if (!vif)
3905		return;
3906
3907	rtw89_write32_mask(rtwdev,
3908			   rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
3909			   B_AX_RX_FLTR_CFG_MASK,
3910			   rtwdev->hal.rx_fltr);
3911
3912	rtw89_core_scan_complete(rtwdev, vif, true);
3913	ieee80211_scan_completed(rtwdev->hw, &info);
3914	ieee80211_wake_queues(rtwdev->hw);
3915	rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
3916
3917	rtw89_release_pkt_list(rtwdev);
3918	rtwvif = (struct rtw89_vif *)vif->drv_priv;
3919	rtwvif->scan_req = NULL;
3920	rtwvif->scan_ies = NULL;
3921	scan_info->last_chan_idx = 0;
3922	scan_info->scanning_vif = NULL;
3923
3924	rtw89_set_channel(rtwdev);
3925}
3926
3927void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
3928{
3929	rtw89_hw_scan_offload(rtwdev, vif, false);
3930	rtw89_hw_scan_complete(rtwdev, vif, true);
3931}
3932
3933static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
3934{
3935	struct rtw89_vif *rtwvif;
3936
3937	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
		/* A non-zero bssid implies connected or attempting to connect */
3939		if (!is_zero_ether_addr(rtwvif->bssid))
3940			return true;
3941	}
3942
3943	return false;
3944}
3945
3946int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
3947			  bool enable)
3948{
3949	struct rtw89_scan_option opt = {0};
3950	struct rtw89_vif *rtwvif;
3951	bool connected;
3952	int ret = 0;
3953
3954	rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
3955	if (!rtwvif)
3956		return -EINVAL;
3957
3958	connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
3959	opt.enable = enable;
3960	opt.target_ch_mode = connected;
3961	if (enable) {
3962		ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected);
3963		if (ret)
3964			goto out;
3965	}
3966	ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
3967out:
3968	return ret;
3969}
3970
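/* Deliberately make FW raise a CPU exception so the firmware crash and SER
 * recovery handling can be exercised (e.g. from debugfs).
 */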
3971#define H2C_FW_CPU_EXCEPTION_LEN 4
3972#define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
3973int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
3974{
3975	struct sk_buff *skb;
3976	int ret;
3977
3978	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
3979	if (!skb) {
3980		rtw89_err(rtwdev,
3981			  "failed to alloc skb for fw cpu exception\n");
3982		return -ENOMEM;
3983	}
3984
3985	skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
3986	RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
3987					   H2C_FW_CPU_EXCEPTION_TYPE_DEF);
3988
3989	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3990			      H2C_CAT_TEST,
3991			      H2C_CL_FW_STATUS_TEST,
3992			      H2C_FUNC_CPU_EXCEPTION, 0, 0,
3993			      H2C_FW_CPU_EXCEPTION_LEN);
3994
3995	ret = rtw89_h2c_tx(rtwdev, skb, false);
3996	if (ret) {
3997		rtw89_err(rtwdev, "failed to send h2c\n");
3998		goto fail;
3999	}
4000
4001	return 0;
4002
4003fail:
4004	dev_kfree_skb_any(skb);
4005	return ret;
4006}
4007
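/* Request FW to drop queued packets matching @params (per-MACID AC queues or
 * a whole band). Selections other than the ones listed in the switch below
 * are still sent, but may not be fully supported by FW yet.
 */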
4008#define H2C_PKT_DROP_LEN 24
4009int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
4010			  const struct rtw89_pkt_drop_params *params)
4011{
4012	struct sk_buff *skb;
4013	int ret;
4014
4015	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
4016	if (!skb) {
4017		rtw89_err(rtwdev,
4018			  "failed to alloc skb for packet drop\n");
4019		return -ENOMEM;
4020	}
4021
4022	switch (params->sel) {
4023	case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
4024	case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
4025	case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
4026	case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
4027	case RTW89_PKT_DROP_SEL_BAND_ONCE:
4028		break;
4029	default:
4030		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "pkt drop H2C might not fully support sel %d yet\n",
4032			    params->sel);
4033		break;
4034	}
4035
4036	skb_put(skb, H2C_PKT_DROP_LEN);
4037	RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
4038	RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
4039	RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
4040	RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
4041	RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
4042	RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
4043	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data,
4044						  params->macid_band_sel[0]);
4045	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data,
4046						  params->macid_band_sel[1]);
4047	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data,
4048						  params->macid_band_sel[2]);
4049	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data,
4050						  params->macid_band_sel[3]);
4051
4052	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4053			      H2C_CAT_MAC,
4054			      H2C_CL_MAC_FW_OFLD,
4055			      H2C_FUNC_PKT_DROP, 0, 0,
4056			      H2C_PKT_DROP_LEN);
4057
4058	ret = rtw89_h2c_tx(rtwdev, skb, false);
4059	if (ret) {
4060		rtw89_err(rtwdev, "failed to send h2c\n");
4061		goto fail;
4062	}
4063
4064	return 0;
4065
4066fail:
4067	dev_kfree_skb_any(skb);
4068	return ret;
4069}
4070
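/* WoWLAN keep-alive: offload a NULL-data frame and ask FW to transmit it
 * periodically (period of 5; units defined by FW) on behalf of the suspended
 * host.
 */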
4071#define H2C_KEEP_ALIVE_LEN 4
4072int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
4073			    bool enable)
4074{
4075	struct sk_buff *skb;
4076	u8 pkt_id = 0;
4077	int ret;
4078
4079	if (enable) {
4080		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
4081						   RTW89_PKT_OFLD_TYPE_NULL_DATA,
4082						   &pkt_id);
4083		if (ret)
4084			return -EPERM;
4085	}
4086
4087	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
4088	if (!skb) {
4089		rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
4090		return -ENOMEM;
4091	}
4092
4093	skb_put(skb, H2C_KEEP_ALIVE_LEN);
4094
4095	RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
4096	RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
4097	RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
4098	RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id);
4099
4100	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4101			      H2C_CAT_MAC,
4102			      H2C_CL_MAC_WOW,
4103			      H2C_FUNC_KEEP_ALIVE, 0, 1,
4104			      H2C_KEEP_ALIVE_LEN);
4105
4106	ret = rtw89_h2c_tx(rtwdev, skb, false);
4107	if (ret) {
4108		rtw89_err(rtwdev, "failed to send h2c\n");
4109		goto fail;
4110	}
4111
4112	return 0;
4113
4114fail:
4115	dev_kfree_skb_any(skb);
4116
4117	return ret;
4118}
4119
4120#define H2C_DISCONNECT_DETECT_LEN 8
4121int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
4122				   struct rtw89_vif *rtwvif, bool enable)
4123{
4124	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
4125	struct sk_buff *skb;
4126	u8 macid = rtwvif->mac_id;
4127	int ret;
4128
4129	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
4130	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
4132		return -ENOMEM;
4133	}
4134
4135	skb_put(skb, H2C_DISCONNECT_DETECT_LEN);
4136
4137	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
4138		RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
4139		RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
4140		RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
4141		RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
4142		RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
4143	}
4144
4145	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4146			      H2C_CAT_MAC,
4147			      H2C_CL_MAC_WOW,
4148			      H2C_FUNC_DISCONNECT_DETECT, 0, 1,
4149			      H2C_DISCONNECT_DETECT_LEN);
4150
4151	ret = rtw89_h2c_tx(rtwdev, skb, false);
4152	if (ret) {
4153		rtw89_err(rtwdev, "failed to send h2c\n");
4154		goto fail;
4155	}
4156
4157	return 0;
4158
4159fail:
4160	dev_kfree_skb_any(skb);
4161
4162	return ret;
4163}
4164
4165#define H2C_WOW_GLOBAL_LEN 8
4166int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
4167			    bool enable)
4168{
4169	struct sk_buff *skb;
4170	u8 macid = rtwvif->mac_id;
4171	int ret;
4172
4173	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN);
4174	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
4176		return -ENOMEM;
4177	}
4178
4179	skb_put(skb, H2C_WOW_GLOBAL_LEN);
4180
4181	RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable);
4182	RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid);
4183
4184	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4185			      H2C_CAT_MAC,
4186			      H2C_CL_MAC_WOW,
4187			      H2C_FUNC_WOW_GLOBAL, 0, 1,
4188			      H2C_WOW_GLOBAL_LEN);
4189
4190	ret = rtw89_h2c_tx(rtwdev, skb, false);
4191	if (ret) {
4192		rtw89_err(rtwdev, "failed to send h2c\n");
4193		goto fail;
4194	}
4195
4196	return 0;
4197
4198fail:
4199	dev_kfree_skb_any(skb);
4200
4201	return ret;
4202}
4203
4204#define H2C_WAKEUP_CTRL_LEN 4
4205int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
4206				 struct rtw89_vif *rtwvif,
4207				 bool enable)
4208{
4209	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
4210	struct sk_buff *skb;
4211	u8 macid = rtwvif->mac_id;
4212	int ret;
4213
4214	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
4215	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
4217		return -ENOMEM;
4218	}
4219
4220	skb_put(skb, H2C_WAKEUP_CTRL_LEN);
4221
4222	if (rtw_wow->pattern_cnt)
4223		RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
4224	if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
4225		RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
4226	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
4227		RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);
4228
4229	RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);
4230
4231	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4232			      H2C_CAT_MAC,
4233			      H2C_CL_MAC_WOW,
4234			      H2C_FUNC_WAKEUP_CTRL, 0, 1,
4235			      H2C_WAKEUP_CTRL_LEN);
4236
4237	ret = rtw89_h2c_tx(rtwdev, skb, false);
4238	if (ret) {
4239		rtw89_err(rtwdev, "failed to send h2c\n");
4240		goto fail;
4241	}
4242
4243	return 0;
4244
4245fail:
4246	dev_kfree_skb_any(skb);
4247
4248	return ret;
4249}
4250
4251#define H2C_WOW_CAM_UPD_LEN 24
4252int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
4253			    struct rtw89_wow_cam_info *cam_info)
4254{
4255	struct sk_buff *skb;
4256	int ret;
4257
4258	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
4259	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
4261		return -ENOMEM;
4262	}
4263
4264	skb_put(skb, H2C_WOW_CAM_UPD_LEN);
4265
4266	RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
4267	RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
4268	if (cam_info->valid) {
4269		RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
4270		RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
4271		RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
4272		RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
4273		RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
4274		RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
4275							     cam_info->negative_pattern_match);
4276		RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
4277						   cam_info->skip_mac_hdr);
4278		RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
4279		RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
4280		RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
4281	}
4282	RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);
4283
4284	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4285			      H2C_CAT_MAC,
4286			      H2C_CL_MAC_WOW,
4287			      H2C_FUNC_WOW_CAM_UPD, 0, 1,
4288			      H2C_WOW_CAM_UPD_LEN);
4289
4290	ret = rtw89_h2c_tx(rtwdev, skb, false);
4291	if (ret) {
4292		rtw89_err(rtwdev, "failed to send h2c\n");
4293		goto fail;
4294	}
4295
4296	return 0;
4297fail:
4298	dev_kfree_skb_any(skb);
4299
4300	return ret;
4301}
4302
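/* Send an H2C and block until FW acknowledges it through the paired C2H
 * identified by @cond. Typical usage, as in the MCC helpers below:
 *
 *	cond = RTW89_MCC_WAIT_COND(group, func);
 *	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, &rtwdev->mcc.wait, cond);
 *
 * The skb is always consumed, so callers must not free it afterwards.
 */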
/* Return < 0 if a failure happens while waiting for the condition.
 * Return 0 when waiting for the condition succeeds.
 * Return > 0 if the wait is considered unreachable due to driver/FW design,
 * where 1 means during SER.
 */
4308static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
4309				 struct rtw89_wait_info *wait, unsigned int cond)
4310{
4311	int ret;
4312
4313	ret = rtw89_h2c_tx(rtwdev, skb, false);
4314	if (ret) {
4315		rtw89_err(rtwdev, "failed to send h2c\n");
4316		dev_kfree_skb_any(skb);
4317		return -EBUSY;
4318	}
4319
4320	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
4321		return 1;
4322
4323	return rtw89_wait_for_cond(wait, cond);
4324}
4325
4326#define H2C_ADD_MCC_LEN 16
4327int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
4328			 const struct rtw89_fw_mcc_add_req *p)
4329{
4330	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4331	struct sk_buff *skb;
4332	unsigned int cond;
4333
4334	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
4335	if (!skb) {
4336		rtw89_err(rtwdev,
4337			  "failed to alloc skb for add mcc\n");
4338		return -ENOMEM;
4339	}
4340
4341	skb_put(skb, H2C_ADD_MCC_LEN);
4342	RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
4343	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
4344	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
4345	RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
4346	RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
4347	RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
4348	RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
4349	RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
4350	RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
4351	RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
4352	RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
4353	RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
4354	RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
4355	RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
4356	RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
4357	RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
4358	RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
4359	RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
4360	RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
4361	RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);
4362
4363	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4364			      H2C_CAT_MAC,
4365			      H2C_CL_MCC,
4366			      H2C_FUNC_ADD_MCC, 0, 0,
4367			      H2C_ADD_MCC_LEN);
4368
4369	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
4370	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4371}
4372
4373#define H2C_START_MCC_LEN 12
4374int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
4375			   const struct rtw89_fw_mcc_start_req *p)
4376{
4377	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4378	struct sk_buff *skb;
4379	unsigned int cond;
4380
4381	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
4382	if (!skb) {
4383		rtw89_err(rtwdev,
4384			  "failed to alloc skb for start mcc\n");
4385		return -ENOMEM;
4386	}
4387
4388	skb_put(skb, H2C_START_MCC_LEN);
4389	RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
4390	RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
4391	RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
4392	RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
4393	RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
4394	RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
4395	RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
4396	RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
4397	RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);
4398
4399	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4400			      H2C_CAT_MAC,
4401			      H2C_CL_MCC,
4402			      H2C_FUNC_START_MCC, 0, 0,
4403			      H2C_START_MCC_LEN);
4404
4405	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
4406	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4407}
4408
4409#define H2C_STOP_MCC_LEN 4
4410int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
4411			  bool prev_groups)
4412{
4413	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4414	struct sk_buff *skb;
4415	unsigned int cond;
4416
4417	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
4418	if (!skb) {
4419		rtw89_err(rtwdev,
4420			  "failed to alloc skb for stop mcc\n");
4421		return -ENOMEM;
4422	}
4423
4424	skb_put(skb, H2C_STOP_MCC_LEN);
4425	RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
4426	RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
4427	RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);
4428
4429	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4430			      H2C_CAT_MAC,
4431			      H2C_CL_MCC,
4432			      H2C_FUNC_STOP_MCC, 0, 0,
4433			      H2C_STOP_MCC_LEN);
4434
4435	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
4436	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4437}
4438
4439#define H2C_DEL_MCC_GROUP_LEN 4
4440int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
4441			       bool prev_groups)
4442{
4443	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4444	struct sk_buff *skb;
4445	unsigned int cond;
4446
4447	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
4448	if (!skb) {
4449		rtw89_err(rtwdev,
4450			  "failed to alloc skb for del mcc group\n");
4451		return -ENOMEM;
4452	}
4453
4454	skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
4455	RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
4456	RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);
4457
4458	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4459			      H2C_CAT_MAC,
4460			      H2C_CL_MCC,
4461			      H2C_FUNC_DEL_MCC_GROUP, 0, 0,
4462			      H2C_DEL_MCC_GROUP_LEN);
4463
4464	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
4465	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4466}
4467
4468#define H2C_RESET_MCC_GROUP_LEN 4
4469int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
4470{
4471	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4472	struct sk_buff *skb;
4473	unsigned int cond;
4474
4475	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
4476	if (!skb) {
4477		rtw89_err(rtwdev,
4478			  "failed to alloc skb for reset mcc group\n");
4479		return -ENOMEM;
4480	}
4481
4482	skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
4483	RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);
4484
4485	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4486			      H2C_CAT_MAC,
4487			      H2C_CL_MCC,
4488			      H2C_FUNC_RESET_MCC_GROUP, 0, 0,
4489			      H2C_RESET_MCC_GROUP_LEN);
4490
4491	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
4492	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4493}
4494
4495#define H2C_MCC_REQ_TSF_LEN 4
4496int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
4497			     const struct rtw89_fw_mcc_tsf_req *req,
4498			     struct rtw89_mac_mcc_tsf_rpt *rpt)
4499{
4500	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4501	struct rtw89_mac_mcc_tsf_rpt *tmp;
4502	struct sk_buff *skb;
4503	unsigned int cond;
4504	int ret;
4505
4506	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
4507	if (!skb) {
4508		rtw89_err(rtwdev,
4509			  "failed to alloc skb for mcc req tsf\n");
4510		return -ENOMEM;
4511	}
4512
4513	skb_put(skb, H2C_MCC_REQ_TSF_LEN);
4514	RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
4515	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
4516	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);
4517
4518	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4519			      H2C_CAT_MAC,
4520			      H2C_CL_MCC,
4521			      H2C_FUNC_MCC_REQ_TSF, 0, 0,
4522			      H2C_MCC_REQ_TSF_LEN);
4523
4524	cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
4525	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4526	if (ret)
4527		return ret;
4528
4529	tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
4530	*rpt = *tmp;
4531
4532	return 0;
4533}
4534
4535#define H2C_MCC_MACID_BITMAP_DSC_LEN 4
4536int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid,
4537				  u8 *bitmap)
4538{
4539	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4540	struct sk_buff *skb;
4541	unsigned int cond;
4542	u8 map_len;
4543	u8 h2c_len;
4544
4545	BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
4546	map_len = RTW89_MAX_MAC_ID_NUM / 8;
4547	h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
4548	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
4549	if (!skb) {
4550		rtw89_err(rtwdev,
4551			  "failed to alloc skb for mcc macid bitmap\n");
4552		return -ENOMEM;
4553	}
4554
4555	skb_put(skb, h2c_len);
4556	RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
4557	RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
4558	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
4559	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);
4560
4561	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4562			      H2C_CAT_MAC,
4563			      H2C_CL_MCC,
4564			      H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
4565			      h2c_len);
4566
4567	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
4568	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4569}
4570
4571#define H2C_MCC_SYNC_LEN 4
4572int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
4573			  u8 target, u8 offset)
4574{
4575	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4576	struct sk_buff *skb;
4577	unsigned int cond;
4578
4579	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
4580	if (!skb) {
4581		rtw89_err(rtwdev,
4582			  "failed to alloc skb for mcc sync\n");
4583		return -ENOMEM;
4584	}
4585
4586	skb_put(skb, H2C_MCC_SYNC_LEN);
4587	RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
4588	RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
4589	RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
4590	RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);
4591
4592	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4593			      H2C_CAT_MAC,
4594			      H2C_CL_MCC,
4595			      H2C_FUNC_MCC_SYNC, 0, 0,
4596			      H2C_MCC_SYNC_LEN);
4597
4598	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
4599	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4600}
4601
4602#define H2C_MCC_SET_DURATION_LEN 20
4603int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
4604				  const struct rtw89_fw_mcc_duration *p)
4605{
4606	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4607	struct sk_buff *skb;
4608	unsigned int cond;
4609
4610	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
4611	if (!skb) {
4612		rtw89_err(rtwdev,
4613			  "failed to alloc skb for mcc set duration\n");
4614		return -ENOMEM;
4615	}
4616
4617	skb_put(skb, H2C_MCC_SET_DURATION_LEN);
4618	RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
4619	RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
4620	RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
4621	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
4622	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
4623	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
4624						       p->start_tsf_low);
4625	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
4626							p->start_tsf_high);
4627	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
4628	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);
4629
4630	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4631			      H2C_CAT_MAC,
4632			      H2C_CL_MCC,
4633			      H2C_FUNC_MCC_SET_DURATION, 0, 0,
4634			      H2C_MCC_SET_DURATION_LEN);
4635
4636	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
4637	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4638}
4639