1// SPDX-License-Identifier: ISC
2/*
3 * Copyright (c) 2005-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
6 */
7#include "core.h"
8#include "debug.h"
9#include "mac.h"
10#include "hw.h"
11#include "wmi.h"
12#include "wmi-ops.h"
13#include "wmi-tlv.h"
14#include "p2p.h"
15#include "testmode.h"
16#include <linux/bitfield.h>
17
18/***************/
19/* TLV helpers */
20/**************/
21
/* Validation policy for one TLV tag: the minimum payload length a TLV
 * carrying that tag must have to be accepted by ath10k_wmi_tlv_iter().
 */
struct wmi_tlv_policy {
	size_t min_len;
};
25
/* Minimum payload lengths indexed by TLV tag. Tags not listed here (or
 * listed with min_len 0, like the raw array tags) are not length-checked
 * by ath10k_wmi_tlv_iter().
 */
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
	[WMI_TLV_TAG_ARRAY_BYTE]
		= { .min_len = 0 },
	[WMI_TLV_TAG_ARRAY_UINT32]
		= { .min_len = 0 },
	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
		= { .min_len = sizeof(struct wmi_scan_event) },
	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
		= { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
	[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_start_response_event) },
	[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
		= { .min_len = sizeof(struct wmi_host_swba_event) },
	[WMI_TLV_TAG_STRUCT_TIM_INFO]
		= { .min_len = sizeof(struct wmi_tim_info) },
	[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
		= { .min_len = sizeof(struct wmi_p2p_noa_info) },
	[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
	[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
		= { .min_len = sizeof(struct hal_reg_capabilities) },
	[WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
		= { .min_len = sizeof(struct wlan_host_mem_req) },
	[WMI_TLV_TAG_STRUCT_READY_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
	[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
	[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
	[WMI_TLV_TAG_STRUCT_ROAM_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_roam_ev) },
	[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
		= { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
	[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
};
68
/* Walk a buffer of WMI TLVs and invoke @iter on each TLV payload.
 *
 * Every TLV starts with a struct wmi_tlv header (little-endian 16-bit tag
 * and payload length). Before @iter runs, the header and payload length
 * are validated against the remaining buffer and against the per-tag
 * minimum lengths in wmi_tlv_policies.
 *
 * Returns 0 on success, -EINVAL on malformed input, or the first
 * non-zero value returned by @iter (which aborts the walk).
 */
static int
ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
		    int (*iter)(struct ath10k *ar, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr; /* kept only to report byte offsets */
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		/* Need a complete header before reading tag/len. */
		if (len < sizeof(*tlv)) {
			ath10k_dbg(ar, ATH10K_DBG_WMI,
				   "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = __le16_to_cpu(tlv->tag);
		tlv_len = __le16_to_cpu(tlv->len);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		/* Claimed payload must fit the remaining buffer; this also
		 * guards the unsigned subtraction at the loop tail.
		 */
		if (tlv_len > len) {
			ath10k_dbg(ar, ATH10K_DBG_WMI,
				   "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		/* Enforce per-tag minimum payload lengths. */
		if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
		    wmi_tlv_policies[tlv_tag].min_len &&
		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath10k_dbg(ar, ATH10K_DBG_WMI,
				   "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ar, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
121
122static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
123				     const void *ptr, void *data)
124{
125	const void **tb = data;
126
127	if (tag < WMI_TLV_TAG_MAX)
128		tb[tag] = ptr;
129
130	return 0;
131}
132
/* Parse @ptr/@len into @tb, a WMI_TLV_TAG_MAX-sized tag -> payload
 * lookup table (for duplicate tags the last TLV wins).
 */
static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
				const void *ptr, size_t len)
{
	return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
				   (void *)tb);
}
139
140static const void **
141ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
142			   size_t len, gfp_t gfp)
143{
144	const void **tb;
145	int ret;
146
147	tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
148	if (!tb)
149		return ERR_PTR(-ENOMEM);
150
151	ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
152	if (ret) {
153		kfree(tb);
154		return ERR_PTR(ret);
155	}
156
157	return tb;
158}
159
160static u16 ath10k_wmi_tlv_len(const void *ptr)
161{
162	return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
163}
164
165/**************/
166/* TLV events */
167/**************/
168static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
169					      struct sk_buff *skb)
170{
171	const void **tb;
172	const struct wmi_tlv_bcn_tx_status_ev *ev;
173	struct ath10k_vif *arvif;
174	u32 vdev_id, tx_status;
175	int ret;
176
177	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
178	if (IS_ERR(tb)) {
179		ret = PTR_ERR(tb);
180		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
181		return ret;
182	}
183
184	ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
185	if (!ev) {
186		kfree(tb);
187		return -EPROTO;
188	}
189
190	tx_status = __le32_to_cpu(ev->tx_status);
191	vdev_id = __le32_to_cpu(ev->vdev_id);
192
193	switch (tx_status) {
194	case WMI_TLV_BCN_TX_STATUS_OK:
195		break;
196	case WMI_TLV_BCN_TX_STATUS_XRETRY:
197	case WMI_TLV_BCN_TX_STATUS_DROP:
198	case WMI_TLV_BCN_TX_STATUS_FILTERED:
199		/* FIXME: It's probably worth telling mac80211 to stop the
200		 * interface as it is crippled.
201		 */
202		ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
203			    vdev_id, tx_status);
204		break;
205	}
206
207	arvif = ath10k_get_arvif(ar, vdev_id);
208	if (arvif && arvif->is_up && arvif->vif->bss_conf.csa_active)
209		ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
210
211	kfree(tb);
212	return 0;
213}
214
/* WMI_VDEV_DELETE_RESP_EVENTID handler: wake the thread waiting for the
 * vdev deletion to complete.
 */
static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar,
						  struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_DELETE_RESP_EVENTID\n");
	complete(&ar->vdev_delete_done);
}
221
/* TLV iterator callback for one peer stats info record: look up the
 * station by MAC and cache its last rx/tx rate codes and bitrates in the
 * per-station driver data.
 *
 * Returns 0 on success, -EPROTO on an unexpected tag, -EINVAL if the
 * station is unknown.
 */
static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len,
						const void *ptr, void *data)
{
	const struct wmi_tlv_peer_stats_info *stat = ptr;
	struct ieee80211_sta *sta;
	struct ath10k_sta *arsta;

	if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO)
		return -EPROTO;

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n",
		   stat->peer_macaddr.addr,
		   __le32_to_cpu(stat->last_rx_rate_code),
		   __le32_to_cpu(stat->last_rx_bitrate_kbps));

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv stats tx rate code 0x%x bit rate %d kbps\n",
		   __le32_to_cpu(stat->last_tx_rate_code),
		   __le32_to_cpu(stat->last_tx_bitrate_kbps));

	/* The sta lookup is only valid inside the RCU read section, so the
	 * arsta fields are written before dropping it.
	 */
	rcu_read_lock();
	sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
	if (!sta) {
		rcu_read_unlock();
		ath10k_warn(ar, "not found station for peer stats\n");
		return -EINVAL;
	}

	arsta = (struct ath10k_sta *)sta->drv_priv;
	arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code);
	arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
	arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
	arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
	rcu_read_unlock();

	return 0;
}
260
261static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar,
262						  struct sk_buff *skb)
263{
264	const void **tb;
265	const struct wmi_tlv_peer_stats_info_ev *ev;
266	const void *data;
267	u32 num_peer_stats;
268	int ret;
269
270	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
271	if (IS_ERR(tb)) {
272		ret = PTR_ERR(tb);
273		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
274		return ret;
275	}
276
277	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT];
278	data = tb[WMI_TLV_TAG_ARRAY_STRUCT];
279
280	if (!ev || !data) {
281		kfree(tb);
282		return -EPROTO;
283	}
284
285	num_peer_stats = __le32_to_cpu(ev->num_peers);
286
287	ath10k_dbg(ar, ATH10K_DBG_WMI,
288		   "wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n",
289		   __le32_to_cpu(ev->vdev_id),
290		   num_peer_stats,
291		   __le32_to_cpu(ev->more_data));
292
293	ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data),
294				  ath10k_wmi_tlv_parse_peer_stats_info, NULL);
295	if (ret)
296		ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret);
297
298	kfree(tb);
299	return 0;
300}
301
/* WMI_PEER_STATS_INFO_EVENTID handler: parse the stats and then signal
 * completion unconditionally so a waiter cannot block on a bad event.
 */
static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar,
						 struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n");
	ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb);
	complete(&ar->peer_stats_info_complete);
}
309
/* WMI_TLV_DIAG_DATA_CONTAINER_EVENTID handler: walk the byte-array
 * payload, which packs ev->num_items diag items (header plus payload
 * padded to 4 bytes), and emit each via the diag tracepoint. Bounds are
 * checked before every item so a lying header cannot read past the
 * buffer.
 *
 * Returns 0 (parse problems are only warned about) or a negative errno
 * if the TLV framing itself is invalid.
 */
static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
					  struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_tlv_diag_data_ev *ev;
	const struct wmi_tlv_diag_item *item;
	const void *data;
	int ret, num_items, len;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
	if (!ev || !data) {
		kfree(tb);
		return -EPROTO;
	}

	num_items = __le32_to_cpu(ev->num_items);
	len = ath10k_wmi_tlv_len(data);

	while (num_items--) {
		if (len == 0)
			break;
		/* The fixed item header must fit before it is read. */
		if (len < sizeof(*item)) {
			ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
			break;
		}

		item = data;

		/* The item's claimed payload must fit too. */
		if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
			ath10k_warn(ar, "failed to parse diag data: item is too long\n");
			break;
		}

		trace_ath10k_wmi_diag_container(ar,
						item->type,
						__le32_to_cpu(item->timestamp),
						__le32_to_cpu(item->code),
						__le16_to_cpu(item->len),
						item->payload);

		/* Payloads are padded to a 4-byte boundary on the wire. */
		len -= sizeof(*item);
		len -= roundup(__le16_to_cpu(item->len), 4);

		data += sizeof(*item);
		data += roundup(__le16_to_cpu(item->len), 4);
	}

	/* A clean walk ends with num_items == -1 and len == 0; anything
	 * else means the header count and the byte array disagreed.
	 */
	if (num_items != -1 || len != 0)
		ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
			    num_items, len);

	kfree(tb);
	return 0;
}
372
373static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
374				     struct sk_buff *skb)
375{
376	const void **tb;
377	const void *data;
378	int ret, len;
379
380	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
381	if (IS_ERR(tb)) {
382		ret = PTR_ERR(tb);
383		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
384		return ret;
385	}
386
387	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
388	if (!data) {
389		kfree(tb);
390		return -EPROTO;
391	}
392	len = ath10k_wmi_tlv_len(data);
393
394	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
395	trace_ath10k_wmi_diag(ar, data, len);
396
397	kfree(tb);
398	return 0;
399}
400
401static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
402					struct sk_buff *skb)
403{
404	const void **tb;
405	const struct wmi_tlv_p2p_noa_ev *ev;
406	const struct wmi_p2p_noa_info *noa;
407	int ret, vdev_id;
408
409	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
410	if (IS_ERR(tb)) {
411		ret = PTR_ERR(tb);
412		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
413		return ret;
414	}
415
416	ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
417	noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
418
419	if (!ev || !noa) {
420		kfree(tb);
421		return -EPROTO;
422	}
423
424	vdev_id = __le32_to_cpu(ev->vdev_id);
425
426	ath10k_dbg(ar, ATH10K_DBG_WMI,
427		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
428		   vdev_id, noa->num_descriptors);
429
430	ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
431	kfree(tb);
432	return 0;
433}
434
/* WMI_TLV_TX_PAUSE_EVENTID handler: for vdev-level pause reasons, apply
 * the pause/unpause action to every vdev set in the event's bitmap;
 * peer/tid-level reasons are not supported and only logged.
 *
 * Returns 0 on success or a negative errno on parse failure.
 */
static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
					 struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_tlv_tx_pause_ev *ev;
	int ret, vdev_id;
	u32 pause_id, action, vdev_map, peer_id, tid_map;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	pause_id = __le32_to_cpu(ev->pause_id);
	action = __le32_to_cpu(ev->action);
	vdev_map = __le32_to_cpu(ev->vdev_map);
	peer_id = __le32_to_cpu(ev->peer_id);
	tid_map = __le32_to_cpu(ev->tid_map);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
		   pause_id, action, vdev_map, peer_id, tid_map);

	switch (pause_id) {
	case WMI_TLV_TX_PAUSE_ID_MCC:
	case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
	case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
	case WMI_TLV_TX_PAUSE_ID_AP_PS:
	case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
		/* Visit each set bit of the vdev bitmap exactly once,
		 * clearing bits as they are handled so the loop terminates
		 * when the map is empty.
		 */
		for (vdev_id = 0; vdev_map; vdev_id++) {
			if (!(vdev_map & BIT(vdev_id)))
				continue;

			vdev_map &= ~BIT(vdev_id);
			ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
							action);
		}
		break;
	case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
	case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
	case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
	case WMI_TLV_TX_PAUSE_ID_HOST:
		/* Per-peer/per-tid pausing is not implemented. */
		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac ignoring unsupported tx pause id %d\n",
			   pause_id);
		break;
	default:
		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac ignoring unknown tx pause vdev %d\n",
			   pause_id);
		break;
	}

	kfree(tb);
	return 0;
}
499
/* WMI_TLV_RFKILL_STATE_CHANGE_EVENTID handler: record the hardware
 * rfkill state reported by firmware and propagate it to mac80211 /
 * cfg80211.
 */
static void ath10k_wmi_tlv_event_rfkill_state_change(struct ath10k *ar,
						     struct sk_buff *skb)
{
	const struct wmi_tlv_rfkill_state_change_ev *ev;
	const void **tb;
	bool radio;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar,
			    "failed to parse rfkill state change event: %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_RFKILL_EVENT];
	if (!ev) {
		kfree(tb);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
		   __le32_to_cpu(ev->gpio_pin_num),
		   __le32_to_cpu(ev->int_type),
		   __le32_to_cpu(ev->radio_state));

	radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON);

	spin_lock_bh(&ar->data_lock);

	/* Only ever set here; presumably cleared elsewhere on radio-on -
	 * NOTE(review): confirm against the rest of the driver.
	 */
	if (!radio)
		ar->hw_rfkill_on = true;

	spin_unlock_bh(&ar->data_lock);

	/* notify cfg80211 radio state change */
	ath10k_mac_rfkill_enable_radio(ar, radio);
	wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio);
}
542
543static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar,
544					    struct sk_buff *skb)
545{
546	const struct wmi_tlv_pdev_temperature_event *ev;
547
548	ev = (struct wmi_tlv_pdev_temperature_event *)skb->data;
549	if (WARN_ON(skb->len < sizeof(*ev)))
550		return -EPROTO;
551
552	ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
553	return 0;
554}
555
/* WMI_TLV_TDLS_PEER_EVENTID handler: on a firmware-reported TDLS link
 * failure, ask mac80211 to tear the TDLS link down.
 *
 * Note the locking shape: rcu_read_lock() is taken only inside the
 * teardown cases and released at the exit label; the default case
 * returns before any lock is taken.
 */
static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_sta *station;
	const struct wmi_tlv_tdls_peer_event *ev;
	const void **tb;
	struct ath10k_vif *arvif;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ath10k_warn(ar, "tdls peer failed to parse tlv");
		return;
	}
	ev = tb[WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT];
	if (!ev) {
		kfree(tb);
		ath10k_warn(ar, "tdls peer NULL event");
		return;
	}

	switch (__le32_to_cpu(ev->peer_reason)) {
	case WMI_TDLS_TEARDOWN_REASON_TX:
	case WMI_TDLS_TEARDOWN_REASON_RSSI:
	case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
		/* sta lookup is RCU-protected; unlocked at exit below. */
		rcu_read_lock();
		station = ieee80211_find_sta_by_ifaddr(ar->hw,
						       ev->peer_macaddr.addr,
						       NULL);
		if (!station) {
			ath10k_warn(ar, "did not find station from tdls peer event");
			goto exit;
		}

		arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
		if (!arvif) {
			ath10k_warn(ar, "no vif for vdev_id %d found",
				    __le32_to_cpu(ev->vdev_id));
			goto exit;
		}

		ieee80211_tdls_oper_request(
					arvif->vif, station->addr,
					NL80211_TDLS_TEARDOWN,
					WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
					GFP_ATOMIC
					);
		break;
	default:
		/* Other reasons are ignored; no RCU lock was taken. */
		kfree(tb);
		return;
	}

exit:
	rcu_read_unlock();
	kfree(tb);
}
611
612static int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
613						 struct sk_buff *skb)
614{
615	struct wmi_peer_delete_resp_ev_arg *arg;
616	struct wmi_tlv *tlv_hdr;
617
618	tlv_hdr = (struct wmi_tlv *)skb->data;
619	arg = (struct wmi_peer_delete_resp_ev_arg *)tlv_hdr->value;
620
621	ath10k_dbg(ar, ATH10K_DBG_WMI, "vdev id %d", arg->vdev_id);
622	ath10k_dbg(ar, ATH10K_DBG_WMI, "peer mac addr %pM", &arg->peer_addr);
623	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete response\n");
624
625	complete(&ar->peer_delete_done);
626
627	return 0;
628}
629
630/***********/
631/* TLV ops */
632/***********/
633
/* Top-level TLV WMI receive dispatcher: strip the WMI command header,
 * optionally hand the event to testmode, then route it by event id.
 *
 * The skb is freed here unless a handler takes ownership (mgmt rx and
 * service ready return without falling through to the free at "out").
 */
static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;
	bool consumed;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* Drop the event if it is too short to carry the header. */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		goto out;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	consumed = ath10k_tm_event_wmi(ar, id, skb);

	/* Ready event must be handled normally also in UTF mode so that we
	 * know the UTF firmware has booted, others we are just bypass WMI
	 * events to testmode.
	 */
	if (consumed && id != WMI_TLV_READY_EVENTID) {
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi tlv testmode consumed 0x%x\n", id);
		goto out;
	}

	switch (id) {
	case WMI_TLV_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_TLV_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_TLV_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_TLV_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_TLV_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_TLV_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_TLV_PEER_STATS_INFO_EVENTID:
		ath10k_wmi_tlv_event_peer_stats_info(ar, skb);
		break;
	case WMI_TLV_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_TLV_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_TLV_VDEV_DELETE_RESP_EVENTID:
		ath10k_wmi_tlv_event_vdev_delete_resp(ar, skb);
		break;
	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_TLV_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_TLV_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_TLV_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_TLV_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_TLV_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_TLV_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_TLV_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_TLV_PDEV_FTM_INTG_EVENTID:
		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
		break;
	case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
		ath10k_wmi_event_gtk_offload_status(ar, skb);
		break;
	case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
		break;
	case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
		ath10k_wmi_event_delba_complete(ar, skb);
		break;
	case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
		ath10k_wmi_event_addba_complete(ar, skb);
		break;
	case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
		break;
	case WMI_TLV_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		/* service_ready() owns the skb now! */
		return;
	case WMI_TLV_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_TLV_SERVICE_AVAILABLE_EVENTID:
		ath10k_wmi_event_service_available(ar, skb);
		break;
	case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
		break;
	case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
		ath10k_wmi_tlv_event_diag_data(ar, skb);
		break;
	case WMI_TLV_DIAG_EVENTID:
		ath10k_wmi_tlv_event_diag(ar, skb);
		break;
	case WMI_TLV_P2P_NOA_EVENTID:
		ath10k_wmi_tlv_event_p2p_noa(ar, skb);
		break;
	case WMI_TLV_TX_PAUSE_EVENTID:
		ath10k_wmi_tlv_event_tx_pause(ar, skb);
		break;
	case WMI_TLV_RFKILL_STATE_CHANGE_EVENTID:
		ath10k_wmi_tlv_event_rfkill_state_change(ar, skb);
		break;
	case WMI_TLV_PDEV_TEMPERATURE_EVENTID:
		ath10k_wmi_tlv_event_temperature(ar, skb);
		break;
	case WMI_TLV_TDLS_PEER_EVENTID:
		ath10k_wmi_event_tdls_peer(ar, skb);
		break;
	case WMI_TLV_PEER_DELETE_RESP_EVENTID:
		ath10k_wmi_tlv_event_peer_delete_resp(ar, skb);
		break;
	case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
		ath10k_wmi_event_mgmt_tx_compl(ar, skb);
		break;
	case WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID:
		ath10k_wmi_event_mgmt_tx_bundle_compl(ar, skb);
		break;
	default:
		ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
805
806static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
807					  struct sk_buff *skb,
808					  struct wmi_scan_ev_arg *arg)
809{
810	const void **tb;
811	const struct wmi_scan_event *ev;
812	int ret;
813
814	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
815	if (IS_ERR(tb)) {
816		ret = PTR_ERR(tb);
817		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
818		return ret;
819	}
820
821	ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
822	if (!ev) {
823		kfree(tb);
824		return -EPROTO;
825	}
826
827	arg->event_type = ev->event_type;
828	arg->reason = ev->reason;
829	arg->channel_freq = ev->channel_freq;
830	arg->scan_req_id = ev->scan_req_id;
831	arg->scan_id = ev->scan_id;
832	arg->vdev_id = ev->vdev_id;
833
834	kfree(tb);
835	return 0;
836}
837
838static int
839ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
840					struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
841{
842	const void **tb;
843	const struct wmi_tlv_mgmt_tx_compl_ev *ev;
844	int ret;
845
846	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
847	if (IS_ERR(tb)) {
848		ret = PTR_ERR(tb);
849		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
850		return ret;
851	}
852
853	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
854	if (!ev) {
855		kfree(tb);
856		return -EPROTO;
857	}
858
859	arg->desc_id = ev->desc_id;
860	arg->status = ev->status;
861	arg->pdev_id = ev->pdev_id;
862	arg->ppdu_id = ev->ppdu_id;
863
864	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
865		arg->ack_rssi = ev->ack_rssi;
866
867	kfree(tb);
868	return 0;
869}
870
/* Scratch state for parsing a bundled mgmt tx completion event: pointers
 * into the event buffer for each expected TLV, plus *_done flags that
 * track which of the (order-dependent) UINT32 arrays have been seen.
 */
struct wmi_tlv_tx_bundle_compl_parse {
	const __le32 *num_reports;
	const __le32 *desc_ids;
	const __le32 *status;
	const __le32 *ppdu_ids;
	const __le32 *ack_rssi;
	bool desc_ids_done;
	bool status_done;
	bool ppdu_ids_done;
	bool ack_rssi_done;
};
882
883static int
884ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse(struct ath10k *ar, u16 tag, u16 len,
885					  const void *ptr, void *data)
886{
887	struct wmi_tlv_tx_bundle_compl_parse *bundle_tx_compl = data;
888
889	switch (tag) {
890	case WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT:
891		bundle_tx_compl->num_reports = ptr;
892		break;
893	case WMI_TLV_TAG_ARRAY_UINT32:
894		if (!bundle_tx_compl->desc_ids_done) {
895			bundle_tx_compl->desc_ids_done = true;
896			bundle_tx_compl->desc_ids = ptr;
897		} else if (!bundle_tx_compl->status_done) {
898			bundle_tx_compl->status_done = true;
899			bundle_tx_compl->status = ptr;
900		} else if (!bundle_tx_compl->ppdu_ids_done) {
901			bundle_tx_compl->ppdu_ids_done = true;
902			bundle_tx_compl->ppdu_ids = ptr;
903		} else if (!bundle_tx_compl->ack_rssi_done) {
904			bundle_tx_compl->ack_rssi_done = true;
905			bundle_tx_compl->ack_rssi = ptr;
906		}
907		break;
908	default:
909		break;
910	}
911	return 0;
912}
913
/* Extract a bundled mgmt tx completion event into @arg. The mandatory
 * pieces are the report count, desc id array and status array; ppdu ids
 * are passed through even if absent (NULL).
 *
 * NOTE(review): when the TX_DATA_ACK_RSSI service is set but the event
 * carried no rssi array, arg->ack_rssi is set to NULL - presumably the
 * caller tolerates that; confirm.
 */
static int ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev(
				struct ath10k *ar, struct sk_buff *skb,
				struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
{
	struct wmi_tlv_tx_bundle_compl_parse bundle_tx_compl = { };
	int ret;

	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
				  ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse,
				  &bundle_tx_compl);
	if (ret) {
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	if (!bundle_tx_compl.num_reports || !bundle_tx_compl.desc_ids ||
	    !bundle_tx_compl.status)
		return -EPROTO;

	arg->num_reports = *bundle_tx_compl.num_reports;
	arg->desc_ids = bundle_tx_compl.desc_ids;
	arg->status = bundle_tx_compl.status;
	arg->ppdu_ids = bundle_tx_compl.ppdu_ids;

	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
		arg->ack_rssi = bundle_tx_compl.ack_rssi;

	return 0;
}
943
/* Extract a management rx event: copy the rx metadata into @arg and
 * reshape the skb so that skb->data points at the 802.11 frame itself
 * (the caller then treats the skb as a plain frame buffer).
 *
 * Returns 0 on success, or -EPROTO if the TLVs or the claimed frame
 * length do not fit the skb.
 */
static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_mgmt_rx_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_mgmt_rx_ev *ev;
	const u8 *frame;
	u32 msdu_len;
	int ret, i;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];

	if (!ev || !frame) {
		kfree(tb);
		return -EPROTO;
	}

	/* Metadata is copied in wire (little-endian) order. */
	arg->channel = ev->channel;
	arg->buf_len = ev->buf_len;
	arg->status = ev->status;
	arg->snr = ev->snr;
	arg->phy_mode = ev->phy_mode;
	arg->rate = ev->rate;

	for (i = 0; i < ARRAY_SIZE(ev->rssi); i++)
		arg->rssi[i] = ev->rssi[i];

	msdu_len = __le32_to_cpu(arg->buf_len);

	/* The claimed frame length must fit inside the skb. */
	if (skb->len < (frame - skb->data) + msdu_len) {
		kfree(tb);
		return -EPROTO;
	}

	/* shift the sk_buff to point to `frame` */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, msdu_len);

	kfree(tb);
	return 0;
}
995
996static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
997					     struct sk_buff *skb,
998					     struct wmi_ch_info_ev_arg *arg)
999{
1000	const void **tb;
1001	const struct wmi_tlv_chan_info_event *ev;
1002	int ret;
1003
1004	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1005	if (IS_ERR(tb)) {
1006		ret = PTR_ERR(tb);
1007		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1008		return ret;
1009	}
1010
1011	ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
1012	if (!ev) {
1013		kfree(tb);
1014		return -EPROTO;
1015	}
1016
1017	arg->err_code = ev->err_code;
1018	arg->freq = ev->freq;
1019	arg->cmd_flags = ev->cmd_flags;
1020	arg->noise_floor = ev->noise_floor;
1021	arg->rx_clear_count = ev->rx_clear_count;
1022	arg->cycle_count = ev->cycle_count;
1023	if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
1024		     ar->running_fw->fw_file.fw_features))
1025		arg->mac_clk_mhz = ev->mac_clk_mhz;
1026
1027	kfree(tb);
1028	return 0;
1029}
1030
/* Extract a vdev start response event into @arg (fields in wire order).
 *
 * NOTE(review): the skb_pull() advances past sizeof(*ev) of the skb even
 * though ev was located via TLV parsing - presumably intentional for
 * callers that keep using the skb; confirm.
 */
static int
ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
				     struct wmi_vdev_start_ev_arg *arg)
{
	const void **tb;
	const struct wmi_vdev_start_response_event *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	skb_pull(skb, sizeof(*ev));
	arg->vdev_id = ev->vdev_id;
	arg->req_id = ev->req_id;
	arg->resp_type = ev->resp_type;
	arg->status = ev->status;

	kfree(tb);
	return 0;
}
1061
1062static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
1063					       struct sk_buff *skb,
1064					       struct wmi_peer_kick_ev_arg *arg)
1065{
1066	const void **tb;
1067	const struct wmi_peer_sta_kickout_event *ev;
1068	int ret;
1069
1070	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1071	if (IS_ERR(tb)) {
1072		ret = PTR_ERR(tb);
1073		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1074		return ret;
1075	}
1076
1077	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
1078	if (!ev) {
1079		kfree(tb);
1080		return -EPROTO;
1081	}
1082
1083	arg->mac_addr = ev->peer_macaddr.addr;
1084
1085	kfree(tb);
1086	return 0;
1087}
1088
/* Scratch state for parsing a SWBA (beacon) event: the event header, the
 * destination arg, running counts of parsed TIM/NoA entries, and flags
 * distinguishing the first ARRAY_STRUCT TLV (TIM infos) from the second
 * (NoA infos).
 */
struct wmi_tlv_swba_parse {
	const struct wmi_host_swba_event *ev;
	bool tim_done;
	bool noa_done;
	size_t n_tim;
	size_t n_noa;
	struct wmi_swba_ev_arg *arg;
};
1097
/* Copy one TIM info TLV from a SWBA event into the parse scratch state.
 * Rejects entries whose advertised bitmap length exceeds the fixed-size
 * bitmap actually carried in the TLV.
 */
static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct wmi_tlv_swba_parse *swba = data;
	struct wmi_tim_info_arg *tim_info_arg;
	const struct wmi_tim_info *tim_info_ev = ptr;

	if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
		return -EPROTO;

	/* Cap at one TIM entry per slot in the output array. */
	if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
		return -ENOBUFS;

	/* tim_len is firmware-controlled; bound it before consumers copy
	 * tim_bitmap based on it.
	 */
	if (__le32_to_cpu(tim_info_ev->tim_len) >
	     sizeof(tim_info_ev->tim_bitmap)) {
		ath10k_warn(ar, "refusing to parse invalid swba structure\n");
		return -EPROTO;
	}

	/* Fields stay little-endian here; consumers convert as needed. */
	tim_info_arg = &swba->arg->tim_info[swba->n_tim];
	tim_info_arg->tim_len = tim_info_ev->tim_len;
	tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
	tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
	tim_info_arg->tim_changed = tim_info_ev->tim_changed;
	tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;

	swba->n_tim++;

	return 0;
}
1128
1129static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
1130					 const void *ptr, void *data)
1131{
1132	struct wmi_tlv_swba_parse *swba = data;
1133
1134	if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
1135		return -EPROTO;
1136
1137	if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
1138		return -ENOBUFS;
1139
1140	swba->arg->noa_info[swba->n_noa++] = ptr;
1141	return 0;
1142}
1143
/* Top-level SWBA TLV walker: records the fixed header and dispatches the
 * two ARRAY_STRUCT TLVs to the TIM and NoA sub-parsers.
 */
static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	struct wmi_tlv_swba_parse *swba = data;
	int ret;

	switch (tag) {
	case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
		swba->ev = ptr;
		break;
	case WMI_TLV_TAG_ARRAY_STRUCT:
		/* The event carries two struct arrays under the same tag;
		 * order is significant: TIM info first, then P2P NoA info.
		 */
		if (!swba->tim_done) {
			swba->tim_done = true;
			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
						  ath10k_wmi_tlv_swba_tim_parse,
						  swba);
			if (ret)
				return ret;
		} else if (!swba->noa_done) {
			swba->noa_done = true;
			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
						  ath10k_wmi_tlv_swba_noa_parse,
						  swba);
			if (ret)
				return ret;
		}
		break;
	default:
		/* Unknown TLVs are skipped, not treated as errors. */
		break;
	}
	return 0;
}
1176
/* Pull a software beacon alert (SWBA) event.
 *
 * Walks the TLVs with ath10k_wmi_tlv_swba_parse() and then cross-checks
 * that the number of TIM and NoA entries matches the number of bits set
 * in the vdev map announced by the fixed header.
 */
static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
					  struct sk_buff *skb,
					  struct wmi_swba_ev_arg *arg)
{
	struct wmi_tlv_swba_parse swba = { .arg = arg };
	u32 map;
	size_t n_vdevs;
	int ret;

	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
				  ath10k_wmi_tlv_swba_parse, &swba);
	if (ret) {
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	if (!swba.ev)
		return -EPROTO;

	arg->vdev_map = swba.ev->vdev_map;

	/* Count the beaconing vdevs, i.e. the set bits in the map. */
	for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
		if (map & BIT(0))
			n_vdevs++;

	/* Firmware must supply exactly one TIM and one NoA entry per
	 * beaconing vdev.
	 */
	if (n_vdevs != swba.n_tim ||
	    n_vdevs != swba.n_noa)
		return -EPROTO;

	return 0;
}
1208
1209static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
1210						struct sk_buff *skb,
1211						struct wmi_phyerr_hdr_arg *arg)
1212{
1213	const void **tb;
1214	const struct wmi_tlv_phyerr_ev *ev;
1215	const void *phyerrs;
1216	int ret;
1217
1218	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1219	if (IS_ERR(tb)) {
1220		ret = PTR_ERR(tb);
1221		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1222		return ret;
1223	}
1224
1225	ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
1226	phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
1227
1228	if (!ev || !phyerrs) {
1229		kfree(tb);
1230		return -EPROTO;
1231	}
1232
1233	arg->num_phyerrs  = __le32_to_cpu(ev->num_phyerrs);
1234	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
1235	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
1236	arg->buf_len = __le32_to_cpu(ev->buf_len);
1237	arg->phyerrs = phyerrs;
1238
1239	kfree(tb);
1240	return 0;
1241}
1242
/* ABI version namespace words expected from WMI-TLV firmware; checked
 * against the service ready event below. NOTE(review): the values look
 * like little-endian ASCII ("QCA_", "ML") - presumably a vendor
 * namespace tag; confirm against the firmware interface definition.
 */
#define WMI_TLV_ABI_VER_NS0 0x5F414351
#define WMI_TLV_ABI_VER_NS1 0x00004C4D
#define WMI_TLV_ABI_VER_NS2 0x00000000
#define WMI_TLV_ABI_VER_NS3 0x00000000

/* abi_ver0 packs the major version into the top byte and the minor
 * version into the remaining 24 bits.
 */
#define WMI_TLV_ABI_VER0_MAJOR 1
#define WMI_TLV_ABI_VER0_MINOR 0
#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
			  (((WMI_TLV_ABI_VER0_MINOR) <<  0) & 0x00FFFFFF))
#define WMI_TLV_ABI_VER1 53
1253
1254static int
1255ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
1256			      const void *ptr, void *data)
1257{
1258	struct wmi_svc_rdy_ev_arg *arg = data;
1259	int i;
1260
1261	if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
1262		return -EPROTO;
1263
1264	for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
1265		if (!arg->mem_reqs[i]) {
1266			arg->mem_reqs[i] = ptr;
1267			return 0;
1268		}
1269	}
1270
1271	return -ENOMEM;
1272}
1273
/* Scratch state for walking the service ready event TLVs. */
struct wmi_tlv_svc_rdy_parse {
	const struct hal_reg_capabilities *reg;		/* regulatory caps */
	const struct wmi_tlv_svc_rdy_ev *ev;		/* fixed event header */
	const __le32 *svc_bmap;				/* service bitmap words */
	const struct wlan_host_mem_req *mem_reqs;	/* host memory requests */
	bool svc_bmap_done;		/* first ARRAY_UINT32 consumed */
	bool dbs_hw_mode_done;		/* second ARRAY_UINT32 seen (ignored) */
};
1282
/* TLV walker for the service ready event; records pointers into the
 * wmi_tlv_svc_rdy_parse scratch state.
 */
static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct wmi_tlv_svc_rdy_parse *svc_rdy = data;

	switch (tag) {
	case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
		svc_rdy->ev = ptr;
		break;
	case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
		svc_rdy->reg = ptr;
		break;
	case WMI_TLV_TAG_ARRAY_STRUCT:
		svc_rdy->mem_reqs = ptr;
		break;
	case WMI_TLV_TAG_ARRAY_UINT32:
		/* Two uint32 arrays share this tag: the service bitmap comes
		 * first; the second one (DBS HW modes) is deliberately only
		 * marked as seen, never stored.
		 */
		if (!svc_rdy->svc_bmap_done) {
			svc_rdy->svc_bmap_done = true;
			svc_rdy->svc_bmap = ptr;
		} else if (!svc_rdy->dbs_hw_mode_done) {
			svc_rdy->dbs_hw_mode_done = true;
		}
		break;
	default:
		break;
	}
	return 0;
}
1311
/* Pull the service ready event.
 *
 * Collects the fixed header, regulatory capabilities, service bitmap and
 * host memory requests, validates the firmware's WMI-TLV ABI version
 * words, then fills @arg. Returns -EPROTO if a mandatory TLV is missing
 * and -ENOTSUPP on an ABI mismatch.
 */
static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_svc_rdy_ev_arg *arg)
{
	const struct hal_reg_capabilities *reg;
	const struct wmi_tlv_svc_rdy_ev *ev;
	const __le32 *svc_bmap;
	const struct wlan_host_mem_req *mem_reqs;
	struct wmi_tlv_svc_rdy_parse svc_rdy = { };
	int ret;

	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
				  ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
	if (ret) {
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = svc_rdy.ev;
	reg = svc_rdy.reg;
	svc_bmap = svc_rdy.svc_bmap;
	mem_reqs = svc_rdy.mem_reqs;

	/* All four TLVs are mandatory. */
	if (!ev || !reg || !svc_bmap || !mem_reqs)
		return -EPROTO;

	/* This is an internal ABI compatibility check for WMI TLV so check it
	 * here instead of the generic WMI code.
	 */
	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
		   __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
		   __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
		   __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
		   __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
		   __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);

	if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
		return -ENOTSUPP;
	}

	/* Fields are copied still little-endian; generic WMI code converts
	 * them later.
	 */
	arg->min_tx_power = ev->hw_min_tx_power;
	arg->max_tx_power = ev->hw_max_tx_power;
	arg->ht_cap = ev->ht_cap_info;
	arg->vht_cap = ev->vht_cap_info;
	arg->vht_supp_mcs = ev->vht_supp_mcs;
	arg->sw_ver0 = ev->abi.abi_ver0;
	arg->sw_ver1 = ev->abi.abi_ver1;
	arg->fw_build = ev->fw_build_vers;
	arg->phy_capab = ev->phy_capability;
	arg->num_rf_chains = ev->num_rf_chains;
	arg->eeprom_rd = reg->eeprom_rd;
	arg->low_2ghz_chan = reg->low_2ghz_chan;
	arg->high_2ghz_chan = reg->high_2ghz_chan;
	arg->low_5ghz_chan = reg->low_5ghz_chan;
	arg->high_5ghz_chan = reg->high_5ghz_chan;
	arg->num_mem_reqs = ev->num_mem_reqs;
	arg->service_map = svc_bmap;
	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
	arg->sys_cap_info = ev->sys_cap_info;

	/* mem_reqs is a nested struct array; record a pointer per entry. */
	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
				  ath10k_wmi_tlv_parse_mem_reqs, arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
		return ret;
	}

	return 0;
}
1386
1387static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
1388					 struct sk_buff *skb,
1389					 struct wmi_rdy_ev_arg *arg)
1390{
1391	const void **tb;
1392	const struct wmi_tlv_rdy_ev *ev;
1393	int ret;
1394
1395	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1396	if (IS_ERR(tb)) {
1397		ret = PTR_ERR(tb);
1398		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1399		return ret;
1400	}
1401
1402	ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
1403	if (!ev) {
1404		kfree(tb);
1405		return -EPROTO;
1406	}
1407
1408	arg->sw_version = ev->abi.abi_ver0;
1409	arg->abi_version = ev->abi.abi_ver1;
1410	arg->status = ev->status;
1411	arg->mac_addr = ev->mac_addr.addr;
1412
1413	kfree(tb);
1414	return 0;
1415}
1416
/* Extract the extended service bitmap from a SERVICE_AVAILABLE event.
 * TLV layout: one __le32 word count followed by the bitmap words.
 */
static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct wmi_svc_avail_ev_arg *arg = data;

	switch (tag) {
	case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
		arg->service_map_ext_valid = true;
		/* Length word is stored still little-endian; the consumer
		 * converts it. NOTE(review): no check here that len covers
		 * the leading __le32 - presumably the TLV policy table
		 * enforces a minimum length for this tag; confirm.
		 */
		arg->service_map_ext_len = *(__le32 *)ptr;
		arg->service_map_ext = ptr + sizeof(__le32);
		return 0;
	default:
		break;
	}

	return 0;
}
1434
1435static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
1436					    struct sk_buff *skb,
1437					    struct wmi_svc_avail_ev_arg *arg)
1438{
1439	int ret;
1440
1441	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
1442				  ath10k_wmi_tlv_svc_avail_parse, arg);
1443
1444	if (ret) {
1445		ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret);
1446		return ret;
1447	}
1448
1449	return 0;
1450}
1451
1452static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
1453					   struct ath10k_fw_stats_vdev *dst)
1454{
1455	int i;
1456
1457	dst->vdev_id = __le32_to_cpu(src->vdev_id);
1458	dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
1459	dst->data_snr = __le32_to_cpu(src->data_snr);
1460	dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
1461	dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
1462	dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
1463	dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
1464	dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
1465	dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
1466
1467	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
1468		dst->num_tx_frames[i] =
1469			__le32_to_cpu(src->num_tx_frames[i]);
1470
1471	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
1472		dst->num_tx_frames_retries[i] =
1473			__le32_to_cpu(src->num_tx_frames_retries[i]);
1474
1475	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
1476		dst->num_tx_frames_failures[i] =
1477			__le32_to_cpu(src->num_tx_frames_failures[i]);
1478
1479	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
1480		dst->tx_rate_history[i] =
1481			__le32_to_cpu(src->tx_rate_history[i]);
1482
1483	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
1484		dst->beacon_rssi_history[i] =
1485			__le32_to_cpu(src->beacon_rssi_history[i]);
1486}
1487
1488static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
1489					   struct sk_buff *skb,
1490					   struct ath10k_fw_stats *stats)
1491{
1492	const void **tb;
1493	const struct wmi_tlv_stats_ev *ev;
1494	u32 num_peer_stats_extd;
1495	const void *data;
1496	u32 num_pdev_stats;
1497	u32 num_vdev_stats;
1498	u32 num_peer_stats;
1499	u32 num_bcnflt_stats;
1500	u32 num_chan_stats;
1501	size_t data_len;
1502	u32 stats_id;
1503	int ret;
1504	int i;
1505
1506	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1507	if (IS_ERR(tb)) {
1508		ret = PTR_ERR(tb);
1509		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1510		return ret;
1511	}
1512
1513	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
1514	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
1515
1516	if (!ev || !data) {
1517		kfree(tb);
1518		return -EPROTO;
1519	}
1520
1521	data_len = ath10k_wmi_tlv_len(data);
1522	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
1523	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
1524	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
1525	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
1526	num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
1527	stats_id = __le32_to_cpu(ev->stats_id);
1528	num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd);
1529
1530	ath10k_dbg(ar, ATH10K_DBG_WMI,
1531		   "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n",
1532		   num_pdev_stats, num_vdev_stats, num_peer_stats,
1533		   num_bcnflt_stats, num_chan_stats, num_peer_stats_extd);
1534
1535	for (i = 0; i < num_pdev_stats; i++) {
1536		const struct wmi_pdev_stats *src;
1537		struct ath10k_fw_stats_pdev *dst;
1538
1539		src = data;
1540		if (data_len < sizeof(*src)) {
1541			kfree(tb);
1542			return -EPROTO;
1543		}
1544
1545		data += sizeof(*src);
1546		data_len -= sizeof(*src);
1547
1548		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1549		if (!dst)
1550			continue;
1551
1552		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
1553		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
1554		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
1555		list_add_tail(&dst->list, &stats->pdevs);
1556	}
1557
1558	for (i = 0; i < num_vdev_stats; i++) {
1559		const struct wmi_tlv_vdev_stats *src;
1560		struct ath10k_fw_stats_vdev *dst;
1561
1562		src = data;
1563		if (data_len < sizeof(*src)) {
1564			kfree(tb);
1565			return -EPROTO;
1566		}
1567
1568		data += sizeof(*src);
1569		data_len -= sizeof(*src);
1570
1571		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1572		if (!dst)
1573			continue;
1574
1575		ath10k_wmi_tlv_pull_vdev_stats(src, dst);
1576		list_add_tail(&dst->list, &stats->vdevs);
1577	}
1578
1579	for (i = 0; i < num_peer_stats; i++) {
1580		const struct wmi_10x_peer_stats *src;
1581		struct ath10k_fw_stats_peer *dst;
1582
1583		src = data;
1584		if (data_len < sizeof(*src)) {
1585			kfree(tb);
1586			return -EPROTO;
1587		}
1588
1589		data += sizeof(*src);
1590		data_len -= sizeof(*src);
1591
1592		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1593		if (!dst)
1594			continue;
1595
1596		ath10k_wmi_pull_peer_stats(&src->old, dst);
1597		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
1598
1599		if (stats_id & WMI_TLV_STAT_PEER_EXTD) {
1600			const struct wmi_tlv_peer_stats_extd *extd;
1601			unsigned long rx_duration_high;
1602
1603			extd = data + sizeof(*src) * (num_peer_stats - i - 1)
1604			       + sizeof(*extd) * i;
1605
1606			dst->rx_duration = __le32_to_cpu(extd->rx_duration);
1607			rx_duration_high = __le32_to_cpu
1608						(extd->rx_duration_high);
1609
1610			if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT,
1611				     &rx_duration_high)) {
1612				rx_duration_high =
1613					FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK,
1614						  rx_duration_high);
1615				dst->rx_duration |= (u64)rx_duration_high <<
1616						    WMI_TLV_PEER_RX_DURATION_SHIFT;
1617			}
1618		}
1619
1620		list_add_tail(&dst->list, &stats->peers);
1621	}
1622
1623	kfree(tb);
1624	return 0;
1625}
1626
1627static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
1628					  struct sk_buff *skb,
1629					  struct wmi_roam_ev_arg *arg)
1630{
1631	const void **tb;
1632	const struct wmi_tlv_roam_ev *ev;
1633	int ret;
1634
1635	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1636	if (IS_ERR(tb)) {
1637		ret = PTR_ERR(tb);
1638		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1639		return ret;
1640	}
1641
1642	ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
1643	if (!ev) {
1644		kfree(tb);
1645		return -EPROTO;
1646	}
1647
1648	arg->vdev_id = ev->vdev_id;
1649	arg->reason = ev->reason;
1650	arg->rssi = ev->rssi;
1651
1652	kfree(tb);
1653	return 0;
1654}
1655
1656static int
1657ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
1658			      struct wmi_wow_ev_arg *arg)
1659{
1660	const void **tb;
1661	const struct wmi_tlv_wow_event_info *ev;
1662	int ret;
1663
1664	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1665	if (IS_ERR(tb)) {
1666		ret = PTR_ERR(tb);
1667		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1668		return ret;
1669	}
1670
1671	ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
1672	if (!ev) {
1673		kfree(tb);
1674		return -EPROTO;
1675	}
1676
1677	arg->vdev_id = __le32_to_cpu(ev->vdev_id);
1678	arg->flag = __le32_to_cpu(ev->flag);
1679	arg->wake_reason = __le32_to_cpu(ev->wake_reason);
1680	arg->data_len = __le32_to_cpu(ev->data_len);
1681
1682	kfree(tb);
1683	return 0;
1684}
1685
1686static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
1687					  struct sk_buff *skb,
1688					  struct wmi_echo_ev_arg *arg)
1689{
1690	const void **tb;
1691	const struct wmi_echo_event *ev;
1692	int ret;
1693
1694	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1695	if (IS_ERR(tb)) {
1696		ret = PTR_ERR(tb);
1697		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1698		return ret;
1699	}
1700
1701	ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
1702	if (!ev) {
1703		kfree(tb);
1704		return -EPROTO;
1705	}
1706
1707	arg->value = ev->value;
1708
1709	kfree(tb);
1710	return 0;
1711}
1712
1713static struct sk_buff *
1714ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
1715{
1716	struct wmi_tlv_pdev_suspend *cmd;
1717	struct wmi_tlv *tlv;
1718	struct sk_buff *skb;
1719
1720	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1721	if (!skb)
1722		return ERR_PTR(-ENOMEM);
1723
1724	tlv = (void *)skb->data;
1725	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
1726	tlv->len = __cpu_to_le16(sizeof(*cmd));
1727	cmd = (void *)tlv->value;
1728	cmd->opt = __cpu_to_le32(opt);
1729
1730	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
1731	return skb;
1732}
1733
1734static struct sk_buff *
1735ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
1736{
1737	struct wmi_tlv_resume_cmd *cmd;
1738	struct wmi_tlv *tlv;
1739	struct sk_buff *skb;
1740
1741	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1742	if (!skb)
1743		return ERR_PTR(-ENOMEM);
1744
1745	tlv = (void *)skb->data;
1746	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
1747	tlv->len = __cpu_to_le16(sizeof(*cmd));
1748	cmd = (void *)tlv->value;
1749	cmd->reserved = __cpu_to_le32(0);
1750
1751	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
1752	return skb;
1753}
1754
/* Build a PDEV_SET_REGDOMAIN command from the regulatory parameters.
 *
 * NOTE(review): the dfs_reg argument is accepted for interface parity
 * with the non-TLV op but is never written into the command - confirm
 * the TLV firmware derives the DFS region by other means.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
				  u16 rd, u16 rd2g, u16 rd5g,
				  u16 ctl2g, u16 ctl5g,
				  enum wmi_dfs_region dfs_reg)
{
	struct wmi_tlv_pdev_set_rd_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->regd = __cpu_to_le32(rd);
	cmd->regd_2ghz = __cpu_to_le32(rd2g);
	cmd->regd_5ghz = __cpu_to_le32(rd5g);
	cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
	cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
	return skb;
}
1782
/* TXBF configuration scheme for WMI-TLV firmware: configure after assoc. */
static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
{
	return WMI_TXBF_CONF_AFTER_ASSOC;
}
1787
1788static struct sk_buff *
1789ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
1790				     u32 param_value)
1791{
1792	struct wmi_tlv_pdev_set_param_cmd *cmd;
1793	struct wmi_tlv *tlv;
1794	struct sk_buff *skb;
1795
1796	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1797	if (!skb)
1798		return ERR_PTR(-ENOMEM);
1799
1800	tlv = (void *)skb->data;
1801	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
1802	tlv->len = __cpu_to_le16(sizeof(*cmd));
1803	cmd = (void *)tlv->value;
1804	cmd->param_id = __cpu_to_le32(param_id);
1805	cmd->param_value = __cpu_to_le32(param_value);
1806
1807	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n",
1808		   param_id, param_value);
1809	return skb;
1810}
1811
/* Serialize the previously allocated host memory chunks into a run of
 * WLAN_HOST_MEMORY_CHUNK TLVs at @host_mem_chunks. The caller must have
 * reserved num_mem_chunks * (sizeof(tlv) + sizeof(chunk)) bytes there.
 */
static void
ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
{
	struct host_memory_chunk_tlv *chunk;
	struct wmi_tlv *tlv;
	dma_addr_t paddr;
	int i;
	__le16 tlv_len, tlv_tag;

	/* Tag and length are identical for every chunk; precompute once. */
	tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK);
	tlv_len = __cpu_to_le16(sizeof(*chunk));
	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
		tlv = host_mem_chunks;
		tlv->tag = tlv_tag;
		tlv->len = tlv_len;
		chunk = (void *)tlv->value;

		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);

		/* Only firmware advertising extended addressing looks at
		 * the high 32 bits of the DMA address.
		 */
		if (test_bit(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
			     ar->wmi.svc_map)) {
			paddr = ar->wmi.mem_chunks[i].paddr;
			chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
		}

		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
			   i,
			   ar->wmi.mem_chunks[i].len,
			   (unsigned long long)ar->wmi.mem_chunks[i].paddr,
			   ar->wmi.mem_chunks[i].req_id);

		/* Advance past this TLV header plus its payload. */
		host_mem_chunks += sizeof(*tlv);
		host_mem_chunks += sizeof(*chunk);
	}
}
1850
/* Build the WMI INIT command: fixed command header, resource config and
 * the array of host memory chunks allocated for firmware use.
 */
static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
{
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	struct wmi_tlv_init_cmd *cmd;
	struct wmi_tlv_resource_config *cfg;
	void *chunks;
	size_t len, chunks_len;
	void *ptr;

	/* Three back-to-back TLVs: init cmd, resource config, chunk array. */
	chunks_len = ar->wmi.num_mem_chunks *
		     (sizeof(struct host_memory_chunk_tlv) + sizeof(*tlv));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*cfg)) +
	      (sizeof(*tlv) + chunks_len);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
	tlv->len = __cpu_to_le16(sizeof(*cfg));
	cfg = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cfg);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(chunks_len);
	chunks = (void *)tlv->value;

	ptr += sizeof(*tlv);
	ptr += chunks_len;

	/* Advertise the host's WMI-TLV ABI; validated against the
	 * firmware's service ready event.
	 */
	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);

	/* Resource configuration: hw_params values when set, otherwise the
	 * TARGET_TLV_* defaults.
	 */
	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);

	if (ar->hw_params.num_peers)
		cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
	else
		cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
	cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
	cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);

	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	} else {
		cfg->num_offload_peers = __cpu_to_le32(0);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
	}

	cfg->num_peer_keys = __cpu_to_le32(2);
	if (ar->hw_params.num_peers)
		cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2);
	else
		cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
	cfg->tx_chain_mask = __cpu_to_le32(0x7);
	cfg->rx_chain_mask = __cpu_to_le32(0x7);
	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
	cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
	cfg->num_mcast_groups = __cpu_to_le32(0);
	cfg->num_mcast_table_elems = __cpu_to_le32(0);
	cfg->mcast2ucast_mode = __cpu_to_le32(0);
	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
	cfg->dma_burst_size = __cpu_to_le32(0);
	cfg->mac_aggr_delim = __cpu_to_le32(0);
	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
	cfg->vow_config = __cpu_to_le32(0);
	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
	cfg->max_frag_entries = __cpu_to_le32(2);
	cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
	cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
	cfg->wmi_send_separate = __cpu_to_le32(0);
	cfg->num_ocb_vdevs = __cpu_to_le32(0);
	cfg->num_ocb_channels = __cpu_to_le32(0);
	cfg->num_ocb_schedules = __cpu_to_le32(0);
	cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL);

	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
		cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI);

	/* Fill the chunk array reserved above. */
	ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
	return skb;
}
1970
/* Build a START_SCAN command: fixed command TLV followed by channel list,
 * SSID list, BSSID list and extra IEs, each as its own array TLV (emitted
 * even when empty).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
				 const struct wmi_start_scan_arg *arg)
{
	struct wmi_tlv_start_scan_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len, chan_len, ssid_len, bssid_len, ie_len;
	__le32 *chans;
	struct wmi_ssid *ssids;
	struct wmi_mac_addr *addrs;
	void *ptr;
	int i, ret;

	ret = ath10k_wmi_start_scan_verify(arg);
	if (ret)
		return ERR_PTR(ret);

	chan_len = arg->n_channels * sizeof(__le32);
	ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
	/* IE TLV payload is padded up to a 4-byte boundary. */
	ie_len = roundup(arg->ie_len, 4);
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      sizeof(*tlv) + chan_len +
	      sizeof(*tlv) + ssid_len +
	      sizeof(*tlv) + bssid_len +
	      sizeof(*tlv) + ie_len;

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
	cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
	cmd->num_channels = __cpu_to_le32(arg->n_channels);
	cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
	cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
	cmd->ie_len = __cpu_to_le32(arg->ie_len);
	cmd->num_probes = __cpu_to_le32(3);
	ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr);
	ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr);

	/* FIXME: There are some scan flag inconsistencies across firmwares,
	 * e.g. WMI-TLV inverts the logic behind the following flag.
	 */
	cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Channel frequency list. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(chan_len);
	chans = (void *)tlv->value;
	for (i = 0; i < arg->n_channels; i++)
		chans[i] = __cpu_to_le32(arg->channels[i]);

	ptr += sizeof(*tlv);
	ptr += chan_len;

	/* SSID list. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
	tlv->len = __cpu_to_le16(ssid_len);
	ssids = (void *)tlv->value;
	for (i = 0; i < arg->n_ssids; i++) {
		ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
		memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
	}

	ptr += sizeof(*tlv);
	ptr += ssid_len;

	/* BSSID list. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
	tlv->len = __cpu_to_le16(bssid_len);
	addrs = (void *)tlv->value;
	for (i = 0; i < arg->n_bssids; i++)
		ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);

	ptr += sizeof(*tlv);
	ptr += bssid_len;

	/* Extra IEs appended to probe requests. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(ie_len);
	memcpy(tlv->value, arg->ie, arg->ie_len);

	ptr += sizeof(*tlv);
	ptr += ie_len;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
	return skb;
}
2070
2071static struct sk_buff *
2072ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
2073				const struct wmi_stop_scan_arg *arg)
2074{
2075	struct wmi_stop_scan_cmd *cmd;
2076	struct wmi_tlv *tlv;
2077	struct sk_buff *skb;
2078	u32 scan_id;
2079	u32 req_id;
2080
2081	if (arg->req_id > 0xFFF)
2082		return ERR_PTR(-EINVAL);
2083	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
2084		return ERR_PTR(-EINVAL);
2085
2086	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2087	if (!skb)
2088		return ERR_PTR(-ENOMEM);
2089
2090	scan_id = arg->u.scan_id;
2091	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
2092
2093	req_id = arg->req_id;
2094	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
2095
2096	tlv = (void *)skb->data;
2097	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
2098	tlv->len = __cpu_to_le16(sizeof(*cmd));
2099	cmd = (void *)tlv->value;
2100	cmd->req_type = __cpu_to_le32(arg->req_type);
2101	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
2102	cmd->scan_id = __cpu_to_le32(scan_id);
2103	cmd->scan_req_id = __cpu_to_le32(req_id);
2104
2105	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
2106	return skb;
2107}
2108
2109static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
2110					      enum wmi_vdev_subtype subtype)
2111{
2112	switch (subtype) {
2113	case WMI_VDEV_SUBTYPE_NONE:
2114		return WMI_TLV_VDEV_SUBTYPE_NONE;
2115	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
2116		return WMI_TLV_VDEV_SUBTYPE_P2P_DEV;
2117	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
2118		return WMI_TLV_VDEV_SUBTYPE_P2P_CLI;
2119	case WMI_VDEV_SUBTYPE_P2P_GO:
2120		return WMI_TLV_VDEV_SUBTYPE_P2P_GO;
2121	case WMI_VDEV_SUBTYPE_PROXY_STA:
2122		return WMI_TLV_VDEV_SUBTYPE_PROXY_STA;
2123	case WMI_VDEV_SUBTYPE_MESH_11S:
2124		return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
2125	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
2126		return -ENOTSUPP;
2127	}
2128	return -ENOTSUPP;
2129}
2130
2131static struct sk_buff *
2132ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
2133				  u32 vdev_id,
2134				  enum wmi_vdev_type vdev_type,
2135				  enum wmi_vdev_subtype vdev_subtype,
2136				  const u8 mac_addr[ETH_ALEN])
2137{
2138	struct wmi_vdev_create_cmd *cmd;
2139	struct wmi_tlv *tlv;
2140	struct sk_buff *skb;
2141
2142	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2143	if (!skb)
2144		return ERR_PTR(-ENOMEM);
2145
2146	tlv = (void *)skb->data;
2147	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
2148	tlv->len = __cpu_to_le16(sizeof(*cmd));
2149	cmd = (void *)tlv->value;
2150	cmd->vdev_id = __cpu_to_le32(vdev_id);
2151	cmd->vdev_type = __cpu_to_le32(vdev_type);
2152	cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
2153	ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
2154
2155	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
2156	return skb;
2157}
2158
2159static struct sk_buff *
2160ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
2161{
2162	struct wmi_vdev_delete_cmd *cmd;
2163	struct wmi_tlv *tlv;
2164	struct sk_buff *skb;
2165
2166	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2167	if (!skb)
2168		return ERR_PTR(-ENOMEM);
2169
2170	tlv = (void *)skb->data;
2171	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
2172	tlv->len = __cpu_to_le16(sizeof(*cmd));
2173	cmd = (void *)tlv->value;
2174	cmd->vdev_id = __cpu_to_le32(vdev_id);
2175
2176	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
2177	return skb;
2178}
2179
/* Build a WMI TLV vdev start/restart request.
 *
 * Message layout (in order): fixed start-request command TLV, channel TLV,
 * and an empty ARRAY_STRUCT TLV reserved for P2P NoA descriptors.
 * Returns the skb ready for submission, or an ERR_PTR on failure.
 *
 * NOTE(review): the @restart flag is not consumed in this builder — the
 * same command structure appears to serve both start and restart; confirm
 * against the command-id chosen by the caller.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
				 const struct wmi_vdev_start_request_arg *arg,
				 bool restart)
{
	struct wmi_tlv_vdev_start_cmd *cmd;
	struct wmi_channel *ch;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;
	u32 flags = 0;

	/* A hidden SSID makes no sense without an SSID to hide. */
	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return ERR_PTR(-EINVAL);

	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*ch)) +
	      (sizeof(*tlv) + 0);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	ptr = (void *)skb->data;

	/* TLV 1: the fixed-size start request command. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
	cmd->flags = __cpu_to_le32(flags);
	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* TLV 2: the operating channel. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
	tlv->len = __cpu_to_le16(sizeof(*ch));
	ch = (void *)tlv->value;
	ath10k_wmi_put_wmi_channel(ar, ch, &arg->channel);

	ptr += sizeof(*tlv);
	ptr += sizeof(*ch);

	/* TLV 3: zero-length array of P2P NoA descriptors. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = 0;

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);
	ptr += 0;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
	return skb;
}
2255
2256static struct sk_buff *
2257ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
2258{
2259	struct wmi_vdev_stop_cmd *cmd;
2260	struct wmi_tlv *tlv;
2261	struct sk_buff *skb;
2262
2263	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2264	if (!skb)
2265		return ERR_PTR(-ENOMEM);
2266
2267	tlv = (void *)skb->data;
2268	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
2269	tlv->len = __cpu_to_le16(sizeof(*cmd));
2270	cmd = (void *)tlv->value;
2271	cmd->vdev_id = __cpu_to_le32(vdev_id);
2272
2273	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
2274	return skb;
2275}
2276
2277static struct sk_buff *
2278ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
2279			      const u8 *bssid)
2280
2281{
2282	struct wmi_vdev_up_cmd *cmd;
2283	struct wmi_tlv *tlv;
2284	struct sk_buff *skb;
2285
2286	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2287	if (!skb)
2288		return ERR_PTR(-ENOMEM);
2289
2290	tlv = (void *)skb->data;
2291	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
2292	tlv->len = __cpu_to_le16(sizeof(*cmd));
2293	cmd = (void *)tlv->value;
2294	cmd->vdev_id = __cpu_to_le32(vdev_id);
2295	cmd->vdev_assoc_id = __cpu_to_le32(aid);
2296	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
2297
2298	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
2299	return skb;
2300}
2301
2302static struct sk_buff *
2303ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
2304{
2305	struct wmi_vdev_down_cmd *cmd;
2306	struct wmi_tlv *tlv;
2307	struct sk_buff *skb;
2308
2309	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2310	if (!skb)
2311		return ERR_PTR(-ENOMEM);
2312
2313	tlv = (void *)skb->data;
2314	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
2315	tlv->len = __cpu_to_le16(sizeof(*cmd));
2316	cmd = (void *)tlv->value;
2317	cmd->vdev_id = __cpu_to_le32(vdev_id);
2318
2319	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
2320	return skb;
2321}
2322
2323static struct sk_buff *
2324ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
2325				     u32 param_id, u32 param_value)
2326{
2327	struct wmi_vdev_set_param_cmd *cmd;
2328	struct wmi_tlv *tlv;
2329	struct sk_buff *skb;
2330
2331	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2332	if (!skb)
2333		return ERR_PTR(-ENOMEM);
2334
2335	tlv = (void *)skb->data;
2336	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
2337	tlv->len = __cpu_to_le16(sizeof(*cmd));
2338	cmd = (void *)tlv->value;
2339	cmd->vdev_id = __cpu_to_le32(vdev_id);
2340	cmd->param_id = __cpu_to_le32(param_id);
2341	cmd->param_value = __cpu_to_le32(param_value);
2342
2343	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n",
2344		   vdev_id, param_id, param_value);
2345	return skb;
2346}
2347
/* Build a WMI TLV vdev install-key request.
 *
 * Layout: fixed install-key command TLV followed by an ARRAY_BYTE TLV
 * holding the key material, padded up to a 4-byte multiple.
 * Returns the skb, or an ERR_PTR on invalid arguments / allocation failure.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
				       const struct wmi_vdev_install_key_arg *arg)
{
	struct wmi_vdev_install_key_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;

	/* Key data must be present exactly when a real cipher is used. */
	if (arg->key_cipher == ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
	    arg->key_data)
		return ERR_PTR(-EINVAL);
	if (arg->key_cipher != ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
	    !arg->key_data)
		return ERR_PTR(-EINVAL);

	/* Key bytes are padded to a __le32 boundary in the byte-array TLV. */
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->key_idx = __cpu_to_le32(arg->key_idx);
	cmd->key_flags = __cpu_to_le32(arg->key_flags);
	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
	cmd->key_len = __cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);

	/* Pairwise keys carry the peer MAC; without it the field stays zero. */
	if (arg->macaddr)
		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Second TLV: the (padded) key material itself. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
	if (arg->key_data)
		memcpy(tlv->value, arg->key_data, arg->key_len);

	ptr += sizeof(*tlv);
	ptr += roundup(arg->key_len, sizeof(__le32));

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
	return skb;
}
2402
2403static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
2404					 const struct wmi_sta_uapsd_auto_trig_arg *arg)
2405{
2406	struct wmi_sta_uapsd_auto_trig_param *ac;
2407	struct wmi_tlv *tlv;
2408
2409	tlv = ptr;
2410	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
2411	tlv->len = __cpu_to_le16(sizeof(*ac));
2412	ac = (void *)tlv->value;
2413
2414	ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
2415	ac->user_priority = __cpu_to_le32(arg->user_priority);
2416	ac->service_interval = __cpu_to_le32(arg->service_interval);
2417	ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
2418	ac->delay_interval = __cpu_to_le32(arg->delay_interval);
2419
2420	ath10k_dbg(ar, ATH10K_DBG_WMI,
2421		   "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
2422		   ac->wmm_ac, ac->user_priority, ac->service_interval,
2423		   ac->suspend_interval, ac->delay_interval);
2424
2425	return ptr + sizeof(*tlv) + sizeof(*ac);
2426}
2427
2428static struct sk_buff *
2429ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
2430				     const u8 peer_addr[ETH_ALEN],
2431				     const struct wmi_sta_uapsd_auto_trig_arg *args,
2432				     u32 num_ac)
2433{
2434	struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
2435	struct wmi_sta_uapsd_auto_trig_param *ac;
2436	struct wmi_tlv *tlv;
2437	struct sk_buff *skb;
2438	size_t len;
2439	size_t ac_tlv_len;
2440	void *ptr;
2441	int i;
2442
2443	ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
2444	len = sizeof(*tlv) + sizeof(*cmd) +
2445	      sizeof(*tlv) + ac_tlv_len;
2446	skb = ath10k_wmi_alloc_skb(ar, len);
2447	if (!skb)
2448		return ERR_PTR(-ENOMEM);
2449
2450	ptr = (void *)skb->data;
2451	tlv = ptr;
2452	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
2453	tlv->len = __cpu_to_le16(sizeof(*cmd));
2454	cmd = (void *)tlv->value;
2455	cmd->vdev_id = __cpu_to_le32(vdev_id);
2456	cmd->num_ac = __cpu_to_le32(num_ac);
2457	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2458
2459	ptr += sizeof(*tlv);
2460	ptr += sizeof(*cmd);
2461
2462	tlv = ptr;
2463	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
2464	tlv->len = __cpu_to_le16(ac_tlv_len);
2465	ac = (void *)tlv->value;
2466
2467	ptr += sizeof(*tlv);
2468	for (i = 0; i < num_ac; i++)
2469		ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
2470
2471	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
2472	return skb;
2473}
2474
2475static void *ath10k_wmi_tlv_put_wmm(void *ptr,
2476				    const struct wmi_wmm_params_arg *arg)
2477{
2478	struct wmi_wmm_params *wmm;
2479	struct wmi_tlv *tlv;
2480
2481	tlv = ptr;
2482	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
2483	tlv->len = __cpu_to_le16(sizeof(*wmm));
2484	wmm = (void *)tlv->value;
2485	ath10k_wmi_set_wmm_param(wmm, arg);
2486
2487	return ptr + sizeof(*tlv) + sizeof(*wmm);
2488}
2489
2490static struct sk_buff *
2491ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
2492				    const struct wmi_wmm_params_all_arg *arg)
2493{
2494	struct wmi_tlv_vdev_set_wmm_cmd *cmd;
2495	struct wmi_tlv *tlv;
2496	struct sk_buff *skb;
2497	size_t len;
2498	void *ptr;
2499
2500	len = sizeof(*tlv) + sizeof(*cmd);
2501	skb = ath10k_wmi_alloc_skb(ar, len);
2502	if (!skb)
2503		return ERR_PTR(-ENOMEM);
2504
2505	ptr = (void *)skb->data;
2506	tlv = ptr;
2507	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
2508	tlv->len = __cpu_to_le16(sizeof(*cmd));
2509	cmd = (void *)tlv->value;
2510	cmd->vdev_id = __cpu_to_le32(vdev_id);
2511
2512	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
2513	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
2514	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
2515	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
2516
2517	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
2518	return skb;
2519}
2520
/* Build a WMI TLV STA keepalive configuration command.
 *
 * Layout: fixed keepalive command TLV followed by an ARP-response TLV
 * (used when the keepalive method is ARP-based).
 * Returns the skb, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
				    const struct wmi_sta_keepalive_arg *arg)
{
	struct wmi_tlv_sta_keepalive_cmd *cmd;
	struct wmi_sta_keepalive_arp_resp *arp;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*arp);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->enabled = __cpu_to_le32(arg->enabled);
	cmd->method = __cpu_to_le32(arg->method);
	cmd->interval = __cpu_to_le32(arg->interval);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
	tlv->len = __cpu_to_le16(sizeof(*arp));
	arp = (void *)tlv->value;

	/* IP addresses are copied without byte-swapping; presumably the arg
	 * fields already hold wire-format values — TODO confirm against
	 * struct wmi_sta_keepalive_arg.
	 */
	arp->src_ip4_addr = arg->src_ip4_addr;
	arp->dest_ip4_addr = arg->dest_ip4_addr;
	ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
	return skb;
}
2564
2565static struct sk_buff *
2566ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
2567				  const u8 peer_addr[ETH_ALEN],
2568				  enum wmi_peer_type peer_type)
2569{
2570	struct wmi_tlv_peer_create_cmd *cmd;
2571	struct wmi_tlv *tlv;
2572	struct sk_buff *skb;
2573
2574	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2575	if (!skb)
2576		return ERR_PTR(-ENOMEM);
2577
2578	tlv = (void *)skb->data;
2579	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
2580	tlv->len = __cpu_to_le16(sizeof(*cmd));
2581	cmd = (void *)tlv->value;
2582	cmd->vdev_id = __cpu_to_le32(vdev_id);
2583	cmd->peer_type = __cpu_to_le32(peer_type);
2584	ether_addr_copy(cmd->peer_addr.addr, peer_addr);
2585
2586	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
2587	return skb;
2588}
2589
2590static struct sk_buff *
2591ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
2592				  const u8 peer_addr[ETH_ALEN])
2593{
2594	struct wmi_peer_delete_cmd *cmd;
2595	struct wmi_tlv *tlv;
2596	struct sk_buff *skb;
2597
2598	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2599	if (!skb)
2600		return ERR_PTR(-ENOMEM);
2601
2602	tlv = (void *)skb->data;
2603	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
2604	tlv->len = __cpu_to_le16(sizeof(*cmd));
2605	cmd = (void *)tlv->value;
2606	cmd->vdev_id = __cpu_to_le32(vdev_id);
2607	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2608
2609	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
2610	return skb;
2611}
2612
2613static struct sk_buff *
2614ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
2615				 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
2616{
2617	struct wmi_peer_flush_tids_cmd *cmd;
2618	struct wmi_tlv *tlv;
2619	struct sk_buff *skb;
2620
2621	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2622	if (!skb)
2623		return ERR_PTR(-ENOMEM);
2624
2625	tlv = (void *)skb->data;
2626	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
2627	tlv->len = __cpu_to_le16(sizeof(*cmd));
2628	cmd = (void *)tlv->value;
2629	cmd->vdev_id = __cpu_to_le32(vdev_id);
2630	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
2631	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2632
2633	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
2634	return skb;
2635}
2636
2637static struct sk_buff *
2638ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
2639				     const u8 *peer_addr,
2640				     enum wmi_peer_param param_id,
2641				     u32 param_value)
2642{
2643	struct wmi_peer_set_param_cmd *cmd;
2644	struct wmi_tlv *tlv;
2645	struct sk_buff *skb;
2646
2647	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2648	if (!skb)
2649		return ERR_PTR(-ENOMEM);
2650
2651	tlv = (void *)skb->data;
2652	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
2653	tlv->len = __cpu_to_le16(sizeof(*cmd));
2654	cmd = (void *)tlv->value;
2655	cmd->vdev_id = __cpu_to_le32(vdev_id);
2656	cmd->param_id = __cpu_to_le32(param_id);
2657	cmd->param_value = __cpu_to_le32(param_value);
2658	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2659
2660	ath10k_dbg(ar, ATH10K_DBG_WMI,
2661		   "wmi tlv vdev %d peer %pM set param %d value 0x%x\n",
2662		   vdev_id, peer_addr, param_id, param_value);
2663	return skb;
2664}
2665
/* Build a WMI TLV peer assoc-complete command.
 *
 * Layout (in order): fixed assoc command TLV, legacy rates ARRAY_BYTE TLV,
 * HT rates ARRAY_BYTE TLV, and a VHT rate-set TLV. Rate arrays are padded
 * to a __le32 boundary.
 * Returns the skb, or ERR_PTR(-EINVAL/-ENOMEM) on bad args / OOM.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
				 const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_tlv_peer_assoc_cmd *cmd;
	struct wmi_vht_rate_set *vht_rate;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len, legacy_rate_len, ht_rate_len;
	void *ptr;

	/* Sanity-check the variable-length inputs before sizing the skb. */
	if (arg->peer_mpdu_density > 16)
		return ERR_PTR(-EINVAL);
	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
		return ERR_PTR(-EINVAL);
	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
		return ERR_PTR(-EINVAL);

	legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
				  sizeof(__le32));
	ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + legacy_rate_len) +
	      (sizeof(*tlv) + ht_rate_len) +
	      (sizeof(*tlv) + sizeof(*vht_rate));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* TLV 1: the fixed-size assoc-complete command. */
	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
	cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
	cmd->flags = __cpu_to_le32(arg->peer_flags);
	cmd->caps = __cpu_to_le32(arg->peer_caps);
	cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
	cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
	cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
	cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
	cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
	cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
	cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
	cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
	cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
	cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
	ether_addr_copy(cmd->mac_addr.addr, arg->addr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* TLV 2: legacy rate bytes (only num_rates are meaningful). */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(legacy_rate_len);
	memcpy(tlv->value, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	ptr += sizeof(*tlv);
	ptr += legacy_rate_len;

	/* TLV 3: HT rate bytes. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(ht_rate_len);
	memcpy(tlv->value, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	ptr += sizeof(*tlv);
	ptr += ht_rate_len;

	/* TLV 4: VHT rate set. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
	tlv->len = __cpu_to_le16(sizeof(*vht_rate));
	vht_rate = (void *)tlv->value;

	vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
	vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
	vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
	vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);

	ptr += sizeof(*tlv);
	ptr += sizeof(*vht_rate);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
	return skb;
}
2755
2756static struct sk_buff *
2757ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
2758				 enum wmi_sta_ps_mode psmode)
2759{
2760	struct wmi_sta_powersave_mode_cmd *cmd;
2761	struct wmi_tlv *tlv;
2762	struct sk_buff *skb;
2763
2764	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2765	if (!skb)
2766		return ERR_PTR(-ENOMEM);
2767
2768	tlv = (void *)skb->data;
2769	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
2770	tlv->len = __cpu_to_le16(sizeof(*cmd));
2771	cmd = (void *)tlv->value;
2772	cmd->vdev_id = __cpu_to_le32(vdev_id);
2773	cmd->sta_ps_mode = __cpu_to_le32(psmode);
2774
2775	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
2776	return skb;
2777}
2778
2779static struct sk_buff *
2780ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
2781				 enum wmi_sta_powersave_param param_id,
2782				 u32 param_value)
2783{
2784	struct wmi_sta_powersave_param_cmd *cmd;
2785	struct wmi_tlv *tlv;
2786	struct sk_buff *skb;
2787
2788	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2789	if (!skb)
2790		return ERR_PTR(-ENOMEM);
2791
2792	tlv = (void *)skb->data;
2793	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
2794	tlv->len = __cpu_to_le16(sizeof(*cmd));
2795	cmd = (void *)tlv->value;
2796	cmd->vdev_id = __cpu_to_le32(vdev_id);
2797	cmd->param_id = __cpu_to_le32(param_id);
2798	cmd->param_value = __cpu_to_le32(param_value);
2799
2800	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
2801	return skb;
2802}
2803
2804static struct sk_buff *
2805ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
2806				enum wmi_ap_ps_peer_param param_id, u32 value)
2807{
2808	struct wmi_ap_ps_peer_cmd *cmd;
2809	struct wmi_tlv *tlv;
2810	struct sk_buff *skb;
2811
2812	if (!mac)
2813		return ERR_PTR(-EINVAL);
2814
2815	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2816	if (!skb)
2817		return ERR_PTR(-ENOMEM);
2818
2819	tlv = (void *)skb->data;
2820	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
2821	tlv->len = __cpu_to_le16(sizeof(*cmd));
2822	cmd = (void *)tlv->value;
2823	cmd->vdev_id = __cpu_to_le32(vdev_id);
2824	cmd->param_id = __cpu_to_le32(param_id);
2825	cmd->param_value = __cpu_to_le32(value);
2826	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2827
2828	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
2829	return skb;
2830}
2831
/* Build a WMI TLV scan channel list command.
 *
 * Layout: fixed command TLV (channel count), then an ARRAY_STRUCT TLV
 * containing one CHANNEL TLV per channel in @arg.
 * Returns the skb, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
				     const struct wmi_scan_chan_list_arg *arg)
{
	struct wmi_tlv_scan_chan_list_cmd *cmd;
	struct wmi_channel *ci;
	struct wmi_channel_arg *ch;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t chans_len, len;
	int i;
	void *ptr, *chans;

	/* Each channel costs a TLV header plus a wmi_channel payload. */
	chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + chans_len);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Outer array TLV wrapping the per-channel TLVs. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(chans_len);
	chans = (void *)tlv->value;

	for (i = 0; i < arg->n_channels; i++) {
		ch = &arg->channels[i];

		tlv = chans;
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
		tlv->len = __cpu_to_le16(sizeof(*ci));
		ci = (void *)tlv->value;

		ath10k_wmi_put_wmi_channel(ar, ci, ch);

		chans += sizeof(*tlv);
		chans += sizeof(*ci);
	}

	ptr += sizeof(*tlv);
	ptr += chans_len;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
	return skb;
}
2888
2889static struct sk_buff *
2890ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui)
2891{
2892	struct wmi_scan_prob_req_oui_cmd *cmd;
2893	struct wmi_tlv *tlv;
2894	struct sk_buff *skb;
2895
2896	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2897	if (!skb)
2898		return ERR_PTR(-ENOMEM);
2899
2900	tlv = (void *)skb->data;
2901	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD);
2902	tlv->len = __cpu_to_le16(sizeof(*cmd));
2903	cmd = (void *)tlv->value;
2904	cmd->prob_req_oui = __cpu_to_le32(prob_req_oui);
2905
2906	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n");
2907	return skb;
2908}
2909
/* Build a WMI TLV "beacon send from host" command that points the firmware
 * at a DMA-mapped beacon buffer (@bcn_paddr) instead of inlining the frame.
 * Returns the skb, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
				 const void *bcn, size_t bcn_len,
				 u32 bcn_paddr, bool dtim_zero,
				 bool deliver_cab)

{
	struct wmi_bcn_tx_ref_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	u16 fc;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Firmware wants the beacon's frame-control field in the command. */
	hdr = (struct ieee80211_hdr *)bcn;
	fc = le16_to_cpu(hdr->frame_control);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->data_len = __cpu_to_le32(bcn_len);
	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
	cmd->msdu_id = 0;
	cmd->frame_control = __cpu_to_le32(fc);
	cmd->flags = 0;

	if (dtim_zero)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);

	if (deliver_cab)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
	return skb;
}
2950
2951static struct sk_buff *
2952ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
2953				   const struct wmi_wmm_params_all_arg *arg)
2954{
2955	struct wmi_tlv_pdev_set_wmm_cmd *cmd;
2956	struct wmi_wmm_params *wmm;
2957	struct wmi_tlv *tlv;
2958	struct sk_buff *skb;
2959	size_t len;
2960	void *ptr;
2961
2962	len = (sizeof(*tlv) + sizeof(*cmd)) +
2963	      (4 * (sizeof(*tlv) + sizeof(*wmm)));
2964	skb = ath10k_wmi_alloc_skb(ar, len);
2965	if (!skb)
2966		return ERR_PTR(-ENOMEM);
2967
2968	ptr = (void *)skb->data;
2969
2970	tlv = ptr;
2971	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
2972	tlv->len = __cpu_to_le16(sizeof(*cmd));
2973	cmd = (void *)tlv->value;
2974
2975	/* nothing to set here */
2976
2977	ptr += sizeof(*tlv);
2978	ptr += sizeof(*cmd);
2979
2980	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
2981	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
2982	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
2983	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
2984
2985	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
2986	return skb;
2987}
2988
2989static struct sk_buff *
2990ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
2991{
2992	struct wmi_request_stats_cmd *cmd;
2993	struct wmi_tlv *tlv;
2994	struct sk_buff *skb;
2995
2996	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2997	if (!skb)
2998		return ERR_PTR(-ENOMEM);
2999
3000	tlv = (void *)skb->data;
3001	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
3002	tlv->len = __cpu_to_le16(sizeof(*cmd));
3003	cmd = (void *)tlv->value;
3004	cmd->stats_id = __cpu_to_le32(stats_mask);
3005
3006	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
3007	return skb;
3008}
3009
3010static struct sk_buff *
3011ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar,
3012					      u32 vdev_id,
3013					      enum wmi_peer_stats_info_request_type type,
3014					      u8 *addr,
3015					      u32 reset)
3016{
3017	struct wmi_tlv_request_peer_stats_info *cmd;
3018	struct wmi_tlv *tlv;
3019	struct sk_buff *skb;
3020
3021	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3022	if (!skb)
3023		return ERR_PTR(-ENOMEM);
3024
3025	tlv = (void *)skb->data;
3026	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD);
3027	tlv->len = __cpu_to_le16(sizeof(*cmd));
3028	cmd = (void *)tlv->value;
3029	cmd->vdev_id = __cpu_to_le32(vdev_id);
3030	cmd->request_type = __cpu_to_le32(type);
3031
3032	if (type == WMI_REQUEST_ONE_PEER_STATS_INFO)
3033		ether_addr_copy(cmd->peer_macaddr.addr, addr);
3034
3035	cmd->reset_after_request = __cpu_to_le32(reset);
3036	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n");
3037	return skb;
3038}
3039
3040static int
3041ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
3042				       struct sk_buff *msdu)
3043{
3044	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
3045	struct ath10k_wmi *wmi = &ar->wmi;
3046
3047	idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
3048
3049	return 0;
3050}
3051
3052static int
3053ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
3054				 dma_addr_t paddr)
3055{
3056	struct ath10k_wmi *wmi = &ar->wmi;
3057	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
3058	int ret;
3059
3060	pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
3061	if (!pkt_addr)
3062		return -ENOMEM;
3063
3064	pkt_addr->vaddr = skb;
3065	pkt_addr->paddr = paddr;
3066
3067	spin_lock_bh(&ar->data_lock);
3068	ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
3069			wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
3070	spin_unlock_bh(&ar->data_lock);
3071
3072	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret);
3073	return ret;
3074}
3075
/* Build a WMI TLV mgmt-tx-by-reference command for @msdu.
 *
 * Layout: fixed mgmt-tx command TLV followed by an ARRAY_BYTE TLV with up
 * to WMI_TLV_MGMT_TX_FRAME_MAX_LEN bytes of the frame (the firmware fetches
 * the full frame via @paddr; cmd->frame_len carries the true length).
 * Returns the command skb, or an ERR_PTR on failure.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
				   dma_addr_t paddr)
{
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
	struct wmi_tlv_mgmt_tx_cmd *cmd;
	struct ieee80211_hdr *hdr;
	struct ath10k_vif *arvif;
	u32 buf_len = msdu->len;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int len, desc_id;
	u32 vdev_id;
	void *ptr;

	/* The vdev id comes from the transmitting vif. */
	if (!cb->vif)
		return ERR_PTR(-EINVAL);

	hdr = (struct ieee80211_hdr *)msdu->data;
	arvif = (void *)cb->vif->drv_priv;
	vdev_id = arvif->vdev_id;

	/* Only management frames and (QoS-)nullfunc frames go this path. */
	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) &&
			 (!(ieee80211_is_nullfunc(hdr->frame_control) ||
			 ieee80211_is_qos_nullfunc(hdr->frame_control)))))
		return ERR_PTR(-EINVAL);

	len = sizeof(*cmd) + 2 * sizeof(*tlv);

	/* Protected robust mgmt frames get room for the CCMP MIC trailer
	 * that the firmware will fill in.
	 */
	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		buf_len += IEEE80211_CCMP_MIC_LEN;
	}

	/* The inlined copy is capped and padded to a 4-byte multiple. */
	buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
	buf_len = round_up(buf_len, 4);

	len += buf_len;
	len = round_up(len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
	if (desc_id < 0)
		goto err_free_skb;

	cb->msdu_id = desc_id;

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->desc_id = __cpu_to_le32(desc_id);
	cmd->chanfreq = 0;
	cmd->buf_len = __cpu_to_le32(buf_len);
	cmd->frame_len = __cpu_to_le32(msdu->len);
	cmd->paddr = __cpu_to_le64(paddr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Second TLV: the leading buf_len bytes of the frame itself. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(buf_len);

	ptr += sizeof(*tlv);
	memcpy(ptr, msdu->data, buf_len);

	return skb;

err_free_skb:
	/* desc_id holds the negative errno from msdu_id allocation. */
	dev_kfree_skb(skb);
	return ERR_PTR(desc_id);
}
3156
3157static struct sk_buff *
3158ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
3159				    enum wmi_force_fw_hang_type type,
3160				    u32 delay_ms)
3161{
3162	struct wmi_force_fw_hang_cmd *cmd;
3163	struct wmi_tlv *tlv;
3164	struct sk_buff *skb;
3165
3166	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3167	if (!skb)
3168		return ERR_PTR(-ENOMEM);
3169
3170	tlv = (void *)skb->data;
3171	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
3172	tlv->len = __cpu_to_le16(sizeof(*cmd));
3173	cmd = (void *)tlv->value;
3174	cmd->type = __cpu_to_le32(type);
3175	cmd->delay_ms = __cpu_to_le32(delay_ms);
3176
3177	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
3178	return skb;
3179}
3180
3181static struct sk_buff *
3182ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
3183				 u32 log_level)
3184{
3185	struct wmi_tlv_dbglog_cmd *cmd;
3186	struct wmi_tlv *tlv;
3187	struct sk_buff *skb;
3188	size_t len, bmap_len;
3189	u32 value;
3190	void *ptr;
3191
3192	if (module_enable) {
3193		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
3194				module_enable,
3195				WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
3196	} else {
3197		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
3198				WMI_TLV_DBGLOG_ALL_MODULES,
3199				WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
3200	}
3201
3202	bmap_len = 0;
3203	len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
3204	skb = ath10k_wmi_alloc_skb(ar, len);
3205	if (!skb)
3206		return ERR_PTR(-ENOMEM);
3207
3208	ptr = (void *)skb->data;
3209
3210	tlv = ptr;
3211	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
3212	tlv->len = __cpu_to_le16(sizeof(*cmd));
3213	cmd = (void *)tlv->value;
3214	cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
3215	cmd->value = __cpu_to_le32(value);
3216
3217	ptr += sizeof(*tlv);
3218	ptr += sizeof(*cmd);
3219
3220	tlv = ptr;
3221	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3222	tlv->len = __cpu_to_le16(bmap_len);
3223
3224	/* nothing to do here */
3225
3226	ptr += sizeof(*tlv);
3227	ptr += sizeof(bmap_len);
3228
3229	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
3230	return skb;
3231}
3232
3233static struct sk_buff *
3234ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
3235{
3236	struct wmi_tlv_pktlog_enable *cmd;
3237	struct wmi_tlv *tlv;
3238	struct sk_buff *skb;
3239	void *ptr;
3240	size_t len;
3241
3242	len = sizeof(*tlv) + sizeof(*cmd);
3243	skb = ath10k_wmi_alloc_skb(ar, len);
3244	if (!skb)
3245		return ERR_PTR(-ENOMEM);
3246
3247	ptr = (void *)skb->data;
3248	tlv = ptr;
3249	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
3250	tlv->len = __cpu_to_le16(sizeof(*cmd));
3251	cmd = (void *)tlv->value;
3252	cmd->filter = __cpu_to_le32(filter);
3253
3254	ptr += sizeof(*tlv);
3255	ptr += sizeof(*cmd);
3256
3257	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
3258		   filter);
3259	return skb;
3260}
3261
3262static struct sk_buff *
3263ath10k_wmi_tlv_op_gen_pdev_get_temperature(struct ath10k *ar)
3264{
3265	struct wmi_tlv_pdev_get_temp_cmd *cmd;
3266	struct wmi_tlv *tlv;
3267	struct sk_buff *skb;
3268
3269	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3270	if (!skb)
3271		return ERR_PTR(-ENOMEM);
3272
3273	tlv = (void *)skb->data;
3274	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD);
3275	tlv->len = __cpu_to_le16(sizeof(*cmd));
3276	cmd = (void *)tlv->value;
3277	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature tlv\n");
3278	return skb;
3279}
3280
3281static struct sk_buff *
3282ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
3283{
3284	struct wmi_tlv_pktlog_disable *cmd;
3285	struct wmi_tlv *tlv;
3286	struct sk_buff *skb;
3287	void *ptr;
3288	size_t len;
3289
3290	len = sizeof(*tlv) + sizeof(*cmd);
3291	skb = ath10k_wmi_alloc_skb(ar, len);
3292	if (!skb)
3293		return ERR_PTR(-ENOMEM);
3294
3295	ptr = (void *)skb->data;
3296	tlv = ptr;
3297	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
3298	tlv->len = __cpu_to_le16(sizeof(*cmd));
3299	cmd = (void *)tlv->value;
3300
3301	ptr += sizeof(*tlv);
3302	ptr += sizeof(*cmd);
3303
3304	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
3305	return skb;
3306}
3307
/* Build a beacon template command for @vdev_id.
 *
 * Layout: BCN_TMPL_CMD TLV, followed by a BCN_PRB_INFO TLV carrying
 * probe-response caps/erp plus @prb_ies, followed by an ARRAY_BYTE TLV
 * with the beacon frame itself (padded to a 4-byte multiple).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
			       u32 tim_ie_offset, struct sk_buff *bcn,
			       u32 prb_caps, u32 prb_erp, void *prb_ies,
			       size_t prb_ies_len)
{
	struct wmi_tlv_bcn_tmpl_cmd *cmd;
	struct wmi_tlv_bcn_prb_info *info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	/* Non-zero IE length requires a valid IE buffer. */
	if (WARN_ON(prb_ies_len > 0 && !prb_ies))
		return ERR_PTR(-EINVAL);

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*info) + prb_ies_len +
	      sizeof(*tlv) + roundup(bcn->len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
	/* buf_len is the real beacon length; only the TLV is padded. */
	cmd->buf_len = __cpu_to_le32(bcn->len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* FIXME: prb_ies_len should be probably aligned to 4byte boundary but
	 * then it is then impossible to pass original ie len.
	 * This chunk is not used yet so if setting probe resp template yields
	 * problems with beaconing or crashes firmware look here.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
	info = (void *)tlv->value;
	info->caps = __cpu_to_le32(prb_caps);
	info->erp = __cpu_to_le32(prb_erp);
	memcpy(info->ies, prb_ies, prb_ies_len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*info);
	ptr += prb_ies_len;

	/* Beacon frame bytes; TLV length is 4-byte padded, the pad bytes
	 * come zeroed from the skb allocation.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
	memcpy(tlv->value, bcn->data, bcn->len);

	/* FIXME: Adjust TSF? */

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
		   vdev_id);
	return skb;
}
3371
3372static struct sk_buff *
3373ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
3374			       struct sk_buff *prb)
3375{
3376	struct wmi_tlv_prb_tmpl_cmd *cmd;
3377	struct wmi_tlv_bcn_prb_info *info;
3378	struct wmi_tlv *tlv;
3379	struct sk_buff *skb;
3380	void *ptr;
3381	size_t len;
3382
3383	len = sizeof(*tlv) + sizeof(*cmd) +
3384	      sizeof(*tlv) + sizeof(*info) +
3385	      sizeof(*tlv) + roundup(prb->len, 4);
3386	skb = ath10k_wmi_alloc_skb(ar, len);
3387	if (!skb)
3388		return ERR_PTR(-ENOMEM);
3389
3390	ptr = (void *)skb->data;
3391	tlv = ptr;
3392	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
3393	tlv->len = __cpu_to_le16(sizeof(*cmd));
3394	cmd = (void *)tlv->value;
3395	cmd->vdev_id = __cpu_to_le32(vdev_id);
3396	cmd->buf_len = __cpu_to_le32(prb->len);
3397
3398	ptr += sizeof(*tlv);
3399	ptr += sizeof(*cmd);
3400
3401	tlv = ptr;
3402	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
3403	tlv->len = __cpu_to_le16(sizeof(*info));
3404	info = (void *)tlv->value;
3405	info->caps = 0;
3406	info->erp = 0;
3407
3408	ptr += sizeof(*tlv);
3409	ptr += sizeof(*info);
3410
3411	tlv = ptr;
3412	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
3413	tlv->len = __cpu_to_le16(roundup(prb->len, 4));
3414	memcpy(tlv->value, prb->data, prb->len);
3415
3416	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
3417		   vdev_id);
3418	return skb;
3419}
3420
/* Build a command installing the P2P GO beacon IE for @vdev_id.
 *
 * @p2p_ie points to a raw IEEE 802.11 information element: p2p_ie[1] is
 * the IE body length, so the full element is p2p_ie[1] + 2 bytes.
 *
 * NOTE(review): p2p_ie is dereferenced without a NULL or length check —
 * callers are presumably required to pass a valid IE; confirm at call
 * sites before relying on this.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
				    const u8 *p2p_ie)
{
	struct wmi_tlv_p2p_go_bcn_ie *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	/* ie_len is the true element length; only the TLV is padded. */
	cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
	memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);

	ptr += sizeof(*tlv);
	ptr += roundup(p2p_ie[1] + 2, 4);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
		   vdev_id);
	return skb;
}
3460
/* Build a command updating firmware's global TDLS state for @vdev_id.
 *
 * The various threshold/timeout fields below are fixed driver defaults;
 * field names suggest units (ms, dBm for rssi_*) — confirm against the
 * firmware interface before changing them.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
					   enum wmi_tdls_state state)
{
	struct wmi_tdls_set_state_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;
	/* Set to options from wmi_tlv_tdls_options,
	 * for now none of them are enabled.
	 */
	u32 options = 0;

	/* Opt into UAPSD buffer-STA handling when firmware supports it. */
	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
		options |=  WMI_TLV_TDLS_BUFFER_STA_EN;

	/* WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL means firm will handle TDLS
	 * link inactivity detecting logic.
	 */
	if (state == WMI_TDLS_ENABLE_ACTIVE)
		state = WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));

	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->state = __cpu_to_le32(state);
	cmd->notification_interval_ms = __cpu_to_le32(5000);
	cmd->tx_discovery_threshold = __cpu_to_le32(100);
	cmd->tx_teardown_threshold = __cpu_to_le32(5);
	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
	cmd->rssi_delta = __cpu_to_le32(-20);
	cmd->tdls_options = __cpu_to_le32(options);
	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
	/* 0xf = all four UAPSD ACs enabled in the mask. */
	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
		   state, vdev_id);
	return skb;
}
3516
3517static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
3518{
3519	u32 peer_qos = 0;
3520
3521	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
3522		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
3523	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
3524		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
3525	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
3526		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
3527	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
3528		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
3529
3530	peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
3531
3532	return peer_qos;
3533}
3534
/* Build a TDLS peer update command.
 *
 * Layout: TDLS_PEER_UPDATE_CMD TLV, a TDLS_PEER_CAPABILITIES TLV, then
 * an ARRAY_STRUCT TLV containing cap->peer_chan_len nested CHANNEL TLVs.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
				       const struct wmi_tdls_peer_update_cmd_arg *arg,
				       const struct wmi_tdls_peer_capab_arg *cap,
				       const struct wmi_channel_arg *chan_arg)
{
	struct wmi_tdls_peer_update_cmd *cmd;
	struct wmi_tdls_peer_capab *peer_cap;
	struct wmi_channel *chan;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 peer_qos;
	void *ptr;
	int len;
	int i;

	/* NOTE(review): the channel-array size here counts only the
	 * channel structs; each per-channel TLV header written below is
	 * part of sizeof(*chan)'s layout only if wmi_channel includes
	 * it — the loop advances by sizeof(*tlv) + sizeof(*chan), so
	 * the ARRAY_STRUCT length and the allocation exclude the inner
	 * TLV headers; verify against the allocation if peer_chan_len
	 * can be non-zero.
	 */
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*peer_cap) +
	      sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));

	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
	cmd->peer_state = __cpu_to_le32(arg->peer_state);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Peer capabilities TLV. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
	tlv->len = __cpu_to_le16(sizeof(*peer_cap));
	peer_cap = (void *)tlv->value;
	peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
						   cap->peer_max_sp);
	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);

	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
		peer_cap->peer_operclass[i] = cap->peer_operclass[i];

	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);

	ptr += sizeof(*tlv);
	ptr += sizeof(*peer_cap);

	/* Array of per-channel structures. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));

	ptr += sizeof(*tlv);

	for (i = 0; i < cap->peer_chan_len; i++) {
		tlv = ptr;
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
		tlv->len = __cpu_to_le16(sizeof(*chan));
		chan = (void *)tlv->value;
		ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);

		ptr += sizeof(*tlv);
		ptr += sizeof(*chan);
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
	return skb;
}
3618
3619static struct sk_buff *
3620ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
3621					  u32 duration, u32 next_offset,
3622					  u32 enabled)
3623{
3624	struct wmi_tlv_set_quiet_cmd *cmd;
3625	struct wmi_tlv *tlv;
3626	struct sk_buff *skb;
3627
3628	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
3629	if (!skb)
3630		return ERR_PTR(-ENOMEM);
3631
3632	tlv = (void *)skb->data;
3633	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD);
3634	tlv->len = __cpu_to_le16(sizeof(*cmd));
3635	cmd = (void *)tlv->value;
3636
3637	/* vdev_id is not in use, set to 0 */
3638	cmd->vdev_id = __cpu_to_le32(0);
3639	cmd->period = __cpu_to_le32(period);
3640	cmd->duration = __cpu_to_le32(duration);
3641	cmd->next_start = __cpu_to_le32(next_offset);
3642	cmd->enabled = __cpu_to_le32(enabled);
3643
3644	ath10k_dbg(ar, ATH10K_DBG_WMI,
3645		   "wmi tlv quiet param: period %u duration %u enabled %d\n",
3646		   period, duration, enabled);
3647	return skb;
3648}
3649
3650static struct sk_buff *
3651ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
3652{
3653	struct wmi_tlv_wow_enable_cmd *cmd;
3654	struct wmi_tlv *tlv;
3655	struct sk_buff *skb;
3656	size_t len;
3657
3658	len = sizeof(*tlv) + sizeof(*cmd);
3659	skb = ath10k_wmi_alloc_skb(ar, len);
3660	if (!skb)
3661		return ERR_PTR(-ENOMEM);
3662
3663	tlv = (struct wmi_tlv *)skb->data;
3664	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
3665	tlv->len = __cpu_to_le16(sizeof(*cmd));
3666	cmd = (void *)tlv->value;
3667
3668	cmd->enable = __cpu_to_le32(1);
3669	if (!ar->bus_param.link_can_suspend)
3670		cmd->pause_iface_config = __cpu_to_le32(WOW_IFACE_PAUSE_DISABLED);
3671
3672	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
3673	return skb;
3674}
3675
3676static struct sk_buff *
3677ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
3678					   u32 vdev_id,
3679					   enum wmi_wow_wakeup_event event,
3680					   u32 enable)
3681{
3682	struct wmi_tlv_wow_add_del_event_cmd *cmd;
3683	struct wmi_tlv *tlv;
3684	struct sk_buff *skb;
3685	size_t len;
3686
3687	len = sizeof(*tlv) + sizeof(*cmd);
3688	skb = ath10k_wmi_alloc_skb(ar, len);
3689	if (!skb)
3690		return ERR_PTR(-ENOMEM);
3691
3692	tlv = (struct wmi_tlv *)skb->data;
3693	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
3694	tlv->len = __cpu_to_le16(sizeof(*cmd));
3695	cmd = (void *)tlv->value;
3696
3697	cmd->vdev_id = __cpu_to_le32(vdev_id);
3698	cmd->is_add = __cpu_to_le32(enable);
3699	cmd->event_bitmap = __cpu_to_le32(1 << event);
3700
3701	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
3702		   wow_wakeup_event(event), enable, vdev_id);
3703	return skb;
3704}
3705
3706static struct sk_buff *
3707ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
3708{
3709	struct wmi_tlv_wow_host_wakeup_ind *cmd;
3710	struct wmi_tlv *tlv;
3711	struct sk_buff *skb;
3712	size_t len;
3713
3714	len = sizeof(*tlv) + sizeof(*cmd);
3715	skb = ath10k_wmi_alloc_skb(ar, len);
3716	if (!skb)
3717		return ERR_PTR(-ENOMEM);
3718
3719	tlv = (struct wmi_tlv *)skb->data;
3720	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
3721	tlv->len = __cpu_to_le16(sizeof(*cmd));
3722	cmd = (void *)tlv->value;
3723
3724	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
3725	return skb;
3726}
3727
/* Build a WoW bitmap-pattern add command.
 *
 * The command carries one bitmap pattern plus the mandatory (but empty)
 * ipv4-sync, ipv6-sync, magic-pattern and timeout TLVs required by the
 * firmware interface, and a one-element ratelimit-interval array.
 *
 * NOTE(review): @pattern and @bitmask are copied @pattern_len bytes
 * into fixed-size patternbuf/bitmaskbuf fields with no bounds check
 * here — callers presumably guarantee pattern_len fits; confirm at the
 * call sites.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
				      u32 pattern_id, const u8 *pattern,
				      const u8 *bitmask, int pattern_len,
				      int pattern_offset)
{
	struct wmi_tlv_wow_add_pattern_cmd *cmd;
	struct wmi_tlv_wow_bitmap_pattern *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*tlv) + sizeof(*bitmap) +  /* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* cmd */
	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->pattern_id = __cpu_to_le32(pattern_id);
	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* bitmap: outer ARRAY_STRUCT length covers the inner TLV header
	 * plus the pattern struct.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));

	ptr += sizeof(*tlv);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
	tlv->len = __cpu_to_le16(sizeof(*bitmap));
	bitmap = (void *)tlv->value;

	/* Pattern and mask share the same length. */
	memcpy(bitmap->patternbuf, pattern, pattern_len);
	memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
	bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
	bitmap->pattern_len = __cpu_to_le32(pattern_len);
	bitmap->bitmask_len = __cpu_to_le32(pattern_len);
	bitmap->pattern_id = __cpu_to_le32(pattern_id);

	ptr += sizeof(*tlv);
	ptr += sizeof(*bitmap);

	/* ipv4 sync */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* ipv6 sync */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* magic */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* pattern info timeout */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* ratelimit interval: value left zeroed from skb allocation. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(sizeof(u32));

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
		   vdev_id, pattern_id, pattern_offset);
	return skb;
}
3827
3828static struct sk_buff *
3829ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
3830				      u32 pattern_id)
3831{
3832	struct wmi_tlv_wow_del_pattern_cmd *cmd;
3833	struct wmi_tlv *tlv;
3834	struct sk_buff *skb;
3835	size_t len;
3836
3837	len = sizeof(*tlv) + sizeof(*cmd);
3838	skb = ath10k_wmi_alloc_skb(ar, len);
3839	if (!skb)
3840		return ERR_PTR(-ENOMEM);
3841
3842	tlv = (struct wmi_tlv *)skb->data;
3843	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
3844	tlv->len = __cpu_to_le16(sizeof(*cmd));
3845	cmd = (void *)tlv->value;
3846
3847	cmd->vdev_id = __cpu_to_le32(vdev_id);
3848	cmd->pattern_id = __cpu_to_le32(pattern_id);
3849	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
3850
3851	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
3852		   vdev_id, pattern_id);
3853	return skb;
3854}
3855
/* Request FW to start PNO operation */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
				       u32 vdev_id,
				       struct wmi_pno_scan_req *pno)
{
	struct nlo_configured_parameters *nlo_list;
	struct wmi_tlv_wow_nlo_config_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	__le32 *channel_list;
	u16 tlv_len;
	size_t len;
	void *ptr;
	u32 i;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_parameters(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */

	/* Channel list is taken from network 0 only; SSID and channel
	 * counts are clamped to the WMI maxima.
	 */
	len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
				   WMI_NLO_MAX_CHAN);
	len += sizeof(struct nlo_configured_parameters) *
				min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	/* wmi_tlv_wow_nlo_config_cmd parameters*/
	/* NOTE(review): the command uses pno->vdev_id while the
	 * @vdev_id parameter is only used in the debug print below —
	 * confirm both always match at the call sites.
	 */
	cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
	cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);

	if (pno->do_passive_scan)
		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);

	/* copy scan interval */
	cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
	cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
	cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
	cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);

	/* Optional MAC randomization for probe requests. */
	if (pno->enable_pno_scan_randomization) {
		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
				WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
	}

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* nlo_configured_parameters(nlo_list) */
	cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
					       WMI_NLO_MAX_SSIDS));
	tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
		sizeof(struct nlo_configured_parameters);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(tlv_len);

	ptr += sizeof(*tlv);
	nlo_list = ptr;
	for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
		/* Each entry carries its own TLV header embedded in the
		 * struct (tlv_header field).
		 */
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
		tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
					 sizeof(*tlv));

		/* copy ssid and it's length */
		nlo_list[i].ssid.valid = __cpu_to_le32(true);
		nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));

		/* copy rssi threshold */
		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
			nlo_list[i].rssi_cond.rssi =
				__cpu_to_le32(pno->a_networks[i].rssi_threshold);
		}

		nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
		nlo_list[i].bcast_nw_type.bcast_nw_type =
			__cpu_to_le32(pno->a_networks[i].bcast_nw_type);
	}

	ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters);

	/* copy channel info */
	cmd->num_of_channels = __cpu_to_le32(min_t(u8,
						   pno->a_networks[0].channel_count,
						   WMI_NLO_MAX_CHAN));

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
				 sizeof(u_int32_t));
	ptr += sizeof(*tlv);

	channel_list = (__le32 *)ptr;
	for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
		channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}
3982
3983/* Request FW to stop ongoing PNO operation */
3984static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
3985							     u32 vdev_id)
3986{
3987	struct wmi_tlv_wow_nlo_config_cmd *cmd;
3988	struct wmi_tlv *tlv;
3989	struct sk_buff *skb;
3990	void *ptr;
3991	size_t len;
3992
3993	len = sizeof(*tlv) + sizeof(*cmd) +
3994	      sizeof(*tlv) +
3995	      /* TLV place holder for array of structures
3996	       * nlo_configured_parameters(nlo_list)
3997	       */
3998	      sizeof(*tlv);
3999	      /* TLV place holder for array of uint32 channel_list */
4000	skb = ath10k_wmi_alloc_skb(ar, len);
4001	if (!skb)
4002		return ERR_PTR(-ENOMEM);
4003
4004	ptr = (void *)skb->data;
4005	tlv = ptr;
4006	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
4007	tlv->len = __cpu_to_le16(sizeof(*cmd));
4008	cmd = (void *)tlv->value;
4009
4010	cmd->vdev_id = __cpu_to_le32(vdev_id);
4011	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);
4012
4013	ptr += sizeof(*tlv);
4014	ptr += sizeof(*cmd);
4015
4016	/* nlo_configured_parameters(nlo_list) */
4017	tlv = ptr;
4018	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
4019	tlv->len = __cpu_to_le16(0);
4020
4021	ptr += sizeof(*tlv);
4022
4023	/* channel list */
4024	tlv = ptr;
4025	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
4026	tlv->len = __cpu_to_le16(0);
4027
4028	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id);
4029	return skb;
4030}
4031
4032static struct sk_buff *
4033ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
4034				 struct wmi_pno_scan_req *pno_scan)
4035{
4036	if (pno_scan->enable)
4037		return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan);
4038	else
4039		return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
4040}
4041
4042static struct sk_buff *
4043ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
4044{
4045	struct wmi_tlv_adaptive_qcs *cmd;
4046	struct wmi_tlv *tlv;
4047	struct sk_buff *skb;
4048	void *ptr;
4049	size_t len;
4050
4051	len = sizeof(*tlv) + sizeof(*cmd);
4052	skb = ath10k_wmi_alloc_skb(ar, len);
4053	if (!skb)
4054		return ERR_PTR(-ENOMEM);
4055
4056	ptr = (void *)skb->data;
4057	tlv = ptr;
4058	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
4059	tlv->len = __cpu_to_le16(sizeof(*cmd));
4060	cmd = (void *)tlv->value;
4061	cmd->enable = __cpu_to_le32(enable ? 1 : 0);
4062
4063	ptr += sizeof(*tlv);
4064	ptr += sizeof(*cmd);
4065
4066	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
4067	return skb;
4068}
4069
4070static struct sk_buff *
4071ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
4072{
4073	struct wmi_echo_cmd *cmd;
4074	struct wmi_tlv *tlv;
4075	struct sk_buff *skb;
4076	void *ptr;
4077	size_t len;
4078
4079	len = sizeof(*tlv) + sizeof(*cmd);
4080	skb = ath10k_wmi_alloc_skb(ar, len);
4081	if (!skb)
4082		return ERR_PTR(-ENOMEM);
4083
4084	ptr = (void *)skb->data;
4085	tlv = ptr;
4086	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
4087	tlv->len = __cpu_to_le16(sizeof(*cmd));
4088	cmd = (void *)tlv->value;
4089	cmd->value = cpu_to_le32(value);
4090
4091	ptr += sizeof(*tlv);
4092	ptr += sizeof(*cmd);
4093
4094	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
4095	return skb;
4096}
4097
4098static struct sk_buff *
4099ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar,
4100					 const struct wmi_vdev_spectral_conf_arg *arg)
4101{
4102	struct wmi_vdev_spectral_conf_cmd *cmd;
4103	struct sk_buff *skb;
4104	struct wmi_tlv *tlv;
4105	void *ptr;
4106	size_t len;
4107
4108	len = sizeof(*tlv) + sizeof(*cmd);
4109	skb = ath10k_wmi_alloc_skb(ar, len);
4110	if (!skb)
4111		return ERR_PTR(-ENOMEM);
4112
4113	ptr = (void *)skb->data;
4114	tlv = ptr;
4115	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD);
4116	tlv->len = __cpu_to_le16(sizeof(*cmd));
4117	cmd = (void *)tlv->value;
4118	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
4119	cmd->scan_count = __cpu_to_le32(arg->scan_count);
4120	cmd->scan_period = __cpu_to_le32(arg->scan_period);
4121	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
4122	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
4123	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
4124	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
4125	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
4126	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
4127	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
4128	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
4129	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
4130	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
4131	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
4132	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
4133	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
4134	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
4135	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
4136	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
4137
4138	return skb;
4139}
4140
4141static struct sk_buff *
4142ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
4143					   u32 trigger, u32 enable)
4144{
4145	struct wmi_vdev_spectral_enable_cmd *cmd;
4146	struct sk_buff *skb;
4147	struct wmi_tlv *tlv;
4148	void *ptr;
4149	size_t len;
4150
4151	len = sizeof(*tlv) + sizeof(*cmd);
4152	skb = ath10k_wmi_alloc_skb(ar, len);
4153	if (!skb)
4154		return ERR_PTR(-ENOMEM);
4155
4156	ptr = (void *)skb->data;
4157	tlv = ptr;
4158	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD);
4159	tlv->len = __cpu_to_le16(sizeof(*cmd));
4160	cmd = (void *)tlv->value;
4161	cmd->vdev_id = __cpu_to_le32(vdev_id);
4162	cmd->trigger_cmd = __cpu_to_le32(trigger);
4163	cmd->enable_cmd = __cpu_to_le32(enable);
4164
4165	return skb;
4166}
4167
4168/****************/
4169/* TLV mappings */
4170/****************/
4171
/* Maps the generic wmi_cmd_map fields onto the TLV firmware's command IDs.
 * Commands the TLV firmware does not implement are set to
 * WMI_CMD_UNSUPPORTED so wmi-ops can reject them at runtime.
 */
static struct wmi_cmd_map wmi_tlv_cmd_map = {
	.init_cmdid = WMI_TLV_INIT_CMDID,
	.start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
	.pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
	.mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
	/* NOTE(review): ofl_scan_add_ap_profile reuses WMI_TLV_ROAM_AP_PROFILE
	 * (same ID as roam_ap_profile above) rather than a dedicated
	 * OFL_SCAN_ADD enum - confirm this is intentional for TLV firmware.
	 */
	.ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
	.request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
	.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
				WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
	.echo_cmdid = WMI_TLV_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
	.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
	/* Everything below is not implemented by the TLV firmware interface. */
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
};
4331
/* Maps generic pdev parameter IDs onto the TLV firmware's pdev param IDs.
 * Parameters the TLV firmware does not expose are marked unsupported so
 * ath10k_wmi_pdev_set_param() can refuse them.
 */
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
	.tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	/* NOTE(review): pcielp_txbuf_watermark maps to the TMO_EN enum, same
	 * value as pcielp_txbuf_tmo_en below - confirm this is intentional and
	 * not a copy/paste of the wrong constant.
	 */
	.pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	.pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
				WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
	.dcs = WMI_TLV_PDEV_PARAM_DCS,
	.ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
	.idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
	.power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
	.fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
	.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
	.rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
	.peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
};
4429
/* Maps generic per-peer parameter IDs onto the TLV firmware's peer param
 * IDs, used by the peer_set_param command path.
 */
static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
	.smps_state = WMI_TLV_PEER_SMPS_STATE,
	.ampdu = WMI_TLV_PEER_AMPDU,
	.authorize = WMI_TLV_PEER_AUTHORIZE,
	.chan_width = WMI_TLV_PEER_CHAN_WIDTH,
	.nss = WMI_TLV_PEER_NSS,
	.use_4addr = WMI_TLV_PEER_USE_4ADDR,
	.membership = WMI_TLV_PEER_MEMBERSHIP,
	.user_pos = WMI_TLV_PEER_USERPOS,
	.crit_proto_hint_enabled = WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED,
	.tx_fail_cnt_thr = WMI_TLV_PEER_TX_FAIL_CNT_THR,
	.set_hw_retry_cts2s = WMI_TLV_PEER_SET_HW_RETRY_CTS2S,
	.ibss_atim_win_len = WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH,
	.phymode = WMI_TLV_PEER_PHYMODE,
	.use_fixed_power = WMI_TLV_PEER_USE_FIXED_PWR,
	.dummy_var = WMI_TLV_PEER_DUMMY_VAR,
};
4447
/* Maps generic vdev parameter IDs onto the TLV firmware's vdev param IDs.
 * Parameters with no TLV equivalent are marked unsupported.
 */
static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
	.rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_TLV_VDEV_PARAM_WDS,
	.atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection =	WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_TLV_VDEV_PARAM_SGI,
	.ldpc = WMI_TLV_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_TLV_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_TLV_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
					WMI_TLV_VDEV_PARAM_UNSUPPORTED,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};
4521
/* Ops vtable binding the generic wmi-ops dispatch layer to the TLV-specific
 * implementations in this file. Missing ops (left NULL or commented) cause
 * the corresponding wmi-ops wrapper to return -EOPNOTSUPP.
 */
static const struct wmi_ops wmi_tlv_ops = {
	.rx = ath10k_wmi_tlv_op_rx,
	.map_svc = wmi_tlv_svc_map,
	.map_svc_ext = wmi_tlv_svc_map_ext,

	/* Event parsers: unpack TLV-encoded firmware events into the
	 * generic arg structs shared with the non-TLV WMI variants.
	 */
	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
	.pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
	.pull_mgmt_tx_bundle_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev,
	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
	.pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail,
	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
	.pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
	.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,

	/* Command generators: build TLV-encoded command skbs for transmit. */
	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
	.gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
	.gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
	.gen_init = ath10k_wmi_tlv_op_gen_init,
	.gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
	.gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
	.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
	.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
	.gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
	.gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
	.gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui,
	.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
	.gen_request_peer_stats_info = ath10k_wmi_tlv_op_gen_request_peer_stats_info,
	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
	/* .gen_mgmt_tx = not implemented; HTT is used */
	.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
	.cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send,
	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode,
	.gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature,
	/* .gen_addba_clear_resp not implemented */
	/* .gen_addba_send not implemented */
	/* .gen_addba_set_resp not implemented */
	/* .gen_delba_send not implemented */
	.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
	.gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
	.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
	.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
	.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
	.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
	.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
	.gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno,
	.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
	.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
	.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
	/* Shared with the main (non-TLV) WMI implementation. */
	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
	.get_vdev_subtype = ath10k_wmi_tlv_op_get_vdev_subtype,
	.gen_echo = ath10k_wmi_tlv_op_gen_echo,
	.gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
};
4609
/* Maps generic peer-assoc capability flags onto the TLV firmware's flag
 * bit definitions.
 */
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
	.auth = WMI_TLV_PEER_AUTH,
	.qos = WMI_TLV_PEER_QOS,
	.need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY,
	.need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY,
	.apsd = WMI_TLV_PEER_APSD,
	.ht = WMI_TLV_PEER_HT,
	.bw40 = WMI_TLV_PEER_40MHZ,
	.stbc = WMI_TLV_PEER_STBC,
	/* ".ldbc" is how the shared wmi_peer_flags_map struct spells this
	 * member; it carries the LDPC capability flag.
	 */
	.ldbc = WMI_TLV_PEER_LDPC,
	.dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS,
	.static_mimops = WMI_TLV_PEER_STATIC_MIMOPS,
	.spatial_mux = WMI_TLV_PEER_SPATIAL_MUX,
	.vht = WMI_TLV_PEER_VHT,
	.bw80 = WMI_TLV_PEER_80MHZ,
	.pmf = WMI_TLV_PEER_PMF,
	.bw160 = WMI_TLV_PEER_160MHZ,
};
4628
4629/************/
4630/* TLV init */
4631/************/
4632
4633void ath10k_wmi_tlv_attach(struct ath10k *ar)
4634{
4635	ar->wmi.cmd = &wmi_tlv_cmd_map;
4636	ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
4637	ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
4638	ar->wmi.peer_param = &wmi_tlv_peer_param_map;
4639	ar->wmi.ops = &wmi_tlv_ops;
4640	ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
4641}
4642