1// SPDX-License-Identifier: BSD-3-Clause-Clear
2/*
3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4 */
5
6#include <linux/ieee80211.h>
7#include <linux/kernel.h>
8#include <linux/skbuff.h>
9#include <crypto/hash.h>
10#include "core.h"
11#include "debug.h"
12#include "debugfs_htt_stats.h"
13#include "debugfs_sta.h"
14#include "hal_desc.h"
15#include "hw.h"
16#include "dp_rx.h"
17#include "hal_rx.h"
18#include "dp_tx.h"
19#include "peer.h"
20
21#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
22
23static inline
24u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
25{
26	return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
27}
28
29static inline
30enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
31							struct hal_rx_desc *desc)
32{
33	if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
34		return HAL_ENCRYPT_TYPE_OPEN;
35
36	return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
37}
38
39static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
40						      struct hal_rx_desc *desc)
41{
42	return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
43}
44
45static inline
46bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
47					    struct hal_rx_desc *desc)
48{
49	return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
50}
51
52static inline
53u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
54					      struct hal_rx_desc *desc)
55{
56	return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
57}
58
59static inline
60bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
61					      struct hal_rx_desc *desc)
62{
63	return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
64}
65
66static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
67						      struct hal_rx_desc *desc)
68{
69	return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
70}
71
72static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
73							struct sk_buff *skb)
74{
75	struct ieee80211_hdr *hdr;
76
77	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
78	return ieee80211_has_morefrags(hdr->frame_control);
79}
80
81static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
82						    struct sk_buff *skb)
83{
84	struct ieee80211_hdr *hdr;
85
86	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
87	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
88}
89
90static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
91						   struct hal_rx_desc *desc)
92{
93	return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
94}
95
96static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
97					       struct hal_rx_desc *desc)
98{
99	return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
100}
101
102static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
103{
104	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
105			   __le32_to_cpu(attn->info2));
106}
107
108static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
109{
110	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
111			   __le32_to_cpu(attn->info1));
112}
113
114static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
115{
116	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
117			   __le32_to_cpu(attn->info1));
118}
119
120static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
121{
122	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
123			  __le32_to_cpu(attn->info2)) ==
124		RX_DESC_DECRYPT_STATUS_CODE_OK);
125}
126
127static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
128{
129	u32 info = __le32_to_cpu(attn->info1);
130	u32 errmap = 0;
131
132	if (info & RX_ATTENTION_INFO1_FCS_ERR)
133		errmap |= DP_RX_MPDU_ERR_FCS;
134
135	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
136		errmap |= DP_RX_MPDU_ERR_DECRYPT;
137
138	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
139		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
140
141	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
142		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
143
144	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
145		errmap |= DP_RX_MPDU_ERR_OVERFLOW;
146
147	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
148		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
149
150	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
151		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
152
153	return errmap;
154}
155
156static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
157					     struct hal_rx_desc *desc)
158{
159	struct rx_attention *rx_attention;
160	u32 errmap;
161
162	rx_attention = ath11k_dp_rx_get_attention(ab, desc);
163	errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
164
165	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
166}
167
168static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
169						     struct hal_rx_desc *desc)
170{
171	return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
172}
173
174static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
175					       struct hal_rx_desc *desc)
176{
177	return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
178}
179
180static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
181						    struct hal_rx_desc *desc)
182{
183	return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
184}
185
186static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
187						 struct hal_rx_desc *desc)
188{
189	return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
190}
191
192static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
193						 struct hal_rx_desc *desc)
194{
195	return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
196}
197
198static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
199						    struct hal_rx_desc *desc)
200{
201	return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
202}
203
204static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
205					       struct hal_rx_desc *desc)
206{
207	return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
208}
209
210static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
211					       struct hal_rx_desc *desc)
212{
213	return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
214}
215
216static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
217						    struct hal_rx_desc *desc)
218{
219	return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
220}
221
222static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
223					       struct hal_rx_desc *desc)
224{
225	return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
226}
227
228static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
229						      struct hal_rx_desc *desc)
230{
231	return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
232}
233
234static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
235					      struct hal_rx_desc *desc)
236{
237	return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
238}
239
240static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
241					   struct hal_rx_desc *fdesc,
242					   struct hal_rx_desc *ldesc)
243{
244	ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
245}
246
247static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
248{
249	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
250			 __le32_to_cpu(attn->info1));
251}
252
253static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
254						struct hal_rx_desc *rx_desc)
255{
256	u8 *rx_pkt_hdr;
257
258	rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);
259
260	return rx_pkt_hdr;
261}
262
263static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
264					       struct hal_rx_desc *rx_desc)
265{
266	u32 tlv_tag;
267
268	tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);
269
270	return tlv_tag == HAL_RX_MPDU_START;
271}
272
273static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
274					      struct hal_rx_desc *rx_desc)
275{
276	return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
277}
278
279static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
280						 struct hal_rx_desc *desc,
281						 u16 len)
282{
283	ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
284}
285
286static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
287					struct hal_rx_desc *desc)
288{
289	struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
290
291	return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
292		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
293		 __le32_to_cpu(attn->info1)));
294}
295
296static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
297					     struct hal_rx_desc *desc)
298{
299	return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
300}
301
302static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
303					     struct hal_rx_desc *desc)
304{
305	return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
306}
307
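/* Timer callback that reaps up to DP_MON_SERVICE_BUDGET entries from the
 * monitor rings of every pdev and then re-arms itself; it is set up from
 * ath11k_dp_rx_pdev_srng_alloc() when rxdma1 is not available.
 */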
308static void ath11k_dp_service_mon_ring(struct timer_list *t)
309{
310	struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
311	int i;
312
313	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
314		ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
315
316	mod_timer(&ab->mon_reap_timer, jiffies +
317		  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
318}
319
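/* Keep servicing the monitor rings until a full pass reaps fewer entries
 * than DP_MON_SERVICE_BUDGET, or give up with -ETIMEDOUT after
 * DP_MON_PURGE_TIMEOUT_MS.
 */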
320static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
321{
322	int i, reaped = 0;
323	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
324
325	do {
326		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
327			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
328								 NULL,
329								 DP_MON_SERVICE_BUDGET);
330
331		/* nothing more to reap */
332		if (reaped < DP_MON_SERVICE_BUDGET)
333			return 0;
334
335	} while (time_before(jiffies, timeout));
336
	ath11k_warn(ab, "dp mon ring purge timeout\n");
338
339	return -ETIMEDOUT;
340}
341
342/* Returns number of Rx buffers replenished */
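/* Buffers are tracked in the ring's IDR; the cookie written to each ring
 * entry encodes the pdev id and IDR buf id so the skb can be found again
 * when the buffer is returned by the hardware.
 */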
343int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
344			       struct dp_rxdma_ring *rx_ring,
345			       int req_entries,
346			       enum hal_rx_buf_return_buf_manager mgr)
347{
348	struct hal_srng *srng;
349	u32 *desc;
350	struct sk_buff *skb;
351	int num_free;
352	int num_remain;
353	int buf_id;
354	u32 cookie;
355	dma_addr_t paddr;
356
357	req_entries = min(req_entries, rx_ring->bufs_max);
358
359	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
360
361	spin_lock_bh(&srng->lock);
362
363	ath11k_hal_srng_access_begin(ab, srng);
364
365	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
366	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
367		req_entries = num_free;
368
369	req_entries = min(num_free, req_entries);
370	num_remain = req_entries;
371
372	while (num_remain > 0) {
373		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
374				    DP_RX_BUFFER_ALIGN_SIZE);
375		if (!skb)
376			break;
377
378		if (!IS_ALIGNED((unsigned long)skb->data,
379				DP_RX_BUFFER_ALIGN_SIZE)) {
380			skb_pull(skb,
381				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
382				 skb->data);
383		}
384
385		paddr = dma_map_single(ab->dev, skb->data,
386				       skb->len + skb_tailroom(skb),
387				       DMA_FROM_DEVICE);
388		if (dma_mapping_error(ab->dev, paddr))
389			goto fail_free_skb;
390
391		spin_lock_bh(&rx_ring->idr_lock);
392		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
393				   (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
394		spin_unlock_bh(&rx_ring->idr_lock);
395		if (buf_id <= 0)
396			goto fail_dma_unmap;
397
398		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
399		if (!desc)
400			goto fail_idr_remove;
401
402		ATH11K_SKB_RXCB(skb)->paddr = paddr;
403
404		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
405			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
406
407		num_remain--;
408
409		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
410	}
411
412	ath11k_hal_srng_access_end(ab, srng);
413
414	spin_unlock_bh(&srng->lock);
415
416	return req_entries - num_remain;
417
418fail_idr_remove:
419	spin_lock_bh(&rx_ring->idr_lock);
420	idr_remove(&rx_ring->bufs_idr, buf_id);
421	spin_unlock_bh(&rx_ring->idr_lock);
422fail_dma_unmap:
423	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
424			 DMA_FROM_DEVICE);
425fail_free_skb:
426	dev_kfree_skb_any(skb);
427
428	ath11k_hal_srng_access_end(ab, srng);
429
430	spin_unlock_bh(&srng->lock);
431
432	return req_entries - num_remain;
433}
434
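/* Remove every skb still tracked in the ring's IDR, unmap its DMA mapping
 * and free it, then destroy the IDR itself.
 */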
435static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
436					 struct dp_rxdma_ring *rx_ring)
437{
438	struct sk_buff *skb;
439	int buf_id;
440
441	spin_lock_bh(&rx_ring->idr_lock);
442	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
443		idr_remove(&rx_ring->bufs_idr, buf_id);
444		/* TODO: Understand where internal driver does this dma_unmap
445		 * of rxdma_buffer.
446		 */
447		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
448				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
449		dev_kfree_skb_any(skb);
450	}
451
452	idr_destroy(&rx_ring->bufs_idr);
453	spin_unlock_bh(&rx_ring->idr_lock);
454
455	return 0;
456}
457
458static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
459{
460	struct ath11k_pdev_dp *dp = &ar->dp;
461	struct ath11k_base *ab = ar->ab;
462	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
463	int i;
464
465	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
466
467	rx_ring = &dp->rxdma_mon_buf_ring;
468	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
469
470	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
471		rx_ring = &dp->rx_mon_status_refill_ring[i];
472		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
473	}
474
475	return 0;
476}
477
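/* Derive the number of buffers from the ring size and the HAL entry size of
 * the given ring type, then fully replenish the ring.
 */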
478static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
479					  struct dp_rxdma_ring *rx_ring,
480					  u32 ringtype)
481{
482	struct ath11k_pdev_dp *dp = &ar->dp;
483	int num_entries;
484
485	num_entries = rx_ring->refill_buf_ring.size /
486		ath11k_hal_srng_get_entrysize(ar->ab, ringtype);
487
488	rx_ring->bufs_max = num_entries;
489	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
490				   ar->ab->hw_params.hal_params->rx_buf_rbm);
491	return 0;
492}
493
494static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
495{
496	struct ath11k_pdev_dp *dp = &ar->dp;
497	struct ath11k_base *ab = ar->ab;
498	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
499	int i;
500
501	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
502
503	if (ar->ab->hw_params.rxdma1_enable) {
504		rx_ring = &dp->rxdma_mon_buf_ring;
505		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
506	}
507
508	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
509		rx_ring = &dp->rx_mon_status_refill_ring[i];
510		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
511	}
512
513	return 0;
514}
515
516static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
517{
518	struct ath11k_pdev_dp *dp = &ar->dp;
519	struct ath11k_base *ab = ar->ab;
520	int i;
521
522	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
523
524	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
525		if (ab->hw_params.rx_mac_buf_ring)
526			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
527
528		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
529		ath11k_dp_srng_cleanup(ab,
530				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
531	}
532
533	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
534}
535
536void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
537{
538	struct ath11k_dp *dp = &ab->dp;
539	int i;
540
541	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
542		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
543}
544
545int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
546{
547	struct ath11k_dp *dp = &ab->dp;
548	int ret;
549	int i;
550
551	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
552		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
553					   HAL_REO_DST, i, 0,
554					   DP_REO_DST_RING_SIZE);
555		if (ret) {
556			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
557			goto err_reo_cleanup;
558		}
559	}
560
561	return 0;
562
563err_reo_cleanup:
564	ath11k_dp_pdev_reo_cleanup(ab);
565
566	return ret;
567}
568
569static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
570{
571	struct ath11k_pdev_dp *dp = &ar->dp;
572	struct ath11k_base *ab = ar->ab;
573	struct dp_srng *srng = NULL;
574	int i;
575	int ret;
576
577	ret = ath11k_dp_srng_setup(ar->ab,
578				   &dp->rx_refill_buf_ring.refill_buf_ring,
579				   HAL_RXDMA_BUF, 0,
580				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
581	if (ret) {
582		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
583		return ret;
584	}
585
586	if (ar->ab->hw_params.rx_mac_buf_ring) {
587		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
588			ret = ath11k_dp_srng_setup(ar->ab,
589						   &dp->rx_mac_buf_ring[i],
590						   HAL_RXDMA_BUF, 1,
591						   dp->mac_id + i, 1024);
592			if (ret) {
593				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
594					    i);
595				return ret;
596			}
597		}
598	}
599
600	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
601		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
602					   HAL_RXDMA_DST, 0, dp->mac_id + i,
603					   DP_RXDMA_ERR_DST_RING_SIZE);
604		if (ret) {
605			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
606			return ret;
607		}
608	}
609
610	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
611		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
612		ret = ath11k_dp_srng_setup(ar->ab,
613					   srng,
614					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
615					   DP_RXDMA_MON_STATUS_RING_SIZE);
616		if (ret) {
617			ath11k_warn(ar->ab,
618				    "failed to setup rx_mon_status_refill_ring %d\n", i);
619			return ret;
620		}
621	}
622
	/* if rxdma1_enable is false, there is no need to set up
	 * rxdma_mon_buf_ring, rxdma_mon_dst_ring and rxdma_mon_desc_ring;
	 * only the monitor status reap timer is initialized (e.g. for
	 * QCA6390).
	 */
628	if (!ar->ab->hw_params.rxdma1_enable) {
		/* init mon status buffer reap timer */
630		timer_setup(&ar->ab->mon_reap_timer,
631			    ath11k_dp_service_mon_ring, 0);
632		return 0;
633	}
634
635	ret = ath11k_dp_srng_setup(ar->ab,
636				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
637				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
638				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
639	if (ret) {
640		ath11k_warn(ar->ab,
641			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
642		return ret;
643	}
644
645	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
646				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
647				   DP_RXDMA_MONITOR_DST_RING_SIZE);
648	if (ret) {
649		ath11k_warn(ar->ab,
650			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
651		return ret;
652	}
653
654	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
655				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
656				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
657	if (ret) {
658		ath11k_warn(ar->ab,
659			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
660		return ret;
661	}
662
663	return 0;
664}
665
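/* Flush both the pending REO command list and the cache flush list,
 * unmapping and freeing the rx tid queue memory each entry still holds.
 */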
666void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
667{
668	struct ath11k_dp *dp = &ab->dp;
669	struct dp_reo_cmd *cmd, *tmp;
670	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
671	struct dp_rx_tid *rx_tid;
672
673	spin_lock_bh(&dp->reo_cmd_lock);
674	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
675		list_del(&cmd->list);
676		rx_tid = &cmd->data;
677		if (rx_tid->vaddr) {
678			dma_unmap_single(ab->dev, rx_tid->paddr,
679					 rx_tid->size, DMA_BIDIRECTIONAL);
680			kfree(rx_tid->vaddr);
681			rx_tid->vaddr = NULL;
682		}
683		kfree(cmd);
684	}
685
686	list_for_each_entry_safe(cmd_cache, tmp_cache,
687				 &dp->reo_cmd_cache_flush_list, list) {
688		list_del(&cmd_cache->list);
689		dp->reo_cmd_cache_flush_count--;
690		rx_tid = &cmd_cache->data;
691		if (rx_tid->vaddr) {
692			dma_unmap_single(ab->dev, rx_tid->paddr,
693					 rx_tid->size, DMA_BIDIRECTIONAL);
694			kfree(rx_tid->vaddr);
695			rx_tid->vaddr = NULL;
696		}
697		kfree(cmd_cache);
698	}
699	spin_unlock_bh(&dp->reo_cmd_lock);
700}
701
702static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
703				   enum hal_reo_cmd_status status)
704{
705	struct dp_rx_tid *rx_tid = ctx;
706
707	if (status != HAL_REO_CMD_SUCCESS)
708		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
709			    rx_tid->tid, status);
710	if (rx_tid->vaddr) {
711		dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
712				 DMA_BIDIRECTIONAL);
713		kfree(rx_tid->vaddr);
714		rx_tid->vaddr = NULL;
715	}
716}
717
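/* Flush an rx tid queue descriptor out of the HW cache: issue one
 * FLUSH_CACHE command per qdesc-sized chunk and a final command with
 * NEED_STATUS whose completion handler (ath11k_dp_reo_cmd_free) unmaps and
 * frees the queue memory.
 */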
718static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
719				      struct dp_rx_tid *rx_tid)
720{
721	struct ath11k_hal_reo_cmd cmd = {0};
722	unsigned long tot_desc_sz, desc_sz;
723	int ret;
724
725	tot_desc_sz = rx_tid->size;
726	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
727
728	while (tot_desc_sz > desc_sz) {
729		tot_desc_sz -= desc_sz;
730		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
731		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
732		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
733						HAL_REO_CMD_FLUSH_CACHE, &cmd,
734						NULL);
735		if (ret)
736			ath11k_warn(ab,
737				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
738				    rx_tid->tid, ret);
739	}
740
741	memset(&cmd, 0, sizeof(cmd));
742	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
743	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
744	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
745	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
746					HAL_REO_CMD_FLUSH_CACHE,
747					&cmd, ath11k_dp_reo_cmd_free);
748	if (ret) {
749		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
750			   rx_tid->tid, ret);
751		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
752				 DMA_BIDIRECTIONAL);
753		kfree(rx_tid->vaddr);
754		rx_tid->vaddr = NULL;
755	}
756}
757
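/* Completion handler for the UPDATE_RX_QUEUE command issued from
 * ath11k_peer_rx_tid_delete(): on success the tid is parked on the cache
 * flush list and aged entries (or entries beyond DP_REO_DESC_FREE_THRESHOLD)
 * are flushed; on HAL_REO_CMD_DRAIN or allocation failure the queue memory
 * is freed right away.
 */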
758static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
759				      enum hal_reo_cmd_status status)
760{
761	struct ath11k_base *ab = dp->ab;
762	struct dp_rx_tid *rx_tid = ctx;
763	struct dp_reo_cache_flush_elem *elem, *tmp;
764
765	if (status == HAL_REO_CMD_DRAIN) {
766		goto free_desc;
767	} else if (status != HAL_REO_CMD_SUCCESS) {
768		/* Shouldn't happen! Cleanup in case of other failure? */
769		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
770			    rx_tid->tid, status);
771		return;
772	}
773
774	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
775	if (!elem)
776		goto free_desc;
777
778	elem->ts = jiffies;
779	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
780
781	spin_lock_bh(&dp->reo_cmd_lock);
782	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
783	dp->reo_cmd_cache_flush_count++;
784
785	/* Flush and invalidate aged REO desc from HW cache */
786	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
787				 list) {
788		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
789		    time_after(jiffies, elem->ts +
790			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
791			list_del(&elem->list);
792			dp->reo_cmd_cache_flush_count--;
793			spin_unlock_bh(&dp->reo_cmd_lock);
794
795			ath11k_dp_reo_cache_flush(ab, &elem->data);
796			kfree(elem);
797			spin_lock_bh(&dp->reo_cmd_lock);
798		}
799	}
800	spin_unlock_bh(&dp->reo_cmd_lock);
801
802	return;
803free_desc:
804	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
805			 DMA_BIDIRECTIONAL);
806	kfree(rx_tid->vaddr);
807	rx_tid->vaddr = NULL;
808}
809
810void ath11k_peer_rx_tid_delete(struct ath11k *ar,
811			       struct ath11k_peer *peer, u8 tid)
812{
813	struct ath11k_hal_reo_cmd cmd = {0};
814	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
815	int ret;
816
817	if (!rx_tid->active)
818		return;
819
820	rx_tid->active = false;
821
822	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
823	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
824	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
825	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
826	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
827					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
828					ath11k_dp_rx_tid_del_func);
829	if (ret) {
830		if (ret != -ESHUTDOWN)
831			ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
832				   tid, ret);
833		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
834				 DMA_BIDIRECTIONAL);
835		kfree(rx_tid->vaddr);
836		rx_tid->vaddr = NULL;
837	}
838
839	rx_tid->paddr = 0;
840	rx_tid->size = 0;
841}
842
843static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
844					 u32 *link_desc,
845					 enum hal_wbm_rel_bm_act action)
846{
847	struct ath11k_dp *dp = &ab->dp;
848	struct hal_srng *srng;
849	u32 *desc;
850	int ret = 0;
851
852	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
853
854	spin_lock_bh(&srng->lock);
855
856	ath11k_hal_srng_access_begin(ab, srng);
857
858	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
859	if (!desc) {
860		ret = -ENOBUFS;
861		goto exit;
862	}
863
864	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
865					 action);
866
867exit:
868	ath11k_hal_srng_access_end(ab, srng);
869
870	spin_unlock_bh(&srng->lock);
871
872	return ret;
873}
874
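/* Reset the fragment reassembly state of a tid: optionally return its link
 * descriptor to the WBM idle list, free the cached destination ring
 * descriptor and drop any queued fragments.
 */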
875static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
876{
877	struct ath11k_base *ab = rx_tid->ab;
878
879	lockdep_assert_held(&ab->base_lock);
880
881	if (rx_tid->dst_ring_desc) {
882		if (rel_link_desc)
883			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
884						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
885		kfree(rx_tid->dst_ring_desc);
886		rx_tid->dst_ring_desc = NULL;
887	}
888
889	rx_tid->cur_sn = 0;
890	rx_tid->last_frag_no = 0;
891	rx_tid->rx_frag_bitmap = 0;
892	__skb_queue_purge(&rx_tid->rx_frags);
893}
894
895void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
896{
897	struct dp_rx_tid *rx_tid;
898	int i;
899
900	lockdep_assert_held(&ar->ab->base_lock);
901
902	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
903		rx_tid = &peer->rx_tid[i];
904
905		spin_unlock_bh(&ar->ab->base_lock);
906		del_timer_sync(&rx_tid->frag_timer);
907		spin_lock_bh(&ar->ab->base_lock);
908
909		ath11k_dp_rx_frags_cleanup(rx_tid, true);
910	}
911}
912
913void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
914{
915	struct dp_rx_tid *rx_tid;
916	int i;
917
918	lockdep_assert_held(&ar->ab->base_lock);
919
920	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
921		rx_tid = &peer->rx_tid[i];
922
923		ath11k_peer_rx_tid_delete(ar, peer, i);
924		ath11k_dp_rx_frags_cleanup(rx_tid, true);
925
926		spin_unlock_bh(&ar->ab->base_lock);
927		del_timer_sync(&rx_tid->frag_timer);
928		spin_lock_bh(&ar->ab->base_lock);
929	}
930}
931
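/* Update the BA window size (and optionally the SSN) of an already active
 * rx tid HW queue through a REO UPDATE_RX_QUEUE command.
 */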
932static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
933					 struct ath11k_peer *peer,
934					 struct dp_rx_tid *rx_tid,
935					 u32 ba_win_sz, u16 ssn,
936					 bool update_ssn)
937{
938	struct ath11k_hal_reo_cmd cmd = {0};
939	int ret;
940
941	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
942	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
943	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
944	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
945	cmd.ba_window_size = ba_win_sz;
946
947	if (update_ssn) {
948		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
949		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
950	}
951
952	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
953					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
954					NULL);
955	if (ret) {
956		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
957			    rx_tid->tid, ret);
958		return ret;
959	}
960
961	rx_tid->ba_win_sz = ba_win_sz;
962
963	return 0;
964}
965
966static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
967				      const u8 *peer_mac, int vdev_id, u8 tid)
968{
969	struct ath11k_peer *peer;
970	struct dp_rx_tid *rx_tid;
971
972	spin_lock_bh(&ab->base_lock);
973
974	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
975	if (!peer) {
976		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
977		goto unlock_exit;
978	}
979
980	rx_tid = &peer->rx_tid[tid];
981	if (!rx_tid->active)
982		goto unlock_exit;
983
984	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
985			 DMA_BIDIRECTIONAL);
986	kfree(rx_tid->vaddr);
987	rx_tid->vaddr = NULL;
988
989	rx_tid->active = false;
990
991unlock_exit:
992	spin_unlock_bh(&ab->base_lock);
993}
994
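/* Set up the HW REO queue for a peer/tid: if the queue is already active
 * only the BA window and SSN are updated, otherwise the queue descriptor is
 * allocated, DMA-mapped and announced to firmware via the WMI rx reorder
 * queue setup command.
 */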
995int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
996			     u8 tid, u32 ba_win_sz, u16 ssn,
997			     enum hal_pn_type pn_type)
998{
999	struct ath11k_base *ab = ar->ab;
1000	struct ath11k_peer *peer;
1001	struct dp_rx_tid *rx_tid;
1002	u32 hw_desc_sz;
1003	u32 *addr_aligned;
1004	void *vaddr;
1005	dma_addr_t paddr;
1006	int ret;
1007
1008	spin_lock_bh(&ab->base_lock);
1009
1010	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
1011	if (!peer) {
1012		ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
1013			    peer_mac);
1014		spin_unlock_bh(&ab->base_lock);
1015		return -ENOENT;
1016	}
1017
1018	rx_tid = &peer->rx_tid[tid];
1019	/* Update the tid queue if it is already setup */
1020	if (rx_tid->active) {
1021		paddr = rx_tid->paddr;
1022		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
1023						    ba_win_sz, ssn, true);
1024		spin_unlock_bh(&ab->base_lock);
1025		if (ret) {
			ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d: %d\n",
				    peer_mac, tid, ret);
1028			return ret;
1029		}
1030
1031		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
1032							     peer_mac, paddr,
1033							     tid, 1, ba_win_sz);
1034		if (ret)
1035			ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
1036				    peer_mac, tid, ret);
1037		return ret;
1038	}
1039
1040	rx_tid->tid = tid;
1041
1042	rx_tid->ba_win_sz = ba_win_sz;
1043
1044	/* TODO: Optimize the memory allocation for qos tid based on
1045	 * the actual BA window size in REO tid update path.
1046	 */
1047	if (tid == HAL_DESC_REO_NON_QOS_TID)
1048		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
1049	else
1050		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
1051
1052	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
1053	if (!vaddr) {
1054		spin_unlock_bh(&ab->base_lock);
1055		return -ENOMEM;
1056	}
1057
1058	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
1059
1060	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
1061				   ssn, pn_type);
1062
1063	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
1064			       DMA_BIDIRECTIONAL);
1065
1066	ret = dma_mapping_error(ab->dev, paddr);
1067	if (ret) {
1068		spin_unlock_bh(&ab->base_lock);
1069		ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
1070			    peer_mac, tid, ret);
1071		goto err_mem_free;
1072	}
1073
1074	rx_tid->vaddr = vaddr;
1075	rx_tid->paddr = paddr;
1076	rx_tid->size = hw_desc_sz;
1077	rx_tid->active = true;
1078
1079	spin_unlock_bh(&ab->base_lock);
1080
1081	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
1082						     paddr, tid, 1, ba_win_sz);
1083	if (ret) {
1084		ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
1085			    peer_mac, tid, ret);
1086		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
1087	}
1088
1089	return ret;
1090
1091err_mem_free:
1092	kfree(rx_tid->vaddr);
1093	rx_tid->vaddr = NULL;
1094
1095	return ret;
1096}
1097
1098int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
1099			     struct ieee80211_ampdu_params *params)
1100{
1101	struct ath11k_base *ab = ar->ab;
1102	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
1103	int vdev_id = arsta->arvif->vdev_id;
1104	int ret;
1105
1106	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
1107				       params->tid, params->buf_size,
1108				       params->ssn, arsta->pn_type);
1109	if (ret)
		ath11k_warn(ab, "failed to setup rx tid %d: %d\n",
			    params->tid, ret);
1111
1112	return ret;
1113}
1114
1115int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
1116			    struct ieee80211_ampdu_params *params)
1117{
1118	struct ath11k_base *ab = ar->ab;
1119	struct ath11k_peer *peer;
1120	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
1121	int vdev_id = arsta->arvif->vdev_id;
1122	dma_addr_t paddr;
1123	bool active;
1124	int ret;
1125
1126	spin_lock_bh(&ab->base_lock);
1127
1128	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
1129	if (!peer) {
1130		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
1131		spin_unlock_bh(&ab->base_lock);
1132		return -ENOENT;
1133	}
1134
1135	paddr = peer->rx_tid[params->tid].paddr;
1136	active = peer->rx_tid[params->tid].active;
1137
1138	if (!active) {
1139		spin_unlock_bh(&ab->base_lock);
1140		return 0;
1141	}
1142
1143	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
1144	spin_unlock_bh(&ab->base_lock);
1145	if (ret) {
1146		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
1147			    params->tid, ret);
1148		return ret;
1149	}
1150
1151	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
1152						     params->sta->addr, paddr,
1153						     params->tid, 1, 1);
1154	if (ret)
		ath11k_warn(ab, "failed to send wmi to delete rx tid %d: %d\n",
			    params->tid, ret);
1157
1158	return ret;
1159}
1160
1161int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
1162				       const u8 *peer_addr,
1163				       enum set_key_cmd key_cmd,
1164				       struct ieee80211_key_conf *key)
1165{
1166	struct ath11k *ar = arvif->ar;
1167	struct ath11k_base *ab = ar->ab;
1168	struct ath11k_hal_reo_cmd cmd = {0};
1169	struct ath11k_peer *peer;
1170	struct dp_rx_tid *rx_tid;
1171	u8 tid;
1172	int ret = 0;
1173
1174	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
1175	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
1176	 * for now.
1177	 */
1178	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1179		return 0;
1180
1181	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
1182	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
1183		    HAL_REO_CMD_UPD0_PN_SIZE |
1184		    HAL_REO_CMD_UPD0_PN_VALID |
1185		    HAL_REO_CMD_UPD0_PN_CHECK |
1186		    HAL_REO_CMD_UPD0_SVLD;
1187
1188	switch (key->cipher) {
1189	case WLAN_CIPHER_SUITE_TKIP:
1190	case WLAN_CIPHER_SUITE_CCMP:
1191	case WLAN_CIPHER_SUITE_CCMP_256:
1192	case WLAN_CIPHER_SUITE_GCMP:
1193	case WLAN_CIPHER_SUITE_GCMP_256:
1194		if (key_cmd == SET_KEY) {
1195			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
1196			cmd.pn_size = 48;
1197		}
1198		break;
1199	default:
1200		break;
1201	}
1202
1203	spin_lock_bh(&ab->base_lock);
1204
1205	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
1206	if (!peer) {
1207		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
1208		spin_unlock_bh(&ab->base_lock);
1209		return -ENOENT;
1210	}
1211
1212	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
1213		rx_tid = &peer->rx_tid[tid];
1214		if (!rx_tid->active)
1215			continue;
1216		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
1217		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
1218		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
1219						HAL_REO_CMD_UPDATE_RX_QUEUE,
1220						&cmd, NULL);
1221		if (ret) {
1222			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
1223				    tid, ret);
1224			break;
1225		}
1226	}
1227
1228	spin_unlock_bh(&ab->base_lock);
1229
1230	return ret;
1231}
1232
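/* Return the user_stats slot already assigned to this peer_id, or the first
 * unused slot; -EINVAL if every slot is taken by other peers.
 */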
1233static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
1234					     u16 peer_id)
1235{
1236	int i;
1237
1238	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
1239		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
1240			if (peer_id == ppdu_stats->user_stats[i].peer_id)
1241				return i;
1242		} else {
1243			return i;
1244		}
1245	}
1246
1247	return -EINVAL;
1248}
1249
1250static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
1251					   u16 tag, u16 len, const void *ptr,
1252					   void *data)
1253{
1254	struct htt_ppdu_stats_info *ppdu_info;
1255	struct htt_ppdu_user_stats *user_stats;
1256	int cur_user;
1257	u16 peer_id;
1258
1259	ppdu_info = (struct htt_ppdu_stats_info *)data;
1260
1261	switch (tag) {
1262	case HTT_PPDU_STATS_TAG_COMMON:
1263		if (len < sizeof(struct htt_ppdu_stats_common)) {
1264			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1265				    len, tag);
1266			return -EINVAL;
1267		}
1268		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
1269		       sizeof(struct htt_ppdu_stats_common));
1270		break;
1271	case HTT_PPDU_STATS_TAG_USR_RATE:
1272		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
1273			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1274				    len, tag);
1275			return -EINVAL;
1276		}
1277
1278		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
1279		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1280						      peer_id);
1281		if (cur_user < 0)
1282			return -EINVAL;
1283		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1284		user_stats->peer_id = peer_id;
1285		user_stats->is_valid_peer_id = true;
1286		memcpy((void *)&user_stats->rate, ptr,
1287		       sizeof(struct htt_ppdu_stats_user_rate));
1288		user_stats->tlv_flags |= BIT(tag);
1289		break;
1290	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
1291		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
1292			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1293				    len, tag);
1294			return -EINVAL;
1295		}
1296
1297		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
1298		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1299						      peer_id);
1300		if (cur_user < 0)
1301			return -EINVAL;
1302		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1303		user_stats->peer_id = peer_id;
1304		user_stats->is_valid_peer_id = true;
1305		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
1306		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
1307		user_stats->tlv_flags |= BIT(tag);
1308		break;
1309	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
1310		if (len <
1311		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
1312			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1313				    len, tag);
1314			return -EINVAL;
1315		}
1316
1317		peer_id =
1318		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
1319		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1320						      peer_id);
1321		if (cur_user < 0)
1322			return -EINVAL;
1323		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1324		user_stats->peer_id = peer_id;
1325		user_stats->is_valid_peer_id = true;
1326		memcpy((void *)&user_stats->ack_ba, ptr,
1327		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
1328		user_stats->tlv_flags |= BIT(tag);
1329		break;
1330	}
1331	return 0;
1332}
1333
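/* Walk a buffer of HTT TLVs, validating each header and payload length
 * before handing the payload to @iter; only -ENOMEM from the callback
 * aborts the walk early.
 */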
1334int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
1335			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
1336				       const void *ptr, void *data),
1337			   void *data)
1338{
1339	const struct htt_tlv *tlv;
1340	const void *begin = ptr;
1341	u16 tlv_tag, tlv_len;
1342	int ret = -EINVAL;
1343
1344	while (len > 0) {
1345		if (len < sizeof(*tlv)) {
1346			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
1347				   ptr - begin, len, sizeof(*tlv));
1348			return -EINVAL;
1349		}
1350		tlv = (struct htt_tlv *)ptr;
1351		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
1352		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
1353		ptr += sizeof(*tlv);
1354		len -= sizeof(*tlv);
1355
1356		if (tlv_len > len) {
1357			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
1358				   tlv_tag, ptr - begin, len, tlv_len);
1359			return -EINVAL;
1360		}
1361		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
1362		if (ret == -ENOMEM)
1363			return ret;
1364
1365		ptr += tlv_len;
1366		len -= tlv_len;
1367	}
1368	return 0;
1369}
1370
1371static void
1372ath11k_update_per_peer_tx_stats(struct ath11k *ar,
1373				struct htt_ppdu_stats *ppdu_stats, u8 user)
1374{
1375	struct ath11k_base *ab = ar->ab;
1376	struct ath11k_peer *peer;
1377	struct ieee80211_sta *sta;
1378	struct ath11k_sta *arsta;
1379	struct htt_ppdu_stats_user_rate *user_rate;
1380	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
1381	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
1382	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
1383	int ret;
1384	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
1385	u32 succ_bytes = 0;
1386	u16 rate = 0, succ_pkts = 0;
1387	u32 tx_duration = 0;
1388	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
1389	bool is_ampdu = false;
1390
1391	if (!usr_stats)
1392		return;
1393
1394	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
1395		return;
1396
1397	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
1398		is_ampdu =
1399			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
1400
1401	if (usr_stats->tlv_flags &
1402	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
1403		succ_bytes = usr_stats->ack_ba.success_bytes;
1404		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
1405				      usr_stats->ack_ba.info);
1406		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
1407				usr_stats->ack_ba.info);
1408	}
1409
1410	if (common->fes_duration_us)
1411		tx_duration = common->fes_duration_us;
1412
1413	user_rate = &usr_stats->rate;
1414	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
1415	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
1416	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
1417	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
1418	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
1419	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
1420
	/* Note: If the host configured fixed rates, or in some other special
	 * cases, broadcast/management frames are sent at rates different from
	 * the rate reported here. Should firmware rate control be skipped for
	 * those frames?
	 */
1425
1426	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
1427		ath11k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
1428		return;
1429	}
1430
1431	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
1432		ath11k_warn(ab, "Invalid VHT mcs %d peer stats",  mcs);
1433		return;
1434	}
1435
1436	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
1437		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
1438			    mcs, nss);
1439		return;
1440	}
1441
1442	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1443		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
1444							    flags,
1445							    &rate_idx,
1446							    &rate);
1447		if (ret < 0)
1448			return;
1449	}
1450
1451	rcu_read_lock();
1452	spin_lock_bh(&ab->base_lock);
1453	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
1454
1455	if (!peer || !peer->sta) {
1456		spin_unlock_bh(&ab->base_lock);
1457		rcu_read_unlock();
1458		return;
1459	}
1460
1461	sta = peer->sta;
1462	arsta = (struct ath11k_sta *)sta->drv_priv;
1463
1464	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1465
1466	switch (flags) {
1467	case WMI_RATE_PREAMBLE_OFDM:
1468		arsta->txrate.legacy = rate;
1469		break;
1470	case WMI_RATE_PREAMBLE_CCK:
1471		arsta->txrate.legacy = rate;
1472		break;
1473	case WMI_RATE_PREAMBLE_HT:
1474		arsta->txrate.mcs = mcs + 8 * (nss - 1);
1475		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1476		if (sgi)
1477			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1478		break;
1479	case WMI_RATE_PREAMBLE_VHT:
1480		arsta->txrate.mcs = mcs;
1481		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1482		if (sgi)
1483			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1484		break;
1485	case WMI_RATE_PREAMBLE_HE:
1486		arsta->txrate.mcs = mcs;
1487		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1488		arsta->txrate.he_dcm = dcm;
1489		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
1490		arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
1491						((user_rate->ru_end -
1492						 user_rate->ru_start) + 1);
1493		break;
1494	}
1495
1496	arsta->txrate.nss = nss;
1497
1498	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
1499	arsta->tx_duration += tx_duration;
1500	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1501
	/* PPDU stats reported for mgmt packets don't carry valid tx bytes,
	 * so skip the peer stats update for them.
	 */
1505	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1506		memset(peer_stats, 0, sizeof(*peer_stats));
1507		peer_stats->succ_pkts = succ_pkts;
1508		peer_stats->succ_bytes = succ_bytes;
1509		peer_stats->is_ampdu = is_ampdu;
1510		peer_stats->duration = tx_duration;
1511		peer_stats->ba_fails =
1512			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1513			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1514
1515		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
1516			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
1517	}
1518
1519	spin_unlock_bh(&ab->base_lock);
1520	rcu_read_unlock();
1521}
1522
1523static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
1524					 struct htt_ppdu_stats *ppdu_stats)
1525{
1526	u8 user;
1527
1528	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1529		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1530}
1531
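/* Find the ppdu_stats_info entry for this ppdu_id or allocate a new one;
 * when the list depth exceeds HTT_PPDU_DESC_MAX_DEPTH the oldest entry is
 * accounted into the per-peer tx stats and freed first.
 */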
1532static
1533struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
1534							u32 ppdu_id)
1535{
1536	struct htt_ppdu_stats_info *ppdu_info;
1537
1538	lockdep_assert_held(&ar->data_lock);
1539
1540	if (!list_empty(&ar->ppdu_stats_info)) {
1541		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1542			if (ppdu_info->ppdu_id == ppdu_id)
1543				return ppdu_info;
1544		}
1545
1546		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1547			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1548						     typeof(*ppdu_info), list);
1549			list_del(&ppdu_info->list);
1550			ar->ppdu_stat_list_depth--;
1551			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1552			kfree(ppdu_info);
1553		}
1554	}
1555
1556	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
1557	if (!ppdu_info)
1558		return NULL;
1559
1560	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1561	ar->ppdu_stat_list_depth++;
1562
1563	return ppdu_info;
1564}
1565
1566static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
1567				      struct sk_buff *skb)
1568{
1569	struct ath11k_htt_ppdu_stats_msg *msg;
1570	struct htt_ppdu_stats_info *ppdu_info;
1571	struct ath11k *ar;
1572	int ret;
1573	u8 pdev_id;
1574	u32 ppdu_id, len;
1575
1576	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
1577	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
1578	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
1579	ppdu_id = msg->ppdu_id;
1580
1581	rcu_read_lock();
1582	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1583	if (!ar) {
1584		ret = -EINVAL;
1585		goto out;
1586	}
1587
1588	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
1589		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
1590
1591	spin_lock_bh(&ar->data_lock);
1592	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1593	if (!ppdu_info) {
1594		ret = -EINVAL;
1595		goto out_unlock_data;
1596	}
1597
1598	ppdu_info->ppdu_id = ppdu_id;
1599	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
1600				     ath11k_htt_tlv_ppdu_stats_parse,
1601				     (void *)ppdu_info);
1602	if (ret) {
1603		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
1604		goto out_unlock_data;
1605	}
1606
1607out_unlock_data:
1608	spin_unlock_bh(&ar->data_lock);
1609
1610out:
1611	rcu_read_unlock();
1612
1613	return ret;
1614}
1615
1616static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
1617{
1618	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
1619	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
1620	struct ath11k *ar;
1621	u8 pdev_id;
1622
1623	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
1624
1625	rcu_read_lock();
1626
1627	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1628	if (!ar) {
1629		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
1630		goto out;
1631	}
1632
1633	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
1634				ar->ab->pktlog_defs_checksum);
1635
1636out:
1637	rcu_read_unlock();
1638}
1639
1640static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
1641						  struct sk_buff *skb)
1642{
1643	u32 *data = (u32 *)skb->data;
1644	u8 pdev_id, ring_type, ring_id, pdev_idx;
1645	u16 hp, tp;
1646	u32 backpressure_time;
1647	struct ath11k_bp_stats *bp_stats;
1648
1649	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
1650	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
1651	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
1652	++data;
1653
1654	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
1655	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
1656	++data;
1657
1658	backpressure_time = *data;
1659
	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d, ring id %d, hp %d, tp %d, backpressure time %d\n",
1661		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
1662
1663	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
1664		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
1665			return;
1666
1667		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
1668	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
1669		pdev_idx = DP_HW2SW_MACID(pdev_id);
1670
1671		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
1672			return;
1673
1674		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
1675	} else {
1676		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
1677			    ring_type);
1678		return;
1679	}
1680
1681	spin_lock_bh(&ab->base_lock);
1682	bp_stats->hp = hp;
1683	bp_stats->tp = tp;
1684	bp_stats->count++;
1685	bp_stats->jiffies = jiffies;
1686	spin_unlock_bh(&ab->base_lock);
1687}
1688
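/* Dispatch an HTT target-to-host message (version, peer map/unmap, PPDU
 * stats, pktlog, backpressure, ...) and free the skb when done.
 */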
1689void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
1690				       struct sk_buff *skb)
1691{
1692	struct ath11k_dp *dp = &ab->dp;
1693	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1694	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
1695	u16 peer_id;
1696	u8 vdev_id;
1697	u8 mac_addr[ETH_ALEN];
1698	u16 peer_mac_h16;
1699	u16 ast_hash;
1700	u16 hw_peer_id;
1701
	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type: 0x%0x\n", type);
1703
1704	switch (type) {
1705	case HTT_T2H_MSG_TYPE_VERSION_CONF:
1706		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
1707						  resp->version_msg.version);
1708		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
1709						  resp->version_msg.version);
1710		complete(&dp->htt_tgt_version_received);
1711		break;
1712	case HTT_T2H_MSG_TYPE_PEER_MAP:
1713		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1714				    resp->peer_map_ev.info);
1715		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1716				    resp->peer_map_ev.info);
1717		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1718					 resp->peer_map_ev.info1);
1719		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1720				       peer_mac_h16, mac_addr);
1721		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1722		break;
1723	case HTT_T2H_MSG_TYPE_PEER_MAP2:
1724		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1725				    resp->peer_map_ev.info);
1726		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1727				    resp->peer_map_ev.info);
1728		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1729					 resp->peer_map_ev.info1);
1730		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1731				       peer_mac_h16, mac_addr);
1732		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
1733				     resp->peer_map_ev.info2);
1734		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
1735				       resp->peer_map_ev.info1);
1736		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1737				      hw_peer_id);
1738		break;
1739	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1740	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1741		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
1742				    resp->peer_unmap_ev.info);
1743		ath11k_peer_unmap_event(ab, peer_id);
1744		break;
1745	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1746		ath11k_htt_pull_ppdu_stats(ab, skb);
1747		break;
1748	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1749		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
1750		break;
1751	case HTT_T2H_MSG_TYPE_PKTLOG:
1752		ath11k_htt_pktlog(ab, skb);
1753		break;
1754	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
1755		ath11k_htt_backpressure_event_handler(ab, skb);
1756		break;
1757	default:
1758		ath11k_warn(ab, "htt event %d not handled\n", type);
1759		break;
1760	}
1761
1762	dev_kfree_skb_any(skb);
1763}
1764
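/* Coalesce an MSDU that RXDMA spread over several rx buffers back into the
 * first skb: strip the HAL descriptor and L3 padding from the head, copy
 * the end TLVs from the last buffer and append the remaining fragments from
 * msdu_list.
 */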
1765static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
1766				      struct sk_buff_head *msdu_list,
1767				      struct sk_buff *first, struct sk_buff *last,
1768				      u8 l3pad_bytes, int msdu_len)
1769{
1770	struct ath11k_base *ab = ar->ab;
1771	struct sk_buff *skb;
1772	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1773	int buf_first_hdr_len, buf_first_len;
1774	struct hal_rx_desc *ldesc;
1775	int space_extra, rem_len, buf_len;
1776	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
1777
1778	/* As the msdu is spread across multiple rx buffers,
1779	 * find the offset to the start of msdu for computing
1780	 * the length of the msdu in the first buffer.
1781	 */
1782	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
1783	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1784
1785	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1786		skb_put(first, buf_first_hdr_len + msdu_len);
1787		skb_pull(first, buf_first_hdr_len);
1788		return 0;
1789	}
1790
1791	ldesc = (struct hal_rx_desc *)last->data;
1792	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
1793	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);
1794
	/* The MSDU spans multiple buffers because its length exceeds
	 * DP_RX_BUFFER_SIZE - hal_rx_desc_sz. So assume the data in the
	 * first buffer is of length DP_RX_BUFFER_SIZE - hal_rx_desc_sz.
	 */
1799	skb_put(first, DP_RX_BUFFER_SIZE);
1800	skb_pull(first, buf_first_hdr_len);
1801
	/* When an MSDU is spread over multiple buffers, the attention,
	 * MSDU_END and MPDU_END TLVs are valid only in the last buffer.
	 * Copy those TLVs.
	 */
1805	ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1806
1807	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1808	if (space_extra > 0 &&
1809	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1810		/* Free up all buffers of the MSDU */
1811		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1812			rxcb = ATH11K_SKB_RXCB(skb);
1813			if (!rxcb->is_continuation) {
1814				dev_kfree_skb_any(skb);
1815				break;
1816			}
1817			dev_kfree_skb_any(skb);
1818		}
1819		return -ENOMEM;
1820	}
1821
1822	rem_len = msdu_len - buf_first_len;
1823	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1824		rxcb = ATH11K_SKB_RXCB(skb);
1825		if (rxcb->is_continuation)
1826			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1827		else
1828			buf_len = rem_len;
1829
1830		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1831			WARN_ON_ONCE(1);
1832			dev_kfree_skb_any(skb);
1833			return -EINVAL;
1834		}
1835
1836		skb_put(skb, buf_len + hal_rx_desc_sz);
1837		skb_pull(skb, hal_rx_desc_sz);
1838		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1839					  buf_len);
1840		dev_kfree_skb_any(skb);
1841
1842		rem_len -= buf_len;
1843		if (!rxcb->is_continuation)
1844			break;
1845	}
1846
1847	return 0;
1848}
1849
1850static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1851						      struct sk_buff *first)
1852{
1853	struct sk_buff *skb;
1854	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1855
1856	if (!rxcb->is_continuation)
1857		return first;
1858
1859	skb_queue_walk(msdu_list, skb) {
1860		rxcb = ATH11K_SKB_RXCB(skb);
1861		if (!rxcb->is_continuation)
1862			return skb;
1863	}
1864
1865	return NULL;
1866}
1867
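/* Use the IP and TCP/UDP checksum-fail bits from the rx attention TLV to
 * decide whether the skb can be marked CHECKSUM_UNNECESSARY.
 */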
1868static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
1869{
1870	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1871	struct rx_attention *rx_attention;
1872	bool ip_csum_fail, l4_csum_fail;
1873
1874	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
1875	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
1876	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
1877
1878	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1879			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1880}
1881
1882static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1883				       enum hal_encrypt_type enctype)
1884{
1885	switch (enctype) {
1886	case HAL_ENCRYPT_TYPE_OPEN:
1887	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1888	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1889		return 0;
1890	case HAL_ENCRYPT_TYPE_CCMP_128:
1891		return IEEE80211_CCMP_MIC_LEN;
1892	case HAL_ENCRYPT_TYPE_CCMP_256:
1893		return IEEE80211_CCMP_256_MIC_LEN;
1894	case HAL_ENCRYPT_TYPE_GCMP_128:
1895	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1896		return IEEE80211_GCMP_MIC_LEN;
1897	case HAL_ENCRYPT_TYPE_WEP_40:
1898	case HAL_ENCRYPT_TYPE_WEP_104:
1899	case HAL_ENCRYPT_TYPE_WEP_128:
1900	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1901	case HAL_ENCRYPT_TYPE_WAPI:
1902		break;
1903	}
1904
1905	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1906	return 0;
1907}
1908
1909static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1910					 enum hal_encrypt_type enctype)
1911{
1912	switch (enctype) {
1913	case HAL_ENCRYPT_TYPE_OPEN:
1914		return 0;
1915	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1916	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1917		return IEEE80211_TKIP_IV_LEN;
1918	case HAL_ENCRYPT_TYPE_CCMP_128:
1919		return IEEE80211_CCMP_HDR_LEN;
1920	case HAL_ENCRYPT_TYPE_CCMP_256:
1921		return IEEE80211_CCMP_256_HDR_LEN;
1922	case HAL_ENCRYPT_TYPE_GCMP_128:
1923	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1924		return IEEE80211_GCMP_HDR_LEN;
1925	case HAL_ENCRYPT_TYPE_WEP_40:
1926	case HAL_ENCRYPT_TYPE_WEP_104:
1927	case HAL_ENCRYPT_TYPE_WEP_128:
1928	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1929	case HAL_ENCRYPT_TYPE_WAPI:
1930		break;
1931	}
1932
1933	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1934	return 0;
1935}
1936
1937static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1938				       enum hal_encrypt_type enctype)
1939{
1940	switch (enctype) {
1941	case HAL_ENCRYPT_TYPE_OPEN:
1942	case HAL_ENCRYPT_TYPE_CCMP_128:
1943	case HAL_ENCRYPT_TYPE_CCMP_256:
1944	case HAL_ENCRYPT_TYPE_GCMP_128:
1945	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1946		return 0;
1947	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1948	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1949		return IEEE80211_TKIP_ICV_LEN;
1950	case HAL_ENCRYPT_TYPE_WEP_40:
1951	case HAL_ENCRYPT_TYPE_WEP_104:
1952	case HAL_ENCRYPT_TYPE_WEP_128:
1953	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1954	case HAL_ENCRYPT_TYPE_WAPI:
1955		break;
1956	}
1957
1958	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1959	return 0;
1960}
1961
1962static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
1963					 struct sk_buff *msdu,
1964					 u8 *first_hdr,
1965					 enum hal_encrypt_type enctype,
1966					 struct ieee80211_rx_status *status)
1967{
1968	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1969	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1970	struct ieee80211_hdr *hdr;
1971	size_t hdr_len;
1972	u8 da[ETH_ALEN];
1973	u8 sa[ETH_ALEN];
1974	u16 qos_ctl = 0;
1975	u8 *qos;
1976
1977	/* copy SA & DA and pull decapped header */
1978	hdr = (struct ieee80211_hdr *)msdu->data;
1979	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1980	ether_addr_copy(da, ieee80211_get_DA(hdr));
1981	ether_addr_copy(sa, ieee80211_get_SA(hdr));
1982	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
1983
1984	if (rxcb->is_first_msdu) {
		/* The original 802.11 header is valid for the first MSDU,
		 * hence we can reuse the same header.
		 */
1988		hdr = (struct ieee80211_hdr *)first_hdr;
1989		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1990
1991		/* Each A-MSDU subframe will be reported as a separate MSDU,
1992		 * so strip the A-MSDU bit from QoS Ctl.
1993		 */
1994		if (ieee80211_is_data_qos(hdr->frame_control)) {
1995			qos = ieee80211_get_qos_ctl(hdr);
1996			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1997		}
1998	} else {
		/* Rebuild the QoS header if this is a middle/last MSDU */
2000		hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
2001
2002		/* Reset the order bit as the HT_Control header is stripped */
2003		hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
2004
2005		qos_ctl = rxcb->tid;
2006
2007		if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
2008			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2009
2010		/* TODO Add other QoS ctl fields when required */
2011
2012		/* copy decap header before overwriting for reuse below */
2013		memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
2014	}
2015
2016	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
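		/* The IV was not stripped by HW; copy the crypto params from
		 * just after the original 802.11 header back in front of the
		 * payload.
		 */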
2017		memcpy(skb_push(msdu,
2018				ath11k_dp_rx_crypto_param_len(ar, enctype)),
2019		       (void *)hdr + hdr_len,
2020		       ath11k_dp_rx_crypto_param_len(ar, enctype));
2021	}
2022
2023	if (!rxcb->is_first_msdu) {
2024		memcpy(skb_push(msdu,
2025				IEEE80211_QOS_CTL_LEN), &qos_ctl,
2026				IEEE80211_QOS_CTL_LEN);
2027		memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
2028		return;
2029	}
2030
2031	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2032
	/* The original 802.11 header has a different DA and, in case of
	 * 4-address frames, may also have a different SA.
	 */
2036	hdr = (struct ieee80211_hdr *)msdu->data;
2037	ether_addr_copy(ieee80211_get_DA(hdr), da);
2038	ether_addr_copy(ieee80211_get_SA(hdr), sa);
2039}
2040
2041static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
2042				       enum hal_encrypt_type enctype,
2043				       struct ieee80211_rx_status *status,
2044				       bool decrypted)
2045{
2046	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2047	struct ieee80211_hdr *hdr;
2048	size_t hdr_len;
2049	size_t crypto_len;
2050
2051	if (!rxcb->is_first_msdu ||
2052	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
2053		WARN_ON_ONCE(1);
2054		return;
2055	}
2056
2057	skb_trim(msdu, msdu->len - FCS_LEN);
2058
2059	if (!decrypted)
2060		return;
2061
2062	hdr = (void *)msdu->data;
2063
2064	/* Tail */
2065	if (status->flag & RX_FLAG_IV_STRIPPED) {
2066		skb_trim(msdu, msdu->len -
2067			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2068
2069		skb_trim(msdu, msdu->len -
2070			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2071	} else {
2072		/* MIC */
2073		if (status->flag & RX_FLAG_MIC_STRIPPED)
2074			skb_trim(msdu, msdu->len -
2075				 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2076
2077		/* ICV */
2078		if (status->flag & RX_FLAG_ICV_STRIPPED)
2079			skb_trim(msdu, msdu->len -
2080				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2081	}
2082
2083	/* MMIC */
2084	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2085	    !ieee80211_has_morefrags(hdr->frame_control) &&
2086	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2087		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2088
2089	/* Head */
2090	if (status->flag & RX_FLAG_IV_STRIPPED) {
2091		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2092		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2093
2094		memmove((void *)msdu->data + crypto_len,
2095			(void *)msdu->data, hdr_len);
2096		skb_pull(msdu, crypto_len);
2097	}
2098}
2099
2100static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
2101					 struct sk_buff *msdu,
2102					 enum hal_encrypt_type enctype)
2103{
2104	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2105	struct ieee80211_hdr *hdr;
2106	size_t hdr_len, crypto_len;
2107	void *rfc1042;
2108	bool is_amsdu;
2109
2110	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
2111	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
2112	rfc1042 = hdr;
2113
2114	if (rxcb->is_first_msdu) {
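		/* For the first MSDU, the LLC/SNAP header follows the 802.11
		 * header and the crypto params within the hdr status of the
		 * rx descriptor.
		 */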
2115		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2116		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2117
2118		rfc1042 += hdr_len + crypto_len;
2119	}
2120
2121	if (is_amsdu)
2122		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
2123
2124	return rfc1042;
2125}
2126
2127static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
2128				       struct sk_buff *msdu,
2129				       u8 *first_hdr,
2130				       enum hal_encrypt_type enctype,
2131				       struct ieee80211_rx_status *status)
2132{
2133	struct ieee80211_hdr *hdr;
2134	struct ethhdr *eth;
2135	size_t hdr_len;
2136	u8 da[ETH_ALEN];
2137	u8 sa[ETH_ALEN];
2138	void *rfc1042;
2139
2140	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
2141	if (WARN_ON_ONCE(!rfc1042))
2142		return;
2143
2144	/* pull decapped header and copy SA & DA */
2145	eth = (struct ethhdr *)msdu->data;
2146	ether_addr_copy(da, eth->h_dest);
2147	ether_addr_copy(sa, eth->h_source);
2148	skb_pull(msdu, sizeof(struct ethhdr));
2149
2150	/* push rfc1042/llc/snap */
2151	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
2152	       sizeof(struct ath11k_dp_rfc1042_hdr));
2153
2154	/* push original 802.11 header */
2155	hdr = (struct ieee80211_hdr *)first_hdr;
2156	hdr_len = ieee80211_hdrlen(hdr->frame_control);
2157
2158	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2159		memcpy(skb_push(msdu,
2160				ath11k_dp_rx_crypto_param_len(ar, enctype)),
2161		       (void *)hdr + hdr_len,
2162		       ath11k_dp_rx_crypto_param_len(ar, enctype));
2163	}
2164
2165	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2166
	/* The original 802.11 header has a different DA and, in case of
	 * 4-address frames, may also have a different SA.
	 */
2170	hdr = (struct ieee80211_hdr *)msdu->data;
2171	ether_addr_copy(ieee80211_get_DA(hdr), da);
2172	ether_addr_copy(ieee80211_get_SA(hdr), sa);
2173}
2174
2175static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
2176				   struct hal_rx_desc *rx_desc,
2177				   enum hal_encrypt_type enctype,
2178				   struct ieee80211_rx_status *status,
2179				   bool decrypted)
2180{
2181	u8 *first_hdr;
2182	u8 decap;
2183	struct ethhdr *ehdr;
2184
2185	first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
2186	decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
2187
2188	switch (decap) {
2189	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2190		ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
2191					     enctype, status);
2192		break;
2193	case DP_RX_DECAP_TYPE_RAW:
2194		ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2195					   decrypted);
2196		break;
2197	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2198		ehdr = (struct ethhdr *)msdu->data;
2199
2200		/* mac80211 allows fast path only for authorized STA */
2201		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2202			ATH11K_SKB_RXCB(msdu)->is_eapol = true;
2203			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2204						   enctype, status);
2205			break;
2206		}
2207
2208		/* PN for mcast packets will be validated in mac80211;
2209		 * remove eth header and add 802.11 header.
2210		 */
2211		if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2212			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2213						   enctype, status);
2214		break;
2215	case DP_RX_DECAP_TYPE_8023:
2216		/* TODO: Handle undecap for these formats */
2217		break;
2218	}
2219}
2220
2221static struct ath11k_peer *
2222ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
2223{
2224	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2225	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2226	struct ath11k_peer *peer = NULL;
2227
2228	lockdep_assert_held(&ab->base_lock);
2229
2230	if (rxcb->peer_id)
2231		peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
2232
2233	if (peer)
2234		return peer;
2235
2236	if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2237		return NULL;
2238
2239	peer = ath11k_peer_find_by_addr(ab,
2240					ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
2241	return peer;
2242}
2243
2244static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
2245				struct sk_buff *msdu,
2246				struct hal_rx_desc *rx_desc,
2247				struct ieee80211_rx_status *rx_status)
2248{
2249	bool  fill_crypto_hdr;
2250	enum hal_encrypt_type enctype;
2251	bool is_decrypted = false;
2252	struct ath11k_skb_rxcb *rxcb;
2253	struct ieee80211_hdr *hdr;
2254	struct ath11k_peer *peer;
2255	struct rx_attention *rx_attention;
2256	u32 err_bitmap;
2257
2258	/* PN for multicast packets will be checked in mac80211 */
2259	rxcb = ATH11K_SKB_RXCB(msdu);
2260	fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
2261	rxcb->is_mcbc = fill_crypto_hdr;
2262
2263	if (rxcb->is_mcbc) {
2264		rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
2265		rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
2266	}
2267
2268	spin_lock_bh(&ar->ab->base_lock);
2269	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2270	if (peer) {
2271		if (rxcb->is_mcbc)
2272			enctype = peer->sec_type_grp;
2273		else
2274			enctype = peer->sec_type;
2275	} else {
2276		enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
2277	}
2278	spin_unlock_bh(&ar->ab->base_lock);
2279
2280	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
2281	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
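	/* Treat the frame as HW-decrypted only when the cipher is not open
	 * and no MPDU-level errors were reported.
	 */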
2282	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2283		is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
2284
2285	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
2286	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2287			     RX_FLAG_MMIC_ERROR |
2288			     RX_FLAG_DECRYPTED |
2289			     RX_FLAG_IV_STRIPPED |
2290			     RX_FLAG_MMIC_STRIPPED);
2291
2292	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
2293		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2294	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
2295		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2296
2297	if (is_decrypted) {
2298		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2299
2300		if (fill_crypto_hdr)
2301			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2302					RX_FLAG_ICV_STRIPPED;
2303		else
2304			rx_status->flag |= RX_FLAG_IV_STRIPPED |
2305					   RX_FLAG_PN_VALIDATED;
2306	}
2307
2308	ath11k_dp_rx_h_csum_offload(ar, msdu);
2309	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
2310			       enctype, rx_status, is_decrypted);
2311
2312	if (!is_decrypted || fill_crypto_hdr)
2313		return;
2314
2315	if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
2316	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2317		hdr = (void *)msdu->data;
2318		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2319	}
2320}
2321
2322static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2323				struct ieee80211_rx_status *rx_status)
2324{
2325	struct ieee80211_supported_band *sband;
2326	enum rx_msdu_start_pkt_type pkt_type;
2327	u8 bw;
2328	u8 rate_mcs, nss;
2329	u8 sgi;
2330	bool is_cck, is_ldpc;
2331
2332	pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
2333	bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
2334	rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
2335	nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
2336	sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
2337
2338	switch (pkt_type) {
2339	case RX_MSDU_START_PKT_TYPE_11A:
2340	case RX_MSDU_START_PKT_TYPE_11B:
2341		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2342		sband = &ar->mac.sbands[rx_status->band];
2343		rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
2344								is_cck);
2345		break;
2346	case RX_MSDU_START_PKT_TYPE_11N:
2347		rx_status->encoding = RX_ENC_HT;
2348		if (rate_mcs > ATH11K_HT_MCS_MAX) {
2349			ath11k_warn(ar->ab,
2350				    "Received with invalid mcs in HT mode %d\n",
2351				     rate_mcs);
2352			break;
2353		}
2354		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2355		if (sgi)
2356			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2357		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2358		break;
2359	case RX_MSDU_START_PKT_TYPE_11AC:
2360		rx_status->encoding = RX_ENC_VHT;
2361		rx_status->rate_idx = rate_mcs;
2362		if (rate_mcs > ATH11K_VHT_MCS_MAX) {
2363			ath11k_warn(ar->ab,
2364				    "Received with invalid mcs in VHT mode %d\n",
2365				     rate_mcs);
2366			break;
2367		}
2368		rx_status->nss = nss;
2369		if (sgi)
2370			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2371		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2372		is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
2373		if (is_ldpc)
2374			rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
2375		break;
2376	case RX_MSDU_START_PKT_TYPE_11AX:
2377		rx_status->rate_idx = rate_mcs;
2378		if (rate_mcs > ATH11K_HE_MCS_MAX) {
2379			ath11k_warn(ar->ab,
2380				    "Received with invalid mcs in HE mode %d\n",
2381				    rate_mcs);
2382			break;
2383		}
2384		rx_status->encoding = RX_ENC_HE;
2385		rx_status->nss = nss;
2386		rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
2387		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2388		break;
2389	}
2390}
2391
2392static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2393				struct ieee80211_rx_status *rx_status)
2394{
2395	u8 channel_num;
2396	u32 center_freq, meta_data;
2397	struct ieee80211_channel *channel;
2398
2399	rx_status->freq = 0;
2400	rx_status->rate_idx = 0;
2401	rx_status->nss = 0;
2402	rx_status->encoding = RX_ENC_LEGACY;
2403	rx_status->bw = RATE_INFO_BW_20;
2404
2405	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2406
2407	meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
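	/* The freq meta data packs the channel number in the lower bits and
	 * the center frequency (MHz) in the upper 16 bits.
	 */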
2408	channel_num = meta_data;
2409	center_freq = meta_data >> 16;
2410
2411	if (center_freq >= ATH11K_MIN_6G_FREQ &&
2412	    center_freq <= ATH11K_MAX_6G_FREQ) {
2413		rx_status->band = NL80211_BAND_6GHZ;
2414		rx_status->freq = center_freq;
2415	} else if (channel_num >= 1 && channel_num <= 14) {
2416		rx_status->band = NL80211_BAND_2GHZ;
2417	} else if (channel_num >= 36 && channel_num <= 177) {
2418		rx_status->band = NL80211_BAND_5GHZ;
2419	} else {
2420		spin_lock_bh(&ar->data_lock);
2421		channel = ar->rx_channel;
2422		if (channel) {
2423			rx_status->band = channel->band;
2424			channel_num =
2425				ieee80211_frequency_to_channel(channel->center_freq);
2426		}
2427		spin_unlock_bh(&ar->data_lock);
2428		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
2429				rx_desc, sizeof(struct hal_rx_desc));
2430	}
2431
2432	if (rx_status->band != NL80211_BAND_6GHZ)
2433		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2434								 rx_status->band);
2435
2436	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
2437}
2438
2439static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
2440				      struct sk_buff *msdu,
2441				      struct ieee80211_rx_status *status)
2442{
2443	static const struct ieee80211_radiotap_he known = {
2444		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2445				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2446		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2447	};
2448	struct ieee80211_rx_status *rx_status;
2449	struct ieee80211_radiotap_he *he = NULL;
2450	struct ieee80211_sta *pubsta = NULL;
2451	struct ath11k_peer *peer;
2452	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2453	u8 decap = DP_RX_DECAP_TYPE_RAW;
2454	bool is_mcbc = rxcb->is_mcbc;
2455	bool is_eapol = rxcb->is_eapol;
2456
2457	if (status->encoding == RX_ENC_HE &&
2458	    !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2459	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
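		/* Prepend a minimal radiotap HE header so mac80211 can report
		 * the HE MCS, bandwidth/RU allocation and GI for this frame.
		 */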
2460		he = skb_push(msdu, sizeof(known));
2461		memcpy(he, &known, sizeof(known));
2462		status->flag |= RX_FLAG_RADIOTAP_HE;
2463	}
2464
2465	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2466		decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
2467
2468	spin_lock_bh(&ar->ab->base_lock);
2469	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2470	if (peer && peer->sta)
2471		pubsta = peer->sta;
2472	spin_unlock_bh(&ar->ab->base_lock);
2473
2474	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
2475		   "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2476		   msdu,
2477		   msdu->len,
2478		   peer ? peer->addr : NULL,
2479		   rxcb->tid,
2480		   is_mcbc ? "mcast" : "ucast",
2481		   rxcb->seq_no,
2482		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2483		   (status->encoding == RX_ENC_HT) ? "ht" : "",
2484		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
2485		   (status->encoding == RX_ENC_HE) ? "he" : "",
2486		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
2487		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
2488		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
2489		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2490		   status->rate_idx,
2491		   status->nss,
2492		   status->freq,
2493		   status->band, status->flag,
2494		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2495		   !!(status->flag & RX_FLAG_MMIC_ERROR),
2496		   !!(status->flag & RX_FLAG_AMSDU_MORE));
2497
2498	ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
2499			msdu->data, msdu->len);
2500
2501	rx_status = IEEE80211_SKB_RXCB(msdu);
2502	*rx_status = *status;
2503
2504	/* TODO: trace rx packet */
2505
	/* The PN for multicast packets is not validated in HW, so skip the
	 * 802.3 (fast) rx path for them.
	 * Also, fast_rx expects the STA to be authorized, hence EAPOL packets
	 * are sent via the slow path.
	 */
2511	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2512	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2513		rx_status->flag |= RX_FLAG_8023;
2514
2515	ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
2516}
2517
2518static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
2519				     struct sk_buff *msdu,
2520				     struct sk_buff_head *msdu_list,
2521				     struct ieee80211_rx_status *rx_status)
2522{
2523	struct ath11k_base *ab = ar->ab;
2524	struct hal_rx_desc *rx_desc, *lrx_desc;
2525	struct rx_attention *rx_attention;
2526	struct ath11k_skb_rxcb *rxcb;
2527	struct sk_buff *last_buf;
2528	u8 l3_pad_bytes;
2529	u8 *hdr_status;
2530	u16 msdu_len;
2531	int ret;
2532	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
2533
2534	last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2535	if (!last_buf) {
2536		ath11k_warn(ab,
2537			    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
2538		ret = -EIO;
2539		goto free_out;
2540	}
2541
2542	rx_desc = (struct hal_rx_desc *)msdu->data;
2543	if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
2544		ath11k_warn(ar->ab, "msdu len not valid\n");
2545		ret = -EIO;
2546		goto free_out;
2547	}
2548
2549	lrx_desc = (struct hal_rx_desc *)last_buf->data;
2550	rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
2551	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
2552		ath11k_warn(ab, "msdu_done bit in attention is not set\n");
2553		ret = -EIO;
2554		goto free_out;
2555	}
2556
2557	rxcb = ATH11K_SKB_RXCB(msdu);
2558	rxcb->rx_desc = rx_desc;
2559	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
2560	l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);
2561
2562	if (rxcb->is_frag) {
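		/* For fragments, only the HAL rx descriptor is stripped here;
		 * undecap and reassembly happen in the fragment rx path.
		 */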
2563		skb_pull(msdu, hal_rx_desc_sz);
2564	} else if (!rxcb->is_continuation) {
2565		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2566			hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
2567			ret = -EINVAL;
2568			ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
2569			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
2570					sizeof(struct ieee80211_hdr));
2571			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
2572					sizeof(struct hal_rx_desc));
2573			goto free_out;
2574		}
2575		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2576		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2577	} else {
2578		ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
2579						 msdu, last_buf,
2580						 l3_pad_bytes, msdu_len);
2581		if (ret) {
			ath11k_warn(ab,
				    "failed to coalesce msdu rx buffer %d\n", ret);
2584			goto free_out;
2585		}
2586	}
2587
2588	ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2589	ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
2590
2591	rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2592
2593	return 0;
2594
2595free_out:
2596	return ret;
2597}
2598
2599static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
2600						  struct napi_struct *napi,
2601						  struct sk_buff_head *msdu_list,
2602						  int mac_id)
2603{
2604	struct sk_buff *msdu;
2605	struct ath11k *ar;
2606	struct ieee80211_rx_status rx_status = {0};
2607	int ret;
2608
2609	if (skb_queue_empty(msdu_list))
2610		return;
2611
2612	if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
2613		__skb_queue_purge(msdu_list);
2614		return;
2615	}
2616
2617	ar = ab->pdevs[mac_id].ar;
2618	if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
2619		__skb_queue_purge(msdu_list);
2620		return;
2621	}
2622
2623	while ((msdu = __skb_dequeue(msdu_list))) {
2624		ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2625		if (unlikely(ret)) {
			ath11k_dbg(ab, ATH11K_DBG_DATA,
				   "Unable to process msdu %d\n", ret);
2628			dev_kfree_skb_any(msdu);
2629			continue;
2630		}
2631
2632		ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2633	}
2634}
2635
2636int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
2637			 struct napi_struct *napi, int budget)
2638{
2639	struct ath11k_dp *dp = &ab->dp;
2640	struct dp_rxdma_ring *rx_ring;
2641	int num_buffs_reaped[MAX_RADIOS] = {0};
2642	struct sk_buff_head msdu_list[MAX_RADIOS];
2643	struct ath11k_skb_rxcb *rxcb;
2644	int total_msdu_reaped = 0;
2645	struct hal_srng *srng;
2646	struct sk_buff *msdu;
2647	bool done = false;
2648	int buf_id, mac_id;
2649	struct ath11k *ar;
2650	struct hal_reo_dest_ring *desc;
2651	enum hal_reo_dest_ring_push_reason push_reason;
2652	u32 cookie;
2653	int i;
2654
2655	for (i = 0; i < MAX_RADIOS; i++)
2656		__skb_queue_head_init(&msdu_list[i]);
2657
2658	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2659
2660	spin_lock_bh(&srng->lock);
2661
2662try_again:
2663	ath11k_hal_srng_access_begin(ab, srng);
2664
2665	while (likely(desc =
2666	      (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
2667									     srng))) {
2668		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
2669				   desc->buf_addr_info.info1);
2670		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2671				   cookie);
2672		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
2673
2674		if (unlikely(buf_id == 0))
2675			continue;
2676
2677		ar = ab->pdevs[mac_id].ar;
2678		rx_ring = &ar->dp.rx_refill_buf_ring;
2679		spin_lock_bh(&rx_ring->idr_lock);
2680		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2681		if (unlikely(!msdu)) {
2682			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
2683				    buf_id);
2684			spin_unlock_bh(&rx_ring->idr_lock);
2685			continue;
2686		}
2687
2688		idr_remove(&rx_ring->bufs_idr, buf_id);
2689		spin_unlock_bh(&rx_ring->idr_lock);
2690
2691		rxcb = ATH11K_SKB_RXCB(msdu);
2692		dma_unmap_single(ab->dev, rxcb->paddr,
2693				 msdu->len + skb_tailroom(msdu),
2694				 DMA_FROM_DEVICE);
2695
2696		num_buffs_reaped[mac_id]++;
2697
2698		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
2699					desc->info0);
2700		if (unlikely(push_reason !=
2701			     HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
2702			dev_kfree_skb_any(msdu);
2703			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
2704			continue;
2705		}
2706
2707		rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
2708					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2709		rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
2710					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2711		rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
2712					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2713		rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
2714					  desc->rx_mpdu_info.meta_data);
2715		rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
2716					 desc->rx_mpdu_info.info0);
2717		rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
2718				      desc->info0);
2719
2720		rxcb->mac_id = mac_id;
2721		__skb_queue_tail(&msdu_list[mac_id], msdu);
2722
2723		if (rxcb->is_continuation) {
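			/* This buffer holds only part of an MSDU; keep reaping
			 * until its final (non-continuation) buffer is seen so
			 * the budget counts complete MSDUs.
			 */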
2724			done = false;
2725		} else {
2726			total_msdu_reaped++;
2727			done = true;
2728		}
2729
2730		if (total_msdu_reaped >= budget)
2731			break;
2732	}
2733
	/* HW might have updated the head pointer after we cached it.
	 * In this case, even though there are entries in the ring, we'll
	 * get a NULL rx_desc. Retry the read with the updated cached head
	 * pointer so that we can reap the complete MPDU in the current
	 * rx processing pass.
	 */
2740	if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
2741		ath11k_hal_srng_access_end(ab, srng);
2742		goto try_again;
2743	}
2744
2745	ath11k_hal_srng_access_end(ab, srng);
2746
2747	spin_unlock_bh(&srng->lock);
2748
2749	if (unlikely(!total_msdu_reaped))
2750		goto exit;
2751
2752	for (i = 0; i < ab->num_radios; i++) {
2753		if (!num_buffs_reaped[i])
2754			continue;
2755
2756		ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
2757
2758		ar = ab->pdevs[i].ar;
2759		rx_ring = &ar->dp.rx_refill_buf_ring;
2760
2761		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
2762					   ab->hw_params.hal_params->rx_buf_rbm);
2763	}
2764exit:
2765	return total_msdu_reaped;
2766}
2767
2768static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
2769					   struct hal_rx_mon_ppdu_info *ppdu_info)
2770{
2771	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
2772	u32 num_msdu;
2773	int i;
2774
2775	if (!rx_stats)
2776		return;
2777
2778	arsta->rssi_comb = ppdu_info->rssi_comb;
2779	ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
2780
2781	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
2782		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
2783
2784	rx_stats->num_msdu += num_msdu;
2785	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
2786				    ppdu_info->tcp_ack_msdu_count;
2787	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
2788	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
2789
2790	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
2791	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
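		/* Legacy (11a/11b) PPDUs carry no MCS/NSS/TID of their own;
		 * report NSS as 1 and use the maximum MCS/TID indices as
		 * catch-all buckets for the per-MCS and per-TID counters.
		 */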
2792		ppdu_info->nss = 1;
2793		ppdu_info->mcs = HAL_RX_MAX_MCS;
2794		ppdu_info->tid = IEEE80211_NUM_TIDS;
2795	}
2796
2797	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
2798		rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
2799
2800	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
2801		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
2802
2803	if (ppdu_info->gi < HAL_RX_GI_MAX)
2804		rx_stats->gi_count[ppdu_info->gi] += num_msdu;
2805
2806	if (ppdu_info->bw < HAL_RX_BW_MAX)
2807		rx_stats->bw_count[ppdu_info->bw] += num_msdu;
2808
2809	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
2810		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
2811
2812	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
2813		rx_stats->tid_count[ppdu_info->tid] += num_msdu;
2814
2815	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
2816		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
2817
2818	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
2819		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
2820
2821	if (ppdu_info->is_stbc)
2822		rx_stats->stbc_count += num_msdu;
2823
2824	if (ppdu_info->beamformed)
2825		rx_stats->beamformed_count += num_msdu;
2826
2827	if (ppdu_info->num_mpdu_fcs_ok > 1)
2828		rx_stats->ampdu_msdu_count += num_msdu;
2829	else
2830		rx_stats->non_ampdu_msdu_count += num_msdu;
2831
2832	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
2833	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
2834	rx_stats->dcm_count += ppdu_info->dcm;
2835	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
2836
2837	arsta->rssi_comb = ppdu_info->rssi_comb;
2838
2839	BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
2840			     ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
2841
2842	for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
2843		arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
2844
2845	rx_stats->rx_duration += ppdu_info->rx_duration;
2846	arsta->rx_duration = rx_stats->rx_duration;
2847}
2848
2849static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2850							 struct dp_rxdma_ring *rx_ring,
2851							 int *buf_id)
2852{
2853	struct sk_buff *skb;
2854	dma_addr_t paddr;
2855
2856	skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2857			    DP_RX_BUFFER_ALIGN_SIZE);
2858
2859	if (!skb)
2860		goto fail_alloc_skb;
2861
2862	if (!IS_ALIGNED((unsigned long)skb->data,
2863			DP_RX_BUFFER_ALIGN_SIZE)) {
2864		skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2865			 skb->data);
2866	}
2867
2868	paddr = dma_map_single(ab->dev, skb->data,
2869			       skb->len + skb_tailroom(skb),
2870			       DMA_FROM_DEVICE);
2871	if (unlikely(dma_mapping_error(ab->dev, paddr)))
2872		goto fail_free_skb;
2873
2874	spin_lock_bh(&rx_ring->idr_lock);
2875	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2876			    rx_ring->bufs_max, GFP_ATOMIC);
2877	spin_unlock_bh(&rx_ring->idr_lock);
2878	if (*buf_id < 0)
2879		goto fail_dma_unmap;
2880
2881	ATH11K_SKB_RXCB(skb)->paddr = paddr;
2882	return skb;
2883
2884fail_dma_unmap:
2885	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2886			 DMA_FROM_DEVICE);
2887fail_free_skb:
2888	dev_kfree_skb_any(skb);
2889fail_alloc_skb:
2890	return NULL;
2891}
2892
2893int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
2894					   struct dp_rxdma_ring *rx_ring,
2895					   int req_entries,
2896					   enum hal_rx_buf_return_buf_manager mgr)
2897{
2898	struct hal_srng *srng;
2899	u32 *desc;
2900	struct sk_buff *skb;
2901	int num_free;
2902	int num_remain;
2903	int buf_id;
2904	u32 cookie;
2905	dma_addr_t paddr;
2906
2907	req_entries = min(req_entries, rx_ring->bufs_max);
2908
2909	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2910
2911	spin_lock_bh(&srng->lock);
2912
2913	ath11k_hal_srng_access_begin(ab, srng);
2914
2915	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
2916
2917	req_entries = min(num_free, req_entries);
2918	num_remain = req_entries;
2919
2920	while (num_remain > 0) {
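		/* Allocate and DMA-map a fresh status buffer, then publish its
		 * address and cookie in the next free source ring entry.
		 */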
2921		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2922							&buf_id);
2923		if (!skb)
2924			break;
2925		paddr = ATH11K_SKB_RXCB(skb)->paddr;
2926
2927		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2928		if (!desc)
2929			goto fail_desc_get;
2930
2931		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2932			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2933
2934		num_remain--;
2935
2936		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
2937	}
2938
2939	ath11k_hal_srng_access_end(ab, srng);
2940
2941	spin_unlock_bh(&srng->lock);
2942
2943	return req_entries - num_remain;
2944
2945fail_desc_get:
2946	spin_lock_bh(&rx_ring->idr_lock);
2947	idr_remove(&rx_ring->bufs_idr, buf_id);
2948	spin_unlock_bh(&rx_ring->idr_lock);
2949	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2950			 DMA_FROM_DEVICE);
2951	dev_kfree_skb_any(skb);
2952	ath11k_hal_srng_access_end(ab, srng);
2953	spin_unlock_bh(&srng->lock);
2954
2955	return req_entries - num_remain;
2956}
2957
2958#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
2959
2960static void
2961ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
2962					 struct hal_tlv_hdr *tlv)
2963{
2964	struct hal_rx_ppdu_start *ppdu_start;
2965	u16 ppdu_id_diff, ppdu_id, tlv_len;
2966	u8 *ptr;
2967
	/* The PPDU id is part of the second TLV; move ptr to the second TLV */
2969	tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
2970	ptr = (u8 *)tlv;
2971	ptr += sizeof(*tlv) + tlv_len;
2972	tlv = (struct hal_tlv_hdr *)ptr;
2973
2974	if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
2975		return;
2976
2977	ptr += sizeof(*tlv);
2978	ppdu_start = (struct hal_rx_ppdu_start *)ptr;
2979	ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
2980			    __le32_to_cpu(ppdu_start->info0));
2981
2982	if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
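		/* The status PPDU id is ahead of the destination ring entry,
		 * unless the difference exceeds the wrap threshold, in which
		 * case the id has wrapped and the status buffer actually lags.
		 */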
2983		pmon->buf_state = DP_MON_STATUS_LEAD;
2984		ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
2985		if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
2986			pmon->buf_state = DP_MON_STATUS_LAG;
2987	} else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
2988		pmon->buf_state = DP_MON_STATUS_LAG;
2989		ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
2990		if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
2991			pmon->buf_state = DP_MON_STATUS_LEAD;
2992	}
2993}
2994
2995static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
2996					     int *budget, struct sk_buff_head *skb_list)
2997{
2998	struct ath11k *ar;
2999	const struct ath11k_hw_hal_params *hal_params;
3000	struct ath11k_pdev_dp *dp;
3001	struct dp_rxdma_ring *rx_ring;
3002	struct ath11k_mon_data *pmon;
3003	struct hal_srng *srng;
3004	void *rx_mon_status_desc;
3005	struct sk_buff *skb;
3006	struct ath11k_skb_rxcb *rxcb;
3007	struct hal_tlv_hdr *tlv;
3008	u32 cookie;
3009	int buf_id, srng_id;
3010	dma_addr_t paddr;
3011	u8 rbm;
3012	int num_buffs_reaped = 0;
3013
3014	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
3015	dp = &ar->dp;
3016	pmon = &dp->mon_data;
3017	srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
3018	rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
3019
3020	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
3021
3022	spin_lock_bh(&srng->lock);
3023
3024	ath11k_hal_srng_access_begin(ab, srng);
3025	while (*budget) {
3026		*budget -= 1;
3027		rx_mon_status_desc =
3028			ath11k_hal_srng_src_peek(ab, srng);
3029		if (!rx_mon_status_desc) {
3030			pmon->buf_state = DP_MON_STATUS_REPLINISH;
3031			break;
3032		}
3033
3034		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
3035						&cookie, &rbm);
3036		if (paddr) {
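			/* A non-zero buffer address means this entry points at
			 * a posted status buffer; look it up via the cookie's
			 * buf_id.
			 */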
3037			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
3038
3039			spin_lock_bh(&rx_ring->idr_lock);
3040			skb = idr_find(&rx_ring->bufs_idr, buf_id);
3041			spin_unlock_bh(&rx_ring->idr_lock);
3042
3043			if (!skb) {
3044				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
3045					    buf_id);
3046				pmon->buf_state = DP_MON_STATUS_REPLINISH;
3047				goto move_next;
3048			}
3049
3050			rxcb = ATH11K_SKB_RXCB(skb);
3051
3052			dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
3053						skb->len + skb_tailroom(skb),
3054						DMA_FROM_DEVICE);
3055
3056			tlv = (struct hal_tlv_hdr *)skb->data;
3057			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
3058					HAL_RX_STATUS_BUFFER_DONE) {
3059				ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
3060					    FIELD_GET(HAL_TLV_HDR_TAG,
3061						      tlv->tl), buf_id);
				/* If the DONE status is missing, hold onto the
				 * status ring until the status is set for this
				 * status ring buffer: keep the HP of the
				 * mon_status_ring unchanged, break from here
				 * and check the same buffer again next time.
				 */
3069				pmon->buf_state = DP_MON_STATUS_NO_DMA;
3070				break;
3071			}
3072
3073			spin_lock_bh(&rx_ring->idr_lock);
3074			idr_remove(&rx_ring->bufs_idr, buf_id);
3075			spin_unlock_bh(&rx_ring->idr_lock);
3076			if (ab->hw_params.full_monitor_mode) {
3077				ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
3078				if (paddr == pmon->mon_status_paddr)
3079					pmon->buf_state = DP_MON_STATUS_MATCH;
3080			}
3081
3082			dma_unmap_single(ab->dev, rxcb->paddr,
3083					 skb->len + skb_tailroom(skb),
3084					 DMA_FROM_DEVICE);
3085
3086			__skb_queue_tail(skb_list, skb);
3087		} else {
3088			pmon->buf_state = DP_MON_STATUS_REPLINISH;
3089		}
3090move_next:
3091		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
3092							&buf_id);
3093
3094		if (!skb) {
3095			hal_params = ab->hw_params.hal_params;
3096			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
3097							hal_params->rx_buf_rbm);
3098			num_buffs_reaped++;
3099			break;
3100		}
3101		rxcb = ATH11K_SKB_RXCB(skb);
3102
3103		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
3104			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3105
3106		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
3107						cookie,
3108						ab->hw_params.hal_params->rx_buf_rbm);
3109		ath11k_hal_srng_src_get_next_entry(ab, srng);
3110		num_buffs_reaped++;
3111	}
3112	ath11k_hal_srng_access_end(ab, srng);
3113	spin_unlock_bh(&srng->lock);
3114
3115	return num_buffs_reaped;
3116}
3117
3118static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
3119{
3120	struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
3121
3122	spin_lock_bh(&rx_tid->ab->base_lock);
3123	if (rx_tid->last_frag_no &&
3124	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
3125		spin_unlock_bh(&rx_tid->ab->base_lock);
3126		return;
3127	}
3128	ath11k_dp_rx_frags_cleanup(rx_tid, true);
3129	spin_unlock_bh(&rx_tid->ab->base_lock);
3130}
3131
3132int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
3133{
3134	struct ath11k_base *ab = ar->ab;
3135	struct crypto_shash *tfm;
3136	struct ath11k_peer *peer;
3137	struct dp_rx_tid *rx_tid;
3138	int i;
3139
3140	tfm = crypto_alloc_shash("michael_mic", 0, 0);
3141	if (IS_ERR(tfm)) {
3142		ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",
3143			    PTR_ERR(tfm));
3144		return PTR_ERR(tfm);
3145	}
3146
3147	spin_lock_bh(&ab->base_lock);
3148
3149	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
3150	if (!peer) {
3151		ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
3152		spin_unlock_bh(&ab->base_lock);
3153		crypto_free_shash(tfm);
3154		return -ENOENT;
3155	}
3156
3157	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
3158		rx_tid = &peer->rx_tid[i];
3159		rx_tid->ab = ab;
3160		timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
3161		skb_queue_head_init(&rx_tid->rx_frags);
3162	}
3163
3164	peer->tfm_mmic = tfm;
3165	peer->dp_setup_done = true;
3166	spin_unlock_bh(&ab->base_lock);
3167
3168	return 0;
3169}
3170
3171static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
3172				      struct ieee80211_hdr *hdr, u8 *data,
3173				      size_t data_len, u8 *mic)
3174{
3175	SHASH_DESC_ON_STACK(desc, tfm);
3176	u8 mic_hdr[16] = {0};
3177	u8 tid = 0;
3178	int ret;
3179
3180	if (!tfm)
3181		return -EINVAL;
3182
3183	desc->tfm = tfm;
3184
3185	ret = crypto_shash_setkey(tfm, key, 8);
3186	if (ret)
3187		goto out;
3188
3189	ret = crypto_shash_init(desc);
3190	if (ret)
3191		goto out;
3192
	/* TKIP MIC header: DA, SA, priority (TID) and three reserved zero bytes */
3194	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
3195	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
3196	if (ieee80211_is_data_qos(hdr->frame_control))
3197		tid = ieee80211_get_tid(hdr);
3198	mic_hdr[12] = tid;
3199
3200	ret = crypto_shash_update(desc, mic_hdr, 16);
3201	if (ret)
3202		goto out;
3203	ret = crypto_shash_update(desc, data, data_len);
3204	if (ret)
3205		goto out;
3206	ret = crypto_shash_final(desc, mic);
3207out:
3208	shash_desc_zero(desc);
3209	return ret;
3210}
3211
3212static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
3213					  struct sk_buff *msdu)
3214{
3215	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3216	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
3217	struct ieee80211_key_conf *key_conf;
3218	struct ieee80211_hdr *hdr;
3219	u8 mic[IEEE80211_CCMP_MIC_LEN];
3220	int head_len, tail_len, ret;
3221	size_t data_len;
3222	u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3223	u8 *key, *data;
3224	u8 key_idx;
3225
3226	if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
3227	    HAL_ENCRYPT_TYPE_TKIP_MIC)
3228		return 0;
3229
3230	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3231	hdr_len = ieee80211_hdrlen(hdr->frame_control);
3232	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
3233	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
3234
3235	if (!is_multicast_ether_addr(hdr->addr1))
3236		key_idx = peer->ucast_keyidx;
3237	else
3238		key_idx = peer->mcast_keyidx;
3239
3240	key_conf = peer->keys[key_idx];
3241
3242	data = msdu->data + head_len;
3243	data_len = msdu->len - head_len - tail_len;
3244	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
3245
3246	ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
3247	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
3248		goto mic_fail;
3249
3250	return 0;
3251
3252mic_fail:
3253	(ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
3254	(ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
3255
3256	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
3257		    RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
3258	skb_pull(msdu, hal_rx_desc_sz);
3259
3260	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
3261	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
3262			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
3263	ieee80211_rx(ar->hw, msdu);
3264	return -EINVAL;
3265}
3266
3267static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
3268					enum hal_encrypt_type enctype, u32 flags)
3269{
3270	struct ieee80211_hdr *hdr;
3271	size_t hdr_len;
3272	size_t crypto_len;
3273	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3274
3275	if (!flags)
3276		return;
3277
3278	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3279
3280	if (flags & RX_FLAG_MIC_STRIPPED)
3281		skb_trim(msdu, msdu->len -
3282			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
3283
3284	if (flags & RX_FLAG_ICV_STRIPPED)
3285		skb_trim(msdu, msdu->len -
3286			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
3287
3288	if (flags & RX_FLAG_IV_STRIPPED) {
3289		hdr_len = ieee80211_hdrlen(hdr->frame_control);
3290		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
3291
3292		memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
3293			(void *)msdu->data + hal_rx_desc_sz, hdr_len);
3294		skb_pull(msdu, crypto_len);
3295	}
3296}
3297
3298static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
3299				 struct ath11k_peer *peer,
3300				 struct dp_rx_tid *rx_tid,
3301				 struct sk_buff **defrag_skb)
3302{
3303	struct hal_rx_desc *rx_desc;
3304	struct sk_buff *skb, *first_frag, *last_frag;
3305	struct ieee80211_hdr *hdr;
3306	struct rx_attention *rx_attention;
3307	enum hal_encrypt_type enctype;
3308	bool is_decrypted = false;
3309	int msdu_len = 0;
3310	int extra_space;
3311	u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3312
3313	first_frag = skb_peek(&rx_tid->rx_frags);
3314	last_frag = skb_peek_tail(&rx_tid->rx_frags);
3315
3316	skb_queue_walk(&rx_tid->rx_frags, skb) {
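		/* Undecap each fragment in place: strip the FCS from all but
		 * the last fragment and, for decrypted frames, the IV from all
		 * but the first and the ICV/MIC from all but the last; then
		 * drop the rx descriptor and 802.11 header from non-first
		 * fragments so their payloads can be appended to the first.
		 */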
3317		flags = 0;
3318		rx_desc = (struct hal_rx_desc *)skb->data;
3319		hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3320
3321		enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
3322		if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
3323			rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
3324			is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
3325		}
3326
3327		if (is_decrypted) {
3328			if (skb != first_frag)
3329				flags |=  RX_FLAG_IV_STRIPPED;
3330			if (skb != last_frag)
3331				flags |= RX_FLAG_ICV_STRIPPED |
3332					 RX_FLAG_MIC_STRIPPED;
3333		}
3334
3335		/* RX fragments are always raw packets */
3336		if (skb != last_frag)
3337			skb_trim(skb, skb->len - FCS_LEN);
3338		ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
3339
3340		if (skb != first_frag)
3341			skb_pull(skb, hal_rx_desc_sz +
3342				      ieee80211_hdrlen(hdr->frame_control));
3343		msdu_len += skb->len;
3344	}
3345
3346	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
3347	if (extra_space > 0 &&
3348	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
3349		return -ENOMEM;
3350
3351	__skb_unlink(first_frag, &rx_tid->rx_frags);
3352	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
3353		skb_put_data(first_frag, skb->data, skb->len);
3354		dev_kfree_skb_any(skb);
3355	}
3356
3357	hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
3358	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
3359	ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
3360
3361	if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
3362		first_frag = NULL;
3363
3364	*defrag_skb = first_frag;
3365	return 0;
3366}
3367
3368static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
3369					      struct sk_buff *defrag_skb)
3370{
3371	struct ath11k_base *ab = ar->ab;
3372	struct ath11k_pdev_dp *dp = &ar->dp;
3373	struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
3374	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
3375	struct hal_reo_entrance_ring *reo_ent_ring;
3376	struct hal_reo_dest_ring *reo_dest_ring;
3377	struct dp_link_desc_bank *link_desc_banks;
3378	struct hal_rx_msdu_link *msdu_link;
3379	struct hal_rx_msdu_details *msdu0;
3380	struct hal_srng *srng;
3381	dma_addr_t paddr;
3382	u32 desc_bank, msdu_info, mpdu_info;
3383	u32 dst_idx, cookie, hal_rx_desc_sz;
3384	int ret, buf_id;
3385
3386	hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
3387	link_desc_banks = ab->dp.link_desc_banks;
3388	reo_dest_ring = rx_tid->dst_ring_desc;
3389
3390	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3391	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3392			(paddr - link_desc_banks[desc_bank].paddr));
3393	msdu0 = &msdu_link->msdu_link[0];
3394	dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
3395	memset(msdu0, 0, sizeof(*msdu0));
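	/* Rebuild the MSDU descriptor for the reassembled frame: a single,
	 * complete MSDU with the defragmented length and the original REO
	 * destination indication.
	 */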
3396
3397	msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
3398		    FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
3399		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
3400		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
3401			       defrag_skb->len - hal_rx_desc_sz) |
3402		    FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
3403		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
3404		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
3405	msdu0->rx_msdu_info.info0 = msdu_info;
3406
3407	/* change msdu len in hal rx desc */
3408	ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3409
3410	paddr = dma_map_single(ab->dev, defrag_skb->data,
3411			       defrag_skb->len + skb_tailroom(defrag_skb),
3412			       DMA_TO_DEVICE);
3413	if (dma_mapping_error(ab->dev, paddr))
3414		return -ENOMEM;
3415
3416	spin_lock_bh(&rx_refill_ring->idr_lock);
3417	buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
3418			   rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
3419	spin_unlock_bh(&rx_refill_ring->idr_lock);
3420	if (buf_id < 0) {
3421		ret = -ENOMEM;
3422		goto err_unmap_dma;
3423	}
3424
3425	ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
3426	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
3427		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3428
3429	ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
3430					ab->hw_params.hal_params->rx_buf_rbm);
3431
3432	/* Fill mpdu details into reo entrance ring */
3433	srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
3434
3435	spin_lock_bh(&srng->lock);
3436	ath11k_hal_srng_access_begin(ab, srng);
3437
3438	reo_ent_ring = (struct hal_reo_entrance_ring *)
3439			ath11k_hal_srng_src_get_next_entry(ab, srng);
3440	if (!reo_ent_ring) {
3441		ath11k_hal_srng_access_end(ab, srng);
3442		spin_unlock_bh(&srng->lock);
3443		ret = -ENOSPC;
3444		goto err_free_idr;
3445	}
3446	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3447
3448	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3449	ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
3450					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
3451
3452	mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
3453		    FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
3454		    FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
3455		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
3456		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
3457		    FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
3458		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
3459
3460	reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
3461	reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
3462	reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
3463	reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
3464					 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
3465						   reo_dest_ring->info0)) |
3466			      FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
3467	ath11k_hal_srng_access_end(ab, srng);
3468	spin_unlock_bh(&srng->lock);
3469
3470	return 0;
3471
3472err_free_idr:
3473	spin_lock_bh(&rx_refill_ring->idr_lock);
3474	idr_remove(&rx_refill_ring->bufs_idr, buf_id);
3475	spin_unlock_bh(&rx_refill_ring->idr_lock);
3476err_unmap_dma:
3477	dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3478			 DMA_TO_DEVICE);
3479	return ret;
3480}
3481
3482static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
3483				    struct sk_buff *a, struct sk_buff *b)
3484{
3485	int frag1, frag2;
3486
3487	frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
3488	frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
3489
3490	return frag1 - frag2;
3491}
3492
3493static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
3494				      struct sk_buff_head *frag_list,
3495				      struct sk_buff *cur_frag)
3496{
3497	struct sk_buff *skb;
3498	int cmp;
3499
3500	skb_queue_walk(frag_list, skb) {
3501		cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
3502		if (cmp < 0)
3503			continue;
3504		__skb_queue_before(frag_list, skb, cur_frag);
3505		return;
3506	}
3507	__skb_queue_tail(frag_list, cur_frag);
3508}
3509
3510static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
3511{
3512	struct ieee80211_hdr *hdr;
3513	u64 pn = 0;
3514	u8 *ehdr;
3515	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3516
3517	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3518	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
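	/* Assemble the 48-bit PN from the CCMP/GCMP IV: PN0 and PN1 are at
	 * offsets 0 and 1, PN2-PN5 at offsets 4-7 (offsets 2-3 hold a
	 * reserved byte and the key id / ExtIV byte).
	 */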
3519
3520	pn = ehdr[0];
3521	pn |= (u64)ehdr[1] << 8;
3522	pn |= (u64)ehdr[4] << 16;
3523	pn |= (u64)ehdr[5] << 24;
3524	pn |= (u64)ehdr[6] << 32;
3525	pn |= (u64)ehdr[7] << 40;
3526
3527	return pn;
3528}
3529
3530static bool
3531ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
3532{
3533	enum hal_encrypt_type encrypt_type;
3534	struct sk_buff *first_frag, *skb;
3535	struct hal_rx_desc *desc;
3536	u64 last_pn;
3537	u64 cur_pn;
3538
3539	first_frag = skb_peek(&rx_tid->rx_frags);
3540	desc = (struct hal_rx_desc *)first_frag->data;
3541
3542	encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
3543	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3544	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3545	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3546	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3547		return true;
3548
3549	last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
3550	skb_queue_walk(&rx_tid->rx_frags, skb) {
3551		if (skb == first_frag)
3552			continue;
3553
3554		cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
3555		if (cur_pn != last_pn + 1)
3556			return false;
3557		last_pn = cur_pn;
3558	}
3559	return true;
3560}
3561
3562static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
3563				    struct sk_buff *msdu,
3564				    u32 *ring_desc)
3565{
3566	struct ath11k_base *ab = ar->ab;
3567	struct hal_rx_desc *rx_desc;
3568	struct ath11k_peer *peer;
3569	struct dp_rx_tid *rx_tid;
3570	struct sk_buff *defrag_skb = NULL;
3571	u32 peer_id;
3572	u16 seqno, frag_no;
3573	u8 tid;
3574	int ret = 0;
3575	bool more_frags;
3576	bool is_mcbc;
3577
3578	rx_desc = (struct hal_rx_desc *)msdu->data;
3579	peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
3580	tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
3581	seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
3582	frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
3583	more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
3584	is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
3585
3586	/* Multicast/Broadcast fragments are not expected */
3587	if (is_mcbc)
3588		return -EINVAL;
3589
3590	if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
3591	    !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
3592	    tid > IEEE80211_NUM_TIDS)
3593		return -EINVAL;
3594
	/* An unfragmented packet was received in the REO exception ring;
	 * this shouldn't happen as such packets typically come from the
	 * reo2sw srngs.
	 */
3600	if (WARN_ON_ONCE(!frag_no && !more_frags))
3601		return -EINVAL;
3602
3603	spin_lock_bh(&ab->base_lock);
3604	peer = ath11k_peer_find_by_id(ab, peer_id);
3605	if (!peer) {
3606		ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3607			    peer_id);
3608		ret = -ENOENT;
3609		goto out_unlock;
3610	}
3611	if (!peer->dp_setup_done) {
3612		ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
3613			    peer->addr, peer_id);
3614		ret = -ENOENT;
3615		goto out_unlock;
3616	}
3617
3618	rx_tid = &peer->rx_tid[tid];
3619
3620	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3621	    skb_queue_empty(&rx_tid->rx_frags)) {
3622		/* Flush stored fragments and start a new sequence */
3623		ath11k_dp_rx_frags_cleanup(rx_tid, true);
3624		rx_tid->cur_sn = seqno;
3625	}
3626
3627	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3628		/* Fragment already present */
3629		ret = -EINVAL;
3630		goto out_unlock;
3631	}
3632
3633	if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
3634		__skb_queue_tail(&rx_tid->rx_frags, msdu);
3635	else
3636		ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
3637
3638	rx_tid->rx_frag_bitmap |= BIT(frag_no);
3639	if (!more_frags)
3640		rx_tid->last_frag_no = frag_no;
3641
3642	if (frag_no == 0) {
3643		rx_tid->dst_ring_desc = kmemdup(ring_desc,
3644						sizeof(*rx_tid->dst_ring_desc),
3645						GFP_ATOMIC);
3646		if (!rx_tid->dst_ring_desc) {
3647			ret = -ENOMEM;
3648			goto out_unlock;
3649		}
3650	} else {
3651		ath11k_dp_rx_link_desc_return(ab, ring_desc,
3652					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3653	}
3654
3655	if (!rx_tid->last_frag_no ||
3656	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3657		mod_timer(&rx_tid->frag_timer, jiffies +
3658					       ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
3659		goto out_unlock;
3660	}
3661
3662	spin_unlock_bh(&ab->base_lock);
3663	del_timer_sync(&rx_tid->frag_timer);
3664	spin_lock_bh(&ab->base_lock);
3665
3666	peer = ath11k_peer_find_by_id(ab, peer_id);
3667	if (!peer)
3668		goto err_frags_cleanup;
3669
3670	if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3671		goto err_frags_cleanup;
3672
3673	if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3674		goto err_frags_cleanup;
3675
3676	if (!defrag_skb)
3677		goto err_frags_cleanup;
3678
3679	if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3680		goto err_frags_cleanup;
3681
3682	ath11k_dp_rx_frags_cleanup(rx_tid, false);
3683	goto out_unlock;
3684
3685err_frags_cleanup:
3686	dev_kfree_skb_any(defrag_skb);
3687	ath11k_dp_rx_frags_cleanup(rx_tid, true);
3688out_unlock:
3689	spin_unlock_bh(&ab->base_lock);
3690	return ret;
3691}
3692
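/* Reap one rx buffer referenced from a REO exception ring entry: look it up
 * by buf_id, unmap it and either drop it or hand it to the defragmentation
 * path. Returns 0 unless the buf_id lookup fails.
 */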
3693static int
3694ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
3695{
3696	struct ath11k_pdev_dp *dp = &ar->dp;
3697	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
3698	struct sk_buff *msdu;
3699	struct ath11k_skb_rxcb *rxcb;
3700	struct hal_rx_desc *rx_desc;
3701	u8 *hdr_status;
3702	u16 msdu_len;
3703	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3704
3705	spin_lock_bh(&rx_ring->idr_lock);
3706	msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3707	if (!msdu) {
3708		ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
3709			    buf_id);
3710		spin_unlock_bh(&rx_ring->idr_lock);
3711		return -EINVAL;
3712	}
3713
3714	idr_remove(&rx_ring->bufs_idr, buf_id);
3715	spin_unlock_bh(&rx_ring->idr_lock);
3716
3717	rxcb = ATH11K_SKB_RXCB(msdu);
3718	dma_unmap_single(ar->ab->dev, rxcb->paddr,
3719			 msdu->len + skb_tailroom(msdu),
3720			 DMA_FROM_DEVICE);
3721
3722	if (drop) {
3723		dev_kfree_skb_any(msdu);
3724		return 0;
3725	}
3726
3727	rcu_read_lock();
3728	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3729		dev_kfree_skb_any(msdu);
3730		goto exit;
3731	}
3732
3733	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3734		dev_kfree_skb_any(msdu);
3735		goto exit;
3736	}
3737
3738	rx_desc = (struct hal_rx_desc *)msdu->data;
3739	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
3740	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3741		hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
3742		ath11k_warn(ar->ab, "invalid msdu len %u", msdu_len);
3743		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
3744				sizeof(struct ieee80211_hdr));
3745		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
3746				sizeof(struct hal_rx_desc));
3747		dev_kfree_skb_any(msdu);
3748		goto exit;
3749	}
3750
3751	skb_put(msdu, hal_rx_desc_sz + msdu_len);
3752
3753	if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
3754		dev_kfree_skb_any(msdu);
3755		ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
3756					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3757	}
3758exit:
3759	rcu_read_unlock();
3760	return 0;
3761}
3762
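/* NAPI handler for the REO exception ring. Each entry points to a link
 * descriptor whose MSDUs are reaped via ath11k_dp_process_rx_err_buf();
 * only single-MSDU rx fragments are processed further, everything else is
 * dropped and the link descriptor is returned to the WBM idle list. Reaped
 * buffers are replenished per pdev before returning.
 */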
3763int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
3764			     int budget)
3765{
3766	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3767	struct dp_link_desc_bank *link_desc_banks;
3768	enum hal_rx_buf_return_buf_manager rbm;
3769	int tot_n_bufs_reaped, quota, ret, i;
3770	int n_bufs_reaped[MAX_RADIOS] = {0};
3771	struct dp_rxdma_ring *rx_ring;
3772	struct dp_srng *reo_except;
3773	u32 desc_bank, num_msdus;
3774	struct hal_srng *srng;
3775	struct ath11k_dp *dp;
3776	void *link_desc_va;
3777	int buf_id, mac_id;
3778	struct ath11k *ar;
3779	dma_addr_t paddr;
3780	u32 *desc;
3781	bool is_frag;
3782	u8 drop = 0;
3783
3784	tot_n_bufs_reaped = 0;
3785	quota = budget;
3786
3787	dp = &ab->dp;
3788	reo_except = &dp->reo_except_ring;
3789	link_desc_banks = dp->link_desc_banks;
3790
3791	srng = &ab->hal.srng_list[reo_except->ring_id];
3792
3793	spin_lock_bh(&srng->lock);
3794
3795	ath11k_hal_srng_access_begin(ab, srng);
3796
3797	while (budget &&
3798	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3799		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
3800
3801		ab->soc_stats.err_ring_pkts++;
3802		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
3803						    &desc_bank);
3804		if (ret) {
3805			ath11k_warn(ab, "failed to parse error reo desc %d\n",
3806				    ret);
3807			continue;
3808		}
3809		link_desc_va = link_desc_banks[desc_bank].vaddr +
3810			       (paddr - link_desc_banks[desc_bank].paddr);
3811		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3812						 &rbm);
3813		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
3814		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
3815			ab->soc_stats.invalid_rbm++;
3816			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
3817			ath11k_dp_rx_link_desc_return(ab, desc,
3818						      HAL_WBM_REL_BM_ACT_REL_MSDU);
3819			continue;
3820		}
3821
3822		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
3823
3824		/* Below, process only rx fragments with one MSDU per link
3825		 * descriptor; drop MSDUs indicated due to error reasons.
3826		 */
3827		if (!is_frag || num_msdus > 1) {
3828			drop = 1;
3829			/* Return the link desc back to wbm idle list */
3830			ath11k_dp_rx_link_desc_return(ab, desc,
3831						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3832		}
3833
3834		for (i = 0; i < num_msdus; i++) {
3835			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3836					   msdu_cookies[i]);
3837
3838			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
3839					   msdu_cookies[i]);
3840
3841			ar = ab->pdevs[mac_id].ar;
3842
3843			if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
3844				n_bufs_reaped[mac_id]++;
3845				tot_n_bufs_reaped++;
3846			}
3847		}
3848
3849		if (tot_n_bufs_reaped >= quota) {
3850			tot_n_bufs_reaped = quota;
3851			goto exit;
3852		}
3853
3854		budget = quota - tot_n_bufs_reaped;
3855	}
3856
3857exit:
3858	ath11k_hal_srng_access_end(ab, srng);
3859
3860	spin_unlock_bh(&srng->lock);
3861
3862	for (i = 0; i <  ab->num_radios; i++) {
3863		if (!n_bufs_reaped[i])
3864			continue;
3865
3866		ar = ab->pdevs[i].ar;
3867		rx_ring = &ar->dp.rx_refill_buf_ring;
3868
3869		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
3870					   ab->hw_params.hal_params->rx_buf_rbm);
3871	}
3872
3873	return tot_n_bufs_reaped;
3874}
3875
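/* Drop the remaining buffers of a scatter-gathered MSDU whose first buffer
 * was already dropped by the caller: free as many queued desc-addr-zero
 * error buffers as are needed to cover the remaining msdu_len.
 */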
3876static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3877					     int msdu_len,
3878					     struct sk_buff_head *msdu_list)
3879{
3880	struct sk_buff *skb, *tmp;
3881	struct ath11k_skb_rxcb *rxcb;
3882	int n_buffs;
3883
3884	n_buffs = DIV_ROUND_UP(msdu_len,
3885			       (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
3886
3887	skb_queue_walk_safe(msdu_list, skb, tmp) {
3888		rxcb = ATH11K_SKB_RXCB(skb);
3889		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3890		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3891			if (!n_buffs)
3892				break;
3893			__skb_unlink(skb, msdu_list);
3894			dev_kfree_skb_any(skb);
3895			n_buffs--;
3896		}
3897	}
3898}
3899
3900static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
3901				      struct ieee80211_rx_status *status,
3902				      struct sk_buff_head *msdu_list)
3903{
3904	u16 msdu_len;
3905	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3906	struct rx_attention *rx_attention;
3907	u8 l3pad_bytes;
3908	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3909	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3910
3911	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
3912
3913	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3914		/* The first buffer will be freed by the caller, so deduct its length */
3915		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3916		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3917		return -EINVAL;
3918	}
3919
3920	rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
3921	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
3922		ath11k_warn(ar->ab,
3923			    "msdu_done bit not set in null_q_desc processing\n");
3924		__skb_queue_purge(msdu_list);
3925		return -EIO;
3926	}
3927
3928	/* Handle NULL queue descriptor violations arising out of a missing
3929	 * REO queue for a given peer or TID. This typically happens if a
3930	 * packet is received on a QoS-enabled TID before the ADDBA
3931	 * negotiation for that TID has set up the TID queue. It may also
3932	 * happen for MC/BC frames if they are not routed to the non-QoS TID
3933	 * queue, in the absence of any other default TID queue.
3934	 * This error can show up in either a REO destination or a WBM release ring.
3935	 */
3936
3937	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
3938	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
3939
3940	if (rxcb->is_frag) {
3941		skb_pull(msdu, hal_rx_desc_sz);
3942	} else {
3943		l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
3944
3945		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3946			return -EINVAL;
3947
3948		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3949		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3950	}
3951	ath11k_dp_rx_h_ppdu(ar, desc, status);
3952
3953	ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
3954
3955	rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);
3956
3957	/* Note that the caller still has access to the msdu and will complete
3958	 * rx with mac80211, so there is no need to clean up amsdu_list here.
3959	 */
3960
3961	return 0;
3962}
3963
3964static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
3965				   struct ieee80211_rx_status *status,
3966				   struct sk_buff_head *msdu_list)
3967{
3968	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3969	bool drop = false;
3970
3971	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3972
3973	switch (rxcb->err_code) {
3974	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3975		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3976			drop = true;
3977		break;
3978	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3979		/* TODO: Do not drop PN-check-failed packets in the driver;
3980		 * instead, drop them in mac80211 after incrementing the
3981		 * replay counters.
3982		 */
3983		fallthrough;
3984	default:
3985		/* TODO: Review other errors and process them to mac80211
3986		 * as appropriate.
3987		 */
3988		drop = true;
3989		break;
3990	}
3991
3992	return drop;
3993}
3994
3995static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
3996					struct ieee80211_rx_status *status)
3997{
3998	u16 msdu_len;
3999	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
4000	u8 l3pad_bytes;
4001	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4002	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
4003
4004	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
4005	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
4006
4007	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
4008	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
4009	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
4010	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
4011
4012	ath11k_dp_rx_h_ppdu(ar, desc, status);
4013
4014	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
4015			 RX_FLAG_DECRYPTED);
4016
4017	ath11k_dp_rx_h_undecap(ar, msdu, desc,
4018			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
4019}
4020
4021static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar,  struct sk_buff *msdu,
4022				     struct ieee80211_rx_status *status)
4023{
4024	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4025	bool drop = false;
4026
4027	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
4028
4029	switch (rxcb->err_code) {
4030	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
4031		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
4032		break;
4033	default:
4034		/* TODO: Review the other rxdma error codes to check if anything
4035		 * is worth reporting to mac80211.
4036		 */
4037		drop = true;
4038		break;
4039	}
4040
4041	return drop;
4042}
4043
4044static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
4045				 struct napi_struct *napi,
4046				 struct sk_buff *msdu,
4047				 struct sk_buff_head *msdu_list)
4048{
4049	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4050	struct ieee80211_rx_status rxs = {0};
4051	bool drop = true;
4052
4053	switch (rxcb->err_rel_src) {
4054	case HAL_WBM_REL_SRC_MODULE_REO:
4055		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
4056		break;
4057	case HAL_WBM_REL_SRC_MODULE_RXDMA:
4058		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
4059		break;
4060	default:
4061		/* msdu will get freed */
4062		break;
4063	}
4064
4065	if (drop) {
4066		dev_kfree_skb_any(msdu);
4067		return;
4068	}
4069
4070	ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
4071}
4072
4073int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
4074				 struct napi_struct *napi, int budget)
4075{
4076	struct ath11k *ar;
4077	struct ath11k_dp *dp = &ab->dp;
4078	struct dp_rxdma_ring *rx_ring;
4079	struct hal_rx_wbm_rel_info err_info;
4080	struct hal_srng *srng;
4081	struct sk_buff *msdu;
4082	struct sk_buff_head msdu_list[MAX_RADIOS];
4083	struct ath11k_skb_rxcb *rxcb;
4084	u32 *rx_desc;
4085	int buf_id, mac_id;
4086	int num_buffs_reaped[MAX_RADIOS] = {0};
4087	int total_num_buffs_reaped = 0;
4088	int ret, i;
4089
4090	for (i = 0; i < ab->num_radios; i++)
4091		__skb_queue_head_init(&msdu_list[i]);
4092
4093	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
4094
4095	spin_lock_bh(&srng->lock);
4096
4097	ath11k_hal_srng_access_begin(ab, srng);
4098
4099	while (budget) {
4100		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
4101		if (!rx_desc)
4102			break;
4103
4104		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
4105		if (ret) {
4106			ath11k_warn(ab,
4107				    "failed to parse rx error in wbm_rel ring desc %d\n",
4108				    ret);
4109			continue;
4110		}
4111
4112		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
4113		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
4114
4115		ar = ab->pdevs[mac_id].ar;
4116		rx_ring = &ar->dp.rx_refill_buf_ring;
4117
4118		spin_lock_bh(&rx_ring->idr_lock);
4119		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4120		if (!msdu) {
4121			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
4122				    buf_id, mac_id);
4123			spin_unlock_bh(&rx_ring->idr_lock);
4124			continue;
4125		}
4126
4127		idr_remove(&rx_ring->bufs_idr, buf_id);
4128		spin_unlock_bh(&rx_ring->idr_lock);
4129
4130		rxcb = ATH11K_SKB_RXCB(msdu);
4131		dma_unmap_single(ab->dev, rxcb->paddr,
4132				 msdu->len + skb_tailroom(msdu),
4133				 DMA_FROM_DEVICE);
4134
4135		num_buffs_reaped[mac_id]++;
4136		total_num_buffs_reaped++;
4137		budget--;
4138
4139		if (err_info.push_reason !=
4140		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4141			dev_kfree_skb_any(msdu);
4142			continue;
4143		}
4144
4145		rxcb->err_rel_src = err_info.err_rel_src;
4146		rxcb->err_code = err_info.err_code;
4147		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
4148		__skb_queue_tail(&msdu_list[mac_id], msdu);
4149	}
4150
4151	ath11k_hal_srng_access_end(ab, srng);
4152
4153	spin_unlock_bh(&srng->lock);
4154
4155	if (!total_num_buffs_reaped)
4156		goto done;
4157
4158	for (i = 0; i <  ab->num_radios; i++) {
4159		if (!num_buffs_reaped[i])
4160			continue;
4161
4162		ar = ab->pdevs[i].ar;
4163		rx_ring = &ar->dp.rx_refill_buf_ring;
4164
4165		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
4166					   ab->hw_params.hal_params->rx_buf_rbm);
4167	}
4168
4169	rcu_read_lock();
4170	for (i = 0; i <  ab->num_radios; i++) {
4171		if (!rcu_dereference(ab->pdevs_active[i])) {
4172			__skb_queue_purge(&msdu_list[i]);
4173			continue;
4174		}
4175
4176		ar = ab->pdevs[i].ar;
4177
4178		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
4179			__skb_queue_purge(&msdu_list[i]);
4180			continue;
4181		}
4182
4183		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
4184			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
4185	}
4186	rcu_read_unlock();
4187done:
4188	return total_num_buffs_reaped;
4189}
4190
4191int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
4192{
4193	struct ath11k *ar;
4194	struct dp_srng *err_ring;
4195	struct dp_rxdma_ring *rx_ring;
4196	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
4197	struct hal_srng *srng;
4198	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
4199	enum hal_rx_buf_return_buf_manager rbm;
4200	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
4201	struct ath11k_skb_rxcb *rxcb;
4202	struct sk_buff *skb;
4203	struct hal_reo_entrance_ring *entr_ring;
4204	void *desc;
4205	int num_buf_freed = 0;
4206	int quota = budget;
4207	dma_addr_t paddr;
4208	u32 desc_bank;
4209	void *link_desc_va;
4210	int num_msdus;
4211	int i;
4212	int buf_id;
4213
4214	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
4215	err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
4216									  mac_id)];
4217	rx_ring = &ar->dp.rx_refill_buf_ring;
4218
4219	srng = &ab->hal.srng_list[err_ring->ring_id];
4220
4221	spin_lock_bh(&srng->lock);
4222
4223	ath11k_hal_srng_access_begin(ab, srng);
4224
4225	while (quota-- &&
4226	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4227		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
4228
4229		entr_ring = (struct hal_reo_entrance_ring *)desc;
4230		rxdma_err_code =
4231			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4232				  entr_ring->info1);
4233		ab->soc_stats.rxdma_error[rxdma_err_code]++;
4234
4235		link_desc_va = link_desc_banks[desc_bank].vaddr +
4236			       (paddr - link_desc_banks[desc_bank].paddr);
4237		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
4238						 msdu_cookies, &rbm);
4239
4240		for (i = 0; i < num_msdus; i++) {
4241			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4242					   msdu_cookies[i]);
4243
4244			spin_lock_bh(&rx_ring->idr_lock);
4245			skb = idr_find(&rx_ring->bufs_idr, buf_id);
4246			if (!skb) {
4247				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
4248					    buf_id);
4249				spin_unlock_bh(&rx_ring->idr_lock);
4250				continue;
4251			}
4252
4253			idr_remove(&rx_ring->bufs_idr, buf_id);
4254			spin_unlock_bh(&rx_ring->idr_lock);
4255
4256			rxcb = ATH11K_SKB_RXCB(skb);
4257			dma_unmap_single(ab->dev, rxcb->paddr,
4258					 skb->len + skb_tailroom(skb),
4259					 DMA_FROM_DEVICE);
4260			dev_kfree_skb_any(skb);
4261
4262			num_buf_freed++;
4263		}
4264
4265		ath11k_dp_rx_link_desc_return(ab, desc,
4266					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4267	}
4268
4269	ath11k_hal_srng_access_end(ab, srng);
4270
4271	spin_unlock_bh(&srng->lock);
4272
4273	if (num_buf_freed)
4274		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
4275					   ab->hw_params.hal_params->rx_buf_rbm);
4276
4277	return budget - quota;
4278}
4279
4280void ath11k_dp_process_reo_status(struct ath11k_base *ab)
4281{
4282	struct ath11k_dp *dp = &ab->dp;
4283	struct hal_srng *srng;
4284	struct dp_reo_cmd *cmd, *tmp;
4285	bool found = false;
4286	u32 *reo_desc;
4287	u16 tag;
4288	struct hal_reo_status reo_status;
4289
4290	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
4291
4292	memset(&reo_status, 0, sizeof(reo_status));
4293
4294	spin_lock_bh(&srng->lock);
4295
4296	ath11k_hal_srng_access_begin(ab, srng);
4297
4298	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4299		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
4300
4301		switch (tag) {
4302		case HAL_REO_GET_QUEUE_STATS_STATUS:
4303			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
4304							  &reo_status);
4305			break;
4306		case HAL_REO_FLUSH_QUEUE_STATUS:
4307			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
4308							  &reo_status);
4309			break;
4310		case HAL_REO_FLUSH_CACHE_STATUS:
4311			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
4312							  &reo_status);
4313			break;
4314		case HAL_REO_UNBLOCK_CACHE_STATUS:
4315			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
4316							  &reo_status);
4317			break;
4318		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
4319			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
4320								 &reo_status);
4321			break;
4322		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
4323			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
4324								  &reo_status);
4325			break;
4326		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
4327			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
4328								  &reo_status);
4329			break;
4330		default:
4331			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
4332			continue;
4333		}
4334
4335		spin_lock_bh(&dp->reo_cmd_lock);
4336		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
4337			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
4338				found = true;
4339				list_del(&cmd->list);
4340				break;
4341			}
4342		}
4343		spin_unlock_bh(&dp->reo_cmd_lock);
4344
4345		if (found) {
4346			cmd->handler(dp, (void *)&cmd->data,
4347				     reo_status.uniform_hdr.cmd_status);
4348			kfree(cmd);
4349		}
4350
4351		found = false;
4352	}
4353
4354	ath11k_hal_srng_access_end(ab, srng);
4355
4356	spin_unlock_bh(&srng->lock);
4357}
4358
4359void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
4360{
4361	struct ath11k *ar = ab->pdevs[mac_id].ar;
4362
4363	ath11k_dp_rx_pdev_srng_free(ar);
4364	ath11k_dp_rxdma_pdev_buf_free(ar);
4365}
4366
4367int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
4368{
4369	struct ath11k *ar = ab->pdevs[mac_id].ar;
4370	struct ath11k_pdev_dp *dp = &ar->dp;
4371	u32 ring_id;
4372	int i;
4373	int ret;
4374
4375	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
4376	if (ret) {
4377		ath11k_warn(ab, "failed to setup rx srngs\n");
4378		return ret;
4379	}
4380
4381	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
4382	if (ret) {
4383		ath11k_warn(ab, "failed to setup rxdma ring\n");
4384		return ret;
4385	}
4386
4387	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4388	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
4389	if (ret) {
4390		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4391			    ret);
4392		return ret;
4393	}
4394
4395	if (ab->hw_params.rx_mac_buf_ring) {
4396		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4397			ring_id = dp->rx_mac_buf_ring[i].ring_id;
4398			ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4399							  mac_id + i, HAL_RXDMA_BUF);
4400			if (ret) {
4401				ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4402					    i, ret);
4403				return ret;
4404			}
4405		}
4406	}
4407
4408	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4409		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4410		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4411						  mac_id + i, HAL_RXDMA_DST);
4412		if (ret) {
4413			ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
4414				    i, ret);
4415			return ret;
4416		}
4417	}
4418
4419	if (!ab->hw_params.rxdma1_enable)
4420		goto config_refill_ring;
4421
4422	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4423	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4424					  mac_id, HAL_RXDMA_MONITOR_BUF);
4425	if (ret) {
4426		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4427			    ret);
4428		return ret;
4429	}
4430	ret = ath11k_dp_tx_htt_srng_setup(ab,
4431					  dp->rxdma_mon_dst_ring.ring_id,
4432					  mac_id, HAL_RXDMA_MONITOR_DST);
4433	if (ret) {
4434		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4435			    ret);
4436		return ret;
4437	}
4438	ret = ath11k_dp_tx_htt_srng_setup(ab,
4439					  dp->rxdma_mon_desc_ring.ring_id,
4440					  mac_id, HAL_RXDMA_MONITOR_DESC);
4441	if (ret) {
4442		ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n",
4443			    ret);
4444		return ret;
4445	}
4446
4447config_refill_ring:
4448	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4449		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
4450		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
4451						  HAL_RXDMA_MONITOR_STATUS);
4452		if (ret) {
4453			ath11k_warn(ab,
4454				    "failed to configure mon_status_refill_ring%d %d\n",
4455				    i, ret);
4456			return ret;
4457		}
4458	}
4459
4460	return 0;
4461}
4462
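/* Split the remaining monitor MPDU length into per-buffer chunks: each
 * buffer holds at most DP_RX_BUFFER_SIZE minus the hal_rx_desc size, the
 * last chunk carries whatever is left.
 */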
4463static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
4464{
4465	if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
4466		*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
4467		*total_len -= *frag_len;
4468	} else {
4469		*frag_len = *total_len;
4470		*total_len = 0;
4471	}
4472}
4473
4474static
4475int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
4476					  void *p_last_buf_addr_info,
4477					  u8 mac_id)
4478{
4479	struct ath11k_pdev_dp *dp = &ar->dp;
4480	struct dp_srng *dp_srng;
4481	void *hal_srng;
4482	void *src_srng_desc;
4483	int ret = 0;
4484
4485	if (ar->ab->hw_params.rxdma1_enable) {
4486		dp_srng = &dp->rxdma_mon_desc_ring;
4487		hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4488	} else {
4489		dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
4490		hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4491	}
4492
4493	ath11k_hal_srng_access_begin(ar->ab, hal_srng);
4494
4495	src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
4496
4497	if (src_srng_desc) {
4498		struct ath11k_buffer_addr *src_desc =
4499				(struct ath11k_buffer_addr *)src_srng_desc;
4500
4501		*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
4502	} else {
4503		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4504			   "Monitor Link Desc Ring %d Full", mac_id);
4505		ret = -ENOMEM;
4506	}
4507
4508	ath11k_hal_srng_access_end(ar->ab, hal_srng);
4509	return ret;
4510}
4511
4512static
4513void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
4514					 dma_addr_t *paddr, u32 *sw_cookie,
4515					 u8 *rbm,
4516					 void **pp_buf_addr_info)
4517{
4518	struct hal_rx_msdu_link *msdu_link =
4519			(struct hal_rx_msdu_link *)rx_msdu_link_desc;
4520	struct ath11k_buffer_addr *buf_addr_info;
4521
4522	buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
4523
4524	ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
4525
4526	*pp_buf_addr_info = (void *)buf_addr_info;
4527}
4528
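/* Force the skb data length to exactly len bytes: trim when it is longer,
 * expand the tail (reallocating the head if tailroom is insufficient) when
 * it is shorter. The skb is freed and -ENOMEM returned if expansion fails.
 */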
4529static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
4530{
4531	if (skb->len > len) {
4532		skb_trim(skb, len);
4533	} else {
4534		if (skb_tailroom(skb) < len - skb->len) {
4535			if ((pskb_expand_head(skb, 0,
4536					      len - skb->len - skb_tailroom(skb),
4537					      GFP_ATOMIC))) {
4538				dev_kfree_skb_any(skb);
4539				return -ENOMEM;
4540			}
4541		}
4542		skb_put(skb, (len - skb->len));
4543	}
4544	return 0;
4545}
4546
4547static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
4548					void *msdu_link_desc,
4549					struct hal_rx_msdu_list *msdu_list,
4550					u16 *num_msdus)
4551{
4552	struct hal_rx_msdu_details *msdu_details = NULL;
4553	struct rx_msdu_desc *msdu_desc_info = NULL;
4554	struct hal_rx_msdu_link *msdu_link = NULL;
4555	int i;
4556	u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
4557	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
4558	u8 tmp = 0;
4559
4560	msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
4561	msdu_details = &msdu_link->msdu_link[0];
4562
4563	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
4564		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
4565			      msdu_details[i].buf_addr_info.info0) == 0) {
4566			msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
4567			msdu_desc_info->info0 |= last;
4569			break;
4570		}
4571		msdu_desc_info = &msdu_details[i].rx_msdu_info;
4572
4573		if (!i)
4574			msdu_desc_info->info0 |= first;
4575		else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
4576			msdu_desc_info->info0 |= last;
4577		msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
4578		msdu_list->msdu_info[i].msdu_len =
4579			 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
4580		msdu_list->sw_cookie[i] =
4581			FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
4582				  msdu_details[i].buf_addr_info.info1);
4583		tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
4584				msdu_details[i].buf_addr_info.info1);
4585		msdu_list->rbm[i] = tmp;
4586	}
4587	*num_msdus = i;
4588}
4589
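/* Reconcile the PPDU id of a destination ring MSDU with the status ring
 * PPDU id, taking wrap-around into account: when the destination ring has
 * advanced (or lags behind across a wrap), *ppdu_id is updated and the MSDU
 * PPDU id is returned, otherwise 0.
 */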
4590static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
4591					u32 *rx_bufs_used)
4592{
4593	u32 ret = 0;
4594
4595	if ((*ppdu_id < msdu_ppdu_id) &&
4596	    ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
4597		*ppdu_id = msdu_ppdu_id;
4598		ret = msdu_ppdu_id;
4599	} else if ((*ppdu_id > msdu_ppdu_id) &&
4600		((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
4601		/* mon_dst is behind mon_status;
4602		 * skip this dst_ring entry and free it
4603		 */
4604		*rx_bufs_used += 1;
4605		*ppdu_id = msdu_ppdu_id;
4606		ret = msdu_ppdu_id;
4607	}
4608	return ret;
4609}
4610
4611static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
4612				      bool *is_frag, u32 *total_len,
4613				      u32 *frag_len, u32 *msdu_cnt)
4614{
4615	if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
4616		if (!*is_frag) {
4617			*total_len = info->msdu_len;
4618			*is_frag = true;
4619		}
4620		ath11k_dp_mon_set_frag_len(total_len,
4621					   frag_len);
4622	} else {
4623		if (*is_frag) {
4624			ath11k_dp_mon_set_frag_len(total_len,
4625						   frag_len);
4626		} else {
4627			*frag_len = info->msdu_len;
4628		}
4629		*is_frag = false;
4630		*msdu_cnt -= 1;
4631	}
4632}
4633
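/* Pop one MPDU worth of MSDUs from a monitor destination ring entry: walk
 * the chained MSDU link descriptors, unmap and length-adjust each buffer,
 * link them into a head/tail sk_buff chain and return the link descriptors
 * to their idle list. Returns the number of rx buffers consumed.
 */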
4634static u32
4635ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
4636			  void *ring_entry, struct sk_buff **head_msdu,
4637			  struct sk_buff **tail_msdu, u32 *npackets,
4638			  u32 *ppdu_id)
4639{
4640	struct ath11k_pdev_dp *dp = &ar->dp;
4641	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4642	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
4643	struct sk_buff *msdu = NULL, *last = NULL;
4644	struct hal_rx_msdu_list msdu_list;
4645	void *p_buf_addr_info, *p_last_buf_addr_info;
4646	struct hal_rx_desc *rx_desc;
4647	void *rx_msdu_link_desc;
4648	dma_addr_t paddr;
4649	u16 num_msdus = 0;
4650	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
4651	u32 rx_bufs_used = 0, i = 0;
4652	u32 msdu_ppdu_id = 0, msdu_cnt = 0;
4653	u32 total_len = 0, frag_len = 0;
4654	bool is_frag, is_first_msdu;
4655	bool drop_mpdu = false;
4656	struct ath11k_skb_rxcb *rxcb;
4657	struct hal_reo_entrance_ring *ent_desc =
4658			(struct hal_reo_entrance_ring *)ring_entry;
4659	int buf_id;
4660	u32 rx_link_buf_info[2];
4661	u8 rbm;
4662
4663	if (!ar->ab->hw_params.rxdma1_enable)
4664		rx_ring = &dp->rx_refill_buf_ring;
4665
4666	ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
4667					    &sw_cookie,
4668					    &p_last_buf_addr_info, &rbm,
4669					    &msdu_cnt);
4670
4671	if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
4672		      ent_desc->info1) ==
4673		      HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4674		u8 rxdma_err =
4675			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4676				  ent_desc->info1);
4677		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
4678		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
4679		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
4680			drop_mpdu = true;
4681			pmon->rx_mon_stats.dest_mpdu_drop++;
4682		}
4683	}
4684
4685	is_frag = false;
4686	is_first_msdu = true;
4687
4688	do {
4689		if (pmon->mon_last_linkdesc_paddr == paddr) {
4690			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
4691			return rx_bufs_used;
4692		}
4693
4694		if (ar->ab->hw_params.rxdma1_enable)
4695			rx_msdu_link_desc =
4696				(void *)pmon->link_desc_banks[sw_cookie].vaddr +
4697				(paddr - pmon->link_desc_banks[sw_cookie].paddr);
4698		else
4699			rx_msdu_link_desc =
4700				(void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
4701				(paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
4702
4703		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
4704					    &num_msdus);
4705
4706		for (i = 0; i < num_msdus; i++) {
4707			u32 l2_hdr_offset;
4708
4709			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
4710				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4711					   "i %d last_cookie %d is same\n",
4712					   i, pmon->mon_last_buf_cookie);
4713				drop_mpdu = true;
4714				pmon->rx_mon_stats.dup_mon_buf_cnt++;
4715				continue;
4716			}
4717			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4718					   msdu_list.sw_cookie[i]);
4719
4720			spin_lock_bh(&rx_ring->idr_lock);
4721			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4722			spin_unlock_bh(&rx_ring->idr_lock);
4723			if (!msdu) {
4724				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4725					   "msdu_pop: invalid buf_id %d\n", buf_id);
4726				break;
4727			}
4728			rxcb = ATH11K_SKB_RXCB(msdu);
4729			if (!rxcb->unmapped) {
4730				dma_unmap_single(ar->ab->dev, rxcb->paddr,
4731						 msdu->len +
4732						 skb_tailroom(msdu),
4733						 DMA_FROM_DEVICE);
4734				rxcb->unmapped = 1;
4735			}
4736			if (drop_mpdu) {
4737				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4738					   "i %d drop msdu %p *ppdu_id %x\n",
4739					   i, msdu, *ppdu_id);
4740				dev_kfree_skb_any(msdu);
4741				msdu = NULL;
4742				goto next_msdu;
4743			}
4744
4745			rx_desc = (struct hal_rx_desc *)msdu->data;
4746
4747			rx_pkt_offset = sizeof(struct hal_rx_desc);
4748			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
4749
4750			if (is_first_msdu) {
4751				if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
4752					drop_mpdu = true;
4753					dev_kfree_skb_any(msdu);
4754					msdu = NULL;
4755					pmon->mon_last_linkdesc_paddr = paddr;
4756					goto next_msdu;
4757				}
4758
4759				msdu_ppdu_id =
4760					ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
4761
4762				if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
4763								 ppdu_id,
4764								 &rx_bufs_used)) {
4765					if (rx_bufs_used) {
4766						drop_mpdu = true;
4767						dev_kfree_skb_any(msdu);
4768						msdu = NULL;
4769						goto next_msdu;
4770					}
4771					return rx_bufs_used;
4772				}
4773				pmon->mon_last_linkdesc_paddr = paddr;
4774				is_first_msdu = false;
4775			}
4776			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
4777						  &is_frag, &total_len,
4778						  &frag_len, &msdu_cnt);
4779			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
4780
4781			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
4782
4783			if (!(*head_msdu))
4784				*head_msdu = msdu;
4785			else if (last)
4786				last->next = msdu;
4787
4788			last = msdu;
4789next_msdu:
4790			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
4791			rx_bufs_used++;
4792			spin_lock_bh(&rx_ring->idr_lock);
4793			idr_remove(&rx_ring->bufs_idr, buf_id);
4794			spin_unlock_bh(&rx_ring->idr_lock);
4795		}
4796
4797		ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
4798
4799		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
4800						    &sw_cookie, &rbm,
4801						    &p_buf_addr_info);
4802
4803		if (ar->ab->hw_params.rxdma1_enable) {
4804			if (ath11k_dp_rx_monitor_link_desc_return(ar,
4805								  p_last_buf_addr_info,
4806								  dp->mac_id))
4807				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4808					   "dp_rx_monitor_link_desc_return failed");
4809		} else {
4810			ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
4811						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4812		}
4813
4814		p_last_buf_addr_info = p_buf_addr_info;
4815
4816	} while (paddr && msdu_cnt);
4817
4818	if (last)
4819		last->next = NULL;
4820
4821	*tail_msdu = msdu;
4822
4823	if (msdu_cnt == 0)
4824		*npackets = 1;
4825
4826	return rx_bufs_used;
4827}
4828
4829static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
4830{
4831	u32 rx_pkt_offset, l2_hdr_offset;
4832
4833	rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
4834	l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
4835						      (struct hal_rx_desc *)msdu->data);
4836	skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
4837}
4838
4839static struct sk_buff *
4840ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
4841			    u32 mac_id, struct sk_buff *head_msdu,
4842			    struct sk_buff *last_msdu,
4843			    struct ieee80211_rx_status *rxs, bool *fcs_err)
4844{
4845	struct ath11k_base *ab = ar->ab;
4846	struct sk_buff *msdu, *prev_buf;
4847	struct hal_rx_desc *rx_desc;
4848	char *hdr_desc;
4849	u8 *dest, decap_format;
4850	struct ieee80211_hdr_3addr *wh;
4851	struct rx_attention *rx_attention;
4852	u32 err_bitmap;
4853
4854	if (!head_msdu)
4855		goto err_merge_fail;
4856
4857	rx_desc = (struct hal_rx_desc *)head_msdu->data;
4858	rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
4859	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
4860
4861	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
4862		*fcs_err = true;
4863
4864	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
4865		return NULL;
4866
4867	decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);
4868
4869	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
4870
4871	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
4872		ath11k_dp_rx_msdus_set_payload(ar, head_msdu);
4873
4874		prev_buf = head_msdu;
4875		msdu = head_msdu->next;
4876
4877		while (msdu) {
4878			ath11k_dp_rx_msdus_set_payload(ar, msdu);
4879
4880			prev_buf = msdu;
4881			msdu = msdu->next;
4882		}
4883
4884		prev_buf->next = NULL;
4885
4886		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
4887	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
4888		u8 qos_pkt = 0;
4889
4890		rx_desc = (struct hal_rx_desc *)head_msdu->data;
4891		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
4892
4893		/* Treat the header as the base 3-address 802.11 header */
4894		wh = (struct ieee80211_hdr_3addr *)hdr_desc;
4895
4896		if (ieee80211_is_data_qos(wh->frame_control))
4897			qos_pkt = 1;
4898
4899		msdu = head_msdu;
4900
4901		while (msdu) {
4902			ath11k_dp_rx_msdus_set_payload(ar, msdu);
4903			if (qos_pkt) {
4904				dest = skb_push(msdu, sizeof(__le16));
4905				if (!dest)
4906					goto err_merge_fail;
4907				memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
4908			}
4909			prev_buf = msdu;
4910			msdu = msdu->next;
4911		}
4912		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
4913		if (!dest)
4914			goto err_merge_fail;
4915
4916		ath11k_dbg(ab, ATH11K_DBG_DATA,
4917			   "mpdu_buf %p mpdu_buf->len %u",
4918			   prev_buf, prev_buf->len);
4919	} else {
4920		ath11k_dbg(ab, ATH11K_DBG_DATA,
4921			   "decap format %d is not supported!\n",
4922			   decap_format);
4923		goto err_merge_fail;
4924	}
4925
4926	return head_msdu;
4927
4928err_merge_fail:
4929	return NULL;
4930}
4931
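/* Copy the HE radiotap data1..data6 fields, little-endian, into the buffer
 * pushed in front of the monitor frame (struct ieee80211_radiotap_he
 * layout).
 */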
4932static void
4933ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
4934				u8 *rtap_buf)
4935{
4936	u32 rtap_len = 0;
4937
4938	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
4939	rtap_len += 2;
4940
4941	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
4942	rtap_len += 2;
4943
4944	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
4945	rtap_len += 2;
4946
4947	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
4948	rtap_len += 2;
4949
4950	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
4951	rtap_len += 2;
4952
4953	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
4954}
4955
4956static void
4957ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
4958				   u8 *rtap_buf)
4959{
4960	u32 rtap_len = 0;
4961
4962	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
4963	rtap_len += 2;
4964
4965	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
4966	rtap_len += 2;
4967
4968	rtap_buf[rtap_len] = rx_status->he_RU[0];
4969	rtap_len += 1;
4970
4971	rtap_buf[rtap_len] = rx_status->he_RU[1];
4972	rtap_len += 1;
4973
4974	rtap_buf[rtap_len] = rx_status->he_RU[2];
4975	rtap_len += 1;
4976
4977	rtap_buf[rtap_len] = rx_status->he_RU[3];
4978}
4979
4980static void ath11k_update_radiotap(struct ath11k *ar,
4981				   struct hal_rx_mon_ppdu_info *ppduinfo,
4982				   struct sk_buff *mon_skb,
4983				   struct ieee80211_rx_status *rxs)
4984{
4985	struct ieee80211_supported_band *sband;
4986	u8 *ptr = NULL;
4987
4988	rxs->flag |= RX_FLAG_MACTIME_START;
4989	rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
4990
4991	if (ppduinfo->nss)
4992		rxs->nss = ppduinfo->nss;
4993
4994	if (ppduinfo->he_mu_flags) {
4995		rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
4996		rxs->encoding = RX_ENC_HE;
4997		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
4998		ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
4999	} else if (ppduinfo->he_flags) {
5000		rxs->flag |= RX_FLAG_RADIOTAP_HE;
5001		rxs->encoding = RX_ENC_HE;
5002		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
5003		ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
5004		rxs->rate_idx = ppduinfo->rate;
5005	} else if (ppduinfo->vht_flags) {
5006		rxs->encoding = RX_ENC_VHT;
5007		rxs->rate_idx = ppduinfo->rate;
5008	} else if (ppduinfo->ht_flags) {
5009		rxs->encoding = RX_ENC_HT;
5010		rxs->rate_idx = ppduinfo->rate;
5011	} else {
5012		rxs->encoding = RX_ENC_LEGACY;
5013		sband = &ar->mac.sbands[rxs->band];
5014		rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
5015							  ppduinfo->cck_flag);
5016	}
5017
5018	rxs->mactime = ppduinfo->tsft;
5019}
5020
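/* Deliver one monitor PPDU to mac80211: merge the MSDU chain according to
 * its decap format, fill in the rx status and radiotap data from the parsed
 * PPDU info and pass each buffer up with RX_FLAG_ONLY_MONITOR, marking all
 * but the last with RX_FLAG_AMSDU_MORE. On merge failure the whole chain is
 * freed.
 */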
5021static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
5022				    struct sk_buff *head_msdu,
5023				    struct hal_rx_mon_ppdu_info *ppduinfo,
5024				    struct sk_buff *tail_msdu,
5025				    struct napi_struct *napi)
5026{
5027	struct ath11k_pdev_dp *dp = &ar->dp;
5028	struct sk_buff *mon_skb, *skb_next, *header;
5029	struct ieee80211_rx_status *rxs = &dp->rx_status;
5030	bool fcs_err = false;
5031
5032	mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
5033					      tail_msdu, rxs, &fcs_err);
5034
5035	if (!mon_skb)
5036		goto mon_deliver_fail;
5037
5038	header = mon_skb;
5039
5040	rxs->flag = 0;
5041
5042	if (fcs_err)
5043		rxs->flag = RX_FLAG_FAILED_FCS_CRC;
5044
5045	do {
5046		skb_next = mon_skb->next;
5047		if (!skb_next)
5048			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
5049		else
5050			rxs->flag |= RX_FLAG_AMSDU_MORE;
5051
5052		if (mon_skb == header) {
5053			header = NULL;
5054			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
5055		} else {
5056			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
5057		}
5058		rxs->flag |= RX_FLAG_ONLY_MONITOR;
5059		ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
5060
5061		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
5062		mon_skb = skb_next;
5063	} while (mon_skb);
5064	rxs->flag = 0;
5065
5066	return 0;
5067
5068mon_deliver_fail:
5069	mon_skb = head_msdu;
5070	while (mon_skb) {
5071		skb_next = mon_skb->next;
5072		dev_kfree_skb_any(mon_skb);
5073		mon_skb = skb_next;
5074	}
5075	return -EINVAL;
5076}
5077
5078/* Destination ring processing is considered stuck if the destination ring
5079 * does not move while the status ring advances by 16 PPDUs. As a
5080 * workaround, destination ring processing skips the stuck PPDU.
5081 */
5082#define MON_DEST_RING_STUCK_MAX_CNT 16
5083
5084static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
5085					  u32 quota, struct napi_struct *napi)
5086{
5087	struct ath11k_pdev_dp *dp = &ar->dp;
5088	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5089	const struct ath11k_hw_hal_params *hal_params;
5090	void *ring_entry;
5091	void *mon_dst_srng;
5092	u32 ppdu_id;
5093	u32 rx_bufs_used;
5094	u32 ring_id;
5095	struct ath11k_pdev_mon_stats *rx_mon_stats;
5096	u32	 npackets = 0;
5097	u32 mpdu_rx_bufs_used;
5098
5099	if (ar->ab->hw_params.rxdma1_enable)
5100		ring_id = dp->rxdma_mon_dst_ring.ring_id;
5101	else
5102		ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
5103
5104	mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
5105
5106	if (!mon_dst_srng) {
5107		ath11k_warn(ar->ab,
5108			    "HAL Monitor Destination Ring Init Failed -- %p",
5109			    mon_dst_srng);
5110		return;
5111	}
5112
5113	spin_lock_bh(&pmon->mon_lock);
5114
5115	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5116
5117	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
5118	rx_bufs_used = 0;
5119	rx_mon_stats = &pmon->rx_mon_stats;
5120
5121	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5122		struct sk_buff *head_msdu, *tail_msdu;
5123
5124		head_msdu = NULL;
5125		tail_msdu = NULL;
5126
5127		mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
5128							      &head_msdu,
5129							      &tail_msdu,
5130							      &npackets, &ppdu_id);
5131
5132		rx_bufs_used += mpdu_rx_bufs_used;
5133
5134		if (mpdu_rx_bufs_used) {
5135			dp->mon_dest_ring_stuck_cnt = 0;
5136		} else {
5137			dp->mon_dest_ring_stuck_cnt++;
5138			rx_mon_stats->dest_mon_not_reaped++;
5139		}
5140
5141		if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
5142			rx_mon_stats->dest_mon_stuck++;
5143			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5144				   "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
5145				   pmon->mon_ppdu_info.ppdu_id, ppdu_id,
5146				   dp->mon_dest_ring_stuck_cnt,
5147				   rx_mon_stats->dest_mon_not_reaped,
5148				   rx_mon_stats->dest_mon_stuck);
5149			pmon->mon_ppdu_info.ppdu_id = ppdu_id;
5150			continue;
5151		}
5152
5153		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
5154			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5155			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5156				   "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
5157				   ppdu_id, pmon->mon_ppdu_info.ppdu_id,
5158				   rx_mon_stats->dest_mon_not_reaped,
5159				   rx_mon_stats->dest_mon_stuck);
5160			break;
5161		}
5162		if (head_msdu && tail_msdu) {
5163			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
5164						 &pmon->mon_ppdu_info,
5165						 tail_msdu, napi);
5166			rx_mon_stats->dest_mpdu_done++;
5167		}
5168
5169		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5170								mon_dst_srng);
5171	}
5172	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5173
5174	spin_unlock_bh(&pmon->mon_lock);
5175
5176	if (rx_bufs_used) {
5177		rx_mon_stats->dest_ppdu_done++;
5178		hal_params = ar->ab->hw_params.hal_params;
5179
5180		if (ar->ab->hw_params.rxdma1_enable)
5181			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5182						   &dp->rxdma_mon_buf_ring,
5183						   rx_bufs_used,
5184						   hal_params->rx_buf_rbm);
5185		else
5186			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5187						   &dp->rx_refill_buf_ring,
5188						   rx_bufs_used,
5189						   hal_params->rx_buf_rbm);
5190	}
5191}
5192
5193int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
5194				    struct napi_struct *napi, int budget)
5195{
5196	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5197	enum hal_rx_mon_status hal_status;
5198	struct sk_buff *skb;
5199	struct sk_buff_head skb_list;
5200	struct ath11k_peer *peer;
5201	struct ath11k_sta *arsta;
5202	int num_buffs_reaped = 0;
5203	u32 rx_buf_sz;
5204	u16 log_type;
5205	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
5206	struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
5207	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
5208
5209	__skb_queue_head_init(&skb_list);
5210
5211	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
5212							     &skb_list);
5213	if (!num_buffs_reaped)
5214		goto exit;
5215
5216	memset(ppdu_info, 0, sizeof(*ppdu_info));
5217	ppdu_info->peer_id = HAL_INVALID_PEERID;
5218
5219	while ((skb = __skb_dequeue(&skb_list))) {
5220		if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
5221			log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
5222			rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
5223		} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
5224			log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
5225			rx_buf_sz = DP_RX_BUFFER_SIZE;
5226		} else {
5227			log_type = ATH11K_PKTLOG_TYPE_INVALID;
5228			rx_buf_sz = 0;
5229		}
5230
5231		if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
5232			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5233
5234		memset(ppdu_info, 0, sizeof(*ppdu_info));
5235		ppdu_info->peer_id = HAL_INVALID_PEERID;
5236		hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
5237
5238		if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5239		    pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
5240		    hal_status == HAL_TLV_STATUS_PPDU_DONE) {
5241			rx_mon_stats->status_ppdu_done++;
5242			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
5243			ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
5244			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5245		}
5246
5247		if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
5248		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
5249			dev_kfree_skb_any(skb);
5250			continue;
5251		}
5252
5253		rcu_read_lock();
5254		spin_lock_bh(&ab->base_lock);
5255		peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
5256
5257		if (!peer || !peer->sta) {
5258			ath11k_dbg(ab, ATH11K_DBG_DATA,
5259				   "failed to find the peer with peer_id %d\n",
5260				   ppdu_info->peer_id);
5261			goto next_skb;
5262		}
5263
5264		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
5265		ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
5266
5267		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
5268			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5269
5270next_skb:
5271		spin_unlock_bh(&ab->base_lock);
5272		rcu_read_unlock();
5273
5274		dev_kfree_skb_any(skb);
5275		memset(ppdu_info, 0, sizeof(*ppdu_info));
5276		ppdu_info->peer_id = HAL_INVALID_PEERID;
5277	}
5278exit:
5279	return num_buffs_reaped;
5280}
5281
5282static u32
5283ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
5284			       void *ring_entry, struct sk_buff **head_msdu,
5285			       struct sk_buff **tail_msdu,
5286			       struct hal_sw_mon_ring_entries *sw_mon_entries)
5287{
5288	struct ath11k_pdev_dp *dp = &ar->dp;
5289	struct ath11k_mon_data *pmon = &dp->mon_data;
5290	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
5291	struct sk_buff *msdu = NULL, *last = NULL;
5292	struct hal_sw_monitor_ring *sw_desc = ring_entry;
5293	struct hal_rx_msdu_list msdu_list;
5294	struct hal_rx_desc *rx_desc;
5295	struct ath11k_skb_rxcb *rxcb;
5296	void *rx_msdu_link_desc;
5297	void *p_buf_addr_info, *p_last_buf_addr_info;
5298	int buf_id, i = 0;
5299	u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
5300	u32 rx_bufs_used = 0, msdu_cnt = 0;
5301	u32 total_len = 0, frag_len = 0, sw_cookie;
5302	u16 num_msdus = 0;
5303	u8 rxdma_err, rbm;
5304	bool is_frag, is_first_msdu;
5305	bool drop_mpdu = false;
5306
5307	ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
5308
5309	sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
5310	sw_mon_entries->end_of_ppdu = false;
5311	sw_mon_entries->drop_ppdu = false;
5312	p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
5313	msdu_cnt = sw_mon_entries->msdu_cnt;
5314
	sw_mon_entries->end_of_ppdu =
		FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
	if (sw_mon_entries->end_of_ppdu)
		return rx_bufs_used;

	if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
		      sw_desc->info0) ==
		      HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
		rxdma_err =
			FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
				  sw_desc->info0);
		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
			pmon->rx_mon_stats.dest_mpdu_drop++;
			drop_mpdu = true;
		}
	}

	is_frag = false;
	is_first_msdu = true;

	do {
		rx_msdu_link_desc =
			(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
			(sw_mon_entries->mon_dst_paddr -
			 pmon->link_desc_banks[sw_cookie].paddr);

		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
					    &num_msdus);

		for (i = 0; i < num_msdus; i++) {
			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
					   msdu_list.sw_cookie[i]);

			spin_lock_bh(&rx_ring->idr_lock);
			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
			if (!msdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "full mon msdu_pop: invalid buf_id %d\n",
					   buf_id);
				spin_unlock_bh(&rx_ring->idr_lock);
				break;
			}
			idr_remove(&rx_ring->bufs_idr, buf_id);
			spin_unlock_bh(&rx_ring->idr_lock);

			rxcb = ATH11K_SKB_RXCB(msdu);
			if (!rxcb->unmapped) {
				dma_unmap_single(ar->ab->dev, rxcb->paddr,
						 msdu->len +
						 skb_tailroom(msdu),
						 DMA_FROM_DEVICE);
				rxcb->unmapped = 1;
			}
			if (drop_mpdu) {
				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
					   "full mon: i %d drop msdu %p *ppdu_id %x\n",
					   i, msdu, sw_mon_entries->ppdu_id);
				dev_kfree_skb_any(msdu);
				msdu_cnt--;
				goto next_msdu;
			}

			rx_desc = (struct hal_rx_desc *)msdu->data;

			rx_pkt_offset = sizeof(struct hal_rx_desc);
			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);

			if (is_first_msdu) {
				if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
					drop_mpdu = true;
					dev_kfree_skb_any(msdu);
					msdu = NULL;
					goto next_msdu;
				}
				is_first_msdu = false;
			}

			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
						  &is_frag, &total_len,
						  &frag_len, &msdu_cnt);

			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;

			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);

			if (!(*head_msdu))
				*head_msdu = msdu;
			else if (last)
				last->next = msdu;

			last = msdu;
next_msdu:
			rx_bufs_used++;
		}

		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
						    &sw_mon_entries->mon_dst_paddr,
						    &sw_mon_entries->mon_dst_sw_cookie,
						    &rbm,
						    &p_buf_addr_info);

		if (ath11k_dp_rx_monitor_link_desc_return(ar,
							  p_last_buf_addr_info,
							  dp->mac_id))
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
				   "full mon: dp_rx_monitor_link_desc_return failed\n");

		p_last_buf_addr_info = p_buf_addr_info;

	} while (sw_mon_entries->mon_dst_paddr && msdu_cnt);

	if (last)
		last->next = NULL;

	*tail_msdu = msdu;

	return rx_bufs_used;
}

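/* Wrap a reaped head..tail MSDU chain in a dp_full_mon_mpdu entry and
 * queue it on dp_full_mon_mpdu_list until the end-of-PPDU status tells
 * us whether to deliver or drop the PPDU.
 */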
static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
					      struct dp_full_mon_mpdu *mon_mpdu,
					      struct sk_buff *head,
					      struct sk_buff *tail)
{
	mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
	if (!mon_mpdu)
		return -ENOMEM;

	list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
	mon_mpdu->head = head;
	mon_mpdu->tail = tail;

	return 0;
}

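/* Release every MPDU queued for the current PPDU, freeing the MSDU skb
 * chains along with their dp_full_mon_mpdu wrappers.
 */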
static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
					    struct dp_full_mon_mpdu *mon_mpdu)
{
	struct dp_full_mon_mpdu *tmp;
	struct sk_buff *tmp_msdu, *skb_next;

	if (list_empty(&dp->dp_full_mon_mpdu_list))
		return;

	list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
		list_del(&mon_mpdu->list);

		tmp_msdu = mon_mpdu->head;
		while (tmp_msdu) {
			skb_next = tmp_msdu->next;
			dev_kfree_skb_any(tmp_msdu);
			tmp_msdu = skb_next;
		}

		kfree(mon_mpdu);
	}
}

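/* Hand each queued MPDU (head..tail skb chain) of the completed PPDU to
 * the monitor interface via ath11k_dp_rx_mon_deliver() and free the
 * dp_full_mon_mpdu wrappers.
 */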
static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
					      int mac_id,
					      struct ath11k_mon_data *pmon,
					      struct napi_struct *napi)
{
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	struct dp_full_mon_mpdu *tmp;
	struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
	struct sk_buff *head_msdu, *tail_msdu;
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	int ret = 0;

	rx_mon_stats = &pmon->rx_mon_stats;

	list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
		list_del(&mon_mpdu->list);
		head_msdu = mon_mpdu->head;
		tail_msdu = mon_mpdu->tail;
		if (head_msdu && tail_msdu) {
			ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
						       &pmon->mon_ppdu_info,
						       tail_msdu, napi);
			rx_mon_stats->dest_mpdu_done++;
			ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
		}
		kfree(mon_mpdu);
	}

	return ret;
}

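/* Reap the monitor status ring while the destination ring is held, one
 * status buffer at a time, until the buffer matching the held PPDU is
 * found (DP_MON_STATUS_MATCH) and the PPDU can be delivered, or until
 * the PPDU has to be dropped.
 */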
static int
ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
					  struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_sw_mon_ring_entries *sw_mon_entries;
	int quota = 0, work = 0, count;

	sw_mon_entries = &pmon->sw_mon_entries;

	while (pmon->hold_mon_dst_ring) {
		quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
							napi, 1);
		if (pmon->buf_state == DP_MON_STATUS_MATCH) {
			count = sw_mon_entries->status_buf_count;
			if (count > 1) {
				quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
									 napi, count);
			}

			ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
							   pmon, napi);
			pmon->hold_mon_dst_ring = false;
		} else if (!pmon->mon_status_paddr ||
			   pmon->buf_state == DP_MON_STATUS_LEAD) {
			sw_mon_entries->drop_ppdu = true;
			pmon->hold_mon_dst_ring = false;
		}

		if (!quota)
			break;

		work += quota;
	}

	if (sw_mon_entries->drop_ppdu)
		ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);

	return work;
}

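/* Full monitor mode NAPI handler: reap the RXDMA monitor destination
 * ring, queue the reaped MSDU chains per MPDU, and once an end-of-PPDU
 * entry is seen hold the destination ring and switch to reaping the
 * status ring for the matching PPDU.
 */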
static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
					 struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_sw_mon_ring_entries *sw_mon_entries;
	struct ath11k_pdev_mon_stats *rx_mon_stats;
	struct sk_buff *head_msdu, *tail_msdu;
	void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
	void *ring_entry;
	u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
	int quota = 0, ret;
	bool break_dst_ring = false;

	spin_lock_bh(&pmon->mon_lock);

	sw_mon_entries = &pmon->sw_mon_entries;
	rx_mon_stats = &pmon->rx_mon_stats;

	if (pmon->hold_mon_dst_ring) {
		spin_unlock_bh(&pmon->mon_lock);
		goto reap_status_ring;
	}

	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
		head_msdu = NULL;
		tail_msdu = NULL;

		mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
								   &head_msdu,
								   &tail_msdu,
								   sw_mon_entries);
		rx_bufs_used += mpdu_rx_bufs_used;

		if (!sw_mon_entries->end_of_ppdu) {
			if (head_msdu) {
				ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
									 pmon->mon_mpdu,
									 head_msdu,
									 tail_msdu);
				if (ret)
					break_dst_ring = true;
			}

			goto next_entry;
		} else {
			if (!sw_mon_entries->ppdu_id &&
			    !sw_mon_entries->mon_status_paddr) {
				break_dst_ring = true;
				goto next_entry;
			}
		}

		rx_mon_stats->dest_ppdu_done++;
		pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
		pmon->buf_state = DP_MON_STATUS_LAG;
		pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
		pmon->hold_mon_dst_ring = true;
next_entry:
		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
								mon_dst_srng);
		if (break_dst_ring)
			break;
	}

	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
	spin_unlock_bh(&pmon->mon_lock);

	if (rx_bufs_used) {
		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
					   &dp->rxdma_mon_buf_ring,
					   rx_bufs_used,
					   HAL_RX_BUF_RBM_SW3_BM);
	}

reap_status_ring:
	quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
							  napi, budget);

	return quota;
}

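/* Monitor ring NAPI entry point: use the full monitor handler when the
 * hardware supports it and monitor mode has been started, otherwise
 * fall back to status ring processing only.
 */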
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
				   struct napi_struct *napi, int budget)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
	int ret = 0;

	if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
	    ab->hw_params.full_monitor_mode)
		ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
	else
		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);

	return ret;
}

static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

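/* Set up per-pdev monitor mode state; the monitor link descriptor pool
 * is only needed when a dedicated RXDMA1 engine is present.
 */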
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;
	struct hal_srng *mon_desc_srng = NULL;
	struct dp_srng *dp_srng;
	int ret = 0;
	u32 n_link_desc = 0;

	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed\n");
		return ret;
	}

	/* if rxdma1_enable is false, no need to setup
	 * rxdma_mon_desc_ring.
	 */
	if (!ar->ab->hw_params.rxdma1_enable)
		return 0;

	dp_srng = &dp->rxdma_mon_desc_ring;
	n_link_desc = dp_srng->size /
		ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
	mon_desc_srng =
		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
					n_link_desc);
	if (ret) {
		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed\n");
		return ret;
	}
	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);

	return 0;
}

static int ath11k_dp_mon_link_free(struct ath11k *ar)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_mon_data *pmon = &dp->mon_data;

	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
				    HAL_RXDMA_MONITOR_DESC,
				    &dp->rxdma_mon_desc_ring);
	return 0;
}

int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
{
	ath11k_dp_mon_link_free(ar);
	return 0;
}

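/* The pktlog path reuses the monitor reap timer: starting pktlog arms
 * the timer, while stopping it optionally deletes the timer and then
 * drains the monitor rings.
 */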
int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
{
	/* start reap timer */
	mod_timer(&ab->mon_reap_timer,
		  jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));

	return 0;
}

int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
{
	int ret;

	if (stop_timer)
		del_timer_sync(&ab->mon_reap_timer);

	/* reap all the monitor related rings */
	ret = ath11k_dp_purge_mon_ring(ab);
	if (ret) {
		ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
		return ret;
	}

	return 0;
}