1// SPDX-License-Identifier: BSD-3-Clause-Clear 2/* 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 */ 5 6#include <linux/ieee80211.h> 7#include <linux/kernel.h> 8#include <linux/skbuff.h> 9#include <crypto/hash.h> 10#include "core.h" 11#include "debug.h" 12#include "debugfs_htt_stats.h" 13#include "debugfs_sta.h" 14#include "hal_desc.h" 15#include "hw.h" 16#include "dp_rx.h" 17#include "hal_rx.h" 18#include "dp_tx.h" 19#include "peer.h" 20 21#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) 22 23static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc) 24{ 25 return desc->hdr_status; 26} 27 28static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc) 29{ 30 if (!(__le32_to_cpu(desc->mpdu_start.info1) & 31 RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID)) 32 return HAL_ENCRYPT_TYPE_OPEN; 33 34 return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE, 35 __le32_to_cpu(desc->mpdu_start.info2)); 36} 37 38static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc) 39{ 40 return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, 41 __le32_to_cpu(desc->msdu_start.info2)); 42} 43 44static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct hal_rx_desc *desc) 45{ 46 return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT, 47 __le32_to_cpu(desc->msdu_start.info2)); 48} 49 50static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc) 51{ 52 return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID, 53 __le32_to_cpu(desc->mpdu_start.info1)); 54} 55 56static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc) 57{ 58 return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID, 59 __le32_to_cpu(desc->mpdu_start.info1)); 60} 61 62static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb) 63{ 64 struct ieee80211_hdr *hdr; 65 66 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 67 return ieee80211_has_morefrags(hdr->frame_control); 68} 69 70static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb) 71{ 72 struct ieee80211_hdr *hdr; 73 74 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 75 return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 76} 77 78static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc) 79{ 80 return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM, 81 __le32_to_cpu(desc->mpdu_start.info1)); 82} 83 84static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc) 85{ 86 return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, 87 __le32_to_cpu(desc->attention.info2)); 88} 89 90static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc) 91{ 92 return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, 93 __le32_to_cpu(desc->attention.info1)); 94} 95 96static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc) 97{ 98 return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, 99 __le32_to_cpu(desc->attention.info1)); 100} 101 102static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc) 103{ 104 return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, 105 __le32_to_cpu(desc->attention.info2)) == 106 RX_DESC_DECRYPT_STATUS_CODE_OK); 107} 108 109static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc) 110{ 111 u32 info = __le32_to_cpu(desc->attention.info1); 112 u32 errmap = 0; 113 114 if (info & RX_ATTENTION_INFO1_FCS_ERR) 115 errmap |= DP_RX_MPDU_ERR_FCS; 116 117 if (info & RX_ATTENTION_INFO1_DECRYPT_ERR) 118 errmap |= DP_RX_MPDU_ERR_DECRYPT; 119 120 if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR) 121 errmap |= 
DP_RX_MPDU_ERR_TKIP_MIC; 122 123 if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR) 124 errmap |= DP_RX_MPDU_ERR_AMSDU_ERR; 125 126 if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR) 127 errmap |= DP_RX_MPDU_ERR_OVERFLOW; 128 129 if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR) 130 errmap |= DP_RX_MPDU_ERR_MSDU_LEN; 131 132 if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR) 133 errmap |= DP_RX_MPDU_ERR_MPDU_LEN; 134 135 return errmap; 136} 137 138static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc) 139{ 140 return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, 141 __le32_to_cpu(desc->msdu_start.info1)); 142} 143 144static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc) 145{ 146 return FIELD_GET(RX_MSDU_START_INFO3_SGI, 147 __le32_to_cpu(desc->msdu_start.info3)); 148} 149 150static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc) 151{ 152 return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS, 153 __le32_to_cpu(desc->msdu_start.info3)); 154} 155 156static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc) 157{ 158 return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, 159 __le32_to_cpu(desc->msdu_start.info3)); 160} 161 162static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc) 163{ 164 return __le32_to_cpu(desc->msdu_start.phy_meta_data); 165} 166 167static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc) 168{ 169 return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, 170 __le32_to_cpu(desc->msdu_start.info3)); 171} 172 173static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc) 174{ 175 u8 mimo_ss_bitmap = FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP, 176 __le32_to_cpu(desc->msdu_start.info3)); 177 178 return hweight8(mimo_ss_bitmap); 179} 180 181static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc) 182{ 183 return FIELD_GET(RX_MPDU_START_INFO2_TID, 184 __le32_to_cpu(desc->mpdu_start.info2)); 185} 186 187static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc) 188{ 189 return __le16_to_cpu(desc->mpdu_start.sw_peer_id); 190} 191 192static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc) 193{ 194 return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING, 195 __le32_to_cpu(desc->msdu_end.info2)); 196} 197 198static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc) 199{ 200 return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU, 201 __le32_to_cpu(desc->msdu_end.info2)); 202} 203 204static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc) 205{ 206 return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU, 207 __le32_to_cpu(desc->msdu_end.info2)); 208} 209 210static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc, 211 struct hal_rx_desc *ldesc) 212{ 213 memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end, 214 sizeof(struct rx_msdu_end)); 215 memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention, 216 sizeof(struct rx_attention)); 217 memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end, 218 sizeof(struct rx_mpdu_end)); 219} 220 221static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc) 222{ 223 struct rx_attention *rx_attn; 224 225 rx_attn = &rx_desc->attention; 226 227 return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, 228 __le32_to_cpu(rx_attn->info1)); 229} 230 231static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc) 232{ 233 struct rx_msdu_start *rx_msdu_start; 234 235 rx_msdu_start = &rx_desc->msdu_start; 236 237 return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT, 238 __le32_to_cpu(rx_msdu_start->info2)); 239} 240 241static u8 
*ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc) 242{ 243 u8 *rx_pkt_hdr; 244 245 rx_pkt_hdr = &rx_desc->msdu_payload[0]; 246 247 return rx_pkt_hdr; 248} 249 250static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc) 251{ 252 u32 tlv_tag; 253 254 tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, 255 __le32_to_cpu(rx_desc->mpdu_start_tag)); 256 257 return tlv_tag == HAL_RX_MPDU_START; 258} 259 260static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc) 261{ 262 return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id); 263} 264 265static void ath11k_dp_service_mon_ring(struct timer_list *t) 266{ 267 struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer); 268 int i; 269 270 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) 271 ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET); 272 273 mod_timer(&ab->mon_reap_timer, jiffies + 274 msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); 275} 276 277/* Returns number of Rx buffers replenished */ 278int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, 279 struct dp_rxdma_ring *rx_ring, 280 int req_entries, 281 enum hal_rx_buf_return_buf_manager mgr) 282{ 283 struct hal_srng *srng; 284 u32 *desc; 285 struct sk_buff *skb; 286 int num_free; 287 int num_remain; 288 int buf_id; 289 u32 cookie; 290 dma_addr_t paddr; 291 292 req_entries = min(req_entries, rx_ring->bufs_max); 293 294 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 295 296 spin_lock_bh(&srng->lock); 297 298 ath11k_hal_srng_access_begin(ab, srng); 299 300 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 301 if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) 302 req_entries = num_free; 303 304 req_entries = min(num_free, req_entries); 305 num_remain = req_entries; 306 307 while (num_remain > 0) { 308 skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + 309 DP_RX_BUFFER_ALIGN_SIZE); 310 if (!skb) 311 break; 312 313 if (!IS_ALIGNED((unsigned long)skb->data, 314 DP_RX_BUFFER_ALIGN_SIZE)) { 315 skb_pull(skb, 316 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 317 skb->data); 318 } 319 320 paddr = dma_map_single(ab->dev, skb->data, 321 skb->len + skb_tailroom(skb), 322 DMA_FROM_DEVICE); 323 if (dma_mapping_error(ab->dev, paddr)) 324 goto fail_free_skb; 325 326 spin_lock_bh(&rx_ring->idr_lock); 327 buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1, 328 (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC); 329 spin_unlock_bh(&rx_ring->idr_lock); 330 if (buf_id <= 0) 331 goto fail_dma_unmap; 332 333 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 334 if (!desc) 335 goto fail_idr_remove; 336 337 ATH11K_SKB_RXCB(skb)->paddr = paddr; 338 339 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 340 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 341 342 num_remain--; 343 344 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 345 } 346 347 ath11k_hal_srng_access_end(ab, srng); 348 349 spin_unlock_bh(&srng->lock); 350 351 return req_entries - num_remain; 352 353fail_idr_remove: 354 spin_lock_bh(&rx_ring->idr_lock); 355 idr_remove(&rx_ring->bufs_idr, buf_id); 356 spin_unlock_bh(&rx_ring->idr_lock); 357fail_dma_unmap: 358 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 359 DMA_FROM_DEVICE); 360fail_free_skb: 361 dev_kfree_skb_any(skb); 362 363 ath11k_hal_srng_access_end(ab, srng); 364 365 spin_unlock_bh(&srng->lock); 366 367 return req_entries - num_remain; 368} 369 370static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, 371 struct dp_rxdma_ring *rx_ring) 372{ 373 struct ath11k_pdev_dp *dp = &ar->dp; 374 struct 
sk_buff *skb; 375 int buf_id; 376 377 spin_lock_bh(&rx_ring->idr_lock); 378 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 379 idr_remove(&rx_ring->bufs_idr, buf_id); 380 /* TODO: Understand where internal driver does this dma_unmap of 381 * of rxdma_buffer. 382 */ 383 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 384 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); 385 dev_kfree_skb_any(skb); 386 } 387 388 idr_destroy(&rx_ring->bufs_idr); 389 spin_unlock_bh(&rx_ring->idr_lock); 390 391 /* if rxdma1_enable is false, mon_status_refill_ring 392 * isn't setup, so don't clean. 393 */ 394 if (!ar->ab->hw_params.rxdma1_enable) 395 return 0; 396 397 rx_ring = &dp->rx_mon_status_refill_ring[0]; 398 399 spin_lock_bh(&rx_ring->idr_lock); 400 idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { 401 idr_remove(&rx_ring->bufs_idr, buf_id); 402 /* XXX: Understand where internal driver does this dma_unmap of 403 * of rxdma_buffer. 404 */ 405 dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, 406 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL); 407 dev_kfree_skb_any(skb); 408 } 409 410 idr_destroy(&rx_ring->bufs_idr); 411 spin_unlock_bh(&rx_ring->idr_lock); 412 413 return 0; 414} 415 416static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar) 417{ 418 struct ath11k_pdev_dp *dp = &ar->dp; 419 struct ath11k_base *ab = ar->ab; 420 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 421 int i; 422 423 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 424 425 rx_ring = &dp->rxdma_mon_buf_ring; 426 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 427 428 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 429 rx_ring = &dp->rx_mon_status_refill_ring[i]; 430 ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); 431 } 432 433 return 0; 434} 435 436static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, 437 struct dp_rxdma_ring *rx_ring, 438 u32 ringtype) 439{ 440 struct ath11k_pdev_dp *dp = &ar->dp; 441 int num_entries; 442 443 num_entries = rx_ring->refill_buf_ring.size / 444 ath11k_hal_srng_get_entrysize(ar->ab, ringtype); 445 446 rx_ring->bufs_max = num_entries; 447 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, 448 HAL_RX_BUF_RBM_SW3_BM); 449 return 0; 450} 451 452static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar) 453{ 454 struct ath11k_pdev_dp *dp = &ar->dp; 455 struct ath11k_base *ab = ar->ab; 456 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 457 int i; 458 459 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF); 460 461 if (ar->ab->hw_params.rxdma1_enable) { 462 rx_ring = &dp->rxdma_mon_buf_ring; 463 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF); 464 } 465 466 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 467 rx_ring = &dp->rx_mon_status_refill_ring[i]; 468 ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS); 469 } 470 471 return 0; 472} 473 474static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar) 475{ 476 struct ath11k_pdev_dp *dp = &ar->dp; 477 struct ath11k_base *ab = ar->ab; 478 int i; 479 480 ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); 481 482 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 483 if (ab->hw_params.rx_mac_buf_ring) 484 ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); 485 486 ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); 487 ath11k_dp_srng_cleanup(ab, 488 &dp->rx_mon_status_refill_ring[i].refill_buf_ring); 489 } 490 491 ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); 492} 493 494void 
ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab) 495{ 496 struct ath11k_dp *dp = &ab->dp; 497 int i; 498 499 for (i = 0; i < DP_REO_DST_RING_MAX; i++) 500 ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]); 501} 502 503int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab) 504{ 505 struct ath11k_dp *dp = &ab->dp; 506 int ret; 507 int i; 508 509 for (i = 0; i < DP_REO_DST_RING_MAX; i++) { 510 ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i], 511 HAL_REO_DST, i, 0, 512 DP_REO_DST_RING_SIZE); 513 if (ret) { 514 ath11k_warn(ab, "failed to setup reo_dst_ring\n"); 515 goto err_reo_cleanup; 516 } 517 } 518 519 return 0; 520 521err_reo_cleanup: 522 ath11k_dp_pdev_reo_cleanup(ab); 523 524 return ret; 525} 526 527static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) 528{ 529 struct ath11k_pdev_dp *dp = &ar->dp; 530 struct ath11k_base *ab = ar->ab; 531 struct dp_srng *srng = NULL; 532 int i; 533 int ret; 534 535 ret = ath11k_dp_srng_setup(ar->ab, 536 &dp->rx_refill_buf_ring.refill_buf_ring, 537 HAL_RXDMA_BUF, 0, 538 dp->mac_id, DP_RXDMA_BUF_RING_SIZE); 539 if (ret) { 540 ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n"); 541 return ret; 542 } 543 544 if (ar->ab->hw_params.rx_mac_buf_ring) { 545 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 546 ret = ath11k_dp_srng_setup(ar->ab, 547 &dp->rx_mac_buf_ring[i], 548 HAL_RXDMA_BUF, 1, 549 dp->mac_id + i, 1024); 550 if (ret) { 551 ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n", 552 i); 553 return ret; 554 } 555 } 556 } 557 558 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 559 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i], 560 HAL_RXDMA_DST, 0, dp->mac_id + i, 561 DP_RXDMA_ERR_DST_RING_SIZE); 562 if (ret) { 563 ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i); 564 return ret; 565 } 566 } 567 568 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 569 srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; 570 ret = ath11k_dp_srng_setup(ar->ab, 571 srng, 572 HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i, 573 DP_RXDMA_MON_STATUS_RING_SIZE); 574 if (ret) { 575 ath11k_warn(ar->ab, 576 "failed to setup rx_mon_status_refill_ring %d\n", i); 577 return ret; 578 } 579 } 580 581 /* if rxdma1_enable is false, then it doesn't need 582 * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring 583 * and rxdma_mon_desc_ring. 584 * init reap timer for QCA6390. 
585 */ 586 if (!ar->ab->hw_params.rxdma1_enable) { 587 //init mon status buffer reap timer 588 timer_setup(&ar->ab->mon_reap_timer, 589 ath11k_dp_service_mon_ring, 0); 590 return 0; 591 } 592 593 ret = ath11k_dp_srng_setup(ar->ab, 594 &dp->rxdma_mon_buf_ring.refill_buf_ring, 595 HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id, 596 DP_RXDMA_MONITOR_BUF_RING_SIZE); 597 if (ret) { 598 ath11k_warn(ar->ab, 599 "failed to setup HAL_RXDMA_MONITOR_BUF\n"); 600 return ret; 601 } 602 603 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring, 604 HAL_RXDMA_MONITOR_DST, 0, dp->mac_id, 605 DP_RXDMA_MONITOR_DST_RING_SIZE); 606 if (ret) { 607 ath11k_warn(ar->ab, 608 "failed to setup HAL_RXDMA_MONITOR_DST\n"); 609 return ret; 610 } 611 612 ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring, 613 HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id, 614 DP_RXDMA_MONITOR_DESC_RING_SIZE); 615 if (ret) { 616 ath11k_warn(ar->ab, 617 "failed to setup HAL_RXDMA_MONITOR_DESC\n"); 618 return ret; 619 } 620 621 return 0; 622} 623 624void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) 625{ 626 struct ath11k_dp *dp = &ab->dp; 627 struct dp_reo_cmd *cmd, *tmp; 628 struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache; 629 630 spin_lock_bh(&dp->reo_cmd_lock); 631 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 632 list_del(&cmd->list); 633 dma_unmap_single(ab->dev, cmd->data.paddr, 634 cmd->data.size, DMA_BIDIRECTIONAL); 635 kfree(cmd->data.vaddr); 636 kfree(cmd); 637 } 638 639 list_for_each_entry_safe(cmd_cache, tmp_cache, 640 &dp->reo_cmd_cache_flush_list, list) { 641 list_del(&cmd_cache->list); 642 dp->reo_cmd_cache_flush_count--; 643 dma_unmap_single(ab->dev, cmd_cache->data.paddr, 644 cmd_cache->data.size, DMA_BIDIRECTIONAL); 645 kfree(cmd_cache->data.vaddr); 646 kfree(cmd_cache); 647 } 648 spin_unlock_bh(&dp->reo_cmd_lock); 649} 650 651static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, 652 enum hal_reo_cmd_status status) 653{ 654 struct dp_rx_tid *rx_tid = ctx; 655 656 if (status != HAL_REO_CMD_SUCCESS) 657 ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", 658 rx_tid->tid, status); 659 660 dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, 661 DMA_BIDIRECTIONAL); 662 kfree(rx_tid->vaddr); 663} 664 665static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, 666 struct dp_rx_tid *rx_tid) 667{ 668 struct ath11k_hal_reo_cmd cmd = {0}; 669 unsigned long tot_desc_sz, desc_sz; 670 int ret; 671 672 tot_desc_sz = rx_tid->size; 673 desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); 674 675 while (tot_desc_sz > desc_sz) { 676 tot_desc_sz -= desc_sz; 677 cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); 678 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 679 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 680 HAL_REO_CMD_FLUSH_CACHE, &cmd, 681 NULL); 682 if (ret) 683 ath11k_warn(ab, 684 "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", 685 rx_tid->tid, ret); 686 } 687 688 memset(&cmd, 0, sizeof(cmd)); 689 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 690 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 691 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 692 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 693 HAL_REO_CMD_FLUSH_CACHE, 694 &cmd, ath11k_dp_reo_cmd_free); 695 if (ret) { 696 ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", 697 rx_tid->tid, ret); 698 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 699 DMA_BIDIRECTIONAL); 700 kfree(rx_tid->vaddr); 701 } 702} 703 704static void ath11k_dp_rx_tid_del_func(struct 
ath11k_dp *dp, void *ctx, 705 enum hal_reo_cmd_status status) 706{ 707 struct ath11k_base *ab = dp->ab; 708 struct dp_rx_tid *rx_tid = ctx; 709 struct dp_reo_cache_flush_elem *elem, *tmp; 710 711 if (status == HAL_REO_CMD_DRAIN) { 712 goto free_desc; 713 } else if (status != HAL_REO_CMD_SUCCESS) { 714 /* Shouldn't happen! Cleanup in case of other failure? */ 715 ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", 716 rx_tid->tid, status); 717 return; 718 } 719 720 elem = kzalloc(sizeof(*elem), GFP_ATOMIC); 721 if (!elem) 722 goto free_desc; 723 724 elem->ts = jiffies; 725 memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); 726 727 spin_lock_bh(&dp->reo_cmd_lock); 728 list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); 729 dp->reo_cmd_cache_flush_count++; 730 731 /* Flush and invalidate aged REO desc from HW cache */ 732 list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, 733 list) { 734 if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD || 735 time_after(jiffies, elem->ts + 736 msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) { 737 list_del(&elem->list); 738 dp->reo_cmd_cache_flush_count--; 739 spin_unlock_bh(&dp->reo_cmd_lock); 740 741 ath11k_dp_reo_cache_flush(ab, &elem->data); 742 kfree(elem); 743 spin_lock_bh(&dp->reo_cmd_lock); 744 } 745 } 746 spin_unlock_bh(&dp->reo_cmd_lock); 747 748 return; 749free_desc: 750 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 751 DMA_BIDIRECTIONAL); 752 kfree(rx_tid->vaddr); 753} 754 755void ath11k_peer_rx_tid_delete(struct ath11k *ar, 756 struct ath11k_peer *peer, u8 tid) 757{ 758 struct ath11k_hal_reo_cmd cmd = {0}; 759 struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; 760 int ret; 761 762 if (!rx_tid->active) 763 return; 764 765 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 766 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 767 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 768 cmd.upd0 |= HAL_REO_CMD_UPD0_VLD; 769 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 770 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 771 ath11k_dp_rx_tid_del_func); 772 if (ret) { 773 ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", 774 tid, ret); 775 dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, 776 DMA_BIDIRECTIONAL); 777 kfree(rx_tid->vaddr); 778 } 779 780 rx_tid->active = false; 781} 782 783static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, 784 u32 *link_desc, 785 enum hal_wbm_rel_bm_act action) 786{ 787 struct ath11k_dp *dp = &ab->dp; 788 struct hal_srng *srng; 789 u32 *desc; 790 int ret = 0; 791 792 srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; 793 794 spin_lock_bh(&srng->lock); 795 796 ath11k_hal_srng_access_begin(ab, srng); 797 798 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 799 if (!desc) { 800 ret = -ENOBUFS; 801 goto exit; 802 } 803 804 ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc, 805 action); 806 807exit: 808 ath11k_hal_srng_access_end(ab, srng); 809 810 spin_unlock_bh(&srng->lock); 811 812 return ret; 813} 814 815static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc) 816{ 817 struct ath11k_base *ab = rx_tid->ab; 818 819 lockdep_assert_held(&ab->base_lock); 820 821 if (rx_tid->dst_ring_desc) { 822 if (rel_link_desc) 823 ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc, 824 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 825 kfree(rx_tid->dst_ring_desc); 826 rx_tid->dst_ring_desc = NULL; 827 } 828 829 rx_tid->cur_sn = 0; 830 rx_tid->last_frag_no = 0; 831 rx_tid->rx_frag_bitmap = 0; 832 
__skb_queue_purge(&rx_tid->rx_frags); 833} 834 835void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer) 836{ 837 struct dp_rx_tid *rx_tid; 838 int i; 839 840 lockdep_assert_held(&ar->ab->base_lock); 841 842 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 843 rx_tid = &peer->rx_tid[i]; 844 845 spin_unlock_bh(&ar->ab->base_lock); 846 del_timer_sync(&rx_tid->frag_timer); 847 spin_lock_bh(&ar->ab->base_lock); 848 849 ath11k_dp_rx_frags_cleanup(rx_tid, true); 850 } 851} 852 853void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer) 854{ 855 struct dp_rx_tid *rx_tid; 856 int i; 857 858 lockdep_assert_held(&ar->ab->base_lock); 859 860 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 861 rx_tid = &peer->rx_tid[i]; 862 863 ath11k_peer_rx_tid_delete(ar, peer, i); 864 ath11k_dp_rx_frags_cleanup(rx_tid, true); 865 866 spin_unlock_bh(&ar->ab->base_lock); 867 del_timer_sync(&rx_tid->frag_timer); 868 spin_lock_bh(&ar->ab->base_lock); 869 } 870} 871 872static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar, 873 struct ath11k_peer *peer, 874 struct dp_rx_tid *rx_tid, 875 u32 ba_win_sz, u16 ssn, 876 bool update_ssn) 877{ 878 struct ath11k_hal_reo_cmd cmd = {0}; 879 int ret; 880 881 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 882 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 883 cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; 884 cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; 885 cmd.ba_window_size = ba_win_sz; 886 887 if (update_ssn) { 888 cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; 889 cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn); 890 } 891 892 ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, 893 HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, 894 NULL); 895 if (ret) { 896 ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n", 897 rx_tid->tid, ret); 898 return ret; 899 } 900 901 rx_tid->ba_win_sz = ba_win_sz; 902 903 return 0; 904} 905 906static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab, 907 const u8 *peer_mac, int vdev_id, u8 tid) 908{ 909 struct ath11k_peer *peer; 910 struct dp_rx_tid *rx_tid; 911 912 spin_lock_bh(&ab->base_lock); 913 914 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 915 if (!peer) { 916 ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n"); 917 goto unlock_exit; 918 } 919 920 rx_tid = &peer->rx_tid[tid]; 921 if (!rx_tid->active) 922 goto unlock_exit; 923 924 dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 925 DMA_BIDIRECTIONAL); 926 kfree(rx_tid->vaddr); 927 928 rx_tid->active = false; 929 930unlock_exit: 931 spin_unlock_bh(&ab->base_lock); 932} 933 934int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, 935 u8 tid, u32 ba_win_sz, u16 ssn, 936 enum hal_pn_type pn_type) 937{ 938 struct ath11k_base *ab = ar->ab; 939 struct ath11k_peer *peer; 940 struct dp_rx_tid *rx_tid; 941 u32 hw_desc_sz; 942 u32 *addr_aligned; 943 void *vaddr; 944 dma_addr_t paddr; 945 int ret; 946 947 spin_lock_bh(&ab->base_lock); 948 949 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 950 if (!peer) { 951 ath11k_warn(ab, "failed to find the peer to set up rx tid\n"); 952 spin_unlock_bh(&ab->base_lock); 953 return -ENOENT; 954 } 955 956 rx_tid = &peer->rx_tid[tid]; 957 /* Update the tid queue if it is already setup */ 958 if (rx_tid->active) { 959 paddr = rx_tid->paddr; 960 ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid, 961 ba_win_sz, ssn, true); 962 spin_unlock_bh(&ab->base_lock); 963 if (ret) { 964 ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid); 965 return ret; 966 } 967 968 ret = 
ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 969 peer_mac, paddr, 970 tid, 1, ba_win_sz); 971 if (ret) 972 ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n", 973 tid, ret); 974 return ret; 975 } 976 977 rx_tid->tid = tid; 978 979 rx_tid->ba_win_sz = ba_win_sz; 980 981 /* TODO: Optimize the memory allocation for qos tid based on the 982 * the actual BA window size in REO tid update path. 983 */ 984 if (tid == HAL_DESC_REO_NON_QOS_TID) 985 hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid); 986 else 987 hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); 988 989 vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC); 990 if (!vaddr) { 991 spin_unlock_bh(&ab->base_lock); 992 return -ENOMEM; 993 } 994 995 addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); 996 997 ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, 998 ssn, pn_type); 999 1000 paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz, 1001 DMA_BIDIRECTIONAL); 1002 1003 ret = dma_mapping_error(ab->dev, paddr); 1004 if (ret) { 1005 spin_unlock_bh(&ab->base_lock); 1006 goto err_mem_free; 1007 } 1008 1009 rx_tid->vaddr = vaddr; 1010 rx_tid->paddr = paddr; 1011 rx_tid->size = hw_desc_sz; 1012 rx_tid->active = true; 1013 1014 spin_unlock_bh(&ab->base_lock); 1015 1016 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, 1017 paddr, tid, 1, ba_win_sz); 1018 if (ret) { 1019 ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n", 1020 tid, ret); 1021 ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); 1022 } 1023 1024 return ret; 1025 1026err_mem_free: 1027 kfree(vaddr); 1028 1029 return ret; 1030} 1031 1032int ath11k_dp_rx_ampdu_start(struct ath11k *ar, 1033 struct ieee80211_ampdu_params *params) 1034{ 1035 struct ath11k_base *ab = ar->ab; 1036 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 1037 int vdev_id = arsta->arvif->vdev_id; 1038 int ret; 1039 1040 ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id, 1041 params->tid, params->buf_size, 1042 params->ssn, arsta->pn_type); 1043 if (ret) 1044 ath11k_warn(ab, "failed to setup rx tid %d\n", ret); 1045 1046 return ret; 1047} 1048 1049int ath11k_dp_rx_ampdu_stop(struct ath11k *ar, 1050 struct ieee80211_ampdu_params *params) 1051{ 1052 struct ath11k_base *ab = ar->ab; 1053 struct ath11k_peer *peer; 1054 struct ath11k_sta *arsta = (void *)params->sta->drv_priv; 1055 int vdev_id = arsta->arvif->vdev_id; 1056 dma_addr_t paddr; 1057 bool active; 1058 int ret; 1059 1060 spin_lock_bh(&ab->base_lock); 1061 1062 peer = ath11k_peer_find(ab, vdev_id, params->sta->addr); 1063 if (!peer) { 1064 ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n"); 1065 spin_unlock_bh(&ab->base_lock); 1066 return -ENOENT; 1067 } 1068 1069 paddr = peer->rx_tid[params->tid].paddr; 1070 active = peer->rx_tid[params->tid].active; 1071 1072 if (!active) { 1073 spin_unlock_bh(&ab->base_lock); 1074 return 0; 1075 } 1076 1077 ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false); 1078 spin_unlock_bh(&ab->base_lock); 1079 if (ret) { 1080 ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n", 1081 params->tid, ret); 1082 return ret; 1083 } 1084 1085 ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, 1086 params->sta->addr, paddr, 1087 params->tid, 1, 1); 1088 if (ret) 1089 ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n", 1090 ret); 1091 1092 return ret; 1093} 1094 1095int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif, 1096 const 
u8 *peer_addr, 1097 enum set_key_cmd key_cmd, 1098 struct ieee80211_key_conf *key) 1099{ 1100 struct ath11k *ar = arvif->ar; 1101 struct ath11k_base *ab = ar->ab; 1102 struct ath11k_hal_reo_cmd cmd = {0}; 1103 struct ath11k_peer *peer; 1104 struct dp_rx_tid *rx_tid; 1105 u8 tid; 1106 int ret = 0; 1107 1108 /* NOTE: Enable PN/TSC replay check offload only for unicast frames. 1109 * We use mac80211 PN/TSC replay check functionality for bcast/mcast 1110 * for now. 1111 */ 1112 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) 1113 return 0; 1114 1115 cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; 1116 cmd.upd0 |= HAL_REO_CMD_UPD0_PN | 1117 HAL_REO_CMD_UPD0_PN_SIZE | 1118 HAL_REO_CMD_UPD0_PN_VALID | 1119 HAL_REO_CMD_UPD0_PN_CHECK | 1120 HAL_REO_CMD_UPD0_SVLD; 1121 1122 switch (key->cipher) { 1123 case WLAN_CIPHER_SUITE_TKIP: 1124 case WLAN_CIPHER_SUITE_CCMP: 1125 case WLAN_CIPHER_SUITE_CCMP_256: 1126 case WLAN_CIPHER_SUITE_GCMP: 1127 case WLAN_CIPHER_SUITE_GCMP_256: 1128 if (key_cmd == SET_KEY) { 1129 cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK; 1130 cmd.pn_size = 48; 1131 } 1132 break; 1133 default: 1134 break; 1135 } 1136 1137 spin_lock_bh(&ab->base_lock); 1138 1139 peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); 1140 if (!peer) { 1141 ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n"); 1142 spin_unlock_bh(&ab->base_lock); 1143 return -ENOENT; 1144 } 1145 1146 for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { 1147 rx_tid = &peer->rx_tid[tid]; 1148 if (!rx_tid->active) 1149 continue; 1150 cmd.addr_lo = lower_32_bits(rx_tid->paddr); 1151 cmd.addr_hi = upper_32_bits(rx_tid->paddr); 1152 ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, 1153 HAL_REO_CMD_UPDATE_RX_QUEUE, 1154 &cmd, NULL); 1155 if (ret) { 1156 ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n", 1157 tid, ret); 1158 break; 1159 } 1160 } 1161 1162 spin_unlock_bh(&ar->ab->base_lock); 1163 1164 return ret; 1165} 1166 1167static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, 1168 u16 peer_id) 1169{ 1170 int i; 1171 1172 for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) { 1173 if (ppdu_stats->user_stats[i].is_valid_peer_id) { 1174 if (peer_id == ppdu_stats->user_stats[i].peer_id) 1175 return i; 1176 } else { 1177 return i; 1178 } 1179 } 1180 1181 return -EINVAL; 1182} 1183 1184static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab, 1185 u16 tag, u16 len, const void *ptr, 1186 void *data) 1187{ 1188 struct htt_ppdu_stats_info *ppdu_info; 1189 struct htt_ppdu_user_stats *user_stats; 1190 int cur_user; 1191 u16 peer_id; 1192 1193 ppdu_info = (struct htt_ppdu_stats_info *)data; 1194 1195 switch (tag) { 1196 case HTT_PPDU_STATS_TAG_COMMON: 1197 if (len < sizeof(struct htt_ppdu_stats_common)) { 1198 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1199 len, tag); 1200 return -EINVAL; 1201 } 1202 memcpy((void *)&ppdu_info->ppdu_stats.common, ptr, 1203 sizeof(struct htt_ppdu_stats_common)); 1204 break; 1205 case HTT_PPDU_STATS_TAG_USR_RATE: 1206 if (len < sizeof(struct htt_ppdu_stats_user_rate)) { 1207 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1208 len, tag); 1209 return -EINVAL; 1210 } 1211 1212 peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id; 1213 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1214 peer_id); 1215 if (cur_user < 0) 1216 return -EINVAL; 1217 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1218 user_stats->peer_id = peer_id; 1219 user_stats->is_valid_peer_id = true; 1220 memcpy((void 
*)&user_stats->rate, ptr, 1221 sizeof(struct htt_ppdu_stats_user_rate)); 1222 user_stats->tlv_flags |= BIT(tag); 1223 break; 1224 case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: 1225 if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { 1226 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1227 len, tag); 1228 return -EINVAL; 1229 } 1230 1231 peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id; 1232 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1233 peer_id); 1234 if (cur_user < 0) 1235 return -EINVAL; 1236 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1237 user_stats->peer_id = peer_id; 1238 user_stats->is_valid_peer_id = true; 1239 memcpy((void *)&user_stats->cmpltn_cmn, ptr, 1240 sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); 1241 user_stats->tlv_flags |= BIT(tag); 1242 break; 1243 case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: 1244 if (len < 1245 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { 1246 ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", 1247 len, tag); 1248 return -EINVAL; 1249 } 1250 1251 peer_id = 1252 ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id; 1253 cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, 1254 peer_id); 1255 if (cur_user < 0) 1256 return -EINVAL; 1257 user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 1258 user_stats->peer_id = peer_id; 1259 user_stats->is_valid_peer_id = true; 1260 memcpy((void *)&user_stats->ack_ba, ptr, 1261 sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); 1262 user_stats->tlv_flags |= BIT(tag); 1263 break; 1264 } 1265 return 0; 1266} 1267 1268int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, 1269 int (*iter)(struct ath11k_base *ar, u16 tag, u16 len, 1270 const void *ptr, void *data), 1271 void *data) 1272{ 1273 const struct htt_tlv *tlv; 1274 const void *begin = ptr; 1275 u16 tlv_tag, tlv_len; 1276 int ret = -EINVAL; 1277 1278 while (len > 0) { 1279 if (len < sizeof(*tlv)) { 1280 ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 1281 ptr - begin, len, sizeof(*tlv)); 1282 return -EINVAL; 1283 } 1284 tlv = (struct htt_tlv *)ptr; 1285 tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header); 1286 tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header); 1287 ptr += sizeof(*tlv); 1288 len -= sizeof(*tlv); 1289 1290 if (tlv_len > len) { 1291 ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n", 1292 tlv_tag, ptr - begin, len, tlv_len); 1293 return -EINVAL; 1294 } 1295 ret = iter(ab, tlv_tag, tlv_len, ptr, data); 1296 if (ret == -ENOMEM) 1297 return ret; 1298 1299 ptr += tlv_len; 1300 len -= tlv_len; 1301 } 1302 return 0; 1303} 1304 1305static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi) 1306{ 1307 u32 ret = 0; 1308 1309 switch (sgi) { 1310 case RX_MSDU_START_SGI_0_8_US: 1311 ret = NL80211_RATE_INFO_HE_GI_0_8; 1312 break; 1313 case RX_MSDU_START_SGI_1_6_US: 1314 ret = NL80211_RATE_INFO_HE_GI_1_6; 1315 break; 1316 case RX_MSDU_START_SGI_3_2_US: 1317 ret = NL80211_RATE_INFO_HE_GI_3_2; 1318 break; 1319 } 1320 1321 return ret; 1322} 1323 1324static void 1325ath11k_update_per_peer_tx_stats(struct ath11k *ar, 1326 struct htt_ppdu_stats *ppdu_stats, u8 user) 1327{ 1328 struct ath11k_base *ab = ar->ab; 1329 struct ath11k_peer *peer; 1330 struct ieee80211_sta *sta; 1331 struct ath11k_sta *arsta; 1332 struct htt_ppdu_stats_user_rate *user_rate; 1333 struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; 1334 struct htt_ppdu_user_stats 
*usr_stats = &ppdu_stats->user_stats[user]; 1335 struct htt_ppdu_stats_common *common = &ppdu_stats->common; 1336 int ret; 1337 u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0; 1338 u32 succ_bytes = 0; 1339 u16 rate = 0, succ_pkts = 0; 1340 u32 tx_duration = 0; 1341 u8 tid = HTT_PPDU_STATS_NON_QOS_TID; 1342 bool is_ampdu = false; 1343 1344 if (!usr_stats) 1345 return; 1346 1347 if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) 1348 return; 1349 1350 if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) 1351 is_ampdu = 1352 HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); 1353 1354 if (usr_stats->tlv_flags & 1355 BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { 1356 succ_bytes = usr_stats->ack_ba.success_bytes; 1357 succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M, 1358 usr_stats->ack_ba.info); 1359 tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM, 1360 usr_stats->ack_ba.info); 1361 } 1362 1363 if (common->fes_duration_us) 1364 tx_duration = common->fes_duration_us; 1365 1366 user_rate = &usr_stats->rate; 1367 flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags); 1368 bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2; 1369 nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1; 1370 mcs = HTT_USR_RATE_MCS(user_rate->rate_flags); 1371 sgi = HTT_USR_RATE_GI(user_rate->rate_flags); 1372 dcm = HTT_USR_RATE_DCM(user_rate->rate_flags); 1373 1374 /* Note: If host configured fixed rates and in some other special 1375 * cases, the broadcast/management frames are sent in different rates. 1376 * Firmware rate's control to be skipped for this? 1377 */ 1378 1379 if (flags == WMI_RATE_PREAMBLE_HE && mcs > 11) { 1380 ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs); 1381 return; 1382 } 1383 1384 if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) { 1385 ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs); 1386 return; 1387 } 1388 1389 if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) { 1390 ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs); 1391 return; 1392 } 1393 1394 if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) { 1395 ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats", 1396 mcs, nss); 1397 return; 1398 } 1399 1400 if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) { 1401 ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, 1402 flags, 1403 &rate_idx, 1404 &rate); 1405 if (ret < 0) 1406 return; 1407 } 1408 1409 rcu_read_lock(); 1410 spin_lock_bh(&ab->base_lock); 1411 peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id); 1412 1413 if (!peer || !peer->sta) { 1414 spin_unlock_bh(&ab->base_lock); 1415 rcu_read_unlock(); 1416 return; 1417 } 1418 1419 sta = peer->sta; 1420 arsta = (struct ath11k_sta *)sta->drv_priv; 1421 1422 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 1423 1424 switch (flags) { 1425 case WMI_RATE_PREAMBLE_OFDM: 1426 arsta->txrate.legacy = rate; 1427 break; 1428 case WMI_RATE_PREAMBLE_CCK: 1429 arsta->txrate.legacy = rate; 1430 break; 1431 case WMI_RATE_PREAMBLE_HT: 1432 arsta->txrate.mcs = mcs + 8 * (nss - 1); 1433 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 1434 if (sgi) 1435 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1436 break; 1437 case WMI_RATE_PREAMBLE_VHT: 1438 arsta->txrate.mcs = mcs; 1439 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 1440 if (sgi) 1441 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 1442 break; 1443 case WMI_RATE_PREAMBLE_HE: 1444 arsta->txrate.mcs = mcs; 1445 arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS; 1446 
arsta->txrate.he_dcm = dcm; 1447 arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); 1448 arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc( 1449 (user_rate->ru_end - 1450 user_rate->ru_start) + 1); 1451 break; 1452 } 1453 1454 arsta->txrate.nss = nss; 1455 arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); 1456 arsta->tx_duration += tx_duration; 1457 memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); 1458 1459 /* PPDU stats reported for mgmt packet doesn't have valid tx bytes. 1460 * So skip peer stats update for mgmt packets. 1461 */ 1462 if (tid < HTT_PPDU_STATS_NON_QOS_TID) { 1463 memset(peer_stats, 0, sizeof(*peer_stats)); 1464 peer_stats->succ_pkts = succ_pkts; 1465 peer_stats->succ_bytes = succ_bytes; 1466 peer_stats->is_ampdu = is_ampdu; 1467 peer_stats->duration = tx_duration; 1468 peer_stats->ba_fails = 1469 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1470 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1471 1472 if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) 1473 ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx); 1474 } 1475 1476 spin_unlock_bh(&ab->base_lock); 1477 rcu_read_unlock(); 1478} 1479 1480static void ath11k_htt_update_ppdu_stats(struct ath11k *ar, 1481 struct htt_ppdu_stats *ppdu_stats) 1482{ 1483 u8 user; 1484 1485 for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) 1486 ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user); 1487} 1488 1489static 1490struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, 1491 u32 ppdu_id) 1492{ 1493 struct htt_ppdu_stats_info *ppdu_info; 1494 1495 spin_lock_bh(&ar->data_lock); 1496 if (!list_empty(&ar->ppdu_stats_info)) { 1497 list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { 1498 if (ppdu_info->ppdu_id == ppdu_id) { 1499 spin_unlock_bh(&ar->data_lock); 1500 return ppdu_info; 1501 } 1502 } 1503 1504 if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { 1505 ppdu_info = list_first_entry(&ar->ppdu_stats_info, 1506 typeof(*ppdu_info), list); 1507 list_del(&ppdu_info->list); 1508 ar->ppdu_stat_list_depth--; 1509 ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); 1510 kfree(ppdu_info); 1511 } 1512 } 1513 spin_unlock_bh(&ar->data_lock); 1514 1515 ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC); 1516 if (!ppdu_info) 1517 return NULL; 1518 1519 spin_lock_bh(&ar->data_lock); 1520 list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); 1521 ar->ppdu_stat_list_depth++; 1522 spin_unlock_bh(&ar->data_lock); 1523 1524 return ppdu_info; 1525} 1526 1527static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab, 1528 struct sk_buff *skb) 1529{ 1530 struct ath11k_htt_ppdu_stats_msg *msg; 1531 struct htt_ppdu_stats_info *ppdu_info; 1532 struct ath11k *ar; 1533 int ret; 1534 u8 pdev_id; 1535 u32 ppdu_id, len; 1536 1537 msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data; 1538 len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info); 1539 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info); 1540 ppdu_id = msg->ppdu_id; 1541 1542 rcu_read_lock(); 1543 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1544 if (!ar) { 1545 ret = -EINVAL; 1546 goto exit; 1547 } 1548 1549 if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) 1550 trace_ath11k_htt_ppdu_stats(ar, skb->data, len); 1551 1552 ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id); 1553 if (!ppdu_info) { 1554 ret = -EINVAL; 1555 goto exit; 1556 } 1557 1558 ppdu_info->ppdu_id = ppdu_id; 1559 ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len, 1560 
ath11k_htt_tlv_ppdu_stats_parse, 1561 (void *)ppdu_info); 1562 if (ret) { 1563 ath11k_warn(ab, "Failed to parse tlv %d\n", ret); 1564 goto exit; 1565 } 1566 1567exit: 1568 rcu_read_unlock(); 1569 1570 return ret; 1571} 1572 1573static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb) 1574{ 1575 struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data; 1576 struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data; 1577 struct ath11k *ar; 1578 u8 pdev_id; 1579 1580 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr); 1581 1582 rcu_read_lock(); 1583 1584 ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); 1585 if (!ar) { 1586 ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id); 1587 goto out; 1588 } 1589 1590 trace_ath11k_htt_pktlog(ar, data->payload, hdr->size, 1591 ar->ab->pktlog_defs_checksum); 1592 1593out: 1594 rcu_read_unlock(); 1595} 1596 1597static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab, 1598 struct sk_buff *skb) 1599{ 1600 u32 *data = (u32 *)skb->data; 1601 u8 pdev_id, ring_type, ring_id, pdev_idx; 1602 u16 hp, tp; 1603 u32 backpressure_time; 1604 struct ath11k_bp_stats *bp_stats; 1605 1606 pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data); 1607 ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data); 1608 ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data); 1609 ++data; 1610 1611 hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data); 1612 tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data); 1613 ++data; 1614 1615 backpressure_time = *data; 1616 1617 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n", 1618 pdev_id, ring_type, ring_id, hp, tp, backpressure_time); 1619 1620 if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) { 1621 if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX) 1622 return; 1623 1624 bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id]; 1625 } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) { 1626 pdev_idx = DP_HW2SW_MACID(pdev_id); 1627 1628 if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS) 1629 return; 1630 1631 bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx]; 1632 } else { 1633 ath11k_warn(ab, "unknown ring type received in htt bp event %d\n", 1634 ring_type); 1635 return; 1636 } 1637 1638 spin_lock_bh(&ab->base_lock); 1639 bp_stats->hp = hp; 1640 bp_stats->tp = tp; 1641 bp_stats->count++; 1642 bp_stats->jiffies = jiffies; 1643 spin_unlock_bh(&ab->base_lock); 1644} 1645 1646void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, 1647 struct sk_buff *skb) 1648{ 1649 struct ath11k_dp *dp = &ab->dp; 1650 struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; 1651 enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp); 1652 u16 peer_id; 1653 u8 vdev_id; 1654 u8 mac_addr[ETH_ALEN]; 1655 u16 peer_mac_h16; 1656 u16 ast_hash; 1657 1658 ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); 1659 1660 switch (type) { 1661 case HTT_T2H_MSG_TYPE_VERSION_CONF: 1662 dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, 1663 resp->version_msg.version); 1664 dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, 1665 resp->version_msg.version); 1666 complete(&dp->htt_tgt_version_received); 1667 break; 1668 case HTT_T2H_MSG_TYPE_PEER_MAP: 1669 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1670 resp->peer_map_ev.info); 1671 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1672 
resp->peer_map_ev.info); 1673 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1674 resp->peer_map_ev.info1); 1675 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1676 peer_mac_h16, mac_addr); 1677 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0); 1678 break; 1679 case HTT_T2H_MSG_TYPE_PEER_MAP2: 1680 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, 1681 resp->peer_map_ev.info); 1682 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, 1683 resp->peer_map_ev.info); 1684 peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, 1685 resp->peer_map_ev.info1); 1686 ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, 1687 peer_mac_h16, mac_addr); 1688 ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL, 1689 resp->peer_map_ev.info2); 1690 ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash); 1691 break; 1692 case HTT_T2H_MSG_TYPE_PEER_UNMAP: 1693 case HTT_T2H_MSG_TYPE_PEER_UNMAP2: 1694 peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID, 1695 resp->peer_unmap_ev.info); 1696 ath11k_peer_unmap_event(ab, peer_id); 1697 break; 1698 case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: 1699 ath11k_htt_pull_ppdu_stats(ab, skb); 1700 break; 1701 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1702 ath11k_debugfs_htt_ext_stats_handler(ab, skb); 1703 break; 1704 case HTT_T2H_MSG_TYPE_PKTLOG: 1705 ath11k_htt_pktlog(ab, skb); 1706 break; 1707 case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND: 1708 ath11k_htt_backpressure_event_handler(ab, skb); 1709 break; 1710 default: 1711 ath11k_warn(ab, "htt event %d not handled\n", type); 1712 break; 1713 } 1714 1715 dev_kfree_skb_any(skb); 1716} 1717 1718static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, 1719 struct sk_buff_head *msdu_list, 1720 struct sk_buff *first, struct sk_buff *last, 1721 u8 l3pad_bytes, int msdu_len) 1722{ 1723 struct sk_buff *skb; 1724 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1725 int buf_first_hdr_len, buf_first_len; 1726 struct hal_rx_desc *ldesc; 1727 int space_extra; 1728 int rem_len; 1729 int buf_len; 1730 1731 /* As the msdu is spread across multiple rx buffers, 1732 * find the offset to the start of msdu for computing 1733 * the length of the msdu in the first buffer. 1734 */ 1735 buf_first_hdr_len = HAL_RX_DESC_SIZE + l3pad_bytes; 1736 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; 1737 1738 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { 1739 skb_put(first, buf_first_hdr_len + msdu_len); 1740 skb_pull(first, buf_first_hdr_len); 1741 return 0; 1742 } 1743 1744 ldesc = (struct hal_rx_desc *)last->data; 1745 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc); 1746 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc); 1747 1748 /* MSDU spans over multiple buffers because the length of the MSDU 1749 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data 1750 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. 1751 */ 1752 skb_put(first, DP_RX_BUFFER_SIZE); 1753 skb_pull(first, buf_first_hdr_len); 1754 1755 /* When an MSDU spread over multiple buffers attention, MSDU_END and 1756 * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. 
1757 */ 1758 ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc); 1759 1760 space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); 1761 if (space_extra > 0 && 1762 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { 1763 /* Free up all buffers of the MSDU */ 1764 while ((skb = __skb_dequeue(msdu_list)) != NULL) { 1765 rxcb = ATH11K_SKB_RXCB(skb); 1766 if (!rxcb->is_continuation) { 1767 dev_kfree_skb_any(skb); 1768 break; 1769 } 1770 dev_kfree_skb_any(skb); 1771 } 1772 return -ENOMEM; 1773 } 1774 1775 rem_len = msdu_len - buf_first_len; 1776 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { 1777 rxcb = ATH11K_SKB_RXCB(skb); 1778 if (rxcb->is_continuation) 1779 buf_len = DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE; 1780 else 1781 buf_len = rem_len; 1782 1783 if (buf_len > (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)) { 1784 WARN_ON_ONCE(1); 1785 dev_kfree_skb_any(skb); 1786 return -EINVAL; 1787 } 1788 1789 skb_put(skb, buf_len + HAL_RX_DESC_SIZE); 1790 skb_pull(skb, HAL_RX_DESC_SIZE); 1791 skb_copy_from_linear_data(skb, skb_put(first, buf_len), 1792 buf_len); 1793 dev_kfree_skb_any(skb); 1794 1795 rem_len -= buf_len; 1796 if (!rxcb->is_continuation) 1797 break; 1798 } 1799 1800 return 0; 1801} 1802 1803static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, 1804 struct sk_buff *first) 1805{ 1806 struct sk_buff *skb; 1807 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); 1808 1809 if (!rxcb->is_continuation) 1810 return first; 1811 1812 skb_queue_walk(msdu_list, skb) { 1813 rxcb = ATH11K_SKB_RXCB(skb); 1814 if (!rxcb->is_continuation) 1815 return skb; 1816 } 1817 1818 return NULL; 1819} 1820 1821static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu) 1822{ 1823 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1824 bool ip_csum_fail, l4_csum_fail; 1825 1826 ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc); 1827 l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc); 1828 1829 msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? 
1830 CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1831} 1832 1833static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, 1834 enum hal_encrypt_type enctype) 1835{ 1836 switch (enctype) { 1837 case HAL_ENCRYPT_TYPE_OPEN: 1838 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1839 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1840 return 0; 1841 case HAL_ENCRYPT_TYPE_CCMP_128: 1842 return IEEE80211_CCMP_MIC_LEN; 1843 case HAL_ENCRYPT_TYPE_CCMP_256: 1844 return IEEE80211_CCMP_256_MIC_LEN; 1845 case HAL_ENCRYPT_TYPE_GCMP_128: 1846 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1847 return IEEE80211_GCMP_MIC_LEN; 1848 case HAL_ENCRYPT_TYPE_WEP_40: 1849 case HAL_ENCRYPT_TYPE_WEP_104: 1850 case HAL_ENCRYPT_TYPE_WEP_128: 1851 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1852 case HAL_ENCRYPT_TYPE_WAPI: 1853 break; 1854 } 1855 1856 ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); 1857 return 0; 1858} 1859 1860static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar, 1861 enum hal_encrypt_type enctype) 1862{ 1863 switch (enctype) { 1864 case HAL_ENCRYPT_TYPE_OPEN: 1865 return 0; 1866 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1867 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1868 return IEEE80211_TKIP_IV_LEN; 1869 case HAL_ENCRYPT_TYPE_CCMP_128: 1870 return IEEE80211_CCMP_HDR_LEN; 1871 case HAL_ENCRYPT_TYPE_CCMP_256: 1872 return IEEE80211_CCMP_256_HDR_LEN; 1873 case HAL_ENCRYPT_TYPE_GCMP_128: 1874 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1875 return IEEE80211_GCMP_HDR_LEN; 1876 case HAL_ENCRYPT_TYPE_WEP_40: 1877 case HAL_ENCRYPT_TYPE_WEP_104: 1878 case HAL_ENCRYPT_TYPE_WEP_128: 1879 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1880 case HAL_ENCRYPT_TYPE_WAPI: 1881 break; 1882 } 1883 1884 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1885 return 0; 1886} 1887 1888static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, 1889 enum hal_encrypt_type enctype) 1890{ 1891 switch (enctype) { 1892 case HAL_ENCRYPT_TYPE_OPEN: 1893 case HAL_ENCRYPT_TYPE_CCMP_128: 1894 case HAL_ENCRYPT_TYPE_CCMP_256: 1895 case HAL_ENCRYPT_TYPE_GCMP_128: 1896 case HAL_ENCRYPT_TYPE_AES_GCMP_256: 1897 return 0; 1898 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: 1899 case HAL_ENCRYPT_TYPE_TKIP_MIC: 1900 return IEEE80211_TKIP_ICV_LEN; 1901 case HAL_ENCRYPT_TYPE_WEP_40: 1902 case HAL_ENCRYPT_TYPE_WEP_104: 1903 case HAL_ENCRYPT_TYPE_WEP_128: 1904 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: 1905 case HAL_ENCRYPT_TYPE_WAPI: 1906 break; 1907 } 1908 1909 ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); 1910 return 0; 1911} 1912 1913static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, 1914 struct sk_buff *msdu, 1915 u8 *first_hdr, 1916 enum hal_encrypt_type enctype, 1917 struct ieee80211_rx_status *status) 1918{ 1919 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1920 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; 1921 struct ieee80211_hdr *hdr; 1922 size_t hdr_len; 1923 u8 da[ETH_ALEN]; 1924 u8 sa[ETH_ALEN]; 1925 u16 qos_ctl = 0; 1926 u8 *qos; 1927 1928 /* copy SA & DA and pull decapped header */ 1929 hdr = (struct ieee80211_hdr *)msdu->data; 1930 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1931 ether_addr_copy(da, ieee80211_get_DA(hdr)); 1932 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1933 skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control)); 1934 1935 if (rxcb->is_first_msdu) { 1936 /* original 802.11 header is valid for the first msdu 1937 * hence we can reuse the same header 1938 */ 1939 hdr = (struct ieee80211_hdr *)first_hdr; 1940 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1941 1942 /* Each A-MSDU subframe will be reported as a separate MSDU, 1943 * so strip 
the A-MSDU bit from QoS Ctl. 1944 */ 1945 if (ieee80211_is_data_qos(hdr->frame_control)) { 1946 qos = ieee80211_get_qos_ctl(hdr); 1947 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1948 } 1949 } else { 1950 /* Rebuild qos header if this is a middle/last msdu */ 1951 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1952 1953 /* Reset the order bit as the HT_Control header is stripped */ 1954 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); 1955 1956 qos_ctl = rxcb->tid; 1957 1958 if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(rxcb->rx_desc)) 1959 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; 1960 1961 /* TODO Add other QoS ctl fields when required */ 1962 1963 /* copy decap header before overwriting for reuse below */ 1964 memcpy(decap_hdr, (uint8_t *)hdr, hdr_len); 1965 } 1966 1967 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1968 memcpy(skb_push(msdu, 1969 ath11k_dp_rx_crypto_param_len(ar, enctype)), 1970 (void *)hdr + hdr_len, 1971 ath11k_dp_rx_crypto_param_len(ar, enctype)); 1972 } 1973 1974 if (!rxcb->is_first_msdu) { 1975 memcpy(skb_push(msdu, 1976 IEEE80211_QOS_CTL_LEN), &qos_ctl, 1977 IEEE80211_QOS_CTL_LEN); 1978 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); 1979 return; 1980 } 1981 1982 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1983 1984 /* original 802.11 header has a different DA and in 1985 * case of 4addr it may also have different SA 1986 */ 1987 hdr = (struct ieee80211_hdr *)msdu->data; 1988 ether_addr_copy(ieee80211_get_DA(hdr), da); 1989 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1990} 1991 1992static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 1993 enum hal_encrypt_type enctype, 1994 struct ieee80211_rx_status *status, 1995 bool decrypted) 1996{ 1997 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 1998 struct ieee80211_hdr *hdr; 1999 size_t hdr_len; 2000 size_t crypto_len; 2001 2002 if (!rxcb->is_first_msdu || 2003 !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { 2004 WARN_ON_ONCE(1); 2005 return; 2006 } 2007 2008 skb_trim(msdu, msdu->len - FCS_LEN); 2009 2010 if (!decrypted) 2011 return; 2012 2013 hdr = (void *)msdu->data; 2014 2015 /* Tail */ 2016 if (status->flag & RX_FLAG_IV_STRIPPED) { 2017 skb_trim(msdu, msdu->len - 2018 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2019 2020 skb_trim(msdu, msdu->len - 2021 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2022 } else { 2023 /* MIC */ 2024 if (status->flag & RX_FLAG_MIC_STRIPPED) 2025 skb_trim(msdu, msdu->len - 2026 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 2027 2028 /* ICV */ 2029 if (status->flag & RX_FLAG_ICV_STRIPPED) 2030 skb_trim(msdu, msdu->len - 2031 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 2032 } 2033 2034 /* MMIC */ 2035 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 2036 !ieee80211_has_morefrags(hdr->frame_control) && 2037 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) 2038 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); 2039 2040 /* Head */ 2041 if (status->flag & RX_FLAG_IV_STRIPPED) { 2042 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2043 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2044 2045 memmove((void *)msdu->data + crypto_len, 2046 (void *)msdu->data, hdr_len); 2047 skb_pull(msdu, crypto_len); 2048 } 2049} 2050 2051static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, 2052 struct sk_buff *msdu, 2053 enum hal_encrypt_type enctype) 2054{ 2055 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 2056 struct ieee80211_hdr *hdr; 2057 size_t hdr_len, crypto_len; 2058 void *rfc1042; 2059 bool is_amsdu; 2060 2061 
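/* The RFC 1042/LLC-SNAP header lives in the 802.11 header snapshot kept
 * in the rx descriptor: for the first msdu it follows the 802.11 header
 * and any crypto parameters, and for A-MSDU subframes an additional
 * subframe header (DA/SA/length) precedes it.
 */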
is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); 2062 hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc); 2063 rfc1042 = hdr; 2064 2065 if (rxcb->is_first_msdu) { 2066 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2067 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 2068 2069 rfc1042 += hdr_len + crypto_len; 2070 } 2071 2072 if (is_amsdu) 2073 rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); 2074 2075 return rfc1042; 2076} 2077 2078static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, 2079 struct sk_buff *msdu, 2080 u8 *first_hdr, 2081 enum hal_encrypt_type enctype, 2082 struct ieee80211_rx_status *status) 2083{ 2084 struct ieee80211_hdr *hdr; 2085 struct ethhdr *eth; 2086 size_t hdr_len; 2087 u8 da[ETH_ALEN]; 2088 u8 sa[ETH_ALEN]; 2089 void *rfc1042; 2090 2091 rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); 2092 if (WARN_ON_ONCE(!rfc1042)) 2093 return; 2094 2095 /* pull decapped header and copy SA & DA */ 2096 eth = (struct ethhdr *)msdu->data; 2097 ether_addr_copy(da, eth->h_dest); 2098 ether_addr_copy(sa, eth->h_source); 2099 skb_pull(msdu, sizeof(struct ethhdr)); 2100 2101 /* push rfc1042/llc/snap */ 2102 memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, 2103 sizeof(struct ath11k_dp_rfc1042_hdr)); 2104 2105 /* push original 802.11 header */ 2106 hdr = (struct ieee80211_hdr *)first_hdr; 2107 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2108 2109 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 2110 memcpy(skb_push(msdu, 2111 ath11k_dp_rx_crypto_param_len(ar, enctype)), 2112 (void *)hdr + hdr_len, 2113 ath11k_dp_rx_crypto_param_len(ar, enctype)); 2114 } 2115 2116 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 2117 2118 /* original 802.11 header has a different DA and in 2119 * case of 4addr it may also have different SA 2120 */ 2121 hdr = (struct ieee80211_hdr *)msdu->data; 2122 ether_addr_copy(ieee80211_get_DA(hdr), da); 2123 ether_addr_copy(ieee80211_get_SA(hdr), sa); 2124} 2125 2126static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, 2127 struct hal_rx_desc *rx_desc, 2128 enum hal_encrypt_type enctype, 2129 struct ieee80211_rx_status *status, 2130 bool decrypted) 2131{ 2132 u8 *first_hdr; 2133 u8 decap; 2134 2135 first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc); 2136 decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc); 2137 2138 switch (decap) { 2139 case DP_RX_DECAP_TYPE_NATIVE_WIFI: 2140 ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, 2141 enctype, status); 2142 break; 2143 case DP_RX_DECAP_TYPE_RAW: 2144 ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, 2145 decrypted); 2146 break; 2147 case DP_RX_DECAP_TYPE_ETHERNET2_DIX: 2148 /* TODO undecap support for middle/last msdu's of amsdu */ 2149 ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, 2150 enctype, status); 2151 break; 2152 case DP_RX_DECAP_TYPE_8023: 2153 /* TODO: Handle undecap for these formats */ 2154 break; 2155 } 2156} 2157 2158static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, 2159 struct sk_buff *msdu, 2160 struct hal_rx_desc *rx_desc, 2161 struct ieee80211_rx_status *rx_status) 2162{ 2163 bool fill_crypto_hdr, mcast; 2164 enum hal_encrypt_type enctype; 2165 bool is_decrypted = false; 2166 struct ieee80211_hdr *hdr; 2167 struct ath11k_peer *peer; 2168 u32 err_bitmap; 2169 2170 hdr = (struct ieee80211_hdr *)msdu->data; 2171 2172 /* PN for multicast packets will be checked in mac80211 */ 2173 2174 mcast = is_multicast_ether_addr(hdr->addr1); 2175 fill_crypto_hdr = mcast; 2176 2177 spin_lock_bh(&ar->ab->base_lock); 2178 
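/* Look up the transmitting peer under base_lock to pick the pairwise or
 * group cipher for this MPDU; if the peer is not (yet) known, treat the
 * frame as open so no crypto trimming is attempted.
 */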
peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2); 2179 if (peer) { 2180 if (mcast) 2181 enctype = peer->sec_type_grp; 2182 else 2183 enctype = peer->sec_type; 2184 } else { 2185 enctype = HAL_ENCRYPT_TYPE_OPEN; 2186 } 2187 spin_unlock_bh(&ar->ab->base_lock); 2188 2189 err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc); 2190 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) 2191 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 2192 2193 /* Clear per-MPDU flags while leaving per-PPDU flags intact */ 2194 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2195 RX_FLAG_MMIC_ERROR | 2196 RX_FLAG_DECRYPTED | 2197 RX_FLAG_IV_STRIPPED | 2198 RX_FLAG_MMIC_STRIPPED); 2199 2200 if (err_bitmap & DP_RX_MPDU_ERR_FCS) 2201 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 2202 if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) 2203 rx_status->flag |= RX_FLAG_MMIC_ERROR; 2204 2205 if (is_decrypted) { 2206 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; 2207 2208 if (fill_crypto_hdr) 2209 rx_status->flag |= RX_FLAG_MIC_STRIPPED | 2210 RX_FLAG_ICV_STRIPPED; 2211 else 2212 rx_status->flag |= RX_FLAG_IV_STRIPPED | 2213 RX_FLAG_PN_VALIDATED; 2214 } 2215 2216 ath11k_dp_rx_h_csum_offload(msdu); 2217 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 2218 enctype, rx_status, is_decrypted); 2219 2220 if (!is_decrypted || fill_crypto_hdr) 2221 return; 2222 2223 hdr = (void *)msdu->data; 2224 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2225} 2226 2227static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2228 struct ieee80211_rx_status *rx_status) 2229{ 2230 struct ieee80211_supported_band *sband; 2231 enum rx_msdu_start_pkt_type pkt_type; 2232 u8 bw; 2233 u8 rate_mcs, nss; 2234 u8 sgi; 2235 bool is_cck; 2236 2237 pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc); 2238 bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc); 2239 rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc); 2240 nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc); 2241 sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc); 2242 2243 switch (pkt_type) { 2244 case RX_MSDU_START_PKT_TYPE_11A: 2245 case RX_MSDU_START_PKT_TYPE_11B: 2246 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B); 2247 sband = &ar->mac.sbands[rx_status->band]; 2248 rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, 2249 is_cck); 2250 break; 2251 case RX_MSDU_START_PKT_TYPE_11N: 2252 rx_status->encoding = RX_ENC_HT; 2253 if (rate_mcs > ATH11K_HT_MCS_MAX) { 2254 ath11k_warn(ar->ab, 2255 "Received with invalid mcs in HT mode %d\n", 2256 rate_mcs); 2257 break; 2258 } 2259 rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); 2260 if (sgi) 2261 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2262 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2263 break; 2264 case RX_MSDU_START_PKT_TYPE_11AC: 2265 rx_status->encoding = RX_ENC_VHT; 2266 rx_status->rate_idx = rate_mcs; 2267 if (rate_mcs > ATH11K_VHT_MCS_MAX) { 2268 ath11k_warn(ar->ab, 2269 "Received with invalid mcs in VHT mode %d\n", 2270 rate_mcs); 2271 break; 2272 } 2273 rx_status->nss = nss; 2274 if (sgi) 2275 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 2276 rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); 2277 break; 2278 case RX_MSDU_START_PKT_TYPE_11AX: 2279 rx_status->rate_idx = rate_mcs; 2280 if (rate_mcs > ATH11K_HE_MCS_MAX) { 2281 ath11k_warn(ar->ab, 2282 "Received with invalid mcs in HE mode %d\n", 2283 rate_mcs); 2284 break; 2285 } 2286 rx_status->encoding = RX_ENC_HE; 2287 rx_status->nss = nss; 2288 rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi); 2289 rx_status->bw = 
ath11k_mac_bw_to_mac80211_bw(bw); 2290 break; 2291 } 2292} 2293 2294static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, 2295 struct ieee80211_rx_status *rx_status) 2296{ 2297 u8 channel_num; 2298 u32 center_freq; 2299 struct ieee80211_channel *channel; 2300 2301 rx_status->freq = 0; 2302 rx_status->rate_idx = 0; 2303 rx_status->nss = 0; 2304 rx_status->encoding = RX_ENC_LEGACY; 2305 rx_status->bw = RATE_INFO_BW_20; 2306 2307 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2308 2309 channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc); 2310 center_freq = ath11k_dp_rx_h_msdu_start_freq(rx_desc) >> 16; 2311 2312 if (center_freq >= ATH11K_MIN_6G_FREQ && 2313 center_freq <= ATH11K_MAX_6G_FREQ) { 2314 rx_status->band = NL80211_BAND_6GHZ; 2315 rx_status->freq = center_freq; 2316 } else if (channel_num >= 1 && channel_num <= 14) { 2317 rx_status->band = NL80211_BAND_2GHZ; 2318 } else if (channel_num >= 36 && channel_num <= 173) { 2319 rx_status->band = NL80211_BAND_5GHZ; 2320 } else { 2321 spin_lock_bh(&ar->data_lock); 2322 channel = ar->rx_channel; 2323 if (channel) { 2324 rx_status->band = channel->band; 2325 channel_num = 2326 ieee80211_frequency_to_channel(channel->center_freq); 2327 } 2328 spin_unlock_bh(&ar->data_lock); 2329 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ", 2330 rx_desc, sizeof(struct hal_rx_desc)); 2331 } 2332 2333 if (rx_status->band != NL80211_BAND_6GHZ) 2334 rx_status->freq = ieee80211_channel_to_frequency(channel_num, 2335 rx_status->band); 2336 2337 ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); 2338} 2339 2340static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out, 2341 size_t size) 2342{ 2343 u8 *qc; 2344 int tid; 2345 2346 if (!ieee80211_is_data_qos(hdr->frame_control)) 2347 return ""; 2348 2349 qc = ieee80211_get_qos_ctl(hdr); 2350 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 2351 snprintf(out, size, "tid %d", tid); 2352 2353 return out; 2354} 2355 2356static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, 2357 struct sk_buff *msdu) 2358{ 2359 static const struct ieee80211_radiotap_he known = { 2360 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | 2361 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), 2362 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), 2363 }; 2364 struct ieee80211_rx_status *status; 2365 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; 2366 struct ieee80211_radiotap_he *he = NULL; 2367 char tid[32]; 2368 2369 status = IEEE80211_SKB_RXCB(msdu); 2370 if (status->encoding == RX_ENC_HE) { 2371 he = skb_push(msdu, sizeof(known)); 2372 memcpy(he, &known, sizeof(known)); 2373 status->flag |= RX_FLAG_RADIOTAP_HE; 2374 } 2375 2376 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 2377 "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2378 msdu, 2379 msdu->len, 2380 ieee80211_get_SA(hdr), 2381 ath11k_print_get_tid(hdr, tid, sizeof(tid)), 2382 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 2383 "mcast" : "ucast", 2384 (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, 2385 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 2386 (status->encoding == RX_ENC_HT) ? "ht" : "", 2387 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2388 (status->encoding == RX_ENC_HE) ? "he" : "", 2389 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2390 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2391 (status->bw == RATE_INFO_BW_160) ? 
"160" : "", 2392 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 2393 status->rate_idx, 2394 status->nss, 2395 status->freq, 2396 status->band, status->flag, 2397 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 2398 !!(status->flag & RX_FLAG_MMIC_ERROR), 2399 !!(status->flag & RX_FLAG_AMSDU_MORE)); 2400 2401 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ", 2402 msdu->data, msdu->len); 2403 2404 /* TODO: trace rx packet */ 2405 2406 ieee80211_rx_napi(ar->hw, NULL, msdu, napi); 2407} 2408 2409static int ath11k_dp_rx_process_msdu(struct ath11k *ar, 2410 struct sk_buff *msdu, 2411 struct sk_buff_head *msdu_list) 2412{ 2413 struct hal_rx_desc *rx_desc, *lrx_desc; 2414 struct ieee80211_rx_status rx_status = {0}; 2415 struct ieee80211_rx_status *status; 2416 struct ath11k_skb_rxcb *rxcb; 2417 struct ieee80211_hdr *hdr; 2418 struct sk_buff *last_buf; 2419 u8 l3_pad_bytes; 2420 u8 *hdr_status; 2421 u16 msdu_len; 2422 int ret; 2423 2424 last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); 2425 if (!last_buf) { 2426 ath11k_warn(ar->ab, 2427 "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); 2428 ret = -EIO; 2429 goto free_out; 2430 } 2431 2432 rx_desc = (struct hal_rx_desc *)msdu->data; 2433 lrx_desc = (struct hal_rx_desc *)last_buf->data; 2434 if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) { 2435 ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n"); 2436 ret = -EIO; 2437 goto free_out; 2438 } 2439 2440 rxcb = ATH11K_SKB_RXCB(msdu); 2441 rxcb->rx_desc = rx_desc; 2442 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 2443 l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc); 2444 2445 if (rxcb->is_frag) { 2446 skb_pull(msdu, HAL_RX_DESC_SIZE); 2447 } else if (!rxcb->is_continuation) { 2448 if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { 2449 hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); 2450 ret = -EINVAL; 2451 ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len); 2452 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 2453 sizeof(struct ieee80211_hdr)); 2454 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 2455 sizeof(struct hal_rx_desc)); 2456 goto free_out; 2457 } 2458 skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len); 2459 skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes); 2460 } else { 2461 ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, 2462 msdu, last_buf, 2463 l3_pad_bytes, msdu_len); 2464 if (ret) { 2465 ath11k_warn(ar->ab, 2466 "failed to coalesce msdu rx buffer%d\n", ret); 2467 goto free_out; 2468 } 2469 } 2470 2471 hdr = (struct ieee80211_hdr *)msdu->data; 2472 2473 /* Process only data frames */ 2474 if (!ieee80211_is_data(hdr->frame_control)) 2475 return -EINVAL; 2476 2477 ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status); 2478 ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status); 2479 2480 rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; 2481 2482 status = IEEE80211_SKB_RXCB(msdu); 2483 *status = rx_status; 2484 return 0; 2485 2486free_out: 2487 return ret; 2488} 2489 2490static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, 2491 struct napi_struct *napi, 2492 struct sk_buff_head *msdu_list, 2493 int *quota, int ring_id) 2494{ 2495 struct ath11k_skb_rxcb *rxcb; 2496 struct sk_buff *msdu; 2497 struct ath11k *ar; 2498 u8 mac_id; 2499 int ret; 2500 2501 if (skb_queue_empty(msdu_list)) 2502 return; 2503 2504 rcu_read_lock(); 2505 2506 while (*quota && (msdu = __skb_dequeue(msdu_list))) { 2507 rxcb = ATH11K_SKB_RXCB(msdu); 2508 mac_id = rxcb->mac_id; 2509 ar = 
ab->pdevs[mac_id].ar; 2510 if (!rcu_dereference(ab->pdevs_active[mac_id])) { 2511 dev_kfree_skb_any(msdu); 2512 continue; 2513 } 2514 2515 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 2516 dev_kfree_skb_any(msdu); 2517 continue; 2518 } 2519 2520 ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list); 2521 if (ret) { 2522 ath11k_dbg(ab, ATH11K_DBG_DATA, 2523 "Unable to process msdu %d", ret); 2524 dev_kfree_skb_any(msdu); 2525 continue; 2526 } 2527 2528 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 2529 (*quota)--; 2530 } 2531 2532 rcu_read_unlock(); 2533} 2534 2535int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, 2536 struct napi_struct *napi, int budget) 2537{ 2538 struct ath11k_dp *dp = &ab->dp; 2539 struct dp_rxdma_ring *rx_ring; 2540 int num_buffs_reaped[MAX_RADIOS] = {0}; 2541 struct sk_buff_head msdu_list; 2542 struct ath11k_skb_rxcb *rxcb; 2543 int total_msdu_reaped = 0; 2544 struct hal_srng *srng; 2545 struct sk_buff *msdu; 2546 int quota = budget; 2547 bool done = false; 2548 int buf_id, mac_id; 2549 struct ath11k *ar; 2550 u32 *rx_desc; 2551 int i; 2552 2553 __skb_queue_head_init(&msdu_list); 2554 2555 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2556 2557 spin_lock_bh(&srng->lock); 2558 2559 ath11k_hal_srng_access_begin(ab, srng); 2560 2561try_again: 2562 while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 2563 struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc; 2564 enum hal_reo_dest_ring_push_reason push_reason; 2565 u32 cookie; 2566 2567 cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 2568 desc.buf_addr_info.info1); 2569 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 2570 cookie); 2571 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); 2572 2573 if (unlikely(buf_id == 0)) 2574 continue; 2575 2576 ar = ab->pdevs[mac_id].ar; 2577 rx_ring = &ar->dp.rx_refill_buf_ring; 2578 spin_lock_bh(&rx_ring->idr_lock); 2579 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 2580 if (!msdu) { 2581 ath11k_warn(ab, "frame rx with invalid buf_id %d\n", 2582 buf_id); 2583 spin_unlock_bh(&rx_ring->idr_lock); 2584 continue; 2585 } 2586 2587 idr_remove(&rx_ring->bufs_idr, buf_id); 2588 spin_unlock_bh(&rx_ring->idr_lock); 2589 2590 rxcb = ATH11K_SKB_RXCB(msdu); 2591 dma_unmap_single(ab->dev, rxcb->paddr, 2592 msdu->len + skb_tailroom(msdu), 2593 DMA_FROM_DEVICE); 2594 2595 num_buffs_reaped[mac_id]++; 2596 total_msdu_reaped++; 2597 2598 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, 2599 desc.info0); 2600 if (push_reason != 2601 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) { 2602 dev_kfree_skb_any(msdu); 2603 ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++; 2604 continue; 2605 } 2606 2607 rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 & 2608 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); 2609 rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 & 2610 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2611 rxcb->is_continuation = !!(desc.rx_msdu_info.info0 & 2612 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2613 rxcb->mac_id = mac_id; 2614 rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, 2615 desc.info0); 2616 2617 __skb_queue_tail(&msdu_list, msdu); 2618 2619 if (total_msdu_reaped >= quota && !rxcb->is_continuation) { 2620 done = true; 2621 break; 2622 } 2623 } 2624 2625 /* Hw might have updated the head pointer after we cached it. 2626 * In this case, even though there are entries in the ring we'll 2627 * get rx_desc NULL. 
Give the read another try with updated cached 2628 * head pointer so that we can reap complete MPDU in the current 2629 * rx processing. 2630 */ 2631 if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) { 2632 ath11k_hal_srng_access_end(ab, srng); 2633 goto try_again; 2634 } 2635 2636 ath11k_hal_srng_access_end(ab, srng); 2637 2638 spin_unlock_bh(&srng->lock); 2639 2640 if (!total_msdu_reaped) 2641 goto exit; 2642 2643 for (i = 0; i < ab->num_radios; i++) { 2644 if (!num_buffs_reaped[i]) 2645 continue; 2646 2647 ar = ab->pdevs[i].ar; 2648 rx_ring = &ar->dp.rx_refill_buf_ring; 2649 2650 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 2651 HAL_RX_BUF_RBM_SW3_BM); 2652 } 2653 2654 ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list, 2655 "a, ring_id); 2656 2657exit: 2658 return budget - quota; 2659} 2660 2661static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, 2662 struct hal_rx_mon_ppdu_info *ppdu_info) 2663{ 2664 struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; 2665 u32 num_msdu; 2666 2667 if (!rx_stats) 2668 return; 2669 2670 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + 2671 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; 2672 2673 rx_stats->num_msdu += num_msdu; 2674 rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + 2675 ppdu_info->tcp_ack_msdu_count; 2676 rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; 2677 rx_stats->other_msdu_count += ppdu_info->other_msdu_count; 2678 2679 if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || 2680 ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { 2681 ppdu_info->nss = 1; 2682 ppdu_info->mcs = HAL_RX_MAX_MCS; 2683 ppdu_info->tid = IEEE80211_NUM_TIDS; 2684 } 2685 2686 if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) 2687 rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; 2688 2689 if (ppdu_info->mcs <= HAL_RX_MAX_MCS) 2690 rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; 2691 2692 if (ppdu_info->gi < HAL_RX_GI_MAX) 2693 rx_stats->gi_count[ppdu_info->gi] += num_msdu; 2694 2695 if (ppdu_info->bw < HAL_RX_BW_MAX) 2696 rx_stats->bw_count[ppdu_info->bw] += num_msdu; 2697 2698 if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) 2699 rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; 2700 2701 if (ppdu_info->tid <= IEEE80211_NUM_TIDS) 2702 rx_stats->tid_count[ppdu_info->tid] += num_msdu; 2703 2704 if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) 2705 rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; 2706 2707 if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) 2708 rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; 2709 2710 if (ppdu_info->is_stbc) 2711 rx_stats->stbc_count += num_msdu; 2712 2713 if (ppdu_info->beamformed) 2714 rx_stats->beamformed_count += num_msdu; 2715 2716 if (ppdu_info->num_mpdu_fcs_ok > 1) 2717 rx_stats->ampdu_msdu_count += num_msdu; 2718 else 2719 rx_stats->non_ampdu_msdu_count += num_msdu; 2720 2721 rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; 2722 rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; 2723 rx_stats->dcm_count += ppdu_info->dcm; 2724 rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; 2725 2726 arsta->rssi_comb = ppdu_info->rssi_comb; 2727 rx_stats->rx_duration += ppdu_info->rx_duration; 2728 arsta->rx_duration = rx_stats->rx_duration; 2729} 2730 2731static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, 2732 struct dp_rxdma_ring *rx_ring, 2733 int *buf_id) 2734{ 2735 struct sk_buff *skb; 2736 dma_addr_t paddr; 2737 2738 skb = 
dev_alloc_skb(DP_RX_BUFFER_SIZE + 2739 DP_RX_BUFFER_ALIGN_SIZE); 2740 2741 if (!skb) 2742 goto fail_alloc_skb; 2743 2744 if (!IS_ALIGNED((unsigned long)skb->data, 2745 DP_RX_BUFFER_ALIGN_SIZE)) { 2746 skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - 2747 skb->data); 2748 } 2749 2750 paddr = dma_map_single(ab->dev, skb->data, 2751 skb->len + skb_tailroom(skb), 2752 DMA_BIDIRECTIONAL); 2753 if (unlikely(dma_mapping_error(ab->dev, paddr))) 2754 goto fail_free_skb; 2755 2756 spin_lock_bh(&rx_ring->idr_lock); 2757 *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, 2758 rx_ring->bufs_max, GFP_ATOMIC); 2759 spin_unlock_bh(&rx_ring->idr_lock); 2760 if (*buf_id < 0) 2761 goto fail_dma_unmap; 2762 2763 ATH11K_SKB_RXCB(skb)->paddr = paddr; 2764 return skb; 2765 2766fail_dma_unmap: 2767 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2768 DMA_BIDIRECTIONAL); 2769fail_free_skb: 2770 dev_kfree_skb_any(skb); 2771fail_alloc_skb: 2772 return NULL; 2773} 2774 2775int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, 2776 struct dp_rxdma_ring *rx_ring, 2777 int req_entries, 2778 enum hal_rx_buf_return_buf_manager mgr) 2779{ 2780 struct hal_srng *srng; 2781 u32 *desc; 2782 struct sk_buff *skb; 2783 int num_free; 2784 int num_remain; 2785 int buf_id; 2786 u32 cookie; 2787 dma_addr_t paddr; 2788 2789 req_entries = min(req_entries, rx_ring->bufs_max); 2790 2791 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2792 2793 spin_lock_bh(&srng->lock); 2794 2795 ath11k_hal_srng_access_begin(ab, srng); 2796 2797 num_free = ath11k_hal_srng_src_num_free(ab, srng, true); 2798 2799 req_entries = min(num_free, req_entries); 2800 num_remain = req_entries; 2801 2802 while (num_remain > 0) { 2803 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2804 &buf_id); 2805 if (!skb) 2806 break; 2807 paddr = ATH11K_SKB_RXCB(skb)->paddr; 2808 2809 desc = ath11k_hal_srng_src_get_next_entry(ab, srng); 2810 if (!desc) 2811 goto fail_desc_get; 2812 2813 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2814 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2815 2816 num_remain--; 2817 2818 ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); 2819 } 2820 2821 ath11k_hal_srng_access_end(ab, srng); 2822 2823 spin_unlock_bh(&srng->lock); 2824 2825 return req_entries - num_remain; 2826 2827fail_desc_get: 2828 spin_lock_bh(&rx_ring->idr_lock); 2829 idr_remove(&rx_ring->bufs_idr, buf_id); 2830 spin_unlock_bh(&rx_ring->idr_lock); 2831 dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), 2832 DMA_BIDIRECTIONAL); 2833 dev_kfree_skb_any(skb); 2834 ath11k_hal_srng_access_end(ab, srng); 2835 spin_unlock_bh(&srng->lock); 2836 2837 return req_entries - num_remain; 2838} 2839 2840static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, 2841 int *budget, struct sk_buff_head *skb_list) 2842{ 2843 struct ath11k *ar; 2844 struct ath11k_pdev_dp *dp; 2845 struct dp_rxdma_ring *rx_ring; 2846 struct hal_srng *srng; 2847 void *rx_mon_status_desc; 2848 struct sk_buff *skb; 2849 struct ath11k_skb_rxcb *rxcb; 2850 struct hal_tlv_hdr *tlv; 2851 u32 cookie; 2852 int buf_id, srng_id; 2853 dma_addr_t paddr; 2854 u8 rbm; 2855 int num_buffs_reaped = 0; 2856 2857 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 2858 dp = &ar->dp; 2859 srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id); 2860 rx_ring = &dp->rx_mon_status_refill_ring[srng_id]; 2861 2862 srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; 2863 2864 
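/* Reap completed entries from the monitor status refill ring: peek at
 * each source ring entry, unmap the attached skb and collect it once
 * its HAL_RX_STATUS_BUFFER_DONE tlv is seen, then immediately plug a
 * freshly allocated status buffer back into the ring entry.
 */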
spin_lock_bh(&srng->lock); 2865 2866 ath11k_hal_srng_access_begin(ab, srng); 2867 while (*budget) { 2868 *budget -= 1; 2869 rx_mon_status_desc = 2870 ath11k_hal_srng_src_peek(ab, srng); 2871 if (!rx_mon_status_desc) 2872 break; 2873 2874 ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, 2875 &cookie, &rbm); 2876 if (paddr) { 2877 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); 2878 2879 spin_lock_bh(&rx_ring->idr_lock); 2880 skb = idr_find(&rx_ring->bufs_idr, buf_id); 2881 if (!skb) { 2882 ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", 2883 buf_id); 2884 spin_unlock_bh(&rx_ring->idr_lock); 2885 goto move_next; 2886 } 2887 2888 idr_remove(&rx_ring->bufs_idr, buf_id); 2889 spin_unlock_bh(&rx_ring->idr_lock); 2890 2891 rxcb = ATH11K_SKB_RXCB(skb); 2892 2893 dma_sync_single_for_cpu(ab->dev, rxcb->paddr, 2894 skb->len + skb_tailroom(skb), 2895 DMA_FROM_DEVICE); 2896 2897 dma_unmap_single(ab->dev, rxcb->paddr, 2898 skb->len + skb_tailroom(skb), 2899 DMA_BIDIRECTIONAL); 2900 2901 tlv = (struct hal_tlv_hdr *)skb->data; 2902 if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != 2903 HAL_RX_STATUS_BUFFER_DONE) { 2904 ath11k_warn(ab, "mon status DONE not set %lx\n", 2905 FIELD_GET(HAL_TLV_HDR_TAG, 2906 tlv->tl)); 2907 dev_kfree_skb_any(skb); 2908 goto move_next; 2909 } 2910 2911 __skb_queue_tail(skb_list, skb); 2912 } 2913move_next: 2914 skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, 2915 &buf_id); 2916 2917 if (!skb) { 2918 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, 2919 HAL_RX_BUF_RBM_SW3_BM); 2920 num_buffs_reaped++; 2921 break; 2922 } 2923 rxcb = ATH11K_SKB_RXCB(skb); 2924 2925 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | 2926 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 2927 2928 ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, 2929 cookie, HAL_RX_BUF_RBM_SW3_BM); 2930 ath11k_hal_srng_src_get_next_entry(ab, srng); 2931 num_buffs_reaped++; 2932 } 2933 ath11k_hal_srng_access_end(ab, srng); 2934 spin_unlock_bh(&srng->lock); 2935 2936 return num_buffs_reaped; 2937} 2938 2939int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, 2940 struct napi_struct *napi, int budget) 2941{ 2942 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 2943 enum hal_rx_mon_status hal_status; 2944 struct sk_buff *skb; 2945 struct sk_buff_head skb_list; 2946 struct hal_rx_mon_ppdu_info ppdu_info; 2947 struct ath11k_peer *peer; 2948 struct ath11k_sta *arsta; 2949 int num_buffs_reaped = 0; 2950 2951 __skb_queue_head_init(&skb_list); 2952 2953 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, 2954 &skb_list); 2955 if (!num_buffs_reaped) 2956 goto exit; 2957 2958 while ((skb = __skb_dequeue(&skb_list))) { 2959 memset(&ppdu_info, 0, sizeof(ppdu_info)); 2960 ppdu_info.peer_id = HAL_INVALID_PEERID; 2961 2962 if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) 2963 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2964 2965 hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); 2966 2967 if (ppdu_info.peer_id == HAL_INVALID_PEERID || 2968 hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 2969 dev_kfree_skb_any(skb); 2970 continue; 2971 } 2972 2973 rcu_read_lock(); 2974 spin_lock_bh(&ab->base_lock); 2975 peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id); 2976 2977 if (!peer || !peer->sta) { 2978 ath11k_dbg(ab, ATH11K_DBG_DATA, 2979 "failed to find the peer with peer_id %d\n", 2980 ppdu_info.peer_id); 2981 spin_unlock_bh(&ab->base_lock); 2982 rcu_read_unlock(); 2983 dev_kfree_skb_any(skb); 2984 
continue; 2985 } 2986 2987 arsta = (struct ath11k_sta *)peer->sta->drv_priv; 2988 ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info); 2989 2990 if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) 2991 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2992 2993 spin_unlock_bh(&ab->base_lock); 2994 rcu_read_unlock(); 2995 2996 dev_kfree_skb_any(skb); 2997 } 2998exit: 2999 return num_buffs_reaped; 3000} 3001 3002static void ath11k_dp_rx_frag_timer(struct timer_list *timer) 3003{ 3004 struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer); 3005 3006 spin_lock_bh(&rx_tid->ab->base_lock); 3007 if (rx_tid->last_frag_no && 3008 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { 3009 spin_unlock_bh(&rx_tid->ab->base_lock); 3010 return; 3011 } 3012 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3013 spin_unlock_bh(&rx_tid->ab->base_lock); 3014} 3015 3016int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id) 3017{ 3018 struct ath11k_base *ab = ar->ab; 3019 struct crypto_shash *tfm; 3020 struct ath11k_peer *peer; 3021 struct dp_rx_tid *rx_tid; 3022 int i; 3023 3024 tfm = crypto_alloc_shash("michael_mic", 0, 0); 3025 if (IS_ERR(tfm)) 3026 return PTR_ERR(tfm); 3027 3028 spin_lock_bh(&ab->base_lock); 3029 3030 peer = ath11k_peer_find(ab, vdev_id, peer_mac); 3031 if (!peer) { 3032 ath11k_warn(ab, "failed to find the peer to set up fragment info\n"); 3033 spin_unlock_bh(&ab->base_lock); 3034 crypto_free_shash(tfm); 3035 return -ENOENT; 3036 } 3037 3038 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 3039 rx_tid = &peer->rx_tid[i]; 3040 rx_tid->ab = ab; 3041 timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0); 3042 skb_queue_head_init(&rx_tid->rx_frags); 3043 } 3044 3045 peer->tfm_mmic = tfm; 3046 spin_unlock_bh(&ab->base_lock); 3047 3048 return 0; 3049} 3050 3051static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, 3052 struct ieee80211_hdr *hdr, u8 *data, 3053 size_t data_len, u8 *mic) 3054{ 3055 SHASH_DESC_ON_STACK(desc, tfm); 3056 u8 mic_hdr[16] = {0}; 3057 u8 tid = 0; 3058 int ret; 3059 3060 if (!tfm) 3061 return -EINVAL; 3062 3063 desc->tfm = tfm; 3064 3065 ret = crypto_shash_setkey(tfm, key, 8); 3066 if (ret) 3067 goto out; 3068 3069 ret = crypto_shash_init(desc); 3070 if (ret) 3071 goto out; 3072 3073 /* TKIP MIC header */ 3074 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); 3075 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); 3076 if (ieee80211_is_data_qos(hdr->frame_control)) 3077 tid = ieee80211_get_tid(hdr); 3078 mic_hdr[12] = tid; 3079 3080 ret = crypto_shash_update(desc, mic_hdr, 16); 3081 if (ret) 3082 goto out; 3083 ret = crypto_shash_update(desc, data, data_len); 3084 if (ret) 3085 goto out; 3086 ret = crypto_shash_final(desc, mic); 3087out: 3088 shash_desc_zero(desc); 3089 return ret; 3090} 3091 3092static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer, 3093 struct sk_buff *msdu) 3094{ 3095 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; 3096 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); 3097 struct ieee80211_key_conf *key_conf; 3098 struct ieee80211_hdr *hdr; 3099 u8 mic[IEEE80211_CCMP_MIC_LEN]; 3100 int head_len, tail_len, ret; 3101 size_t data_len; 3102 u32 hdr_len; 3103 u8 *key, *data; 3104 u8 key_idx; 3105 3106 if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) 3107 return 0; 3108 3109 hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE); 3110 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3111 
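/* Michael MIC verification: the MIC covers DA, SA, priority and the
 * plaintext payload, so skip the rx descriptor, 802.11 header and TKIP
 * IV at the head and leave the MIC, ICV and FCS out of the tail.
 */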
head_len = hdr_len + HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN; 3112 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; 3113 3114 if (!is_multicast_ether_addr(hdr->addr1)) 3115 key_idx = peer->ucast_keyidx; 3116 else 3117 key_idx = peer->mcast_keyidx; 3118 3119 key_conf = peer->keys[key_idx]; 3120 3121 data = msdu->data + head_len; 3122 data_len = msdu->len - head_len - tail_len; 3123 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 3124 3125 ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); 3126 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) 3127 goto mic_fail; 3128 3129 return 0; 3130 3131mic_fail: 3132 (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true; 3133 (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true; 3134 3135 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3136 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3137 skb_pull(msdu, HAL_RX_DESC_SIZE); 3138 3139 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 3140 ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, 3141 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 3142 ieee80211_rx(ar->hw, msdu); 3143 return -EINVAL; 3144} 3145 3146static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, 3147 enum hal_encrypt_type enctype, u32 flags) 3148{ 3149 struct ieee80211_hdr *hdr; 3150 size_t hdr_len; 3151 size_t crypto_len; 3152 3153 if (!flags) 3154 return; 3155 3156 hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE); 3157 3158 if (flags & RX_FLAG_MIC_STRIPPED) 3159 skb_trim(msdu, msdu->len - 3160 ath11k_dp_rx_crypto_mic_len(ar, enctype)); 3161 3162 if (flags & RX_FLAG_ICV_STRIPPED) 3163 skb_trim(msdu, msdu->len - 3164 ath11k_dp_rx_crypto_icv_len(ar, enctype)); 3165 3166 if (flags & RX_FLAG_IV_STRIPPED) { 3167 hdr_len = ieee80211_hdrlen(hdr->frame_control); 3168 crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); 3169 3170 memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len, 3171 (void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len); 3172 skb_pull(msdu, crypto_len); 3173 } 3174} 3175 3176static int ath11k_dp_rx_h_defrag(struct ath11k *ar, 3177 struct ath11k_peer *peer, 3178 struct dp_rx_tid *rx_tid, 3179 struct sk_buff **defrag_skb) 3180{ 3181 struct hal_rx_desc *rx_desc; 3182 struct sk_buff *skb, *first_frag, *last_frag; 3183 struct ieee80211_hdr *hdr; 3184 enum hal_encrypt_type enctype; 3185 bool is_decrypted = false; 3186 int msdu_len = 0; 3187 int extra_space; 3188 u32 flags; 3189 3190 first_frag = skb_peek(&rx_tid->rx_frags); 3191 last_frag = skb_peek_tail(&rx_tid->rx_frags); 3192 3193 skb_queue_walk(&rx_tid->rx_frags, skb) { 3194 flags = 0; 3195 rx_desc = (struct hal_rx_desc *)skb->data; 3196 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 3197 3198 enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); 3199 if (enctype != HAL_ENCRYPT_TYPE_OPEN) 3200 is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); 3201 3202 if (is_decrypted) { 3203 if (skb != first_frag) 3204 flags |= RX_FLAG_IV_STRIPPED; 3205 if (skb != last_frag) 3206 flags |= RX_FLAG_ICV_STRIPPED | 3207 RX_FLAG_MIC_STRIPPED; 3208 } 3209 3210 /* RX fragments are always raw packets */ 3211 if (skb != last_frag) 3212 skb_trim(skb, skb->len - FCS_LEN); 3213 ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); 3214 3215 if (skb != first_frag) 3216 skb_pull(skb, HAL_RX_DESC_SIZE + 3217 ieee80211_hdrlen(hdr->frame_control)); 3218 msdu_len += skb->len; 3219 } 3220 3221 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); 3222 if (extra_space > 0 && 3223 
(pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) 3224 return -ENOMEM; 3225 3226 __skb_unlink(first_frag, &rx_tid->rx_frags); 3227 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { 3228 skb_put_data(first_frag, skb->data, skb->len); 3229 dev_kfree_skb_any(skb); 3230 } 3231 3232 hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE); 3233 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 3234 ATH11K_SKB_RXCB(first_frag)->is_frag = 1; 3235 3236 if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) 3237 first_frag = NULL; 3238 3239 *defrag_skb = first_frag; 3240 return 0; 3241} 3242 3243static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid, 3244 struct sk_buff *defrag_skb) 3245{ 3246 struct ath11k_base *ab = ar->ab; 3247 struct ath11k_pdev_dp *dp = &ar->dp; 3248 struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring; 3249 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; 3250 struct hal_reo_entrance_ring *reo_ent_ring; 3251 struct hal_reo_dest_ring *reo_dest_ring; 3252 struct dp_link_desc_bank *link_desc_banks; 3253 struct hal_rx_msdu_link *msdu_link; 3254 struct hal_rx_msdu_details *msdu0; 3255 struct hal_srng *srng; 3256 dma_addr_t paddr; 3257 u32 desc_bank, msdu_info, mpdu_info; 3258 u32 dst_idx, cookie; 3259 u32 *msdu_len_offset; 3260 int ret, buf_id; 3261 3262 link_desc_banks = ab->dp.link_desc_banks; 3263 reo_dest_ring = rx_tid->dst_ring_desc; 3264 3265 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3266 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + 3267 (paddr - link_desc_banks[desc_bank].paddr)); 3268 msdu0 = &msdu_link->msdu_link[0]; 3269 dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0); 3270 memset(msdu0, 0, sizeof(*msdu0)); 3271 3272 msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) | 3273 FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) | 3274 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) | 3275 FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH, 3276 defrag_skb->len - HAL_RX_DESC_SIZE) | 3277 FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) | 3278 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) | 3279 FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1); 3280 msdu0->rx_msdu_info.info0 = msdu_info; 3281 3282 /* change msdu len in hal rx desc */ 3283 msdu_len_offset = (u32 *)&rx_desc->msdu_start; 3284 *msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH); 3285 *msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE; 3286 3287 paddr = dma_map_single(ab->dev, defrag_skb->data, 3288 defrag_skb->len + skb_tailroom(defrag_skb), 3289 DMA_TO_DEVICE); 3290 if (dma_mapping_error(ab->dev, paddr)) 3291 return -ENOMEM; 3292 3293 spin_lock_bh(&rx_refill_ring->idr_lock); 3294 buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0, 3295 rx_refill_ring->bufs_max * 3, GFP_ATOMIC); 3296 spin_unlock_bh(&rx_refill_ring->idr_lock); 3297 if (buf_id < 0) { 3298 ret = -ENOMEM; 3299 goto err_unmap_dma; 3300 } 3301 3302 ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr; 3303 cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) | 3304 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); 3305 3306 ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM); 3307 3308 /* Fill mpdu details into reo entrace ring */ 3309 srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id]; 3310 3311 spin_lock_bh(&srng->lock); 3312 ath11k_hal_srng_access_begin(ab, srng); 3313 3314 reo_ent_ring = (struct 
hal_reo_entrance_ring *) 3315 ath11k_hal_srng_src_get_next_entry(ab, srng); 3316 if (!reo_ent_ring) { 3317 ath11k_hal_srng_access_end(ab, srng); 3318 spin_unlock_bh(&srng->lock); 3319 ret = -ENOSPC; 3320 goto err_free_idr; 3321 } 3322 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); 3323 3324 ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); 3325 ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank, 3326 HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); 3327 3328 mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) | 3329 FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) | 3330 FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) | 3331 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) | 3332 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) | 3333 FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) | 3334 FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1); 3335 3336 reo_ent_ring->rx_mpdu_info.info0 = mpdu_info; 3337 reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data; 3338 reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo; 3339 reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI, 3340 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI, 3341 reo_dest_ring->info0)) | 3342 FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx); 3343 ath11k_hal_srng_access_end(ab, srng); 3344 spin_unlock_bh(&srng->lock); 3345 3346 return 0; 3347 3348err_free_idr: 3349 spin_lock_bh(&rx_refill_ring->idr_lock); 3350 idr_remove(&rx_refill_ring->bufs_idr, buf_id); 3351 spin_unlock_bh(&rx_refill_ring->idr_lock); 3352err_unmap_dma: 3353 dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb), 3354 DMA_TO_DEVICE); 3355 return ret; 3356} 3357 3358static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b) 3359{ 3360 int frag1, frag2; 3361 3362 frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a); 3363 frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b); 3364 3365 return frag1 - frag2; 3366} 3367 3368static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list, 3369 struct sk_buff *cur_frag) 3370{ 3371 struct sk_buff *skb; 3372 int cmp; 3373 3374 skb_queue_walk(frag_list, skb) { 3375 cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag); 3376 if (cmp < 0) 3377 continue; 3378 __skb_queue_before(frag_list, skb, cur_frag); 3379 return; 3380 } 3381 __skb_queue_tail(frag_list, cur_frag); 3382} 3383 3384static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb) 3385{ 3386 struct ieee80211_hdr *hdr; 3387 u64 pn = 0; 3388 u8 *ehdr; 3389 3390 hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE); 3391 ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control); 3392 3393 pn = ehdr[0]; 3394 pn |= (u64)ehdr[1] << 8; 3395 pn |= (u64)ehdr[4] << 16; 3396 pn |= (u64)ehdr[5] << 24; 3397 pn |= (u64)ehdr[6] << 32; 3398 pn |= (u64)ehdr[7] << 40; 3399 3400 return pn; 3401} 3402 3403static bool 3404ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid) 3405{ 3406 enum hal_encrypt_type encrypt_type; 3407 struct sk_buff *first_frag, *skb; 3408 struct hal_rx_desc *desc; 3409 u64 last_pn; 3410 u64 cur_pn; 3411 3412 first_frag = skb_peek(&rx_tid->rx_frags); 3413 desc = (struct hal_rx_desc *)first_frag->data; 3414 3415 encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc); 3416 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && 3417 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && 3418 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && 3419 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) 3420 return true; 3421 3422 last_pn = ath11k_dp_rx_h_get_pn(first_frag); 
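/* For CCMP/GCMP ciphers the PN must increase by exactly one from one
 * fragment to the next; any gap or reordering invalidates the whole
 * fragment sequence.
 */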
3423 skb_queue_walk(&rx_tid->rx_frags, skb) { 3424 if (skb == first_frag) 3425 continue; 3426 3427 cur_pn = ath11k_dp_rx_h_get_pn(skb); 3428 if (cur_pn != last_pn + 1) 3429 return false; 3430 last_pn = cur_pn; 3431 } 3432 return true; 3433} 3434 3435static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, 3436 struct sk_buff *msdu, 3437 u32 *ring_desc) 3438{ 3439 struct ath11k_base *ab = ar->ab; 3440 struct hal_rx_desc *rx_desc; 3441 struct ath11k_peer *peer; 3442 struct dp_rx_tid *rx_tid; 3443 struct sk_buff *defrag_skb = NULL; 3444 u32 peer_id; 3445 u16 seqno, frag_no; 3446 u8 tid; 3447 int ret = 0; 3448 bool more_frags; 3449 3450 rx_desc = (struct hal_rx_desc *)msdu->data; 3451 peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc); 3452 tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc); 3453 seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc); 3454 frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu); 3455 more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu); 3456 3457 if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) || 3458 !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) || 3459 tid > IEEE80211_NUM_TIDS) 3460 return -EINVAL; 3461 3462 /* received unfragmented packet in reo 3463 * exception ring, this shouldn't happen 3464 * as these packets typically come from 3465 * reo2sw srngs. 3466 */ 3467 if (WARN_ON_ONCE(!frag_no && !more_frags)) 3468 return -EINVAL; 3469 3470 spin_lock_bh(&ab->base_lock); 3471 peer = ath11k_peer_find_by_id(ab, peer_id); 3472 if (!peer) { 3473 ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", 3474 peer_id); 3475 ret = -ENOENT; 3476 goto out_unlock; 3477 } 3478 rx_tid = &peer->rx_tid[tid]; 3479 3480 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || 3481 skb_queue_empty(&rx_tid->rx_frags)) { 3482 /* Flush stored fragments and start a new sequence */ 3483 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3484 rx_tid->cur_sn = seqno; 3485 } 3486 3487 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { 3488 /* Fragment already present */ 3489 ret = -EINVAL; 3490 goto out_unlock; 3491 } 3492 3493 if (frag_no > __fls(rx_tid->rx_frag_bitmap)) 3494 __skb_queue_tail(&rx_tid->rx_frags, msdu); 3495 else 3496 ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu); 3497 3498 rx_tid->rx_frag_bitmap |= BIT(frag_no); 3499 if (!more_frags) 3500 rx_tid->last_frag_no = frag_no; 3501 3502 if (frag_no == 0) { 3503 rx_tid->dst_ring_desc = kmemdup(ring_desc, 3504 sizeof(*rx_tid->dst_ring_desc), 3505 GFP_ATOMIC); 3506 if (!rx_tid->dst_ring_desc) { 3507 ret = -ENOMEM; 3508 goto out_unlock; 3509 } 3510 } else { 3511 ath11k_dp_rx_link_desc_return(ab, ring_desc, 3512 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3513 } 3514 3515 if (!rx_tid->last_frag_no || 3516 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { 3517 mod_timer(&rx_tid->frag_timer, jiffies + 3518 ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS); 3519 goto out_unlock; 3520 } 3521 3522 spin_unlock_bh(&ab->base_lock); 3523 del_timer_sync(&rx_tid->frag_timer); 3524 spin_lock_bh(&ab->base_lock); 3525 3526 peer = ath11k_peer_find_by_id(ab, peer_id); 3527 if (!peer) 3528 goto err_frags_cleanup; 3529 3530 if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) 3531 goto err_frags_cleanup; 3532 3533 if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) 3534 goto err_frags_cleanup; 3535 3536 if (!defrag_skb) 3537 goto err_frags_cleanup; 3538 3539 if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) 3540 goto err_frags_cleanup; 3541 3542 ath11k_dp_rx_frags_cleanup(rx_tid, false); 3543 goto out_unlock; 
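/* Error path: reached when the peer lookup, PN validation, defrag or
 * REO reinjection fails; free any partially built skb and flush all
 * fragments stored for this TID.
 */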
3544 3545err_frags_cleanup: 3546 dev_kfree_skb_any(defrag_skb); 3547 ath11k_dp_rx_frags_cleanup(rx_tid, true); 3548out_unlock: 3549 spin_unlock_bh(&ab->base_lock); 3550 return ret; 3551} 3552 3553static int 3554ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop) 3555{ 3556 struct ath11k_pdev_dp *dp = &ar->dp; 3557 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 3558 struct sk_buff *msdu; 3559 struct ath11k_skb_rxcb *rxcb; 3560 struct hal_rx_desc *rx_desc; 3561 u8 *hdr_status; 3562 u16 msdu_len; 3563 3564 spin_lock_bh(&rx_ring->idr_lock); 3565 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3566 if (!msdu) { 3567 ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n", 3568 buf_id); 3569 spin_unlock_bh(&rx_ring->idr_lock); 3570 return -EINVAL; 3571 } 3572 3573 idr_remove(&rx_ring->bufs_idr, buf_id); 3574 spin_unlock_bh(&rx_ring->idr_lock); 3575 3576 rxcb = ATH11K_SKB_RXCB(msdu); 3577 dma_unmap_single(ar->ab->dev, rxcb->paddr, 3578 msdu->len + skb_tailroom(msdu), 3579 DMA_FROM_DEVICE); 3580 3581 if (drop) { 3582 dev_kfree_skb_any(msdu); 3583 return 0; 3584 } 3585 3586 rcu_read_lock(); 3587 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { 3588 dev_kfree_skb_any(msdu); 3589 goto exit; 3590 } 3591 3592 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 3593 dev_kfree_skb_any(msdu); 3594 goto exit; 3595 } 3596 3597 rx_desc = (struct hal_rx_desc *)msdu->data; 3598 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); 3599 if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) { 3600 hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); 3601 ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len); 3602 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, 3603 sizeof(struct ieee80211_hdr)); 3604 ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, 3605 sizeof(struct hal_rx_desc)); 3606 dev_kfree_skb_any(msdu); 3607 goto exit; 3608 } 3609 3610 skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len); 3611 3612 if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) { 3613 dev_kfree_skb_any(msdu); 3614 ath11k_dp_rx_link_desc_return(ar->ab, ring_desc, 3615 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3616 } 3617exit: 3618 rcu_read_unlock(); 3619 return 0; 3620} 3621 3622int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, 3623 int budget) 3624{ 3625 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3626 struct dp_link_desc_bank *link_desc_banks; 3627 enum hal_rx_buf_return_buf_manager rbm; 3628 int tot_n_bufs_reaped, quota, ret, i; 3629 int n_bufs_reaped[MAX_RADIOS] = {0}; 3630 struct dp_rxdma_ring *rx_ring; 3631 struct dp_srng *reo_except; 3632 u32 desc_bank, num_msdus; 3633 struct hal_srng *srng; 3634 struct ath11k_dp *dp; 3635 void *link_desc_va; 3636 int buf_id, mac_id; 3637 struct ath11k *ar; 3638 dma_addr_t paddr; 3639 u32 *desc; 3640 bool is_frag; 3641 u8 drop = 0; 3642 3643 tot_n_bufs_reaped = 0; 3644 quota = budget; 3645 3646 dp = &ab->dp; 3647 reo_except = &dp->reo_except_ring; 3648 link_desc_banks = dp->link_desc_banks; 3649 3650 srng = &ab->hal.srng_list[reo_except->ring_id]; 3651 3652 spin_lock_bh(&srng->lock); 3653 3654 ath11k_hal_srng_access_begin(ab, srng); 3655 3656 while (budget && 3657 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 3658 struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc; 3659 3660 ab->soc_stats.err_ring_pkts++; 3661 ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr, 3662 &desc_bank); 3663 if (ret) { 3664 ath11k_warn(ab, "failed to parse error reo desc %d\n", 3665 ret); 3666 
continue; 3667 } 3668 link_desc_va = link_desc_banks[desc_bank].vaddr + 3669 (paddr - link_desc_banks[desc_bank].paddr); 3670 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 3671 &rbm); 3672 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && 3673 rbm != HAL_RX_BUF_RBM_SW3_BM) { 3674 ab->soc_stats.invalid_rbm++; 3675 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); 3676 ath11k_dp_rx_link_desc_return(ab, desc, 3677 HAL_WBM_REL_BM_ACT_REL_MSDU); 3678 continue; 3679 } 3680 3681 is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG); 3682 3683 /* Process only rx fragments with one msdu per link desc below, and drop 3684 * msdu's indicated due to error reasons. 3685 */ 3686 if (!is_frag || num_msdus > 1) { 3687 drop = 1; 3688 /* Return the link desc back to wbm idle list */ 3689 ath11k_dp_rx_link_desc_return(ab, desc, 3690 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3691 } 3692 3693 for (i = 0; i < num_msdus; i++) { 3694 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 3695 msdu_cookies[i]); 3696 3697 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, 3698 msdu_cookies[i]); 3699 3700 ar = ab->pdevs[mac_id].ar; 3701 3702 if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) { 3703 n_bufs_reaped[mac_id]++; 3704 tot_n_bufs_reaped++; 3705 } 3706 } 3707 3708 if (tot_n_bufs_reaped >= quota) { 3709 tot_n_bufs_reaped = quota; 3710 goto exit; 3711 } 3712 3713 budget = quota - tot_n_bufs_reaped; 3714 } 3715 3716exit: 3717 ath11k_hal_srng_access_end(ab, srng); 3718 3719 spin_unlock_bh(&srng->lock); 3720 3721 for (i = 0; i < ab->num_radios; i++) { 3722 if (!n_bufs_reaped[i]) 3723 continue; 3724 3725 ar = ab->pdevs[i].ar; 3726 rx_ring = &ar->dp.rx_refill_buf_ring; 3727 3728 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], 3729 HAL_RX_BUF_RBM_SW3_BM); 3730 } 3731 3732 return tot_n_bufs_reaped; 3733} 3734 3735static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, 3736 int msdu_len, 3737 struct sk_buff_head *msdu_list) 3738{ 3739 struct sk_buff *skb, *tmp; 3740 struct ath11k_skb_rxcb *rxcb; 3741 int n_buffs; 3742 3743 n_buffs = DIV_ROUND_UP(msdu_len, 3744 (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE)); 3745 3746 skb_queue_walk_safe(msdu_list, skb, tmp) { 3747 rxcb = ATH11K_SKB_RXCB(skb); 3748 if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && 3749 rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { 3750 if (!n_buffs) 3751 break; 3752 __skb_unlink(skb, msdu_list); 3753 dev_kfree_skb_any(skb); 3754 n_buffs--; 3755 } 3756 } 3757} 3758 3759static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, 3760 struct ieee80211_rx_status *status, 3761 struct sk_buff_head *msdu_list) 3762{ 3763 u16 msdu_len; 3764 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3765 u8 l3pad_bytes; 3766 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3767 3768 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); 3769 3770 if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) { 3771 /* First buffer will be freed by the caller, so deduct it's length */ 3772 msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE); 3773 ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); 3774 return -EINVAL; 3775 } 3776 3777 if (!ath11k_dp_rx_h_attn_msdu_done(desc)) { 3778 ath11k_warn(ar->ab, 3779 "msdu_done bit not set in null_q_des processing\n"); 3780 __skb_queue_purge(msdu_list); 3781 return -EIO; 3782 } 3783 3784 /* Handle NULL queue descriptor violations arising out a missing 3785 * REO queue for a given peer or a 
given TID. This typically 3786 * may happen if a packet is received on a QOS enabled TID before the 3787 * ADDBA negotiation for that TID, i.e. before the TID queue is set up. Or 3788 * it may also happen for MC/BC frames if they are not routed to the 3789 * non-QOS TID queue, in the absence of any other default TID queue. 3790 * This error can show up in both the REO destination and WBM release rings. 3791 */ 3792 3793 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); 3794 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); 3795 3796 if (rxcb->is_frag) { 3797 skb_pull(msdu, HAL_RX_DESC_SIZE); 3798 } else { 3799 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); 3800 3801 if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) 3802 return -EINVAL; 3803 3804 skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 3805 skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); 3806 } 3807 ath11k_dp_rx_h_ppdu(ar, desc, status); 3808 3809 ath11k_dp_rx_h_mpdu(ar, msdu, desc, status); 3810 3811 rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(desc); 3812 3813 /* Note that the caller still has access to the msdu and will complete 3814 * rx with mac80211, so there is no need to clean up amsdu_list here. 3815 */ 3816 3817 return 0; 3818} 3819 3820static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu, 3821 struct ieee80211_rx_status *status, 3822 struct sk_buff_head *msdu_list) 3823{ 3824 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3825 bool drop = false; 3826 3827 ar->ab->soc_stats.reo_error[rxcb->err_code]++; 3828 3829 switch (rxcb->err_code) { 3830 case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: 3831 if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list)) 3832 drop = true; 3833 break; 3834 case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED: 3835 /* TODO: Do not drop PN failed packets in the driver; 3836 * instead, it is good to drop such packets in mac80211 3837 * after incrementing the replay counters. 3838 */ 3839 fallthrough; 3840 default: 3841 /* TODO: Review other errors and process them to mac80211 3842 * as appropriate. 
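 * Until that is done, any other REO error code simply results in the
 * msdu being dropped here.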
3843 */ 3844 drop = true; 3845 break; 3846 } 3847 3848 return drop; 3849} 3850 3851static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, 3852 struct ieee80211_rx_status *status) 3853{ 3854 u16 msdu_len; 3855 struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; 3856 u8 l3pad_bytes; 3857 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3858 3859 rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); 3860 rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); 3861 3862 l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); 3863 msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); 3864 skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len); 3865 skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes); 3866 3867 ath11k_dp_rx_h_ppdu(ar, desc, status); 3868 3869 status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | 3870 RX_FLAG_DECRYPTED); 3871 3872 ath11k_dp_rx_h_undecap(ar, msdu, desc, 3873 HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); 3874} 3875 3876static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu, 3877 struct ieee80211_rx_status *status) 3878{ 3879 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3880 bool drop = false; 3881 3882 ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; 3883 3884 switch (rxcb->err_code) { 3885 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: 3886 ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status); 3887 break; 3888 default: 3889 /* TODO: Review other rxdma error code to check if anything is 3890 * worth reporting to mac80211 3891 */ 3892 drop = true; 3893 break; 3894 } 3895 3896 return drop; 3897} 3898 3899static void ath11k_dp_rx_wbm_err(struct ath11k *ar, 3900 struct napi_struct *napi, 3901 struct sk_buff *msdu, 3902 struct sk_buff_head *msdu_list) 3903{ 3904 struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); 3905 struct ieee80211_rx_status rxs = {0}; 3906 struct ieee80211_rx_status *status; 3907 bool drop = true; 3908 3909 switch (rxcb->err_rel_src) { 3910 case HAL_WBM_REL_SRC_MODULE_REO: 3911 drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); 3912 break; 3913 case HAL_WBM_REL_SRC_MODULE_RXDMA: 3914 drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); 3915 break; 3916 default: 3917 /* msdu will get freed */ 3918 break; 3919 } 3920 3921 if (drop) { 3922 dev_kfree_skb_any(msdu); 3923 return; 3924 } 3925 3926 status = IEEE80211_SKB_RXCB(msdu); 3927 *status = rxs; 3928 3929 ath11k_dp_rx_deliver_msdu(ar, napi, msdu); 3930} 3931 3932int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, 3933 struct napi_struct *napi, int budget) 3934{ 3935 struct ath11k *ar; 3936 struct ath11k_dp *dp = &ab->dp; 3937 struct dp_rxdma_ring *rx_ring; 3938 struct hal_rx_wbm_rel_info err_info; 3939 struct hal_srng *srng; 3940 struct sk_buff *msdu; 3941 struct sk_buff_head msdu_list[MAX_RADIOS]; 3942 struct ath11k_skb_rxcb *rxcb; 3943 u32 *rx_desc; 3944 int buf_id, mac_id; 3945 int num_buffs_reaped[MAX_RADIOS] = {0}; 3946 int total_num_buffs_reaped = 0; 3947 int ret, i; 3948 3949 for (i = 0; i < ab->num_radios; i++) 3950 __skb_queue_head_init(&msdu_list[i]); 3951 3952 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 3953 3954 spin_lock_bh(&srng->lock); 3955 3956 ath11k_hal_srng_access_begin(ab, srng); 3957 3958 while (budget) { 3959 rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); 3960 if (!rx_desc) 3961 break; 3962 3963 ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); 3964 if (ret) { 3965 ath11k_warn(ab, 3966 "failed to parse rx error in wbm_rel ring desc %d\n", 3967 ret); 3968 continue; 
3969 } 3970 3971 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie); 3972 mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie); 3973 3974 ar = ab->pdevs[mac_id].ar; 3975 rx_ring = &ar->dp.rx_refill_buf_ring; 3976 3977 spin_lock_bh(&rx_ring->idr_lock); 3978 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 3979 if (!msdu) { 3980 ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n", 3981 buf_id, mac_id); 3982 spin_unlock_bh(&rx_ring->idr_lock); 3983 continue; 3984 } 3985 3986 idr_remove(&rx_ring->bufs_idr, buf_id); 3987 spin_unlock_bh(&rx_ring->idr_lock); 3988 3989 rxcb = ATH11K_SKB_RXCB(msdu); 3990 dma_unmap_single(ab->dev, rxcb->paddr, 3991 msdu->len + skb_tailroom(msdu), 3992 DMA_FROM_DEVICE); 3993 3994 num_buffs_reaped[mac_id]++; 3995 total_num_buffs_reaped++; 3996 budget--; 3997 3998 if (err_info.push_reason != 3999 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4000 dev_kfree_skb_any(msdu); 4001 continue; 4002 } 4003 4004 rxcb->err_rel_src = err_info.err_rel_src; 4005 rxcb->err_code = err_info.err_code; 4006 rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; 4007 __skb_queue_tail(&msdu_list[mac_id], msdu); 4008 } 4009 4010 ath11k_hal_srng_access_end(ab, srng); 4011 4012 spin_unlock_bh(&srng->lock); 4013 4014 if (!total_num_buffs_reaped) 4015 goto done; 4016 4017 for (i = 0; i < ab->num_radios; i++) { 4018 if (!num_buffs_reaped[i]) 4019 continue; 4020 4021 ar = ab->pdevs[i].ar; 4022 rx_ring = &ar->dp.rx_refill_buf_ring; 4023 4024 ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], 4025 HAL_RX_BUF_RBM_SW3_BM); 4026 } 4027 4028 rcu_read_lock(); 4029 for (i = 0; i < ab->num_radios; i++) { 4030 if (!rcu_dereference(ab->pdevs_active[i])) { 4031 __skb_queue_purge(&msdu_list[i]); 4032 continue; 4033 } 4034 4035 ar = ab->pdevs[i].ar; 4036 4037 if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { 4038 __skb_queue_purge(&msdu_list[i]); 4039 continue; 4040 } 4041 4042 while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) 4043 ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); 4044 } 4045 rcu_read_unlock(); 4046done: 4047 return total_num_buffs_reaped; 4048} 4049 4050int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) 4051{ 4052 struct ath11k *ar; 4053 struct dp_srng *err_ring; 4054 struct dp_rxdma_ring *rx_ring; 4055 struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; 4056 struct hal_srng *srng; 4057 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 4058 enum hal_rx_buf_return_buf_manager rbm; 4059 enum hal_reo_entr_rxdma_ecode rxdma_err_code; 4060 struct ath11k_skb_rxcb *rxcb; 4061 struct sk_buff *skb; 4062 struct hal_reo_entrance_ring *entr_ring; 4063 void *desc; 4064 int num_buf_freed = 0; 4065 int quota = budget; 4066 dma_addr_t paddr; 4067 u32 desc_bank; 4068 void *link_desc_va; 4069 int num_msdus; 4070 int i; 4071 int buf_id; 4072 4073 ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; 4074 err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params, 4075 mac_id)]; 4076 rx_ring = &ar->dp.rx_refill_buf_ring; 4077 4078 srng = &ab->hal.srng_list[err_ring->ring_id]; 4079 4080 spin_lock_bh(&srng->lock); 4081 4082 ath11k_hal_srng_access_begin(ab, srng); 4083 4084 while (quota-- && 4085 (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4086 ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); 4087 4088 entr_ring = (struct hal_reo_entrance_ring *)desc; 4089 rxdma_err_code = 4090 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4091 entr_ring->info1); 4092 
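		/* rxdma errors are only accounted in soc_stats; every MSDU
		 * buffer referenced by the link descriptor is unmapped and
		 * freed below, the link descriptor itself is returned to the
		 * WBM idle list, and the freed buffers are replenished once
		 * the ring has been drained.
		 */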
ab->soc_stats.rxdma_error[rxdma_err_code]++; 4093 4094 link_desc_va = link_desc_banks[desc_bank].vaddr + 4095 (paddr - link_desc_banks[desc_bank].paddr); 4096 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, 4097 msdu_cookies, &rbm); 4098 4099 for (i = 0; i < num_msdus; i++) { 4100 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4101 msdu_cookies[i]); 4102 4103 spin_lock_bh(&rx_ring->idr_lock); 4104 skb = idr_find(&rx_ring->bufs_idr, buf_id); 4105 if (!skb) { 4106 ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", 4107 buf_id); 4108 spin_unlock_bh(&rx_ring->idr_lock); 4109 continue; 4110 } 4111 4112 idr_remove(&rx_ring->bufs_idr, buf_id); 4113 spin_unlock_bh(&rx_ring->idr_lock); 4114 4115 rxcb = ATH11K_SKB_RXCB(skb); 4116 dma_unmap_single(ab->dev, rxcb->paddr, 4117 skb->len + skb_tailroom(skb), 4118 DMA_FROM_DEVICE); 4119 dev_kfree_skb_any(skb); 4120 4121 num_buf_freed++; 4122 } 4123 4124 ath11k_dp_rx_link_desc_return(ab, desc, 4125 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4126 } 4127 4128 ath11k_hal_srng_access_end(ab, srng); 4129 4130 spin_unlock_bh(&srng->lock); 4131 4132 if (num_buf_freed) 4133 ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, 4134 HAL_RX_BUF_RBM_SW3_BM); 4135 4136 return budget - quota; 4137} 4138 4139void ath11k_dp_process_reo_status(struct ath11k_base *ab) 4140{ 4141 struct ath11k_dp *dp = &ab->dp; 4142 struct hal_srng *srng; 4143 struct dp_reo_cmd *cmd, *tmp; 4144 bool found = false; 4145 u32 *reo_desc; 4146 u16 tag; 4147 struct hal_reo_status reo_status; 4148 4149 srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; 4150 4151 memset(&reo_status, 0, sizeof(reo_status)); 4152 4153 spin_lock_bh(&srng->lock); 4154 4155 ath11k_hal_srng_access_begin(ab, srng); 4156 4157 while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { 4158 tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); 4159 4160 switch (tag) { 4161 case HAL_REO_GET_QUEUE_STATS_STATUS: 4162 ath11k_hal_reo_status_queue_stats(ab, reo_desc, 4163 &reo_status); 4164 break; 4165 case HAL_REO_FLUSH_QUEUE_STATUS: 4166 ath11k_hal_reo_flush_queue_status(ab, reo_desc, 4167 &reo_status); 4168 break; 4169 case HAL_REO_FLUSH_CACHE_STATUS: 4170 ath11k_hal_reo_flush_cache_status(ab, reo_desc, 4171 &reo_status); 4172 break; 4173 case HAL_REO_UNBLOCK_CACHE_STATUS: 4174 ath11k_hal_reo_unblk_cache_status(ab, reo_desc, 4175 &reo_status); 4176 break; 4177 case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: 4178 ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, 4179 &reo_status); 4180 break; 4181 case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: 4182 ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, 4183 &reo_status); 4184 break; 4185 case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: 4186 ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, 4187 &reo_status); 4188 break; 4189 default: 4190 ath11k_warn(ab, "Unknown reo status type %d\n", tag); 4191 continue; 4192 } 4193 4194 spin_lock_bh(&dp->reo_cmd_lock); 4195 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 4196 if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { 4197 found = true; 4198 list_del(&cmd->list); 4199 break; 4200 } 4201 } 4202 spin_unlock_bh(&dp->reo_cmd_lock); 4203 4204 if (found) { 4205 cmd->handler(dp, (void *)&cmd->data, 4206 reo_status.uniform_hdr.cmd_status); 4207 kfree(cmd); 4208 } 4209 4210 found = false; 4211 } 4212 4213 ath11k_hal_srng_access_end(ab, srng); 4214 4215 spin_unlock_bh(&srng->lock); 4216} 4217 4218void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) 4219{ 4220 struct ath11k *ar = 
ab->pdevs[mac_id].ar;

	ath11k_dp_rx_pdev_srng_free(ar);
	ath11k_dp_rxdma_pdev_buf_free(ar);
}

int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
{
	struct ath11k *ar = ab->pdevs[mac_id].ar;
	struct ath11k_pdev_dp *dp = &ar->dp;
	u32 ring_id;
	int i;
	int ret;

	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx srngs\n");
		return ret;
	}

	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
	if (ret) {
		ath11k_warn(ab, "failed to setup rxdma ring\n");
		return ret;
	}

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
			    ret);
		return ret;
	}

	if (ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
							  mac_id + i, HAL_RXDMA_BUF);
			if (ret) {
				ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
					    i, ret);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
						  mac_id + i, HAL_RXDMA_DST);
		if (ret) {
			ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	if (!ab->hw_params.rxdma1_enable)
		goto config_refill_ring;

	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
					  mac_id, HAL_RXDMA_MONITOR_BUF);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
			    ret);
		return ret;
	}
	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_dst_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DST);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
			    ret);
		return ret;
	}
	ret = ath11k_dp_tx_htt_srng_setup(ab,
					  dp->rxdma_mon_desc_ring.ring_id,
					  mac_id, HAL_RXDMA_MONITOR_DESC);
	if (ret) {
		ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n",
			    ret);
		return ret;
	}

config_refill_ring:
	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
						  HAL_RXDMA_MONITOR_STATUS);
		if (ret) {
			ath11k_warn(ab,
				    "failed to configure mon_status_refill_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	return 0;
}

static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
{
	if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
		*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
		*total_len -= *frag_len;
	} else {
		*frag_len = *total_len;
		*total_len = 0;
	}
}

static
int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
					  void *p_last_buf_addr_info,
					  u8 mac_id)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct dp_srng *dp_srng;
	void *hal_srng;
	void *src_srng_desc;
	int ret = 0;

	if (ar->ab->hw_params.rxdma1_enable) {
		dp_srng = &dp->rxdma_mon_desc_ring;
		hal_srng =
&ar->ab->hal.srng_list[dp_srng->ring_id]; 4347 } else { 4348 dp_srng = &ar->ab->dp.wbm_desc_rel_ring; 4349 hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; 4350 } 4351 4352 ath11k_hal_srng_access_begin(ar->ab, hal_srng); 4353 4354 src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); 4355 4356 if (src_srng_desc) { 4357 struct ath11k_buffer_addr *src_desc = 4358 (struct ath11k_buffer_addr *)src_srng_desc; 4359 4360 *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); 4361 } else { 4362 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4363 "Monitor Link Desc Ring %d Full", mac_id); 4364 ret = -ENOMEM; 4365 } 4366 4367 ath11k_hal_srng_access_end(ar->ab, hal_srng); 4368 return ret; 4369} 4370 4371static 4372void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, 4373 dma_addr_t *paddr, u32 *sw_cookie, 4374 u8 *rbm, 4375 void **pp_buf_addr_info) 4376{ 4377 struct hal_rx_msdu_link *msdu_link = 4378 (struct hal_rx_msdu_link *)rx_msdu_link_desc; 4379 struct ath11k_buffer_addr *buf_addr_info; 4380 4381 buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; 4382 4383 ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); 4384 4385 *pp_buf_addr_info = (void *)buf_addr_info; 4386} 4387 4388static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) 4389{ 4390 if (skb->len > len) { 4391 skb_trim(skb, len); 4392 } else { 4393 if (skb_tailroom(skb) < len - skb->len) { 4394 if ((pskb_expand_head(skb, 0, 4395 len - skb->len - skb_tailroom(skb), 4396 GFP_ATOMIC))) { 4397 dev_kfree_skb_any(skb); 4398 return -ENOMEM; 4399 } 4400 } 4401 skb_put(skb, (len - skb->len)); 4402 } 4403 return 0; 4404} 4405 4406static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, 4407 void *msdu_link_desc, 4408 struct hal_rx_msdu_list *msdu_list, 4409 u16 *num_msdus) 4410{ 4411 struct hal_rx_msdu_details *msdu_details = NULL; 4412 struct rx_msdu_desc *msdu_desc_info = NULL; 4413 struct hal_rx_msdu_link *msdu_link = NULL; 4414 int i; 4415 u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); 4416 u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); 4417 u8 tmp = 0; 4418 4419 msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; 4420 msdu_details = &msdu_link->msdu_link[0]; 4421 4422 for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { 4423 if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, 4424 msdu_details[i].buf_addr_info.info0) == 0) { 4425 msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; 4426 msdu_desc_info->info0 |= last; 4427 ; 4428 break; 4429 } 4430 msdu_desc_info = &msdu_details[i].rx_msdu_info; 4431 4432 if (!i) 4433 msdu_desc_info->info0 |= first; 4434 else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) 4435 msdu_desc_info->info0 |= last; 4436 msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; 4437 msdu_list->msdu_info[i].msdu_len = 4438 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); 4439 msdu_list->sw_cookie[i] = 4440 FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, 4441 msdu_details[i].buf_addr_info.info1); 4442 tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 4443 msdu_details[i].buf_addr_info.info1); 4444 msdu_list->rbm[i] = tmp; 4445 } 4446 *num_msdus = i; 4447} 4448 4449static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, 4450 u32 *rx_bufs_used) 4451{ 4452 u32 ret = 0; 4453 4454 if ((*ppdu_id < msdu_ppdu_id) && 4455 ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { 4456 *ppdu_id = msdu_ppdu_id; 4457 ret = msdu_ppdu_id; 4458 } else if ((*ppdu_id > msdu_ppdu_id) && 4459 ((*ppdu_id - msdu_ppdu_id) > 
DP_NOT_PPDU_ID_WRAP_AROUND)) { 4460 /* mon_dst is behind than mon_status 4461 * skip dst_ring and free it 4462 */ 4463 *rx_bufs_used += 1; 4464 *ppdu_id = msdu_ppdu_id; 4465 ret = msdu_ppdu_id; 4466 } 4467 return ret; 4468} 4469 4470static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, 4471 bool *is_frag, u32 *total_len, 4472 u32 *frag_len, u32 *msdu_cnt) 4473{ 4474 if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { 4475 if (!*is_frag) { 4476 *total_len = info->msdu_len; 4477 *is_frag = true; 4478 } 4479 ath11k_dp_mon_set_frag_len(total_len, 4480 frag_len); 4481 } else { 4482 if (*is_frag) { 4483 ath11k_dp_mon_set_frag_len(total_len, 4484 frag_len); 4485 } else { 4486 *frag_len = info->msdu_len; 4487 } 4488 *is_frag = false; 4489 *msdu_cnt -= 1; 4490 } 4491} 4492 4493static u32 4494ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, 4495 void *ring_entry, struct sk_buff **head_msdu, 4496 struct sk_buff **tail_msdu, u32 *npackets, 4497 u32 *ppdu_id) 4498{ 4499 struct ath11k_pdev_dp *dp = &ar->dp; 4500 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4501 struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; 4502 struct sk_buff *msdu = NULL, *last = NULL; 4503 struct hal_rx_msdu_list msdu_list; 4504 void *p_buf_addr_info, *p_last_buf_addr_info; 4505 struct hal_rx_desc *rx_desc; 4506 void *rx_msdu_link_desc; 4507 dma_addr_t paddr; 4508 u16 num_msdus = 0; 4509 u32 rx_buf_size, rx_pkt_offset, sw_cookie; 4510 u32 rx_bufs_used = 0, i = 0; 4511 u32 msdu_ppdu_id = 0, msdu_cnt = 0; 4512 u32 total_len = 0, frag_len = 0; 4513 bool is_frag, is_first_msdu; 4514 bool drop_mpdu = false; 4515 struct ath11k_skb_rxcb *rxcb; 4516 struct hal_reo_entrance_ring *ent_desc = 4517 (struct hal_reo_entrance_ring *)ring_entry; 4518 int buf_id; 4519 u32 rx_link_buf_info[2]; 4520 u8 rbm; 4521 4522 if (!ar->ab->hw_params.rxdma1_enable) 4523 rx_ring = &dp->rx_refill_buf_ring; 4524 4525 ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, 4526 &sw_cookie, 4527 &p_last_buf_addr_info, &rbm, 4528 &msdu_cnt); 4529 4530 if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, 4531 ent_desc->info1) == 4532 HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { 4533 u8 rxdma_err = 4534 FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, 4535 ent_desc->info1); 4536 if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || 4537 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || 4538 rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { 4539 drop_mpdu = true; 4540 pmon->rx_mon_stats.dest_mpdu_drop++; 4541 } 4542 } 4543 4544 is_frag = false; 4545 is_first_msdu = true; 4546 4547 do { 4548 if (pmon->mon_last_linkdesc_paddr == paddr) { 4549 pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; 4550 return rx_bufs_used; 4551 } 4552 4553 if (ar->ab->hw_params.rxdma1_enable) 4554 rx_msdu_link_desc = 4555 (void *)pmon->link_desc_banks[sw_cookie].vaddr + 4556 (paddr - pmon->link_desc_banks[sw_cookie].paddr); 4557 else 4558 rx_msdu_link_desc = 4559 (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr + 4560 (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr); 4561 4562 ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, 4563 &num_msdus); 4564 4565 for (i = 0; i < num_msdus; i++) { 4566 u32 l2_hdr_offset; 4567 4568 if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { 4569 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4570 "i %d last_cookie %d is same\n", 4571 i, pmon->mon_last_buf_cookie); 4572 drop_mpdu = true; 4573 pmon->rx_mon_stats.dup_mon_buf_cnt++; 4574 
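			/* Duplicate sw_cookie: this buffer was already consumed
			 * for an earlier link descriptor, so skip it here;
			 * drop_mpdu makes sure the rest of this MPDU is freed
			 * instead of being delivered.
			 */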
continue; 4575 } 4576 buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, 4577 msdu_list.sw_cookie[i]); 4578 4579 spin_lock_bh(&rx_ring->idr_lock); 4580 msdu = idr_find(&rx_ring->bufs_idr, buf_id); 4581 spin_unlock_bh(&rx_ring->idr_lock); 4582 if (!msdu) { 4583 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4584 "msdu_pop: invalid buf_id %d\n", buf_id); 4585 break; 4586 } 4587 rxcb = ATH11K_SKB_RXCB(msdu); 4588 if (!rxcb->unmapped) { 4589 dma_unmap_single(ar->ab->dev, rxcb->paddr, 4590 msdu->len + 4591 skb_tailroom(msdu), 4592 DMA_FROM_DEVICE); 4593 rxcb->unmapped = 1; 4594 } 4595 if (drop_mpdu) { 4596 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4597 "i %d drop msdu %p *ppdu_id %x\n", 4598 i, msdu, *ppdu_id); 4599 dev_kfree_skb_any(msdu); 4600 msdu = NULL; 4601 goto next_msdu; 4602 } 4603 4604 rx_desc = (struct hal_rx_desc *)msdu->data; 4605 4606 rx_pkt_offset = sizeof(struct hal_rx_desc); 4607 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc); 4608 4609 if (is_first_msdu) { 4610 if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) { 4611 drop_mpdu = true; 4612 dev_kfree_skb_any(msdu); 4613 msdu = NULL; 4614 pmon->mon_last_linkdesc_paddr = paddr; 4615 goto next_msdu; 4616 } 4617 4618 msdu_ppdu_id = 4619 ath11k_dp_rxdesc_get_ppduid(rx_desc); 4620 4621 if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, 4622 ppdu_id, 4623 &rx_bufs_used)) { 4624 if (rx_bufs_used) { 4625 drop_mpdu = true; 4626 dev_kfree_skb_any(msdu); 4627 msdu = NULL; 4628 goto next_msdu; 4629 } 4630 return rx_bufs_used; 4631 } 4632 pmon->mon_last_linkdesc_paddr = paddr; 4633 is_first_msdu = false; 4634 } 4635 ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], 4636 &is_frag, &total_len, 4637 &frag_len, &msdu_cnt); 4638 rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; 4639 4640 ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); 4641 4642 if (!(*head_msdu)) 4643 *head_msdu = msdu; 4644 else if (last) 4645 last->next = msdu; 4646 4647 last = msdu; 4648next_msdu: 4649 pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; 4650 rx_bufs_used++; 4651 spin_lock_bh(&rx_ring->idr_lock); 4652 idr_remove(&rx_ring->bufs_idr, buf_id); 4653 spin_unlock_bh(&rx_ring->idr_lock); 4654 } 4655 4656 ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm); 4657 4658 ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, 4659 &sw_cookie, &rbm, 4660 &p_buf_addr_info); 4661 4662 if (ar->ab->hw_params.rxdma1_enable) { 4663 if (ath11k_dp_rx_monitor_link_desc_return(ar, 4664 p_last_buf_addr_info, 4665 dp->mac_id)) 4666 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4667 "dp_rx_monitor_link_desc_return failed"); 4668 } else { 4669 ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info, 4670 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 4671 } 4672 4673 p_last_buf_addr_info = p_buf_addr_info; 4674 4675 } while (paddr && msdu_cnt); 4676 4677 if (last) 4678 last->next = NULL; 4679 4680 *tail_msdu = msdu; 4681 4682 if (msdu_cnt == 0) 4683 *npackets = 1; 4684 4685 return rx_bufs_used; 4686} 4687 4688static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu) 4689{ 4690 u32 rx_pkt_offset, l2_hdr_offset; 4691 4692 rx_pkt_offset = sizeof(struct hal_rx_desc); 4693 l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data); 4694 skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); 4695} 4696 4697static struct sk_buff * 4698ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, 4699 u32 mac_id, struct sk_buff *head_msdu, 4700 struct sk_buff *last_msdu, 4701 struct ieee80211_rx_status *rxs) 4702{ 4703 struct sk_buff *msdu, *mpdu_buf, *prev_buf; 4704 u32 decap_format, wifi_hdr_len; 4705 
struct hal_rx_desc *rx_desc; 4706 char *hdr_desc; 4707 u8 *dest; 4708 struct ieee80211_hdr_3addr *wh; 4709 4710 mpdu_buf = NULL; 4711 4712 if (!head_msdu) 4713 goto err_merge_fail; 4714 4715 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4716 4717 if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc)) 4718 return NULL; 4719 4720 decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc); 4721 4722 ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); 4723 4724 if (decap_format == DP_RX_DECAP_TYPE_RAW) { 4725 ath11k_dp_rx_msdus_set_payload(head_msdu); 4726 4727 prev_buf = head_msdu; 4728 msdu = head_msdu->next; 4729 4730 while (msdu) { 4731 ath11k_dp_rx_msdus_set_payload(msdu); 4732 4733 prev_buf = msdu; 4734 msdu = msdu->next; 4735 } 4736 4737 prev_buf->next = NULL; 4738 4739 skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); 4740 } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { 4741 __le16 qos_field; 4742 u8 qos_pkt = 0; 4743 4744 rx_desc = (struct hal_rx_desc *)head_msdu->data; 4745 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 4746 4747 /* Base size */ 4748 wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); 4749 wh = (struct ieee80211_hdr_3addr *)hdr_desc; 4750 4751 if (ieee80211_is_data_qos(wh->frame_control)) { 4752 struct ieee80211_qos_hdr *qwh = 4753 (struct ieee80211_qos_hdr *)hdr_desc; 4754 4755 qos_field = qwh->qos_ctrl; 4756 qos_pkt = 1; 4757 } 4758 msdu = head_msdu; 4759 4760 while (msdu) { 4761 rx_desc = (struct hal_rx_desc *)msdu->data; 4762 hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); 4763 4764 if (qos_pkt) { 4765 dest = skb_push(msdu, sizeof(__le16)); 4766 if (!dest) 4767 goto err_merge_fail; 4768 memcpy(dest, hdr_desc, wifi_hdr_len); 4769 memcpy(dest + wifi_hdr_len, 4770 (u8 *)&qos_field, sizeof(__le16)); 4771 } 4772 ath11k_dp_rx_msdus_set_payload(msdu); 4773 prev_buf = msdu; 4774 msdu = msdu->next; 4775 } 4776 dest = skb_put(prev_buf, HAL_RX_FCS_LEN); 4777 if (!dest) 4778 goto err_merge_fail; 4779 4780 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4781 "mpdu_buf %pK mpdu_buf->len %u", 4782 prev_buf, prev_buf->len); 4783 } else { 4784 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4785 "decap format %d is not supported!\n", 4786 decap_format); 4787 goto err_merge_fail; 4788 } 4789 4790 return head_msdu; 4791 4792err_merge_fail: 4793 if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { 4794 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4795 "err_merge_fail mpdu_buf %pK", mpdu_buf); 4796 /* Free the head buffer */ 4797 dev_kfree_skb_any(mpdu_buf); 4798 } 4799 return NULL; 4800} 4801 4802static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, 4803 struct sk_buff *head_msdu, 4804 struct sk_buff *tail_msdu, 4805 struct napi_struct *napi) 4806{ 4807 struct ath11k_pdev_dp *dp = &ar->dp; 4808 struct sk_buff *mon_skb, *skb_next, *header; 4809 struct ieee80211_rx_status *rxs = &dp->rx_status, *status; 4810 4811 mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, 4812 tail_msdu, rxs); 4813 4814 if (!mon_skb) 4815 goto mon_deliver_fail; 4816 4817 header = mon_skb; 4818 4819 rxs->flag = 0; 4820 do { 4821 skb_next = mon_skb->next; 4822 if (!skb_next) 4823 rxs->flag &= ~RX_FLAG_AMSDU_MORE; 4824 else 4825 rxs->flag |= RX_FLAG_AMSDU_MORE; 4826 4827 if (mon_skb == header) { 4828 header = NULL; 4829 rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; 4830 } else { 4831 rxs->flag |= RX_FLAG_ALLOW_SAME_PN; 4832 } 4833 rxs->flag |= RX_FLAG_ONLY_MONITOR; 4834 4835 status = IEEE80211_SKB_RXCB(mon_skb); 4836 *status = *rxs; 4837 4838 ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb); 4839 mon_skb = skb_next; 4840 } while 
(mon_skb); 4841 rxs->flag = 0; 4842 4843 return 0; 4844 4845mon_deliver_fail: 4846 mon_skb = head_msdu; 4847 while (mon_skb) { 4848 skb_next = mon_skb->next; 4849 dev_kfree_skb_any(mon_skb); 4850 mon_skb = skb_next; 4851 } 4852 return -EINVAL; 4853} 4854 4855static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, 4856 u32 quota, struct napi_struct *napi) 4857{ 4858 struct ath11k_pdev_dp *dp = &ar->dp; 4859 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4860 void *ring_entry; 4861 void *mon_dst_srng; 4862 u32 ppdu_id; 4863 u32 rx_bufs_used; 4864 u32 ring_id; 4865 struct ath11k_pdev_mon_stats *rx_mon_stats; 4866 u32 npackets = 0; 4867 4868 if (ar->ab->hw_params.rxdma1_enable) 4869 ring_id = dp->rxdma_mon_dst_ring.ring_id; 4870 else 4871 ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id; 4872 4873 mon_dst_srng = &ar->ab->hal.srng_list[ring_id]; 4874 4875 if (!mon_dst_srng) { 4876 ath11k_warn(ar->ab, 4877 "HAL Monitor Destination Ring Init Failed -- %pK", 4878 mon_dst_srng); 4879 return; 4880 } 4881 4882 spin_lock_bh(&pmon->mon_lock); 4883 4884 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); 4885 4886 ppdu_id = pmon->mon_ppdu_info.ppdu_id; 4887 rx_bufs_used = 0; 4888 rx_mon_stats = &pmon->rx_mon_stats; 4889 4890 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { 4891 struct sk_buff *head_msdu, *tail_msdu; 4892 4893 head_msdu = NULL; 4894 tail_msdu = NULL; 4895 4896 rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry, 4897 &head_msdu, 4898 &tail_msdu, 4899 &npackets, &ppdu_id); 4900 4901 if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) { 4902 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4903 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4904 "dest_rx: new ppdu_id %x != status ppdu_id %x", 4905 ppdu_id, pmon->mon_ppdu_info.ppdu_id); 4906 break; 4907 } 4908 if (head_msdu && tail_msdu) { 4909 ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu, 4910 tail_msdu, napi); 4911 rx_mon_stats->dest_mpdu_done++; 4912 } 4913 4914 ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, 4915 mon_dst_srng); 4916 } 4917 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); 4918 4919 spin_unlock_bh(&pmon->mon_lock); 4920 4921 if (rx_bufs_used) { 4922 rx_mon_stats->dest_ppdu_done++; 4923 if (ar->ab->hw_params.rxdma1_enable) 4924 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 4925 &dp->rxdma_mon_buf_ring, 4926 rx_bufs_used, 4927 HAL_RX_BUF_RBM_SW3_BM); 4928 else 4929 ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, 4930 &dp->rx_refill_buf_ring, 4931 rx_bufs_used, 4932 HAL_RX_BUF_RBM_SW3_BM); 4933 } 4934} 4935 4936static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar, 4937 int mac_id, u32 quota, 4938 struct napi_struct *napi) 4939{ 4940 struct ath11k_pdev_dp *dp = &ar->dp; 4941 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4942 struct hal_rx_mon_ppdu_info *ppdu_info; 4943 struct sk_buff *status_skb; 4944 u32 tlv_status = HAL_TLV_STATUS_BUF_DONE; 4945 struct ath11k_pdev_mon_stats *rx_mon_stats; 4946 4947 ppdu_info = &pmon->mon_ppdu_info; 4948 rx_mon_stats = &pmon->rx_mon_stats; 4949 4950 if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START) 4951 return; 4952 4953 while (!skb_queue_empty(&pmon->rx_status_q)) { 4954 status_skb = skb_dequeue(&pmon->rx_status_q); 4955 4956 tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info, 4957 status_skb); 4958 if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) { 4959 rx_mon_stats->status_ppdu_done++; 4960 pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; 4961 ath11k_dp_rx_mon_dest_process(ar, 
mac_id, quota, napi); 4962 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 4963 } 4964 dev_kfree_skb_any(status_skb); 4965 } 4966} 4967 4968static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id, 4969 struct napi_struct *napi, int budget) 4970{ 4971 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 4972 struct ath11k_pdev_dp *dp = &ar->dp; 4973 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 4974 int num_buffs_reaped = 0; 4975 4976 num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget, 4977 &pmon->rx_status_q); 4978 if (num_buffs_reaped) 4979 ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi); 4980 4981 return num_buffs_reaped; 4982} 4983 4984int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, 4985 struct napi_struct *napi, int budget) 4986{ 4987 struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); 4988 int ret = 0; 4989 4990 if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags)) 4991 ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget); 4992 else 4993 ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); 4994 return ret; 4995} 4996 4997static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar) 4998{ 4999 struct ath11k_pdev_dp *dp = &ar->dp; 5000 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 5001 5002 skb_queue_head_init(&pmon->rx_status_q); 5003 5004 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5005 5006 memset(&pmon->rx_mon_stats, 0, 5007 sizeof(pmon->rx_mon_stats)); 5008 return 0; 5009} 5010 5011int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar) 5012{ 5013 struct ath11k_pdev_dp *dp = &ar->dp; 5014 struct ath11k_mon_data *pmon = &dp->mon_data; 5015 struct hal_srng *mon_desc_srng = NULL; 5016 struct dp_srng *dp_srng; 5017 int ret = 0; 5018 u32 n_link_desc = 0; 5019 5020 ret = ath11k_dp_rx_pdev_mon_status_attach(ar); 5021 if (ret) { 5022 ath11k_warn(ar->ab, "pdev_mon_status_attach() failed"); 5023 return ret; 5024 } 5025 5026 /* if rxdma1_enable is false, no need to setup 5027 * rxdma_mon_desc_ring. 5028 */ 5029 if (!ar->ab->hw_params.rxdma1_enable) 5030 return 0; 5031 5032 dp_srng = &dp->rxdma_mon_desc_ring; 5033 n_link_desc = dp_srng->size / 5034 ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC); 5035 mon_desc_srng = 5036 &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id]; 5037 5038 ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks, 5039 HAL_RXDMA_MONITOR_DESC, mon_desc_srng, 5040 n_link_desc); 5041 if (ret) { 5042 ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed"); 5043 return ret; 5044 } 5045 pmon->mon_last_linkdesc_paddr = 0; 5046 pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; 5047 spin_lock_init(&pmon->mon_lock); 5048 5049 return 0; 5050} 5051 5052static int ath11k_dp_mon_link_free(struct ath11k *ar) 5053{ 5054 struct ath11k_pdev_dp *dp = &ar->dp; 5055 struct ath11k_mon_data *pmon = &dp->mon_data; 5056 5057 ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks, 5058 HAL_RXDMA_MONITOR_DESC, 5059 &dp->rxdma_mon_desc_ring); 5060 return 0; 5061} 5062 5063int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) 5064{ 5065 ath11k_dp_mon_link_free(ar); 5066 return 0; 5067} 5068