/*
 * NXP Wireless LAN device driver: AP TX and RX data handling
 *
 * Copyright 2011-2020 NXP
 *
 * This software file (the "File") is distributed by NXP
 * under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "main.h"
#include "wmm.h"
#include "11n_aggr.h"
#include "11n_rxreorder.h"

/* This function checks whether a particular RA list holds more bridged
 * packets than the low bridge packet threshold and, if so, deletes bridged
 * packets from that RA list and returns true. If no such list is found,
 * false is returned.
 */
static bool
mwifiex_uap_del_tx_pkts_in_ralist(struct mwifiex_private *priv,
				  struct list_head *ra_list_head,
				  int tid)
{
	struct mwifiex_ra_list_tbl *ra_list;
	struct sk_buff *skb, *tmp;
	bool pkt_deleted = false;
	struct mwifiex_txinfo *tx_info;
	struct mwifiex_adapter *adapter = priv->adapter;

	list_for_each_entry(ra_list, ra_list_head, list) {
		if (skb_queue_empty(&ra_list->skb_head))
			continue;

		skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
			tx_info = MWIFIEX_SKB_TXCB(skb);
			if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
				__skb_unlink(skb, &ra_list->skb_head);
				mwifiex_write_data_complete(adapter, skb, 0,
							    -1);
				if (ra_list->tx_paused)
					priv->wmm.pkts_paused[tid]--;
				else
					atomic_dec(&priv->wmm.tx_pkts_queued);
				pkt_deleted = true;
			}
			if ((atomic_read(&adapter->pending_bridged_pkts) <=
			     MWIFIEX_BRIDGED_PKTS_THR_LOW))
				break;
		}
	}

	return pkt_deleted;
}
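
/* MWIFIEX_BRIDGED_PKTS_THR_HIGH and MWIFIEX_BRIDGED_PKTS_THR_LOW act as a
 * high/low watermark pair: the queueing paths below kick off the cleanup
 * once pending_bridged_pkts reaches the high watermark, while the walk in
 * mwifiex_uap_del_tx_pkts_in_ralist() stops deleting as soon as the count
 * falls back to the low watermark, so a burst of bridged traffic is
 * trimmed rather than flushed entirely.
 */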

/* This function deletes packets from a particular RA list. The RA list
 * index from which packets were last deleted is preserved, so that a
 * subsequent call deletes packets from the next RA list, thus maintaining
 * fairness.
 */
static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv)
{
	struct list_head *ra_list;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; i++, priv->del_list_idx++) {
		if (priv->del_list_idx == MAX_NUM_TID)
			priv->del_list_idx = 0;
		ra_list = &priv->wmm.tid_tbl_ptr[priv->del_list_idx].ra_list;
		if (mwifiex_uap_del_tx_pkts_in_ralist(priv, ra_list, i)) {
			priv->del_list_idx++;
			break;
		}
	}

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
					  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	struct sk_buff *new_skb;
	struct mwifiex_txinfo *tx_info;
	int hdr_chop;
	struct ethhdr *p_ethhdr;
	struct mwifiex_sta_node *src_node;
	int index;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	if ((atomic_read(&adapter->pending_bridged_pkts) >=
	     MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "Tx: Bridge packet limit reached. Drop packet!\n");
		kfree_skb(skb);
		mwifiex_uap_cleanup_tx_queues(priv);
		return;
	}

	if (sizeof(*rx_pkt_hdr) +
	    le16_to_cpu(uap_rx_pd->rx_pkt_offset) > skb->len) {
		mwifiex_dbg(adapter, ERROR,
			    "wrong rx packet offset: len=%d, rx_pkt_offset=%d\n",
			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
		priv->stats.rx_dropped++;
		dev_kfree_skb_any(skb);
		return;
	}

	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
		     sizeof(bridge_tunnel_header))) ||
	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
		     sizeof(rfc1042_header)) &&
	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
		/* Replace the 802.3 header and rfc1042 header (llc/snap)
		 * with an Ethernet II header, keeping the src/dst addresses
		 * and the snap_type (ethertype).
		 *
		 * The firmware only passes up SNAP frames, converting all
		 * RX data from 802.11 to 802.2/LLC/SNAP frames.
		 *
		 * To create the Ethernet II header, just move the src and
		 * dst addresses right before the snap_type.
		 */
		p_ethhdr = (struct ethhdr *)
			((u8 *)(&rx_pkt_hdr->eth803_hdr)
			 + sizeof(rx_pkt_hdr->eth803_hdr)
			 + sizeof(rx_pkt_hdr->rfc1042_hdr)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
			 - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
		memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
		       sizeof(p_ethhdr->h_source));
		memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
		       sizeof(p_ethhdr->h_dest));
		/* Chop off the rxpd + the excess memory from the
		 * 802.2/llc/snap header that was removed.
		 */
		hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd;
	} else {
		/* Chop off the rxpd */
		hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
	}
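
	/* A sketch of the SNAP-case layout, assuming the usual 14-byte
	 * eth803_hdr and 8-byte rfc_1042_hdr (offsets are relative to
	 * eth803_hdr; the rxpd precedes it):
	 *
	 *   +0 dst(6) | +6 src(6) | +12 len(2) | +14 llc/oui(6) |
	 *   +20 snap_type(2) | +22 payload
	 *
	 * p_ethhdr therefore lands at +8, exactly sizeof(struct ethhdr)
	 * bytes before the payload: the two memcpy() calls above place
	 * h_dest at +8 and h_source at +14, and snap_type is reused in
	 * place as the EtherType, yielding a contiguous Ethernet II header.
	 */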

	/* Chop off the leading header bytes so that the skb points to the
	 * start of either the reconstructed Ethernet II frame or the
	 * 802.2/llc/snap frame.
	 */
	skb_pull(skb, hdr_chop);

	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "data: Tx: insufficient skb headroom %d\n",
			    skb_headroom(skb));
		/* Insufficient skb headroom - allocate a new skb */
		new_skb =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		if (unlikely(!new_skb)) {
			mwifiex_dbg(priv->adapter, ERROR,
				    "Tx: cannot allocate new_skb\n");
			kfree_skb(skb);
			priv->stats.tx_dropped++;
			return;
		}

		kfree_skb(skb);
		skb = new_skb;
		mwifiex_dbg(priv->adapter, INFO,
			    "info: new skb headroom %d\n",
			    skb_headroom(skb));
	}

	tx_info = MWIFIEX_SKB_TXCB(skb);
	memset(tx_info, 0, sizeof(*tx_info));
	tx_info->bss_num = priv->bss_num;
	tx_info->bss_type = priv->bss_type;
	tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;

	src_node = mwifiex_get_sta_entry(priv, rx_pkt_hdr->eth803_hdr.h_source);
	if (src_node) {
		src_node->stats.last_rx = jiffies;
		src_node->stats.rx_bytes += skb->len;
		src_node->stats.rx_packets++;
		src_node->stats.last_tx_rate = uap_rx_pd->rx_rate;
		src_node->stats.last_tx_htinfo = uap_rx_pd->ht_info;
	}

	if (is_unicast_ether_addr(rx_pkt_hdr->eth803_hdr.h_dest)) {
		/* Update bridge packet statistics as the
		 * packet is not going to kernel/upper layer.
		 */
		priv->stats.rx_bytes += skb->len;
		priv->stats.rx_packets++;

		/* Sending bridge packet to TX queue, so save the packet
		 * length in TXCB to update statistics in TX complete.
		 */
		tx_info->pkt_len = skb->len;
	}

	__net_timestamp(skb);

	index = mwifiex_1d_to_wmm_queue[skb->priority];
	atomic_inc(&priv->wmm_tx_pending[index]);
	mwifiex_wmm_add_buf_txqueue(priv, skb);
	atomic_inc(&adapter->tx_pending);
	atomic_inc(&adapter->pending_bridged_pkts);

	mwifiex_queue_main_work(priv->adapter);
}
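
/* Forwarding decision of mwifiex_handle_uap_rx_forward() at a glance:
 *
 *   destination (RA)                 action
 *   ----------------                 ------
 *   multicast/broadcast              copy to AP TX queue and pass to kernel
 *   unicast, STA in assoc list       requeue to AP TX queue (intra-BSS)
 *   unicast, STA not in assoc list   pass to kernel for routing (inter-BSS)
 */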

/*
 * This function contains the logic for AP packet forwarding.
 *
 * If a packet is multicast/broadcast, it is sent to the kernel/upper layer
 * as well as queued back to the AP TX queue so that it can be sent to other
 * associated stations.
 * If a packet is unicast and the RA is present in the associated station
 * list, it is again requeued into the AP TX queue.
 * If a packet is unicast and the RA is not in the associated station list,
 * the packet is forwarded to the kernel to handle the routing logic.
 */
int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u8 ra[ETH_ALEN];
	struct sk_buff *skb_uap;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	/* don't do packet forwarding in disconnected state */
	if (!priv->media_connected) {
		mwifiex_dbg(adapter, ERROR,
			    "drop packet in disconnected state.\n");
		dev_kfree_skb_any(skb);
		return 0;
	}

	memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN);

	if (is_multicast_ether_addr(ra)) {
		skb_uap = skb_copy(skb, GFP_ATOMIC);
		if (likely(skb_uap)) {
			mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
		} else {
			mwifiex_dbg(adapter, ERROR,
				    "failed to copy skb for uAP\n");
			priv->stats.rx_dropped++;
			dev_kfree_skb_any(skb);
			return -1;
		}
	} else {
		if (mwifiex_get_sta_entry(priv, ra)) {
			/* Requeue Intra-BSS packet */
			mwifiex_uap_queue_bridged_pkt(priv, skb);
			return 0;
		}
	}

	/* Forward unicast/Inter-BSS packets to the kernel. */
	return mwifiex_process_rx_packet(priv, skb);
}

int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *src_node;
	struct ethhdr *p_ethhdr;
	struct sk_buff *skb_uap;
	struct mwifiex_txinfo *tx_info;

	if (!skb)
		return -1;

	p_ethhdr = (void *)skb->data;
	src_node = mwifiex_get_sta_entry(priv, p_ethhdr->h_source);
	if (src_node) {
		src_node->stats.last_rx = jiffies;
		src_node->stats.rx_bytes += skb->len;
		src_node->stats.rx_packets++;
	}

	if (is_multicast_ether_addr(p_ethhdr->h_dest) ||
	    mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) {
		if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
			skb_uap =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		else
			skb_uap = skb_copy(skb, GFP_ATOMIC);

		if (likely(skb_uap)) {
			tx_info = MWIFIEX_SKB_TXCB(skb_uap);
			memset(tx_info, 0, sizeof(*tx_info));
			tx_info->bss_num = priv->bss_num;
			tx_info->bss_type = priv->bss_type;
			tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
			__net_timestamp(skb_uap);
			mwifiex_wmm_add_buf_txqueue(priv, skb_uap);
			atomic_inc(&adapter->tx_pending);
			atomic_inc(&adapter->pending_bridged_pkts);
			if ((atomic_read(&adapter->pending_bridged_pkts) >=
			     MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
				mwifiex_dbg(adapter, ERROR,
					    "Tx: Bridge packet limit reached. Drop packet!\n");
				mwifiex_uap_cleanup_tx_queues(priv);
			}
		} else {
			mwifiex_dbg(adapter, ERROR,
				    "failed to allocate skb_uap\n");
		}

		mwifiex_queue_main_work(adapter);
		/* Don't forward Intra-BSS unicast packets to the upper layer */
		if (mwifiex_get_sta_entry(priv, p_ethhdr->h_dest))
			return 0;
	}

	skb->dev = priv->netdev;
	skb->protocol = eth_type_trans(skb, priv->netdev);
	skb->ip_summed = CHECKSUM_NONE;

	/* This is required only in case of 11n and USB/PCIe, as we allocate
	 * a 4K buffer only for 11N (to be able to receive 4K AMSDU
	 * packets). In case of SD we allocate buffers based on the size of
	 * the packet, so this is not needed.
	 *
	 * We modify the truesize here because our allocation for each skb
	 * is 4K while we only receive 2K packets, which causes the kernel
	 * to start dropping packets when an application has sized its
	 * receive buffer assuming 2K skbs: e.g. if a 64K packet is received
	 * in IP fragments and the application allocates 64K to receive it,
	 * the accounted size would almost double, because each 1.5K
	 * fragment is held in a 4K allocation as it is passed up. As soon
	 * as the 64K limit is hit, the kernel starts to drop the rest of
	 * the fragments. Currently we fail the Filesndl-ht.scr script for
	 * UDP, hence this fix.
	 */
	if ((adapter->iface_type == MWIFIEX_USB ||
	     adapter->iface_type == MWIFIEX_PCIE) &&
	    skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)
		skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
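
	/* For example, a 1500-byte fragment held in a 4K RX buffer arrives
	 * with truesize charged for the full allocation; skb->len -
	 * MWIFIEX_RX_DATA_BUF_SIZE is then negative (about -2.5K), so the
	 * adjustment above shrinks truesize to roughly what a 1.5K
	 * allocation would have been charged, keeping socket
	 * receive-buffer accounting close to the real payload size.
	 */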

	/* Forward multicast/broadcast packets to the upper layer */
	netif_rx_any_context(skb);
	return 0;
}

/*
 * This function processes a packet received on the AP interface.
 *
 * The function looks into the RxPD and performs sanity tests on the
 * received buffer to ensure it is a valid packet before processing it
 * further. If the packet is determined to be aggregated, it is
 * de-aggregated accordingly. The skb is then passed to the AP packet
 * forwarding logic.
 *
 * The completion callback is called after processing is complete.
 */
int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u16 rx_pkt_type;
	u8 ta[ETH_ALEN], pkt_type;
	struct mwifiex_sta_node *node;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	if (le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
	    sizeof(rx_pkt_hdr->eth803_hdr) > skb->len) {
		mwifiex_dbg(adapter, ERROR,
			    "wrong rx packet for struct ethhdr: len=%d, offset=%d\n",
			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
		priv->stats.rx_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}

	ether_addr_copy(ta, rx_pkt_hdr->eth803_hdr.h_source);

	if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
	     le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16)skb->len) {
		mwifiex_dbg(adapter, ERROR,
			    "wrong rx packet: len=%d, offset=%d, length=%d\n",
			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
			    le16_to_cpu(uap_rx_pd->rx_pkt_length));
		priv->stats.rx_dropped++;

		node = mwifiex_get_sta_entry(priv, ta);
		if (node)
			node->stats.tx_failed++;

		dev_kfree_skb_any(skb);
		return 0;
	}

	if (rx_pkt_type == PKT_TYPE_MGMT) {
		ret = mwifiex_process_mgmt_packet(priv, skb);
		if (ret)
			mwifiex_dbg(adapter, DATA, "Rx of mgmt packet failed");
		dev_kfree_skb_any(skb);
		return ret;
	}

	if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
		spin_lock_bh(&priv->sta_list_spinlock);
		node = mwifiex_get_sta_entry(priv, ta);
		if (node)
			node->rx_seq[uap_rx_pd->priority] =
						le16_to_cpu(uap_rx_pd->seq_num);
		spin_unlock_bh(&priv->sta_list_spinlock);
	}

	if (!priv->ap_11n_enabled ||
	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
	     (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
		ret = mwifiex_handle_uap_rx_forward(priv, skb);
		return ret;
	}

	/* Reorder and send to kernel */
	pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
	ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
					 uap_rx_pd->priority, ta, pkt_type,
					 skb);

	if (ret || (rx_pkt_type == PKT_TYPE_BAR))
		dev_kfree_skb_any(skb);

	if (ret)
		priv->stats.rx_dropped++;

	return ret;
}
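
/* Buffer layout produced by mwifiex_process_uap_txpd() below; the pad is
 * chosen so that the final buffer start comes out aligned to
 * MWIFIEX_DMA_ALIGN_SZ after both skb_push() calls:
 *
 *   skb->data
 *   | intf hdr | txpd | pad | mgmt hdr (mgmt frames only) | payload |
 *              |<------------ tx_pkt_offset ------------->|
 */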

/*
 * This function fills the TxPD for AP TX packets.
 *
 * The TX buffer received by this function should already have the
 * header space allocated for the TxPD.
 *
 * This function inserts the TxPD between the interface header and the
 * actual data and adjusts the buffer pointers accordingly.
 *
 * The following TxPD fields are set by this function, as required:
 *  - BSS number
 *  - TX packet length and offset
 *  - Priority
 *  - Packet delay
 *  - Priority-specific TX control
 *  - Flags
 */
void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
			       struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_txpd *txpd;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
	int pad;
	u16 pkt_type, pkt_offset;
	int hroom = adapter->intf_hdr_len;

	if (!skb->len) {
		mwifiex_dbg(adapter, ERROR,
			    "Tx: bad packet length: %d\n", skb->len);
		tx_info->status_code = -1;
		return skb->data;
	}

	BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN);

	pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;

	/* Choose pad so that the buffer start ends up aligned to
	 * MWIFIEX_DMA_ALIGN_SZ after both skb_push() calls below.
	 */
	pad = ((void *)skb->data - (sizeof(*txpd) + hroom) - NULL) &
	      (MWIFIEX_DMA_ALIGN_SZ - 1);

	skb_push(skb, sizeof(*txpd) + pad);

	txpd = (struct uap_txpd *)skb->data;
	memset(txpd, 0, sizeof(*txpd));
	txpd->bss_num = priv->bss_num;
	txpd->bss_type = priv->bss_type;
	txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - (sizeof(*txpd) +
						pad)));
	txpd->priority = (u8)skb->priority;

	txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);

	if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS ||
	    tx_info->flags & MWIFIEX_BUF_FLAG_ACTION_TX_STATUS) {
		txpd->tx_token_id = tx_info->ack_frame_id;
		txpd->flags |= MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS;
	}

	if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
		/*
		 * Set the priority-specific tx_control field; setting it to
		 * 0 causes the default value to be used later in this
		 * function.
		 */
		txpd->tx_control =
		    cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]);

	/* Offset of actual data */
	pkt_offset = sizeof(*txpd) + pad;
	if (pkt_type == PKT_TYPE_MGMT) {
		/* Set the packet type and add the header for management
		 * frames.
		 */
		txpd->tx_pkt_type = cpu_to_le16(pkt_type);
		pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
	}

	txpd->tx_pkt_offset = cpu_to_le16(pkt_offset);

	/* make space for adapter->intf_hdr_len */
	skb_push(skb, hroom);

	if (!txpd->tx_control)
		/* TxCtrl set by user or default */
		txpd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);

	return skb->data;
}