1// SPDX-License-Identifier: GPL-2.0-only 2/* 3 * Copyright 2002-2005, Instant802 Networks, Inc. 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 5 * Copyright 2013-2014 Intel Mobile Communications GmbH 6 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH 7 * Copyright (C) 2018-2021 Intel Corporation 8 */ 9 10#include <linux/module.h> 11#include <linux/init.h> 12#include <linux/etherdevice.h> 13#include <linux/netdevice.h> 14#include <linux/types.h> 15#include <linux/slab.h> 16#include <linux/skbuff.h> 17#include <linux/if_arp.h> 18#include <linux/timer.h> 19#include <linux/rtnetlink.h> 20 21#include <net/codel.h> 22#include <net/mac80211.h> 23#include "ieee80211_i.h" 24#include "driver-ops.h" 25#include "rate.h" 26#include "sta_info.h" 27#include "debugfs_sta.h" 28#include "mesh.h" 29#include "wme.h" 30 31/** 32 * DOC: STA information lifetime rules 33 * 34 * STA info structures (&struct sta_info) are managed in a hash table 35 * for faster lookup and a list for iteration. They are managed using 36 * RCU, i.e. access to the list and hash table is protected by RCU. 37 * 38 * Upon allocating a STA info structure with sta_info_alloc(), the caller 39 * owns that structure. It must then insert it into the hash table using 40 * either sta_info_insert() or sta_info_insert_rcu(); only in the latter 41 * case (which acquires an rcu read section but must not be called from 42 * within one) will the pointer still be valid after the call. Note that 43 * the caller may not do much with the STA info before inserting it, in 44 * particular, it may not start any mesh peer link management or add 45 * encryption keys. 46 * 47 * When the insertion fails (sta_info_insert()) returns non-zero), the 48 * structure will have been freed by sta_info_insert()! 49 * 50 * Station entries are added by mac80211 when you establish a link with a 51 * peer. This means different things for the different type of interfaces 52 * we support. For a regular station this mean we add the AP sta when we 53 * receive an association response from the AP. For IBSS this occurs when 54 * get to know about a peer on the same IBSS. For WDS we add the sta for 55 * the peer immediately upon device open. When using AP mode we add stations 56 * for each respective station upon request from userspace through nl80211. 57 * 58 * In order to remove a STA info structure, various sta_info_destroy_*() 59 * calls are available. 60 * 61 * There is no concept of ownership on a STA entry, each structure is 62 * owned by the global hash table/list until it is removed. All users of 63 * the structure need to be RCU protected so that the structure won't be 64 * freed before they are done using it. 
65 */ 66 67static const struct rhashtable_params sta_rht_params = { 68 .nelem_hint = 3, /* start small */ 69 .automatic_shrinking = true, 70 .head_offset = offsetof(struct sta_info, hash_node), 71 .key_offset = offsetof(struct sta_info, addr), 72 .key_len = ETH_ALEN, 73 .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE, 74}; 75 76/* Caller must hold local->sta_mtx */ 77static int sta_info_hash_del(struct ieee80211_local *local, 78 struct sta_info *sta) 79{ 80 return rhltable_remove(&local->sta_hash, &sta->hash_node, 81 sta_rht_params); 82} 83 84static void __cleanup_single_sta(struct sta_info *sta) 85{ 86 int ac, i; 87 struct tid_ampdu_tx *tid_tx; 88 struct ieee80211_sub_if_data *sdata = sta->sdata; 89 struct ieee80211_local *local = sdata->local; 90 struct ps_data *ps; 91 92 if (test_sta_flag(sta, WLAN_STA_PS_STA) || 93 test_sta_flag(sta, WLAN_STA_PS_DRIVER) || 94 test_sta_flag(sta, WLAN_STA_PS_DELIVER)) { 95 if (sta->sdata->vif.type == NL80211_IFTYPE_AP || 96 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 97 ps = &sdata->bss->ps; 98 else if (ieee80211_vif_is_mesh(&sdata->vif)) 99 ps = &sdata->u.mesh.ps; 100 else 101 return; 102 103 clear_sta_flag(sta, WLAN_STA_PS_STA); 104 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 105 clear_sta_flag(sta, WLAN_STA_PS_DELIVER); 106 107 atomic_dec(&ps->num_sta_ps); 108 } 109 110 if (sta->sta.txq[0]) { 111 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 112 struct txq_info *txqi; 113 114 if (!sta->sta.txq[i]) 115 continue; 116 117 txqi = to_txq_info(sta->sta.txq[i]); 118 119 ieee80211_txq_purge(local, txqi); 120 } 121 } 122 123 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 124 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]); 125 ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]); 126 ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]); 127 } 128 129 if (ieee80211_vif_is_mesh(&sdata->vif)) 130 mesh_sta_cleanup(sta); 131 132 cancel_work_sync(&sta->drv_deliver_wk); 133 134 /* 135 * Destroy aggregation state here. It would be nice to wait for the 136 * driver to finish aggregation stop and then clean up, but for now 137 * drivers have to handle aggregation stop being requested, followed 138 * directly by station destruction. 
139 */ 140 for (i = 0; i < IEEE80211_NUM_TIDS; i++) { 141 kfree(sta->ampdu_mlme.tid_start_tx[i]); 142 tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); 143 if (!tid_tx) 144 continue; 145 ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); 146 kfree(tid_tx); 147 } 148} 149 150static void cleanup_single_sta(struct sta_info *sta) 151{ 152 struct ieee80211_sub_if_data *sdata = sta->sdata; 153 struct ieee80211_local *local = sdata->local; 154 155 __cleanup_single_sta(sta); 156 sta_info_free(local, sta); 157} 158 159struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local, 160 const u8 *addr) 161{ 162 return rhltable_lookup(&local->sta_hash, addr, sta_rht_params); 163} 164 165/* protected by RCU */ 166struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, 167 const u8 *addr) 168{ 169 struct ieee80211_local *local = sdata->local; 170 struct rhlist_head *tmp; 171 struct sta_info *sta; 172 173 rcu_read_lock(); 174 for_each_sta_info(local, addr, sta, tmp) { 175 if (sta->sdata == sdata) { 176 rcu_read_unlock(); 177 /* this is safe as the caller must already hold 178 * another rcu read section or the mutex 179 */ 180 return sta; 181 } 182 } 183 rcu_read_unlock(); 184 return NULL; 185} 186 187/* 188 * Get sta info either from the specified interface 189 * or from one of its vlans 190 */ 191struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, 192 const u8 *addr) 193{ 194 struct ieee80211_local *local = sdata->local; 195 struct rhlist_head *tmp; 196 struct sta_info *sta; 197 198 rcu_read_lock(); 199 for_each_sta_info(local, addr, sta, tmp) { 200 if (sta->sdata == sdata || 201 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) { 202 rcu_read_unlock(); 203 /* this is safe as the caller must already hold 204 * another rcu read section or the mutex 205 */ 206 return sta; 207 } 208 } 209 rcu_read_unlock(); 210 return NULL; 211} 212 213struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local, 214 const u8 *sta_addr, const u8 *vif_addr) 215{ 216 struct rhlist_head *tmp; 217 struct sta_info *sta; 218 219 for_each_sta_info(local, sta_addr, sta, tmp) { 220 if (ether_addr_equal(vif_addr, sta->sdata->vif.addr)) 221 return sta; 222 } 223 224 return NULL; 225} 226 227struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, 228 int idx) 229{ 230 struct ieee80211_local *local = sdata->local; 231 struct sta_info *sta; 232 int i = 0; 233 234 list_for_each_entry_rcu(sta, &local->sta_list, list, 235 lockdep_is_held(&local->sta_mtx)) { 236 if (sdata != sta->sdata) 237 continue; 238 if (i < idx) { 239 ++i; 240 continue; 241 } 242 return sta; 243 } 244 245 return NULL; 246} 247 248/** 249 * sta_info_free - free STA 250 * 251 * @local: pointer to the global information 252 * @sta: STA info to free 253 * 254 * This function must undo everything done by sta_info_alloc() 255 * that may happen before sta_info_insert(). It may only be 256 * called when sta_info_insert() has not been attempted (and 257 * if that fails, the station is freed anyway.) 258 */ 259void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) 260{ 261 /* 262 * If we had used sta_info_pre_move_state() then we might not 263 * have gone through the state transitions down again, so do 264 * it here now (and warn if it's inserted). 265 * 266 * This will clear state such as fast TX/RX that may have been 267 * allocated during state transitions. 
268 */ 269 while (sta->sta_state > IEEE80211_STA_NONE) { 270 int ret; 271 272 WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED)); 273 274 ret = sta_info_move_state(sta, sta->sta_state - 1); 275 if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret)) 276 break; 277 } 278 279 if (sta->rate_ctrl) 280 rate_control_free_sta(sta); 281 282 sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr); 283 284 if (sta->sta.txq[0]) 285 kfree(to_txq_info(sta->sta.txq[0])); 286 kfree(rcu_dereference_raw(sta->sta.rates)); 287#ifdef CONFIG_MAC80211_MESH 288 kfree(sta->mesh); 289#endif 290 free_percpu(sta->pcpu_rx_stats); 291 kfree(sta); 292} 293 294/* Caller must hold local->sta_mtx */ 295static int sta_info_hash_add(struct ieee80211_local *local, 296 struct sta_info *sta) 297{ 298 return rhltable_insert(&local->sta_hash, &sta->hash_node, 299 sta_rht_params); 300} 301 302static void sta_deliver_ps_frames(struct work_struct *wk) 303{ 304 struct sta_info *sta; 305 306 sta = container_of(wk, struct sta_info, drv_deliver_wk); 307 308 if (sta->dead) 309 return; 310 311 local_bh_disable(); 312 if (!test_sta_flag(sta, WLAN_STA_PS_STA)) 313 ieee80211_sta_ps_deliver_wakeup(sta); 314 else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) 315 ieee80211_sta_ps_deliver_poll_response(sta); 316 else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD)) 317 ieee80211_sta_ps_deliver_uapsd(sta); 318 local_bh_enable(); 319} 320 321static int sta_prepare_rate_control(struct ieee80211_local *local, 322 struct sta_info *sta, gfp_t gfp) 323{ 324 if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) 325 return 0; 326 327 sta->rate_ctrl = local->rate_ctrl; 328 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, 329 sta, gfp); 330 if (!sta->rate_ctrl_priv) 331 return -ENOMEM; 332 333 return 0; 334} 335 336struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, 337 const u8 *addr, gfp_t gfp) 338{ 339 struct ieee80211_local *local = sdata->local; 340 struct ieee80211_hw *hw = &local->hw; 341 struct sta_info *sta; 342 int i; 343 344 sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp); 345 if (!sta) 346 return NULL; 347 348 if (ieee80211_hw_check(hw, USES_RSS)) { 349 sta->pcpu_rx_stats = 350 alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp); 351 if (!sta->pcpu_rx_stats) 352 goto free; 353 } 354 355 spin_lock_init(&sta->lock); 356 spin_lock_init(&sta->ps_lock); 357 INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames); 358 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); 359 mutex_init(&sta->ampdu_mlme.mtx); 360#ifdef CONFIG_MAC80211_MESH 361 if (ieee80211_vif_is_mesh(&sdata->vif)) { 362 sta->mesh = kzalloc(sizeof(*sta->mesh), gfp); 363 if (!sta->mesh) 364 goto free; 365 sta->mesh->plink_sta = sta; 366 spin_lock_init(&sta->mesh->plink_lock); 367 if (ieee80211_vif_is_mesh(&sdata->vif) && 368 !sdata->u.mesh.user_mpm) 369 timer_setup(&sta->mesh->plink_timer, mesh_plink_timer, 370 0); 371 sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE; 372 } 373#endif 374 375 memcpy(sta->addr, addr, ETH_ALEN); 376 memcpy(sta->sta.addr, addr, ETH_ALEN); 377 sta->sta.max_rx_aggregation_subframes = 378 local->hw.max_rx_aggregation_subframes; 379 380 /* Extended Key ID needs to install keys for keyid 0 and 1 Rx-only. 381 * The Tx path starts to use a key as soon as the key slot ptk_idx 382 * references to is not NULL. To not use the initial Rx-only key 383 * prematurely for Tx initialize ptk_idx to an impossible PTK keyid 384 * which always will refer to a NULL key. 
385 */ 386 BUILD_BUG_ON(ARRAY_SIZE(sta->ptk) <= INVALID_PTK_KEYIDX); 387 sta->ptk_idx = INVALID_PTK_KEYIDX; 388 389 sta->local = local; 390 sta->sdata = sdata; 391 sta->rx_stats.last_rx = jiffies; 392 393 u64_stats_init(&sta->rx_stats.syncp); 394 395 ieee80211_init_frag_cache(&sta->frags); 396 397 sta->sta_state = IEEE80211_STA_NONE; 398 399 /* Mark TID as unreserved */ 400 sta->reserved_tid = IEEE80211_TID_UNRESERVED; 401 402 sta->last_connected = ktime_get_seconds(); 403 ewma_signal_init(&sta->rx_stats_avg.signal); 404 ewma_avg_signal_init(&sta->status_stats.avg_ack_signal); 405 for (i = 0; i < ARRAY_SIZE(sta->rx_stats_avg.chain_signal); i++) 406 ewma_signal_init(&sta->rx_stats_avg.chain_signal[i]); 407 408 if (local->ops->wake_tx_queue) { 409 void *txq_data; 410 int size = sizeof(struct txq_info) + 411 ALIGN(hw->txq_data_size, sizeof(void *)); 412 413 txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp); 414 if (!txq_data) 415 goto free; 416 417 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 418 struct txq_info *txq = txq_data + i * size; 419 420 /* might not do anything for the bufferable MMPDU TXQ */ 421 ieee80211_txq_init(sdata, sta, txq, i); 422 } 423 } 424 425 if (sta_prepare_rate_control(local, sta, gfp)) 426 goto free_txq; 427 428 sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT; 429 430 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 431 skb_queue_head_init(&sta->ps_tx_buf[i]); 432 skb_queue_head_init(&sta->tx_filtered[i]); 433 sta->airtime[i].deficit = sta->airtime_weight; 434 atomic_set(&sta->airtime[i].aql_tx_pending, 0); 435 sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i]; 436 sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i]; 437 } 438 439 for (i = 0; i < IEEE80211_NUM_TIDS; i++) 440 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); 441 442 for (i = 0; i < NUM_NL80211_BANDS; i++) { 443 u32 mandatory = 0; 444 int r; 445 446 if (!hw->wiphy->bands[i]) 447 continue; 448 449 switch (i) { 450 case NL80211_BAND_2GHZ: 451 /* 452 * We use both here, even if we cannot really know for 453 * sure the station will support both, but the only use 454 * for this is when we don't know anything yet and send 455 * management frames, and then we'll pick the lowest 456 * possible rate anyway. 
457 * If we don't include _G here, we cannot find a rate 458 * in P2P, and thus trigger the WARN_ONCE() in rate.c 459 */ 460 mandatory = IEEE80211_RATE_MANDATORY_B | 461 IEEE80211_RATE_MANDATORY_G; 462 break; 463 case NL80211_BAND_5GHZ: 464 mandatory = IEEE80211_RATE_MANDATORY_A; 465 break; 466 case NL80211_BAND_60GHZ: 467 WARN_ON(1); 468 mandatory = 0; 469 break; 470 } 471 472 for (r = 0; r < hw->wiphy->bands[i]->n_bitrates; r++) { 473 struct ieee80211_rate *rate; 474 475 rate = &hw->wiphy->bands[i]->bitrates[r]; 476 477 if (!(rate->flags & mandatory)) 478 continue; 479 sta->sta.supp_rates[i] |= BIT(r); 480 } 481 } 482 483 sta->sta.smps_mode = IEEE80211_SMPS_OFF; 484 if (sdata->vif.type == NL80211_IFTYPE_AP || 485 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { 486 struct ieee80211_supported_band *sband; 487 u8 smps; 488 489 sband = ieee80211_get_sband(sdata); 490 if (!sband) 491 goto free_txq; 492 493 smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 494 IEEE80211_HT_CAP_SM_PS_SHIFT; 495 /* 496 * Assume that hostapd advertises our caps in the beacon and 497 * this is the known_smps_mode for a station that just assciated 498 */ 499 switch (smps) { 500 case WLAN_HT_SMPS_CONTROL_DISABLED: 501 sta->known_smps_mode = IEEE80211_SMPS_OFF; 502 break; 503 case WLAN_HT_SMPS_CONTROL_STATIC: 504 sta->known_smps_mode = IEEE80211_SMPS_STATIC; 505 break; 506 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 507 sta->known_smps_mode = IEEE80211_SMPS_DYNAMIC; 508 break; 509 default: 510 WARN_ON(1); 511 } 512 } 513 514 sta->sta.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA; 515 516 sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD; 517 sta->cparams.target = MS2TIME(20); 518 sta->cparams.interval = MS2TIME(100); 519 sta->cparams.ecn = true; 520 521 sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); 522 523 return sta; 524 525free_txq: 526 if (sta->sta.txq[0]) 527 kfree(to_txq_info(sta->sta.txq[0])); 528free: 529 free_percpu(sta->pcpu_rx_stats); 530#ifdef CONFIG_MAC80211_MESH 531 kfree(sta->mesh); 532#endif 533 kfree(sta); 534 return NULL; 535} 536 537static int sta_info_insert_check(struct sta_info *sta) 538{ 539 struct ieee80211_sub_if_data *sdata = sta->sdata; 540 541 /* 542 * Can't be a WARN_ON because it can be triggered through a race: 543 * something inserts a STA (on one CPU) without holding the RTNL 544 * and another CPU turns off the net device. 545 */ 546 if (unlikely(!ieee80211_sdata_running(sdata))) 547 return -ENETDOWN; 548 549 if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) || 550 is_multicast_ether_addr(sta->sta.addr))) 551 return -EINVAL; 552 553 /* The RCU read lock is required by rhashtable due to 554 * asynchronous resize/rehash. We also require the mutex 555 * for correctness. 
556 */ 557 rcu_read_lock(); 558 lockdep_assert_held(&sdata->local->sta_mtx); 559 if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) && 560 ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) { 561 rcu_read_unlock(); 562 return -ENOTUNIQ; 563 } 564 rcu_read_unlock(); 565 566 return 0; 567} 568 569static int sta_info_insert_drv_state(struct ieee80211_local *local, 570 struct ieee80211_sub_if_data *sdata, 571 struct sta_info *sta) 572{ 573 enum ieee80211_sta_state state; 574 int err = 0; 575 576 for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) { 577 err = drv_sta_state(local, sdata, sta, state, state + 1); 578 if (err) 579 break; 580 } 581 582 if (!err) { 583 /* 584 * Drivers using legacy sta_add/sta_remove callbacks only 585 * get uploaded set to true after sta_add is called. 586 */ 587 if (!local->ops->sta_add) 588 sta->uploaded = true; 589 return 0; 590 } 591 592 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { 593 sdata_info(sdata, 594 "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n", 595 sta->sta.addr, state + 1, err); 596 err = 0; 597 } 598 599 /* unwind on error */ 600 for (; state > IEEE80211_STA_NOTEXIST; state--) 601 WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1)); 602 603 return err; 604} 605 606static void 607ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata) 608{ 609 struct ieee80211_local *local = sdata->local; 610 bool allow_p2p_go_ps = sdata->vif.p2p; 611 struct sta_info *sta; 612 613 rcu_read_lock(); 614 list_for_each_entry_rcu(sta, &local->sta_list, list) { 615 if (sdata != sta->sdata || 616 !test_sta_flag(sta, WLAN_STA_ASSOC)) 617 continue; 618 if (!sta->sta.support_p2p_ps) { 619 allow_p2p_go_ps = false; 620 break; 621 } 622 } 623 rcu_read_unlock(); 624 625 if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) { 626 sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps; 627 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_P2P_PS); 628 } 629} 630 631/* 632 * should be called with sta_mtx locked 633 * this function replaces the mutex lock 634 * with a RCU lock 635 */ 636static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) 637{ 638 struct ieee80211_local *local = sta->local; 639 struct ieee80211_sub_if_data *sdata = sta->sdata; 640 struct station_info *sinfo = NULL; 641 int err = 0; 642 643 lockdep_assert_held(&local->sta_mtx); 644 645 /* check if STA exists already */ 646 if (sta_info_get_bss(sdata, sta->sta.addr)) { 647 err = -EEXIST; 648 goto out_cleanup; 649 } 650 651 sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL); 652 if (!sinfo) { 653 err = -ENOMEM; 654 goto out_cleanup; 655 } 656 657 local->num_sta++; 658 local->sta_generation++; 659 smp_mb(); 660 661 /* simplify things and don't accept BA sessions yet */ 662 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 663 664 /* make the station visible */ 665 err = sta_info_hash_add(local, sta); 666 if (err) 667 goto out_drop_sta; 668 669 list_add_tail_rcu(&sta->list, &local->sta_list); 670 671 /* notify driver */ 672 err = sta_info_insert_drv_state(local, sdata, sta); 673 if (err) 674 goto out_remove; 675 676 set_sta_flag(sta, WLAN_STA_INSERTED); 677 678 if (sta->sta_state >= IEEE80211_STA_ASSOC) { 679 ieee80211_recalc_min_chandef(sta->sdata); 680 if (!sta->sta.support_p2p_ps) 681 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); 682 } 683 684 /* accept BA sessions now */ 685 clear_sta_flag(sta, WLAN_STA_BLOCK_BA); 686 687 ieee80211_sta_debugfs_add(sta); 688 rate_control_add_sta_debugfs(sta); 689 690 
sinfo->generation = local->sta_generation; 691 cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); 692 kfree(sinfo); 693 694 sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr); 695 696 /* move reference to rcu-protected */ 697 rcu_read_lock(); 698 mutex_unlock(&local->sta_mtx); 699 700 if (ieee80211_vif_is_mesh(&sdata->vif)) 701 mesh_accept_plinks_update(sdata); 702 703 ieee80211_check_fast_xmit(sta); 704 705 return 0; 706 out_remove: 707 sta_info_hash_del(local, sta); 708 list_del_rcu(&sta->list); 709 out_drop_sta: 710 local->num_sta--; 711 synchronize_net(); 712 out_cleanup: 713 cleanup_single_sta(sta); 714 mutex_unlock(&local->sta_mtx); 715 kfree(sinfo); 716 rcu_read_lock(); 717 return err; 718} 719 720int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) 721{ 722 struct ieee80211_local *local = sta->local; 723 int err; 724 725 might_sleep(); 726 727 mutex_lock(&local->sta_mtx); 728 729 err = sta_info_insert_check(sta); 730 if (err) { 731 sta_info_free(local, sta); 732 mutex_unlock(&local->sta_mtx); 733 rcu_read_lock(); 734 return err; 735 } 736 737 return sta_info_insert_finish(sta); 738} 739 740int sta_info_insert(struct sta_info *sta) 741{ 742 int err = sta_info_insert_rcu(sta); 743 744 rcu_read_unlock(); 745 746 return err; 747} 748 749static inline void __bss_tim_set(u8 *tim, u16 id) 750{ 751 /* 752 * This format has been mandated by the IEEE specifications, 753 * so this line may not be changed to use the __set_bit() format. 754 */ 755 tim[id / 8] |= (1 << (id % 8)); 756} 757 758static inline void __bss_tim_clear(u8 *tim, u16 id) 759{ 760 /* 761 * This format has been mandated by the IEEE specifications, 762 * so this line may not be changed to use the __clear_bit() format. 763 */ 764 tim[id / 8] &= ~(1 << (id % 8)); 765} 766 767static inline bool __bss_tim_get(u8 *tim, u16 id) 768{ 769 /* 770 * This format has been mandated by the IEEE specifications, 771 * so this line may not be changed to use the test_bit() format. 772 */ 773 return tim[id / 8] & (1 << (id % 8)); 774} 775 776static unsigned long ieee80211_tids_for_ac(int ac) 777{ 778 /* If we ever support TIDs > 7, this obviously needs to be adjusted */ 779 switch (ac) { 780 case IEEE80211_AC_VO: 781 return BIT(6) | BIT(7); 782 case IEEE80211_AC_VI: 783 return BIT(4) | BIT(5); 784 case IEEE80211_AC_BE: 785 return BIT(0) | BIT(3); 786 case IEEE80211_AC_BK: 787 return BIT(1) | BIT(2); 788 default: 789 WARN_ON(1); 790 return 0; 791 } 792} 793 794static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending) 795{ 796 struct ieee80211_local *local = sta->local; 797 struct ps_data *ps; 798 bool indicate_tim = false; 799 u8 ignore_for_tim = sta->sta.uapsd_queues; 800 int ac; 801 u16 id = sta->sta.aid; 802 803 if (sta->sdata->vif.type == NL80211_IFTYPE_AP || 804 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { 805 if (WARN_ON_ONCE(!sta->sdata->bss)) 806 return; 807 808 ps = &sta->sdata->bss->ps; 809#ifdef CONFIG_MAC80211_MESH 810 } else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) { 811 ps = &sta->sdata->u.mesh.ps; 812#endif 813 } else { 814 return; 815 } 816 817 /* No need to do anything if the driver does all */ 818 if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim) 819 return; 820 821 if (sta->dead) 822 goto done; 823 824 /* 825 * If all ACs are delivery-enabled then we should build 826 * the TIM bit for all ACs anyway; if only some are then 827 * we ignore those and build the TIM bit using only the 828 * non-enabled ones. 
829 */ 830 if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1) 831 ignore_for_tim = 0; 832 833 if (ignore_pending) 834 ignore_for_tim = BIT(IEEE80211_NUM_ACS) - 1; 835 836 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 837 unsigned long tids; 838 839 if (ignore_for_tim & ieee80211_ac_to_qos_mask[ac]) 840 continue; 841 842 indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) || 843 !skb_queue_empty(&sta->ps_tx_buf[ac]); 844 if (indicate_tim) 845 break; 846 847 tids = ieee80211_tids_for_ac(ac); 848 849 indicate_tim |= 850 sta->driver_buffered_tids & tids; 851 indicate_tim |= 852 sta->txq_buffered_tids & tids; 853 } 854 855 done: 856 spin_lock_bh(&local->tim_lock); 857 858 if (indicate_tim == __bss_tim_get(ps->tim, id)) 859 goto out_unlock; 860 861 if (indicate_tim) 862 __bss_tim_set(ps->tim, id); 863 else 864 __bss_tim_clear(ps->tim, id); 865 866 if (local->ops->set_tim && !WARN_ON(sta->dead)) { 867 local->tim_in_locked_section = true; 868 drv_set_tim(local, &sta->sta, indicate_tim); 869 local->tim_in_locked_section = false; 870 } 871 872out_unlock: 873 spin_unlock_bh(&local->tim_lock); 874} 875 876void sta_info_recalc_tim(struct sta_info *sta) 877{ 878 __sta_info_recalc_tim(sta, false); 879} 880 881static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb) 882{ 883 struct ieee80211_tx_info *info; 884 int timeout; 885 886 if (!skb) 887 return false; 888 889 info = IEEE80211_SKB_CB(skb); 890 891 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */ 892 timeout = (sta->listen_interval * 893 sta->sdata->vif.bss_conf.beacon_int * 894 32 / 15625) * HZ; 895 if (timeout < STA_TX_BUFFER_EXPIRE) 896 timeout = STA_TX_BUFFER_EXPIRE; 897 return time_after(jiffies, info->control.jiffies + timeout); 898} 899 900 901static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local, 902 struct sta_info *sta, int ac) 903{ 904 unsigned long flags; 905 struct sk_buff *skb; 906 907 /* 908 * First check for frames that should expire on the filtered 909 * queue. Frames here were rejected by the driver and are on 910 * a separate queue to avoid reordering with normal PS-buffered 911 * frames. They also aren't accounted for right now in the 912 * total_ps_buffered counter. 913 */ 914 for (;;) { 915 spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); 916 skb = skb_peek(&sta->tx_filtered[ac]); 917 if (sta_info_buffer_expired(sta, skb)) 918 skb = __skb_dequeue(&sta->tx_filtered[ac]); 919 else 920 skb = NULL; 921 spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); 922 923 /* 924 * Frames are queued in order, so if this one 925 * hasn't expired yet we can stop testing. If 926 * we actually reached the end of the queue we 927 * also need to stop, of course. 928 */ 929 if (!skb) 930 break; 931 ieee80211_free_txskb(&local->hw, skb); 932 } 933 934 /* 935 * Now also check the normal PS-buffered queue, this will 936 * only find something if the filtered queue was emptied 937 * since the filtered frames are all before the normal PS 938 * buffered frames. 
939 */ 940 for (;;) { 941 spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); 942 skb = skb_peek(&sta->ps_tx_buf[ac]); 943 if (sta_info_buffer_expired(sta, skb)) 944 skb = __skb_dequeue(&sta->ps_tx_buf[ac]); 945 else 946 skb = NULL; 947 spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); 948 949 /* 950 * frames are queued in order, so if this one 951 * hasn't expired yet (or we reached the end of 952 * the queue) we can stop testing 953 */ 954 if (!skb) 955 break; 956 957 local->total_ps_buffered--; 958 ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n", 959 sta->sta.addr); 960 ieee80211_free_txskb(&local->hw, skb); 961 } 962 963 /* 964 * Finally, recalculate the TIM bit for this station -- it might 965 * now be clear because the station was too slow to retrieve its 966 * frames. 967 */ 968 sta_info_recalc_tim(sta); 969 970 /* 971 * Return whether there are any frames still buffered, this is 972 * used to check whether the cleanup timer still needs to run, 973 * if there are no frames we don't need to rearm the timer. 974 */ 975 return !(skb_queue_empty(&sta->ps_tx_buf[ac]) && 976 skb_queue_empty(&sta->tx_filtered[ac])); 977} 978 979static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local, 980 struct sta_info *sta) 981{ 982 bool have_buffered = false; 983 int ac; 984 985 /* This is only necessary for stations on BSS/MBSS interfaces */ 986 if (!sta->sdata->bss && 987 !ieee80211_vif_is_mesh(&sta->sdata->vif)) 988 return false; 989 990 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 991 have_buffered |= 992 sta_info_cleanup_expire_buffered_ac(local, sta, ac); 993 994 return have_buffered; 995} 996 997static int __must_check __sta_info_destroy_part1(struct sta_info *sta) 998{ 999 struct ieee80211_local *local; 1000 struct ieee80211_sub_if_data *sdata; 1001 int ret; 1002 1003 might_sleep(); 1004 1005 if (!sta) 1006 return -ENOENT; 1007 1008 local = sta->local; 1009 sdata = sta->sdata; 1010 1011 lockdep_assert_held(&local->sta_mtx); 1012 1013 /* 1014 * Before removing the station from the driver and 1015 * rate control, it might still start new aggregation 1016 * sessions -- block that to make sure the tear-down 1017 * will be sufficient. 1018 */ 1019 set_sta_flag(sta, WLAN_STA_BLOCK_BA); 1020 ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA); 1021 1022 /* 1023 * Before removing the station from the driver there might be pending 1024 * rx frames on RSS queues sent prior to the disassociation - wait for 1025 * all such frames to be processed. 1026 */ 1027 drv_sync_rx_queues(local, sta); 1028 1029 ret = sta_info_hash_del(local, sta); 1030 if (WARN_ON(ret)) 1031 return ret; 1032 1033 /* 1034 * for TDLS peers, make sure to return to the base channel before 1035 * removal. 
1036 */ 1037 if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) { 1038 drv_tdls_cancel_channel_switch(local, sdata, &sta->sta); 1039 clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL); 1040 } 1041 1042 list_del_rcu(&sta->list); 1043 sta->removed = true; 1044 1045 if (sta->uploaded) 1046 drv_sta_pre_rcu_remove(local, sta->sdata, sta); 1047 1048 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1049 rcu_access_pointer(sdata->u.vlan.sta) == sta) 1050 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL); 1051 1052 return 0; 1053} 1054 1055static void __sta_info_destroy_part2(struct sta_info *sta) 1056{ 1057 struct ieee80211_local *local = sta->local; 1058 struct ieee80211_sub_if_data *sdata = sta->sdata; 1059 struct station_info *sinfo; 1060 int ret; 1061 1062 /* 1063 * NOTE: This assumes at least synchronize_net() was done 1064 * after _part1 and before _part2! 1065 */ 1066 1067 might_sleep(); 1068 lockdep_assert_held(&local->sta_mtx); 1069 1070 if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { 1071 ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); 1072 WARN_ON_ONCE(ret); 1073 } 1074 1075 /* now keys can no longer be reached */ 1076 ieee80211_free_sta_keys(local, sta); 1077 1078 /* disable TIM bit - last chance to tell driver */ 1079 __sta_info_recalc_tim(sta, true); 1080 1081 sta->dead = true; 1082 1083 local->num_sta--; 1084 local->sta_generation++; 1085 1086 while (sta->sta_state > IEEE80211_STA_NONE) { 1087 ret = sta_info_move_state(sta, sta->sta_state - 1); 1088 if (ret) { 1089 WARN_ON_ONCE(1); 1090 break; 1091 } 1092 } 1093 1094 if (sta->uploaded) { 1095 ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE, 1096 IEEE80211_STA_NOTEXIST); 1097 WARN_ON_ONCE(ret != 0); 1098 } 1099 1100 sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr); 1101 1102 sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); 1103 if (sinfo) 1104 sta_set_sinfo(sta, sinfo, true); 1105 cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL); 1106 kfree(sinfo); 1107 1108 ieee80211_sta_debugfs_remove(sta); 1109 1110 ieee80211_destroy_frag_cache(&sta->frags); 1111 1112 cleanup_single_sta(sta); 1113} 1114 1115int __must_check __sta_info_destroy(struct sta_info *sta) 1116{ 1117 int err = __sta_info_destroy_part1(sta); 1118 1119 if (err) 1120 return err; 1121 1122 synchronize_net(); 1123 1124 __sta_info_destroy_part2(sta); 1125 1126 return 0; 1127} 1128 1129int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr) 1130{ 1131 struct sta_info *sta; 1132 int ret; 1133 1134 mutex_lock(&sdata->local->sta_mtx); 1135 sta = sta_info_get(sdata, addr); 1136 ret = __sta_info_destroy(sta); 1137 mutex_unlock(&sdata->local->sta_mtx); 1138 1139 return ret; 1140} 1141 1142int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata, 1143 const u8 *addr) 1144{ 1145 struct sta_info *sta; 1146 int ret; 1147 1148 mutex_lock(&sdata->local->sta_mtx); 1149 sta = sta_info_get_bss(sdata, addr); 1150 ret = __sta_info_destroy(sta); 1151 mutex_unlock(&sdata->local->sta_mtx); 1152 1153 return ret; 1154} 1155 1156static void sta_info_cleanup(struct timer_list *t) 1157{ 1158 struct ieee80211_local *local = from_timer(local, t, sta_cleanup); 1159 struct sta_info *sta; 1160 bool timer_needed = false; 1161 1162 rcu_read_lock(); 1163 list_for_each_entry_rcu(sta, &local->sta_list, list) 1164 if (sta_info_cleanup_expire_buffered(local, sta)) 1165 timer_needed = true; 1166 rcu_read_unlock(); 1167 1168 if (local->quiescing) 1169 return; 1170 1171 if (!timer_needed) 1172 return; 1173 1174 mod_timer(&local->sta_cleanup, 1175 
round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL)); 1176} 1177 1178int sta_info_init(struct ieee80211_local *local) 1179{ 1180 int err; 1181 1182 err = rhltable_init(&local->sta_hash, &sta_rht_params); 1183 if (err) 1184 return err; 1185 1186 spin_lock_init(&local->tim_lock); 1187 mutex_init(&local->sta_mtx); 1188 INIT_LIST_HEAD(&local->sta_list); 1189 1190 timer_setup(&local->sta_cleanup, sta_info_cleanup, 0); 1191 return 0; 1192} 1193 1194void sta_info_stop(struct ieee80211_local *local) 1195{ 1196 del_timer_sync(&local->sta_cleanup); 1197 rhltable_destroy(&local->sta_hash); 1198} 1199 1200 1201int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans) 1202{ 1203 struct ieee80211_local *local = sdata->local; 1204 struct sta_info *sta, *tmp; 1205 LIST_HEAD(free_list); 1206 int ret = 0; 1207 1208 might_sleep(); 1209 1210 WARN_ON(vlans && sdata->vif.type != NL80211_IFTYPE_AP); 1211 WARN_ON(vlans && !sdata->bss); 1212 1213 mutex_lock(&local->sta_mtx); 1214 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 1215 if (sdata == sta->sdata || 1216 (vlans && sdata->bss == sta->sdata->bss)) { 1217 if (!WARN_ON(__sta_info_destroy_part1(sta))) 1218 list_add(&sta->free_list, &free_list); 1219 ret++; 1220 } 1221 } 1222 1223 if (!list_empty(&free_list)) { 1224 synchronize_net(); 1225 list_for_each_entry_safe(sta, tmp, &free_list, free_list) 1226 __sta_info_destroy_part2(sta); 1227 } 1228 mutex_unlock(&local->sta_mtx); 1229 1230 return ret; 1231} 1232 1233void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata, 1234 unsigned long exp_time) 1235{ 1236 struct ieee80211_local *local = sdata->local; 1237 struct sta_info *sta, *tmp; 1238 1239 mutex_lock(&local->sta_mtx); 1240 1241 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 1242 unsigned long last_active = ieee80211_sta_last_active(sta); 1243 1244 if (sdata != sta->sdata) 1245 continue; 1246 1247 if (time_is_before_jiffies(last_active + exp_time)) { 1248 sta_dbg(sta->sdata, "expiring inactive STA %pM\n", 1249 sta->sta.addr); 1250 1251 if (ieee80211_vif_is_mesh(&sdata->vif) && 1252 test_sta_flag(sta, WLAN_STA_PS_STA)) 1253 atomic_dec(&sdata->u.mesh.ps.num_sta_ps); 1254 1255 WARN_ON(__sta_info_destroy(sta)); 1256 } 1257 } 1258 1259 mutex_unlock(&local->sta_mtx); 1260} 1261 1262struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, 1263 const u8 *addr, 1264 const u8 *localaddr) 1265{ 1266 struct ieee80211_local *local = hw_to_local(hw); 1267 struct rhlist_head *tmp; 1268 struct sta_info *sta; 1269 1270 /* 1271 * Just return a random station if localaddr is NULL 1272 * ... first in list. 
1273 */ 1274 for_each_sta_info(local, addr, sta, tmp) { 1275 if (localaddr && 1276 !ether_addr_equal(sta->sdata->vif.addr, localaddr)) 1277 continue; 1278 if (!sta->uploaded) 1279 return NULL; 1280 return &sta->sta; 1281 } 1282 1283 return NULL; 1284} 1285EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_ifaddr); 1286 1287struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, 1288 const u8 *addr) 1289{ 1290 struct sta_info *sta; 1291 1292 if (!vif) 1293 return NULL; 1294 1295 sta = sta_info_get_bss(vif_to_sdata(vif), addr); 1296 if (!sta) 1297 return NULL; 1298 1299 if (!sta->uploaded) 1300 return NULL; 1301 1302 return &sta->sta; 1303} 1304EXPORT_SYMBOL(ieee80211_find_sta); 1305 1306/* powersave support code */ 1307void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) 1308{ 1309 struct ieee80211_sub_if_data *sdata = sta->sdata; 1310 struct ieee80211_local *local = sdata->local; 1311 struct sk_buff_head pending; 1312 int filtered = 0, buffered = 0, ac, i; 1313 unsigned long flags; 1314 struct ps_data *ps; 1315 1316 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1317 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 1318 u.ap); 1319 1320 if (sdata->vif.type == NL80211_IFTYPE_AP) 1321 ps = &sdata->bss->ps; 1322 else if (ieee80211_vif_is_mesh(&sdata->vif)) 1323 ps = &sdata->u.mesh.ps; 1324 else 1325 return; 1326 1327 clear_sta_flag(sta, WLAN_STA_SP); 1328 1329 BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1); 1330 sta->driver_buffered_tids = 0; 1331 sta->txq_buffered_tids = 0; 1332 1333 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) 1334 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); 1335 1336 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) { 1337 if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i])) 1338 continue; 1339 1340 schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i])); 1341 } 1342 1343 skb_queue_head_init(&pending); 1344 1345 /* sync with ieee80211_tx_h_unicast_ps_buf */ 1346 spin_lock_bh(&sta->ps_lock); 1347 /* Send all buffered frames to the station */ 1348 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1349 int count = skb_queue_len(&pending), tmp; 1350 1351 spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); 1352 skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending); 1353 spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); 1354 tmp = skb_queue_len(&pending); 1355 filtered += tmp - count; 1356 count = tmp; 1357 1358 spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); 1359 skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending); 1360 spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); 1361 tmp = skb_queue_len(&pending); 1362 buffered += tmp - count; 1363 } 1364 1365 ieee80211_add_pending_skbs(local, &pending); 1366 1367 /* now we're no longer in the deliver code */ 1368 clear_sta_flag(sta, WLAN_STA_PS_DELIVER); 1369 1370 /* The station might have polled and then woken up before we responded, 1371 * so clear these flags now to avoid them sticking around. 
1372 */ 1373 clear_sta_flag(sta, WLAN_STA_PSPOLL); 1374 clear_sta_flag(sta, WLAN_STA_UAPSD); 1375 spin_unlock_bh(&sta->ps_lock); 1376 1377 atomic_dec(&ps->num_sta_ps); 1378 1379 local->total_ps_buffered -= buffered; 1380 1381 sta_info_recalc_tim(sta); 1382 1383 ps_dbg(sdata, 1384 "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n", 1385 sta->sta.addr, sta->sta.aid, filtered, buffered); 1386 1387 ieee80211_check_fast_xmit(sta); 1388} 1389 1390static void ieee80211_send_null_response(struct sta_info *sta, int tid, 1391 enum ieee80211_frame_release_type reason, 1392 bool call_driver, bool more_data) 1393{ 1394 struct ieee80211_sub_if_data *sdata = sta->sdata; 1395 struct ieee80211_local *local = sdata->local; 1396 struct ieee80211_qos_hdr *nullfunc; 1397 struct sk_buff *skb; 1398 int size = sizeof(*nullfunc); 1399 __le16 fc; 1400 bool qos = sta->sta.wme; 1401 struct ieee80211_tx_info *info; 1402 struct ieee80211_chanctx_conf *chanctx_conf; 1403 1404 if (qos) { 1405 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | 1406 IEEE80211_STYPE_QOS_NULLFUNC | 1407 IEEE80211_FCTL_FROMDS); 1408 } else { 1409 size -= 2; 1410 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | 1411 IEEE80211_STYPE_NULLFUNC | 1412 IEEE80211_FCTL_FROMDS); 1413 } 1414 1415 skb = dev_alloc_skb(local->hw.extra_tx_headroom + size); 1416 if (!skb) 1417 return; 1418 1419 skb_reserve(skb, local->hw.extra_tx_headroom); 1420 1421 nullfunc = skb_put(skb, size); 1422 nullfunc->frame_control = fc; 1423 nullfunc->duration_id = 0; 1424 memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); 1425 memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); 1426 memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); 1427 nullfunc->seq_ctrl = 0; 1428 1429 skb->priority = tid; 1430 skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); 1431 if (qos) { 1432 nullfunc->qos_ctrl = cpu_to_le16(tid); 1433 1434 if (reason == IEEE80211_FRAME_RELEASE_UAPSD) { 1435 nullfunc->qos_ctrl |= 1436 cpu_to_le16(IEEE80211_QOS_CTL_EOSP); 1437 if (more_data) 1438 nullfunc->frame_control |= 1439 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1440 } 1441 } 1442 1443 info = IEEE80211_SKB_CB(skb); 1444 1445 /* 1446 * Tell TX path to send this frame even though the 1447 * STA may still remain is PS mode after this frame 1448 * exchange. Also set EOSP to indicate this packet 1449 * ends the poll/service period. 1450 */ 1451 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER | 1452 IEEE80211_TX_STATUS_EOSP | 1453 IEEE80211_TX_CTL_REQ_TX_STATUS; 1454 1455 info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; 1456 1457 if (call_driver) 1458 drv_allow_buffered_frames(local, sta, BIT(tid), 1, 1459 reason, false); 1460 1461 skb->dev = sdata->dev; 1462 1463 rcu_read_lock(); 1464 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 1465 if (WARN_ON(!chanctx_conf)) { 1466 rcu_read_unlock(); 1467 kfree_skb(skb); 1468 return; 1469 } 1470 1471 info->band = chanctx_conf->def.chan->band; 1472 ieee80211_xmit(sdata, sta, skb); 1473 rcu_read_unlock(); 1474} 1475 1476static int find_highest_prio_tid(unsigned long tids) 1477{ 1478 /* lower 3 TIDs aren't ordered perfectly */ 1479 if (tids & 0xF8) 1480 return fls(tids) - 1; 1481 /* TID 0 is BE just like TID 3 */ 1482 if (tids & BIT(0)) 1483 return 0; 1484 return fls(tids) - 1; 1485} 1486 1487/* Indicates if the MORE_DATA bit should be set in the last 1488 * frame obtained by ieee80211_sta_ps_get_frames. 
1489 * Note that driver_release_tids is relevant only if 1490 * reason = IEEE80211_FRAME_RELEASE_PSPOLL 1491 */ 1492static bool 1493ieee80211_sta_ps_more_data(struct sta_info *sta, u8 ignored_acs, 1494 enum ieee80211_frame_release_type reason, 1495 unsigned long driver_release_tids) 1496{ 1497 int ac; 1498 1499 /* If the driver has data on more than one TID then 1500 * certainly there's more data if we release just a 1501 * single frame now (from a single TID). This will 1502 * only happen for PS-Poll. 1503 */ 1504 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL && 1505 hweight16(driver_release_tids) > 1) 1506 return true; 1507 1508 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1509 if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) 1510 continue; 1511 1512 if (!skb_queue_empty(&sta->tx_filtered[ac]) || 1513 !skb_queue_empty(&sta->ps_tx_buf[ac])) 1514 return true; 1515 } 1516 1517 return false; 1518} 1519 1520static void 1521ieee80211_sta_ps_get_frames(struct sta_info *sta, int n_frames, u8 ignored_acs, 1522 enum ieee80211_frame_release_type reason, 1523 struct sk_buff_head *frames, 1524 unsigned long *driver_release_tids) 1525{ 1526 struct ieee80211_sub_if_data *sdata = sta->sdata; 1527 struct ieee80211_local *local = sdata->local; 1528 int ac; 1529 1530 /* Get response frame(s) and more data bit for the last one. */ 1531 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 1532 unsigned long tids; 1533 1534 if (ignored_acs & ieee80211_ac_to_qos_mask[ac]) 1535 continue; 1536 1537 tids = ieee80211_tids_for_ac(ac); 1538 1539 /* if we already have frames from software, then we can't also 1540 * release from hardware queues 1541 */ 1542 if (skb_queue_empty(frames)) { 1543 *driver_release_tids |= 1544 sta->driver_buffered_tids & tids; 1545 *driver_release_tids |= sta->txq_buffered_tids & tids; 1546 } 1547 1548 if (!*driver_release_tids) { 1549 struct sk_buff *skb; 1550 1551 while (n_frames > 0) { 1552 skb = skb_dequeue(&sta->tx_filtered[ac]); 1553 if (!skb) { 1554 skb = skb_dequeue( 1555 &sta->ps_tx_buf[ac]); 1556 if (skb) 1557 local->total_ps_buffered--; 1558 } 1559 if (!skb) 1560 break; 1561 n_frames--; 1562 __skb_queue_tail(frames, skb); 1563 } 1564 } 1565 1566 /* If we have more frames buffered on this AC, then abort the 1567 * loop since we can't send more data from other ACs before 1568 * the buffered frames from this. 
1569 */ 1570 if (!skb_queue_empty(&sta->tx_filtered[ac]) || 1571 !skb_queue_empty(&sta->ps_tx_buf[ac])) 1572 break; 1573 } 1574} 1575 1576static void 1577ieee80211_sta_ps_deliver_response(struct sta_info *sta, 1578 int n_frames, u8 ignored_acs, 1579 enum ieee80211_frame_release_type reason) 1580{ 1581 struct ieee80211_sub_if_data *sdata = sta->sdata; 1582 struct ieee80211_local *local = sdata->local; 1583 unsigned long driver_release_tids = 0; 1584 struct sk_buff_head frames; 1585 bool more_data; 1586 1587 /* Service or PS-Poll period starts */ 1588 set_sta_flag(sta, WLAN_STA_SP); 1589 1590 __skb_queue_head_init(&frames); 1591 1592 ieee80211_sta_ps_get_frames(sta, n_frames, ignored_acs, reason, 1593 &frames, &driver_release_tids); 1594 1595 more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason, driver_release_tids); 1596 1597 if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL) 1598 driver_release_tids = 1599 BIT(find_highest_prio_tid(driver_release_tids)); 1600 1601 if (skb_queue_empty(&frames) && !driver_release_tids) { 1602 int tid, ac; 1603 1604 /* 1605 * For PS-Poll, this can only happen due to a race condition 1606 * when we set the TIM bit and the station notices it, but 1607 * before it can poll for the frame we expire it. 1608 * 1609 * For uAPSD, this is said in the standard (11.2.1.5 h): 1610 * At each unscheduled SP for a non-AP STA, the AP shall 1611 * attempt to transmit at least one MSDU or MMPDU, but no 1612 * more than the value specified in the Max SP Length field 1613 * in the QoS Capability element from delivery-enabled ACs, 1614 * that are destined for the non-AP STA. 1615 * 1616 * Since we have no other MSDU/MMPDU, transmit a QoS null frame. 1617 */ 1618 1619 /* This will evaluate to 1, 3, 5 or 7. */ 1620 for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) 1621 if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac])) 1622 break; 1623 tid = 7 - 2 * ac; 1624 1625 ieee80211_send_null_response(sta, tid, reason, true, false); 1626 } else if (!driver_release_tids) { 1627 struct sk_buff_head pending; 1628 struct sk_buff *skb; 1629 int num = 0; 1630 u16 tids = 0; 1631 bool need_null = false; 1632 1633 skb_queue_head_init(&pending); 1634 1635 while ((skb = __skb_dequeue(&frames))) { 1636 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1637 struct ieee80211_hdr *hdr = (void *) skb->data; 1638 u8 *qoshdr = NULL; 1639 1640 num++; 1641 1642 /* 1643 * Tell TX path to send this frame even though the 1644 * STA may still remain is PS mode after this frame 1645 * exchange. 
1646 */ 1647 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; 1648 info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; 1649 1650 /* 1651 * Use MoreData flag to indicate whether there are 1652 * more buffered frames for this STA 1653 */ 1654 if (more_data || !skb_queue_empty(&frames)) 1655 hdr->frame_control |= 1656 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1657 else 1658 hdr->frame_control &= 1659 cpu_to_le16(~IEEE80211_FCTL_MOREDATA); 1660 1661 if (ieee80211_is_data_qos(hdr->frame_control) || 1662 ieee80211_is_qos_nullfunc(hdr->frame_control)) 1663 qoshdr = ieee80211_get_qos_ctl(hdr); 1664 1665 tids |= BIT(skb->priority); 1666 1667 __skb_queue_tail(&pending, skb); 1668 1669 /* end service period after last frame or add one */ 1670 if (!skb_queue_empty(&frames)) 1671 continue; 1672 1673 if (reason != IEEE80211_FRAME_RELEASE_UAPSD) { 1674 /* for PS-Poll, there's only one frame */ 1675 info->flags |= IEEE80211_TX_STATUS_EOSP | 1676 IEEE80211_TX_CTL_REQ_TX_STATUS; 1677 break; 1678 } 1679 1680 /* For uAPSD, things are a bit more complicated. If the 1681 * last frame has a QoS header (i.e. is a QoS-data or 1682 * QoS-nulldata frame) then just set the EOSP bit there 1683 * and be done. 1684 * If the frame doesn't have a QoS header (which means 1685 * it should be a bufferable MMPDU) then we can't set 1686 * the EOSP bit in the QoS header; add a QoS-nulldata 1687 * frame to the list to send it after the MMPDU. 1688 * 1689 * Note that this code is only in the mac80211-release 1690 * code path, we assume that the driver will not buffer 1691 * anything but QoS-data frames, or if it does, will 1692 * create the QoS-nulldata frame by itself if needed. 1693 * 1694 * Cf. 802.11-2012 10.2.1.10 (c). 1695 */ 1696 if (qoshdr) { 1697 *qoshdr |= IEEE80211_QOS_CTL_EOSP; 1698 1699 info->flags |= IEEE80211_TX_STATUS_EOSP | 1700 IEEE80211_TX_CTL_REQ_TX_STATUS; 1701 } else { 1702 /* The standard isn't completely clear on this 1703 * as it says the more-data bit should be set 1704 * if there are more BUs. The QoS-Null frame 1705 * we're about to send isn't buffered yet, we 1706 * only create it below, but let's pretend it 1707 * was buffered just in case some clients only 1708 * expect more-data=0 when eosp=1. 1709 */ 1710 hdr->frame_control |= 1711 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1712 need_null = true; 1713 num++; 1714 } 1715 break; 1716 } 1717 1718 drv_allow_buffered_frames(local, sta, tids, num, 1719 reason, more_data); 1720 1721 ieee80211_add_pending_skbs(local, &pending); 1722 1723 if (need_null) 1724 ieee80211_send_null_response( 1725 sta, find_highest_prio_tid(tids), 1726 reason, false, false); 1727 1728 sta_info_recalc_tim(sta); 1729 } else { 1730 int tid; 1731 1732 /* 1733 * We need to release a frame that is buffered somewhere in the 1734 * driver ... it'll have to handle that. 1735 * Note that the driver also has to check the number of frames 1736 * on the TIDs we're releasing from - if there are more than 1737 * n_frames it has to set the more-data bit (if we didn't ask 1738 * it to set it anyway due to other buffered frames); if there 1739 * are fewer than n_frames it has to make sure to adjust that 1740 * to allow the service period to end properly. 1741 */ 1742 drv_release_buffered_frames(local, sta, driver_release_tids, 1743 n_frames, reason, more_data); 1744 1745 /* 1746 * Note that we don't recalculate the TIM bit here as it would 1747 * most likely have no effect at all unless the driver told us 1748 * that the TID(s) became empty before returning here from the 1749 * release function. 
1750 * Either way, however, when the driver tells us that the TID(s) 1751 * became empty or we find that a txq became empty, we'll do the 1752 * TIM recalculation. 1753 */ 1754 1755 if (!sta->sta.txq[0]) 1756 return; 1757 1758 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { 1759 if (!sta->sta.txq[tid] || 1760 !(driver_release_tids & BIT(tid)) || 1761 txq_has_queue(sta->sta.txq[tid])) 1762 continue; 1763 1764 sta_info_recalc_tim(sta); 1765 break; 1766 } 1767 } 1768} 1769 1770void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta) 1771{ 1772 u8 ignore_for_response = sta->sta.uapsd_queues; 1773 1774 /* 1775 * If all ACs are delivery-enabled then we should reply 1776 * from any of them, if only some are enabled we reply 1777 * only from the non-enabled ones. 1778 */ 1779 if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1) 1780 ignore_for_response = 0; 1781 1782 ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response, 1783 IEEE80211_FRAME_RELEASE_PSPOLL); 1784} 1785 1786void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta) 1787{ 1788 int n_frames = sta->sta.max_sp; 1789 u8 delivery_enabled = sta->sta.uapsd_queues; 1790 1791 /* 1792 * If we ever grow support for TSPEC this might happen if 1793 * the TSPEC update from hostapd comes in between a trigger 1794 * frame setting WLAN_STA_UAPSD in the RX path and this 1795 * actually getting called. 1796 */ 1797 if (!delivery_enabled) 1798 return; 1799 1800 switch (sta->sta.max_sp) { 1801 case 1: 1802 n_frames = 2; 1803 break; 1804 case 2: 1805 n_frames = 4; 1806 break; 1807 case 3: 1808 n_frames = 6; 1809 break; 1810 case 0: 1811 /* XXX: what is a good value? */ 1812 n_frames = 128; 1813 break; 1814 } 1815 1816 ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled, 1817 IEEE80211_FRAME_RELEASE_UAPSD); 1818} 1819 1820void ieee80211_sta_block_awake(struct ieee80211_hw *hw, 1821 struct ieee80211_sta *pubsta, bool block) 1822{ 1823 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1824 1825 trace_api_sta_block_awake(sta->local, pubsta, block); 1826 1827 if (block) { 1828 set_sta_flag(sta, WLAN_STA_PS_DRIVER); 1829 ieee80211_clear_fast_xmit(sta); 1830 return; 1831 } 1832 1833 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 1834 return; 1835 1836 if (!test_sta_flag(sta, WLAN_STA_PS_STA)) { 1837 set_sta_flag(sta, WLAN_STA_PS_DELIVER); 1838 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 1839 ieee80211_queue_work(hw, &sta->drv_deliver_wk); 1840 } else if (test_sta_flag(sta, WLAN_STA_PSPOLL) || 1841 test_sta_flag(sta, WLAN_STA_UAPSD)) { 1842 /* must be asleep in this case */ 1843 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 1844 ieee80211_queue_work(hw, &sta->drv_deliver_wk); 1845 } else { 1846 clear_sta_flag(sta, WLAN_STA_PS_DRIVER); 1847 ieee80211_check_fast_xmit(sta); 1848 } 1849} 1850EXPORT_SYMBOL(ieee80211_sta_block_awake); 1851 1852void ieee80211_sta_eosp(struct ieee80211_sta *pubsta) 1853{ 1854 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1855 struct ieee80211_local *local = sta->local; 1856 1857 trace_api_eosp(local, pubsta); 1858 1859 clear_sta_flag(sta, WLAN_STA_SP); 1860} 1861EXPORT_SYMBOL(ieee80211_sta_eosp); 1862 1863void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid) 1864{ 1865 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1866 enum ieee80211_frame_release_type reason; 1867 bool more_data; 1868 1869 trace_api_send_eosp_nullfunc(sta->local, pubsta, tid); 1870 1871 reason = IEEE80211_FRAME_RELEASE_UAPSD; 1872 more_data = 
ieee80211_sta_ps_more_data(sta, ~sta->sta.uapsd_queues, 1873 reason, 0); 1874 1875 ieee80211_send_null_response(sta, tid, reason, false, more_data); 1876} 1877EXPORT_SYMBOL(ieee80211_send_eosp_nullfunc); 1878 1879void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta, 1880 u8 tid, bool buffered) 1881{ 1882 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1883 1884 if (WARN_ON(tid >= IEEE80211_NUM_TIDS)) 1885 return; 1886 1887 trace_api_sta_set_buffered(sta->local, pubsta, tid, buffered); 1888 1889 if (buffered) 1890 set_bit(tid, &sta->driver_buffered_tids); 1891 else 1892 clear_bit(tid, &sta->driver_buffered_tids); 1893 1894 sta_info_recalc_tim(sta); 1895} 1896EXPORT_SYMBOL(ieee80211_sta_set_buffered); 1897 1898void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid, 1899 u32 tx_airtime, u32 rx_airtime) 1900{ 1901 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1902 struct ieee80211_local *local = sta->sdata->local; 1903 u8 ac = ieee80211_ac_from_tid(tid); 1904 u32 airtime = 0; 1905 1906 if (sta->local->airtime_flags & AIRTIME_USE_TX) 1907 airtime += tx_airtime; 1908 if (sta->local->airtime_flags & AIRTIME_USE_RX) 1909 airtime += rx_airtime; 1910 1911 spin_lock_bh(&local->active_txq_lock[ac]); 1912 sta->airtime[ac].tx_airtime += tx_airtime; 1913 sta->airtime[ac].rx_airtime += rx_airtime; 1914 sta->airtime[ac].deficit -= airtime; 1915 spin_unlock_bh(&local->active_txq_lock[ac]); 1916} 1917EXPORT_SYMBOL(ieee80211_sta_register_airtime); 1918 1919void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, 1920 struct sta_info *sta, u8 ac, 1921 u16 tx_airtime, bool tx_completed) 1922{ 1923 int tx_pending; 1924 1925 if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) 1926 return; 1927 1928 if (!tx_completed) { 1929 if (sta) 1930 atomic_add(tx_airtime, 1931 &sta->airtime[ac].aql_tx_pending); 1932 1933 atomic_add(tx_airtime, &local->aql_total_pending_airtime); 1934 return; 1935 } 1936 1937 if (sta) { 1938 tx_pending = atomic_sub_return(tx_airtime, 1939 &sta->airtime[ac].aql_tx_pending); 1940 if (tx_pending < 0) 1941 atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending, 1942 tx_pending, 0); 1943 } 1944 1945 tx_pending = atomic_sub_return(tx_airtime, 1946 &local->aql_total_pending_airtime); 1947 if (WARN_ONCE(tx_pending < 0, 1948 "Device %s AC %d pending airtime underflow: %u, %u", 1949 wiphy_name(local->hw.wiphy), ac, tx_pending, 1950 tx_airtime)) 1951 atomic_cmpxchg(&local->aql_total_pending_airtime, 1952 tx_pending, 0); 1953} 1954 1955int sta_info_move_state(struct sta_info *sta, 1956 enum ieee80211_sta_state new_state) 1957{ 1958 might_sleep(); 1959 1960 if (sta->sta_state == new_state) 1961 return 0; 1962 1963 /* check allowed transitions first */ 1964 1965 switch (new_state) { 1966 case IEEE80211_STA_NONE: 1967 if (sta->sta_state != IEEE80211_STA_AUTH) 1968 return -EINVAL; 1969 break; 1970 case IEEE80211_STA_AUTH: 1971 if (sta->sta_state != IEEE80211_STA_NONE && 1972 sta->sta_state != IEEE80211_STA_ASSOC) 1973 return -EINVAL; 1974 break; 1975 case IEEE80211_STA_ASSOC: 1976 if (sta->sta_state != IEEE80211_STA_AUTH && 1977 sta->sta_state != IEEE80211_STA_AUTHORIZED) 1978 return -EINVAL; 1979 break; 1980 case IEEE80211_STA_AUTHORIZED: 1981 if (sta->sta_state != IEEE80211_STA_ASSOC) 1982 return -EINVAL; 1983 break; 1984 default: 1985 WARN(1, "invalid state %d", new_state); 1986 return -EINVAL; 1987 } 1988 1989 sta_dbg(sta->sdata, "moving STA %pM to state %d\n", 1990 sta->sta.addr, new_state); 1991 1992 /* 1993 
	 * notify the driver before the actual changes so it can
	 * fail the transition
	 */
	if (test_sta_flag(sta, WLAN_STA_INSERTED)) {
		int err = drv_sta_state(sta->local, sta->sdata, sta,
					sta->sta_state, new_state);
		if (err)
			return err;
	}

	/* reflect the change in all state variables */

	switch (new_state) {
	case IEEE80211_STA_NONE:
		if (sta->sta_state == IEEE80211_STA_AUTH)
			clear_bit(WLAN_STA_AUTH, &sta->_flags);
		break;
	case IEEE80211_STA_AUTH:
		if (sta->sta_state == IEEE80211_STA_NONE) {
			set_bit(WLAN_STA_AUTH, &sta->_flags);
		} else if (sta->sta_state == IEEE80211_STA_ASSOC) {
			clear_bit(WLAN_STA_ASSOC, &sta->_flags);
			ieee80211_recalc_min_chandef(sta->sdata);
			if (!sta->sta.support_p2p_ps)
				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
		}
		break;
	case IEEE80211_STA_ASSOC:
		if (sta->sta_state == IEEE80211_STA_AUTH) {
			set_bit(WLAN_STA_ASSOC, &sta->_flags);
			sta->assoc_at = ktime_get_boottime_ns();
			ieee80211_recalc_min_chandef(sta->sdata);
			if (!sta->sta.support_p2p_ps)
				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
		} else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
			ieee80211_vif_dec_num_mcast(sta->sdata);
			clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
			ieee80211_clear_fast_xmit(sta);
			ieee80211_clear_fast_rx(sta);
		}
		break;
	case IEEE80211_STA_AUTHORIZED:
		if (sta->sta_state == IEEE80211_STA_ASSOC) {
			ieee80211_vif_inc_num_mcast(sta->sdata);
			set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
			ieee80211_check_fast_xmit(sta);
			ieee80211_check_fast_rx(sta);
		}
		if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
		    sta->sdata->vif.type == NL80211_IFTYPE_AP)
			cfg80211_send_layer2_update(sta->sdata->dev,
						    sta->sta.addr);
		break;
	default:
		break;
	}

	sta->sta_state = new_state;

	return 0;
}

u8 sta_info_tx_streams(struct sta_info *sta)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.ht_cap;
	u8 rx_streams;

	if (!sta->sta.ht_cap.ht_supported)
		return 1;

	if (sta->sta.vht_cap.vht_supported) {
		int i;
		u16 tx_mcs_map =
			le16_to_cpu(sta->sta.vht_cap.vht_mcs.tx_mcs_map);

		for (i = 7; i >= 0; i--)
			if ((tx_mcs_map & (0x3 << (i * 2))) !=
			    IEEE80211_VHT_MCS_NOT_SUPPORTED)
				return i + 1;
	}

	if (ht_cap->mcs.rx_mask[3])
		rx_streams = 4;
	else if (ht_cap->mcs.rx_mask[2])
		rx_streams = 3;
	else if (ht_cap->mcs.rx_mask[1])
		rx_streams = 2;
	else
		rx_streams = 1;

	if (!(ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_RX_DIFF))
		return rx_streams;

	return ((ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
			>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
}

static struct ieee80211_sta_rx_stats *
sta_get_last_rx_stats(struct sta_info *sta)
{
	struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
	int cpu;

	if (!sta->pcpu_rx_stats)
		return stats;

	for_each_possible_cpu(cpu) {
		struct ieee80211_sta_rx_stats *cpustats;

		cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu);

		if (time_after(cpustats->last_rx, stats->last_rx))
			stats = cpustats;
	}

	return stats;
}

static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
				  struct rate_info *rinfo)
{
	rinfo->bw = STA_STATS_GET(BW, rate);
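
	/*
	 * The last RX rate is stored as a compact u32 bitfield; the
	 * STA_STATS_GET() helpers below extract the rate type, bandwidth,
	 * MCS/NSS and guard-interval information so it can be translated
	 * into a cfg80211 struct rate_info.
	 */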

	switch (STA_STATS_GET(TYPE, rate)) {
	case STA_STATS_RATE_TYPE_VHT:
		rinfo->flags = RATE_INFO_FLAGS_VHT_MCS;
		rinfo->mcs = STA_STATS_GET(VHT_MCS, rate);
		rinfo->nss = STA_STATS_GET(VHT_NSS, rate);
		if (STA_STATS_GET(SGI, rate))
			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case STA_STATS_RATE_TYPE_HT:
		rinfo->flags = RATE_INFO_FLAGS_MCS;
		rinfo->mcs = STA_STATS_GET(HT_MCS, rate);
		if (STA_STATS_GET(SGI, rate))
			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case STA_STATS_RATE_TYPE_LEGACY: {
		struct ieee80211_supported_band *sband;
		u16 brate;
		unsigned int shift;
		int band = STA_STATS_GET(LEGACY_BAND, rate);
		int rate_idx = STA_STATS_GET(LEGACY_IDX, rate);

		sband = local->hw.wiphy->bands[band];

		if (WARN_ON_ONCE(!sband->bitrates))
			break;

		brate = sband->bitrates[rate_idx].bitrate;
		if (rinfo->bw == RATE_INFO_BW_5)
			shift = 2;
		else if (rinfo->bw == RATE_INFO_BW_10)
			shift = 1;
		else
			shift = 0;
		rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
		break;
	}
	case STA_STATS_RATE_TYPE_HE:
		rinfo->flags = RATE_INFO_FLAGS_HE_MCS;
		rinfo->mcs = STA_STATS_GET(HE_MCS, rate);
		rinfo->nss = STA_STATS_GET(HE_NSS, rate);
		rinfo->he_gi = STA_STATS_GET(HE_GI, rate);
		rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate);
		rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate);
		break;
	}
}

static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
{
	u32 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);

	if (rate == STA_STATS_RATE_INVALID)
		return -EINVAL;

	sta_stats_decode_rate(sta->local, rate, rinfo);
	return 0;
}

static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
					int tid)
{
	unsigned int start;
	u64 value;

	do {
		start = u64_stats_fetch_begin_irq(&rxstats->syncp);
		value = rxstats->msdu[tid];
	} while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));

	return value;
}

static void sta_set_tidstats(struct sta_info *sta,
			     struct cfg80211_tid_stats *tidstats,
			     int tid)
{
	struct ieee80211_local *local = sta->local;
	int cpu;

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
		tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->rx_stats, tid);

		if (sta->pcpu_rx_stats) {
			for_each_possible_cpu(cpu) {
				struct ieee80211_sta_rx_stats *cpurxs;

				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
				tidstats->rx_msdu +=
					sta_get_tidstats_msdu(cpurxs, tid);
			}
		}

		tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
	}

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
		tidstats->tx_msdu = sta->tx_stats.msdu[tid];
	}

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
		tidstats->tx_msdu_retries = sta->status_stats.msdu_retries[tid];
	}

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
		tidstats->tx_msdu_failed = sta->status_stats.msdu_failed[tid];
	}

	if (local->ops->wake_tx_queue && tid < IEEE80211_NUM_TIDS) {
		spin_lock_bh(&local->fq.lock);
		rcu_read_lock();

		tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS);
		ieee80211_fill_txq_stats(&tidstats->txq_stats,
					 to_txq_info(sta->sta.txq[tid]));

		rcu_read_unlock();
		spin_unlock_bh(&local->fq.lock);
	}
}

static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
{
	unsigned int start;
	u64 value;

	do {
		start = u64_stats_fetch_begin_irq(&rxstats->syncp);
		value = rxstats->bytes;
	} while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));

	return value;
}

void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
		   bool tidstats)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	u32 thr = 0;
	int i, ac, cpu;
	struct ieee80211_sta_rx_stats *last_rxstats;

	last_rxstats = sta_get_last_rx_stats(sta);

	sinfo->generation = sdata->local->sta_generation;

	/* do before driver, so beacon filtering drivers have a
	 * chance to e.g. just add the number of filtered beacons
	 * (or just modify the value entirely, of course)
	 */
	if (sdata->vif.type == NL80211_IFTYPE_STATION)
		sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal;

	drv_sta_statistics(local, sdata, &sta->sta, sinfo);
	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
			 BIT_ULL(NL80211_STA_INFO_STA_FLAGS) |
			 BIT_ULL(NL80211_STA_INFO_BSS_PARAM) |
			 BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) |
			 BIT_ULL(NL80211_STA_INFO_ASSOC_AT_BOOTTIME) |
			 BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);

	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
		sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS);
	}

	sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
	sinfo->assoc_at = sta->assoc_at;
	sinfo->inactive_time =
		jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta));

	if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) |
			       BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
		sinfo->tx_bytes = 0;
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->tx_bytes += sta->tx_stats.bytes[ac];
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) {
		sinfo->tx_packets = 0;
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->tx_packets += sta->tx_stats.packets[ac];
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
	}

	if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
			       BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
		sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);

		if (sta->pcpu_rx_stats) {
			for_each_possible_cpu(cpu) {
				struct ieee80211_sta_rx_stats *cpurxs;

				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
				sinfo->rx_bytes += sta_get_stats_bytes(cpurxs);
			}
		}

		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) {
		sinfo->rx_packets = sta->rx_stats.packets;
		if (sta->pcpu_rx_stats) {
			for_each_possible_cpu(cpu) {
				struct ieee80211_sta_rx_stats *cpurxs;

				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
				sinfo->rx_packets += cpurxs->packets;
			}
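			/*
			 * Iterating all possible CPUs (not just the online
			 * ones) keeps packets that were counted on a CPU
			 * which has since gone offline in the total.
			 */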
		}
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) {
		sinfo->tx_retries = sta->status_stats.retry_count;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) {
		sinfo->tx_failed = sta->status_stats.retry_failed;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) {
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->rx_duration += sta->airtime[ac].rx_airtime;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) {
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->tx_duration += sta->airtime[ac].tx_airtime;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
		sinfo->airtime_weight = sta->airtime_weight;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
	}

	sinfo->rx_dropped_misc = sta->rx_stats.dropped;
	if (sta->pcpu_rx_stats) {
		for_each_possible_cpu(cpu) {
			struct ieee80211_sta_rx_stats *cpurxs;

			cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
			sinfo->rx_dropped_misc += cpurxs->dropped;
		}
	}

	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
	    !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) |
				 BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
		sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
	}

	if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
	    ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
		if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) {
			sinfo->signal = (s8)last_rxstats->last_signal;
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
		}

		if (!sta->pcpu_rx_stats &&
		    !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) {
			sinfo->signal_avg =
				-ewma_signal_read(&sta->rx_stats_avg.signal);
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
		}
	}

	/* for the average - if pcpu_rx_stats isn't set - rxstats must point to
	 * the sta->rx_stats struct, so the check here is fine with and without
	 * pcpu statistics
	 */
	if (last_rxstats->chains &&
	    !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) |
			       BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
		if (!sta->pcpu_rx_stats)
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);

		sinfo->chains = last_rxstats->chains;

		for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
			sinfo->chain_signal[i] =
				last_rxstats->chain_signal_last[i];
			sinfo->chain_signal_avg[i] =
				-ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]);
		}
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
		sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate,
				     &sinfo->txrate);
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) {
		if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0)
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
	}

	if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) {
		for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
			sta_set_tidstats(sta, &sinfo->pertid[i], i);
	}

	if (ieee80211_vif_is_mesh(&sdata->vif)) {
#ifdef CONFIG_MAC80211_MESH
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) |
				 BIT_ULL(NL80211_STA_INFO_PLID) |
				 BIT_ULL(NL80211_STA_INFO_PLINK_STATE) |
				 BIT_ULL(NL80211_STA_INFO_LOCAL_PM) |
				 BIT_ULL(NL80211_STA_INFO_PEER_PM) |
				 BIT_ULL(NL80211_STA_INFO_NONPEER_PM) |
				 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE) |
				 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_AS);

		sinfo->llid = sta->mesh->llid;
		sinfo->plid = sta->mesh->plid;
		sinfo->plink_state = sta->mesh->plink_state;
		if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET);
			sinfo->t_offset = sta->mesh->t_offset;
		}
		sinfo->local_pm = sta->mesh->local_pm;
		sinfo->peer_pm = sta->mesh->peer_pm;
		sinfo->nonpeer_pm = sta->mesh->nonpeer_pm;
		sinfo->connected_to_gate = sta->mesh->connected_to_gate;
		sinfo->connected_to_as = sta->mesh->connected_to_as;
#endif
	}

	sinfo->bss_param.flags = 0;
	if (sdata->vif.bss_conf.use_cts_prot)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
	if (sdata->vif.bss_conf.use_short_preamble)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
	if (sdata->vif.bss_conf.use_short_slot)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
	sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period;
	sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;

	sinfo->sta_flags.set = 0;
	sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
				BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
				BIT(NL80211_STA_FLAG_WME) |
				BIT(NL80211_STA_FLAG_MFP) |
				BIT(NL80211_STA_FLAG_AUTHENTICATED) |
				BIT(NL80211_STA_FLAG_ASSOCIATED) |
				BIT(NL80211_STA_FLAG_TDLS_PEER);
	if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
	if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
	if (sta->sta.wme)
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME);
	if (test_sta_flag(sta, WLAN_STA_MFP))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
	if (test_sta_flag(sta, WLAN_STA_AUTH))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
	if (test_sta_flag(sta, WLAN_STA_ASSOC))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);

	thr = sta_get_expected_throughput(sta);

	if (thr != 0) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
		sinfo->expected_throughput = thr;
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) &&
	    sta->status_stats.ack_signal_filled) {
		sinfo->ack_signal = sta->status_stats.last_ack_signal;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) &&
	    sta->status_stats.ack_signal_filled) {
		sinfo->avg_ack_signal =
			-(s8)ewma_avg_signal_read(
				&sta->status_stats.avg_ack_signal);
		sinfo->filled |=
			BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
	}

	if (ieee80211_vif_is_mesh(&sdata->vif)) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_LINK_METRIC);
		sinfo->airtime_link_metric =
			airtime_link_metric_get(local, sta);
	}
}

u32 sta_get_expected_throughput(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct rate_control_ref *ref = NULL;
	u32 thr = 0;

	if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
		ref = local->rate_ctrl;

	/* check if the driver has a SW RC implementation */
	if (ref && ref->ops->get_expected_throughput)
		thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv);
	else
		thr = drv_get_expected_throughput(local, sta);

	return thr;
}

unsigned long ieee80211_sta_last_active(struct sta_info *sta)
{
	struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);

	if (!sta->status_stats.last_ack ||
	    time_after(stats->last_rx, sta->status_stats.last_ack))
		return stats->last_rx;
	return sta->status_stats.last_ack;
}

static void sta_update_codel_params(struct sta_info *sta, u32 thr)
{
	if (!sta->sdata->local->ops->wake_tx_queue)
		return;

	if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) {
		sta->cparams.target = MS2TIME(50);
		sta->cparams.interval = MS2TIME(300);
		sta->cparams.ecn = false;
	} else {
		sta->cparams.target = MS2TIME(20);
		sta->cparams.interval = MS2TIME(100);
		sta->cparams.ecn = true;
	}
}

void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
					   u32 thr)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	sta_update_codel_params(sta, thr);
}
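
/*
 * Example (editorial sketch, not part of mac80211): a hypothetical driver's
 * TX-status path could feed the accounting hooks exported above roughly as
 * follows.  The mydrv_* function and its parameters are made up for
 * illustration; real drivers derive the airtime and buffered state from
 * their hardware or firmware reports.
 *
 *	static void mydrv_report_tx_status(struct ieee80211_sta *pubsta,
 *					   u8 tid, u32 airtime_us,
 *					   bool frames_still_queued)
 *	{
 *		// charge the measured TX airtime to this station and AC
 *		ieee80211_sta_register_airtime(pubsta, tid, airtime_us, 0);
 *
 *		// report whether frames remain buffered for this TID so
 *		// mac80211 can recalculate the station's TIM bit
 *		ieee80211_sta_set_buffered(pubsta, tid, frames_still_queued);
 *	}
 *
 * Similarly, a driver (or its rate control) that maintains an expected
 * throughput estimate can hand it to
 * ieee80211_sta_set_expected_throughput(), which lets mac80211 pick the
 * CoDel parameters for slow stations via sta_update_codel_params() above.
 */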