Lines matching defs:rtlpriv. Each entry below is prefixed with its line number in the source file; short annotated sketches of the recurring patterns appear after the listing.
62 struct rtl_priv *rtlpriv = rtl_priv(hw);
155 if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
198 struct rtl_priv *rtlpriv = rtl_priv(hw);
212 rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
240 struct rtl_priv *rtlpriv = rtl_priv(hw);
252 rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
301 struct rtl_priv *rtlpriv = rtl_priv(hw);
306 if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
307 list_for_each_entry(iter, &rtlpriv->glb_var->glb_priv_list,
310 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
313 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
329 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
341 struct rtl_priv *rtlpriv = rtl_priv(hw);
351 rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
378 struct rtl_priv *rtlpriv = rtl_priv(hw);
380 rtlpriv->io.dev = dev;
382 rtlpriv->io.write8_async = pci_write8_async;
383 rtlpriv->io.write16_async = pci_write16_async;
384 rtlpriv->io.write32_async = pci_write32_async;
386 rtlpriv->io.read8_sync = pci_read8_sync;
387 rtlpriv->io.read16_sync = pci_read16_sync;
388 rtlpriv->io.read32_sync = pci_read32_sync;
395 struct rtl_priv *rtlpriv = rtl_priv(hw);
407 spin_lock_bh(&rtlpriv->locks.waitq_lock);
408 skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
420 if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
427 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
435 struct rtl_priv *rtlpriv = rtl_priv(hw);
443 if (!rtlpriv->rtlhal.earlymode_enable)
452 rtlpriv->psc.rfpwr_state == ERFON) {
457 spin_lock(&rtlpriv->locks.waitq_lock);
463 spin_unlock(&rtlpriv->locks.waitq_lock);
466 spin_unlock(&rtlpriv->locks.waitq_lock);
476 rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
483 struct rtl_priv *rtlpriv = rtl_priv(hw);
495 if (rtlpriv->use_new_trx_flow)
500 if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
506 rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
511 if (rtlpriv->rtlhal.earlymode_enable)
514 rtl_dbg(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
532 rtlpriv->mac80211.offchan_delay = true;
533 rtlpriv->psc.state_inap = true;
535 rtlpriv->psc.state_inap = false;
551 rtlpriv->link_info.tidtx_inperiod[tid]++;
565 rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
576 if (((rtlpriv->link_info.num_rx_inperiod +
577 rtlpriv->link_info.num_tx_inperiod) > 8) ||
578 rtlpriv->link_info.num_rx_inperiod > 2)
586 struct rtl_priv *rtlpriv = rtl_priv(hw);
609 if (rtlpriv->use_new_trx_flow) {
611 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
615 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
618 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
621 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
666 struct rtl_priv *rtlpriv = rtl_priv(hw);
669 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR],
670 rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR]) |
676 struct rtl_priv *rtlpriv = rtl_priv(hw);
705 if (rtlpriv->use_new_trx_flow) {
708 rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
719 own = (u8)rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc,
738 rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
741 if (rtlpriv->use_new_trx_flow)
742 rtlpriv->cfg->ops->rx_check_dma_ok(hw,
746 len = rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc, false,
751 if (rtlpriv->use_new_trx_flow)
758 rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
789 rtlpriv->stats.rxbytesunicast += skb->len;
794 rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
796 rtlpriv->link_info.num_rx_inperiod++;
807 if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP &&
808 rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G &&
820 if (rtlpriv->use_new_trx_flow) {
826 rtl_write_word(rtlpriv, 0x3B4,
829 if (((rtlpriv->link_info.num_rx_inperiod +
830 rtlpriv->link_info.num_tx_inperiod) > 8) ||
831 rtlpriv->link_info.num_rx_inperiod > 2)
835 if (rtlpriv->use_new_trx_flow) {
845 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
860 struct rtl_priv *rtlpriv = rtl_priv(hw);
870 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
871 rtlpriv->cfg->ops->disable_interrupt(hw);
874 rtlpriv->cfg->ops->interrupt_recognized(hw, &intvec);
881 if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
882 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
885 if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
886 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
889 if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
890 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
892 if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
893 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
895 tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
899 if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
900 rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
902 if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
903 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
908 if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
909 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
914 if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
915 rtlpriv->link_info.num_tx_inperiod++;
917 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
922 if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
923 rtlpriv->link_info.num_tx_inperiod++;
925 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
930 if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
931 rtlpriv->link_info.num_tx_inperiod++;
933 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
938 if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
939 rtlpriv->link_info.num_tx_inperiod++;
941 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
947 if (intvec.intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
948 rtlpriv->link_info.num_tx_inperiod++;
950 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
957 if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
958 rtlpriv->link_info.num_tx_inperiod++;
960 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
967 if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
968 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
972 if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
973 rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
978 if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
979 rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
985 if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
986 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
988 queue_delayed_work(rtlpriv->works.rtl_wq,
989 &rtlpriv->works.fwevt_wq, 0);
1002 rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
1003 rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
1009 if (rtlpriv->rtlhal.earlymode_enable)
1010 tasklet_schedule(&rtlpriv->works.irq_tasklet);
1013 rtlpriv->cfg->ops->enable_interrupt(hw);
1014 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1020 struct rtl_priv *rtlpriv = from_tasklet(rtlpriv, t, works.irq_tasklet);
1021 struct ieee80211_hw *hw = rtlpriv->hw;
1027 struct rtl_priv *rtlpriv = from_tasklet(rtlpriv, t,
1029 struct ieee80211_hw *hw = rtlpriv->hw;
1046 if (rtlpriv->use_new_trx_flow)
1052 rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
1065 if (rtlpriv->use_new_trx_flow)
1068 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
1074 if (rtlpriv->use_new_trx_flow) {
1076 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pbuffer_desc, true,
1079 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
1087 struct rtl_priv *rtlpriv = rtl_priv(hw);
1088 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1121 struct rtl_priv *rtlpriv = rtl_priv(hw);
1153 tasklet_setup(&rtlpriv->works.irq_tasklet, _rtl_pci_irq_tasklet);
1154 tasklet_setup(&rtlpriv->works.irq_prepare_bcn_tasklet,
1156 INIT_WORK(&rtlpriv->works.lps_change_work,
1164 struct rtl_priv *rtlpriv = rtl_priv(hw);
1172 if (rtlpriv->use_new_trx_flow) {
1207 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
1211 if (!rtlpriv->use_new_trx_flow) {
1217 rtlpriv->cfg->ops->set_desc(hw, (u8 *)&desc[i],
1229 struct rtl_priv *rtlpriv = rtl_priv(hw);
1232 if (rtlpriv->use_new_trx_flow) {
1279 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
1288 struct rtl_priv *rtlpriv = rtl_priv(hw);
1297 if (rtlpriv->use_new_trx_flow)
1303 rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
1315 if (rtlpriv->use_new_trx_flow) {
1325 struct rtl_priv *rtlpriv = rtl_priv(hw);
1341 if (rtlpriv->use_new_trx_flow) {
1410 struct rtl_priv *rtlpriv = rtl_priv(hw);
1423 if (!rtlpriv->use_new_trx_flow &&
1431 rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
1436 if (rtlpriv->use_new_trx_flow) {
1437 rtlpriv->cfg->ops->set_desc(hw,
1442 rtlpriv->cfg->ops->set_desc(hw,
1446 rtlpriv->cfg->ops->set_desc(hw,
1450 rtlpriv->cfg->ops->set_desc(hw,
1456 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
1465 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1475 if (rtlpriv->use_new_trx_flow)
1482 rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
1489 if (rtlpriv->use_new_trx_flow) {
1498 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1507 struct rtl_priv *rtlpriv = rtl_priv(hw);
1516 if (!rtlpriv->rtlhal.earlymode_enable)
1532 if (!rtlpriv->link_info.higher_busytxtraffic[tid])
1535 spin_lock_bh(&rtlpriv->locks.waitq_lock);
1536 skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
1537 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
1547 struct rtl_priv *rtlpriv = rtl_priv(hw);
1565 if (rtlpriv->psc.sw_ps_enabled) {
1574 rtlpriv->stats.txbytesmulticast += skb->len;
1576 rtlpriv->stats.txbytesbroadcast += skb->len;
1578 rtlpriv->stats.txbytesunicast += skb->len;
1580 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1583 if (rtlpriv->use_new_trx_flow)
1593 if (rtlpriv->use_new_trx_flow) {
1596 own = (u8)rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc,
1600 rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
1605 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
1611 if (rtlpriv->cfg->ops->get_available_desc &&
1612 rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) {
1613 rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
1615 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1620 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
1622 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
1627 if (rtlpriv->use_new_trx_flow) {
1628 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
1631 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
1637 rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
1645 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1647 rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
1654 struct rtl_priv *rtlpriv = rtl_priv(hw);
1684 if (rtlpriv->psc.rfpwr_state == ERFOFF ||
1692 struct rtl_priv *rtlpriv = rtl_priv(hw);
1698 tasklet_kill(&rtlpriv->works.irq_tasklet);
1699 cancel_work_sync(&rtlpriv->works.lps_change_work);
1701 destroy_workqueue(rtlpriv->works.rtl_wq);
1721 struct rtl_priv *rtlpriv = rtl_priv(hw);
1726 struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
1733 if (rtlpriv->cfg->ops->get_btc_status &&
1734 rtlpriv->cfg->ops->get_btc_status()) {
1735 rtlpriv->btcoexist.btc_info.ap_num = 36;
1736 btc_ops->btc_init_variables(rtlpriv);
1737 btc_ops->btc_init_hal_vars(rtlpriv);
1739 btc_ops->btc_init_variables_wifi_only(rtlpriv);
1742 err = rtlpriv->cfg->ops->hw_init(hw);
1744 rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
1746 kfree(rtlpriv->btcoexist.btc_context);
1747 kfree(rtlpriv->btcoexist.wifi_only_context);
1750 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
1753 rtlpriv->cfg->ops->enable_interrupt(hw);
1754 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
1765 rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%s OK\n", __func__);
1771 struct rtl_priv *rtlpriv = rtl_priv(hw);
1778 if (rtlpriv->cfg->ops->get_btc_status())
1779 rtlpriv->btcoexist.btc_ops->btc_halt_notify(rtlpriv);
1781 if (rtlpriv->btcoexist.btc_ops)
1782 rtlpriv->btcoexist.btc_ops->btc_deinit_variables(rtlpriv);
1790 rtlpriv->cfg->ops->disable_interrupt(hw);
1791 cancel_work_sync(&rtlpriv->works.lps_change_work);
1793 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1795 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1797 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1802 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1805 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1807 rtlpriv->cfg->ops->hw_disable(hw);
1809 if (!rtlpriv->max_fw_size)
1811 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
1813 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1815 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1823 struct rtl_priv *rtlpriv = rtl_priv(hw);
1859 rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
1865 rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
1871 rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
1879 rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
1887 rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
1893 rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
1898 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
1902 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
1906 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
1910 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
1914 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
1919 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
1922 rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
1932 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
1936 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
1941 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
1952 rtlpriv->use_new_trx_flow = true;
1956 rtlpriv->use_new_trx_flow = false;
1976 rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
2000 rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
2007 rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
2016 list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
2023 struct rtl_priv *rtlpriv = rtl_priv(hw);
2041 rtl_dbg(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
2048 struct rtl_priv *rtlpriv = rtl_priv(hw);
2059 rtl_dbg(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
2100 struct rtl_priv *rtlpriv = NULL;
2148 rtlpriv = hw->priv;
2149 rtlpriv->hw = hw;
2150 pcipriv = (void *)rtlpriv->priv;
2152 init_completion(&rtlpriv->firmware_loading_complete);
2154 rtlpriv->proximity.proxim_on = false;
2156 pcipriv = (void *)rtlpriv->priv;
2160 rtlpriv->rtlhal.interface = INTF_PCI;
2161 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
2162 rtlpriv->intf_ops = &rtl_pci_ops;
2163 rtlpriv->glb_var = &rtl_global_var;
2173 pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
2174 pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
2175 pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);
2178 rtlpriv->io.pci_mem_start =
2180 rtlpriv->cfg->bar_id, pmem_len);
2181 if (rtlpriv->io.pci_mem_start == 0) {
2187 rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
2190 rtlpriv->io.pci_mem_start);
2209 rtlpriv->cfg->ops->read_eeprom_info(hw);
2211 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
2241 rtlpriv->mac80211.mac80211_registered = 1;
2252 rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
2259 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
2267 if (rtlpriv->io.pci_mem_start != 0)
2268 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
2271 complete(&rtlpriv->firmware_loading_complete);
2286 struct rtl_priv *rtlpriv = rtl_priv(hw);
2288 struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
2291 wait_for_completion(&rtlpriv->firmware_loading_complete);
2292 clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
2303 rtlpriv->intf_ops->adapter_stop(hw);
2305 rtlpriv->cfg->ops->disable_interrupt(hw);
2312 rtlpriv->cfg->ops->deinit_sw_vars(hw);
2322 list_del(&rtlpriv->list);
2323 if (rtlpriv->io.pci_mem_start != 0) {
2324 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
2357 struct rtl_priv *rtlpriv = rtl_priv(hw);
2359 rtlpriv->cfg->ops->hw_suspend(hw);
2369 struct rtl_priv *rtlpriv = rtl_priv(hw);
2371 rtlpriv->cfg->ops->hw_resume(hw);
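
Nearly every function in the listing opens with the same accessor, struct rtl_priv *rtlpriv = rtl_priv(hw); (lines 62, 198, 240, 301, 341, ...). The minimal sketch below shows how that accessor is commonly understood to work: mac80211 allocates ieee80211_hw with a driver-private area, the driver keeps its whole per-adapter state (struct rtl_priv) there, and any callback that receives an ieee80211_hw recovers the context with a single cast. The helper name example_rtl_priv() is illustrative; the real definition lives in the driver's headers.

/*
 * Minimal sketch (not the verbatim rtlwifi header): hw->priv is the
 * driver-private area reserved by ieee80211_alloc_hw(), and the driver
 * treats it as its struct rtl_priv.
 */
#include <net/mac80211.h>

struct rtl_priv;	/* full definition lives in the driver's wifi.h */

static inline struct rtl_priv *example_rtl_priv(struct ieee80211_hw *hw)
{
	return (struct rtl_priv *)hw->priv;
}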
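
Lines 380-388 install the PCI register accessors into rtlpriv->io, and the probe-path hits around lines 2173-2190 show pci_mem_start being obtained from the device's BAR. The sketch below shows one plausible shape for such an accessor, assuming register offsets are resolved against that mapped BAR; the example_ names are stand-ins, not the driver's real symbols.

#include <linux/io.h>
#include <linux/types.h>

struct example_rtl_io {
	unsigned long pci_mem_start;	/* ioremapped BAR base, as in rtlpriv->io */
};

struct example_rtl_priv {
	struct example_rtl_io io;
};

/* Hypothetical analogue of the pci_write8_async hook registered at line 382. */
static void example_pci_write8_async(struct example_rtl_priv *rtlpriv,
				     u32 addr, u8 val)
{
	/* One-byte MMIO write at BAR base + register offset. */
	writeb(val, (u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
}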
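
The interrupt-handler hits (lines 860-1014) all follow one indirection pattern: the shared PCI code never hard-codes chip registers or IMR bit masks, it indexes a per-chip maps[] table and calls per-chip ops callbacks. A compressed, illustrative version of that pattern, with made-up names:

#include <linux/types.h>

enum example_map_idx { EX_IMR_ROK, EX_IMR_BCNINT, EX_MAP_MAX };

struct example_hal_ops {
	void (*disable_interrupt)(void *hw);
	void (*enable_interrupt)(void *hw);
};

struct example_hal_cfg {
	u32 maps[EX_MAP_MAX];			/* chip-specific IMR bit masks */
	const struct example_hal_ops *ops;	/* chip-specific callbacks */
};

static void example_handle_irq(struct example_hal_cfg *cfg, void *hw, u32 inta)
{
	cfg->ops->disable_interrupt(hw);

	if (inta & cfg->maps[EX_IMR_ROK])
		;	/* receive OK: drain the RX ring */

	if (inta & cfg->maps[EX_IMR_BCNINT])
		;	/* beacon interrupt: schedule the beacon tasklet */

	cfg->ops->enable_interrupt(hw);
}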
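
Lines 1580-1647 show the transmit path's descriptor handshake: take the irq-safe ring lock, check the OWN bit via get_desc(), fill the descriptor, hand ownership to the hardware via set_desc(), then kick the DMA engine with tx_polling(). The sketch below compresses that flow with hypothetical types; the real callbacks and HW_DESC_* selectors are per-chip.

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_tx_ops {
	u64  (*get_desc)(void *hw, u8 *desc, bool istx, u8 field);
	void (*set_desc)(void *hw, u8 *desc, bool istx, u8 field, u8 *val);
	void (*tx_polling)(void *hw, u8 hw_queue);
};

#define EX_DESC_OWN	0	/* stand-in for HW_DESC_OWN */

static int example_tx_one(const struct example_tx_ops *ops, void *hw,
			  u8 *pdesc, u8 hw_queue, spinlock_t *ring_lock)
{
	unsigned long flags;
	u8 own;

	spin_lock_irqsave(ring_lock, flags);

	/* Hardware still owns this descriptor: the ring is full, bail out. */
	own = (u8)ops->get_desc(hw, pdesc, true, EX_DESC_OWN);
	if (own) {
		spin_unlock_irqrestore(ring_lock, flags);
		return -EBUSY;
	}

	/* ... fill_tx_desc() would map the skb and fill the fields here ... */

	own = 1;
	ops->set_desc(hw, pdesc, true, EX_DESC_OWN, &own);	/* give to HW */
	spin_unlock_irqrestore(ring_lock, flags);

	ops->tx_polling(hw, hw_queue);	/* tell the DMA engine to fetch it */
	return 0;
}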