/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has an initial pool in the size of num_queues*(8-2) - the
 *   maximum missing RBDs per allocation request (request posted with 2
 *   empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
 *   The queues supply the recycling of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
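
/*
 * Illustrative sketch, not driver code: the initial-pool arithmetic from the
 * theory-of-operation comment above, assuming the driver's convention that
 * each allocation request grants 8 RBDs (RX_CLAIM_REQ_ALLOC) but is posted
 * with only 2 (RX_POST_REQ_ALLOC). Standalone userspace C; the queue count
 * is a hypothetical example.
 */
#if 0
#include <stdio.h>

#define RX_CLAIM_REQ_ALLOC	8	/* RBDs granted per request */
#define RX_POST_REQ_ALLOC	2	/* empty RBDs posted with a request */

int main(void)
{
	unsigned int num_queues = 4;	/* hypothetical number of RX queues */

	/*
	 * Each request arrives with only 2 empty RBDs but must yield 8,
	 * so the allocator keeps a pool covering the missing 6 per queue.
	 */
	unsigned int pool = num_queues *
			    (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);

	printf("initial allocator pool: %u RBDs\n", pool);	/* prints 24 */
	return 0;
}
#endif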

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}
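
/*
 * Illustrative sketch, not driver code: the wrap-around arithmetic of
 * iwl_rxq_space() above, shown standalone for a hypothetical 256-entry
 * queue. The bitwise AND keeps the result well defined even when
 * read - write - 1 wraps below zero.
 */
#if 0
#include <stdio.h>

static int rxq_space(unsigned int read, unsigned int write, unsigned int size)
{
	return (read - write - 1) & (size - 1);	/* size must be a power of 2 */
}

int main(void)
{
	/* WRITE == READ: queue full of data, driver may restock 255 slots */
	printf("%d\n", rxq_space(8, 8, 256));	/* prints 255 */
	/* WRITE == READ - 1: fully restocked, no free slots remain */
	printf("%d\n", rxq_space(8, 7, 256));	/* prints 0 */
	return 0;
}
#endif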

/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		/* TODO: remove this once fw does it */
		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->trans_cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}
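
/*
 * Illustrative sketch, not driver code: the generic poll-until-idle pattern
 * that the iwl_poll_*_bit() helpers used above implement. The reg_read() and
 * delay_us() accessors and the 10 usec step are hypothetical stand-ins; the
 * real helpers live in iwl-io.c.
 */
#if 0
#include <errno.h>

extern unsigned int reg_read(unsigned int addr);	/* hypothetical */
extern void delay_us(unsigned int us);			/* hypothetical */

static int poll_bit(unsigned int addr, unsigned int bits, unsigned int mask,
		    int timeout_us)
{
	int t = 0;

	do {
		if ((reg_read(addr) & mask) == (bits & mask))
			return t;	/* idle: report elapsed time */
		delay_us(10);
		t += 10;
	} while (t < timeout_us);

	return -ETIMEDOUT;		/* DMA never reported idle */
}
#endif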

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->trans_cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}

static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));

		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}

	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
		     (u32)rxb->vid, rxq->id, rxq->write);
}
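
/*
 * Illustrative sketch, not driver code: how the non-AX210 path above can
 * fold the virtual buffer ID into the 64-bit descriptor. The 12-bit ID
 * width is a hypothetical example; the real width depends on the hardware's
 * supported_dma_mask. Packing works because the buffer's DMA address is
 * aligned, so its low bits are known to be zero (see the WARN_ON in
 * iwl_pcie_rxmq_restock() below).
 */
#if 0
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t page_dma = 0x12345000;	/* low 12 bits zero by alignment */
	uint16_t vid = 0x5a;		/* virtual RB ID */

	uint64_t bd = page_dma | vid;		/* pack both into one word */
	assert((bd & ~0xfffULL) == page_dma);	/* unpack the address */
	assert((bd & 0xfffULL) == vid);		/* unpack the ID */
	return 0;
}
#endif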

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* some low bits are expected to be unset (depending on hw) */
		WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
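
/*
 * Illustrative sketch, not driver code: the "multiples of 8" rule used when
 * publishing the write pointer above. The device only needs to learn about
 * new buffers in batches, so the internal pointer is rounded down before it
 * is written to hardware.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int write;

	for (write = 0; write < 20; write++) {
		/* same as round_down(write, 8) / (write & ~0x7) above */
		unsigned int write_actual = write & ~0x7u;

		printf("write=%2u -> write_actual=%2u\n", write, write_actual);
	}
	return 0;	/* 0..7 -> 0, 8..15 -> 8, 16..19 -> 16 */
}
#endif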

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   u32 *offset, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
	unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	if (trans_pcie->alloc_page) {
		spin_lock_bh(&trans_pcie->alloc_page_lock);
		/* recheck */
		if (trans_pcie->alloc_page) {
			*offset = trans_pcie->alloc_page_used;
			page = trans_pcie->alloc_page;
			trans_pcie->alloc_page_used += rbsize;
			if (trans_pcie->alloc_page_used >= allocsize)
				trans_pcie->alloc_page = NULL;
			else
				get_page(page);
			spin_unlock_bh(&trans_pcie->alloc_page_lock);
			return page;
		}
		spin_unlock_bh(&trans_pcie->alloc_page_lock);
	}

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}

	if (2 * rbsize <= allocsize) {
		spin_lock_bh(&trans_pcie->alloc_page_lock);
		if (!trans_pcie->alloc_page) {
			get_page(page);
			trans_pcie->alloc_page = page;
			trans_pcie->alloc_page_used = rbsize;
		}
		spin_unlock_bh(&trans_pcie->alloc_page_lock);
	}

	*offset = 0;
	return page;
}
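
/*
 * Illustrative sketch, not driver code: the offset bookkeeping used by
 * iwl_pcie_rx_alloc_page() above when one higher-order page is carved into
 * several receive buffers. The sizes are hypothetical; the real ones come
 * from rx_buf_size and rx_page_order. Every slice except the last takes an
 * extra page reference (get_page()) so that per-buffer frees balance out.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int allocsize = 8192;	/* e.g. PAGE_SIZE << 1 */
	unsigned int rbsize = 2048;	/* e.g. a 2K receive buffer */
	unsigned int used;

	for (used = 0; used + rbsize <= allocsize; used += rbsize)
		printf("RB at offset %u%s\n", used,
		       used + rbsize >= allocsize ? " (page exhausted)"
						  : " (get_page)");
	return 0;	/* offsets 0, 2048, 4096, 6144 */
}
#endif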

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		unsigned int offset;

		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		rxb->offset = offset;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, rxb->offset,
				     trans_pcie->rx_buf_bytes,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}

void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	if (!trans_pcie->rx_pool)
		return;

	for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_read(&rba->req_pending);

	IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
						      gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page,
						     rxb->offset,
						     trans_pcie->rx_buf_bytes,
						     DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		atomic_dec(&rba->req_pending);
		pending--;

		if (!pending) {
			pending = atomic_read(&rba->req_pending);
			if (pending)
				IWL_DEBUG_TPT(trans,
					      "Got more pending allocation requests = %d\n",
					      pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);

	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}
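
/*
 * Illustrative sketch, not driver code: a minimal model of the
 * req_pending/req_ready handshake between the queues and the allocator
 * above. Plain integers stand in for the atomics; in the driver these
 * transitions happen concurrently from interrupt and work-queue context.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int req_pending = 0, req_ready = 0;

	req_pending++;		/* a queue posts a request (2 used RBDs) */
	/* ... worker runs, allocates RX_CLAIM_REQ_ALLOC pages ... */
	req_pending--;
	req_ready++;		/* one batch of 8 RBDs is ready to claim */

	if (req_ready > 0) {	/* queue side: atomic_dec_if_positive() */
		req_ready--;
		printf("claimed one batch of 8 RBDs\n");
	}
	return 0;
}
#endif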

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by queue when the queue posted allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}
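
/*
 * Illustrative sketch, not driver code: the atomic_dec_if_positive()
 * behaviour that the comment above relies on, modelled with a plain
 * (non-atomic) integer.
 */
#if 0
static int dec_if_positive(int *v)
{
	/* returns old value minus one; stores it only when that is >= 0 */
	if (*v > 0)
		return --(*v);
	return *v - 1;
}

/* dec_if_positive of 0 returns -1, value stays 0  -> no request ready  */
/* dec_if_positive of 2 returns  1, value becomes 1 -> one request claimed */
#endif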

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
	struct iwl_rx_transfer_desc *rx_td;

	if (use_rx_td)
		return sizeof(*rx_td);
	else
		return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
			sizeof(__le32);
}

static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct device *dev = trans->dev;
	bool use_rx_td = (trans->trans_cfg->device_family >=
			  IWL_DEVICE_FAMILY_AX210);
	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	if (rxq->bd)
		dma_free_coherent(trans->dev,
				  free_size * rxq->queue_size,
				  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;

	if (rxq->used_bd)
		dma_free_coherent(trans->dev,
				  (use_rx_td ? sizeof(*rxq->cd) :
				   sizeof(__le32)) * rxq->queue_size,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return;

	if (rxq->tr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->tr_tail, rxq->tr_tail_dma);
	rxq->tr_tail_dma = 0;
	rxq->tr_tail = NULL;

	if (rxq->cr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->cr_tail, rxq->cr_tail_dma);
	rxq->cr_tail_dma = 0;
	rxq->cr_tail = NULL;
}
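
/*
 * Illustrative sketch, not driver code: the three descriptor sizes chosen by
 * iwl_pcie_free_bd_size() above, restated as plain C. Only the 16-byte total
 * is confirmed here (by the BUILD_BUG_ON in iwl_pcie_restock_bd()); the field
 * layout of the AX210 transfer descriptor is an assumption, and the driver's
 * real definition lives in its headers.
 */
#if 0
#include <stdint.h>

struct rx_transfer_desc {		/* AX210+: 16-byte descriptor */
	uint64_t addr;
	uint64_t rbid_and_reserved;	/* hypothetical field split */
};

_Static_assert(sizeof(struct rx_transfer_desc) == 2 * sizeof(uint64_t),
	       "must match the driver's BUILD_BUG_ON");
/* multi-queue (pre-AX210): 8-byte descriptor (__le64, addr | vid)     */
/* single-queue legacy:     4-byte descriptor (__le32, addr >> 8)      */
#endif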

static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;
	int i;
	int free_size;
	bool use_rx_td = (trans->trans_cfg->device_family >=
			  IWL_DEVICE_FAMILY_AX210);
	size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
			      sizeof(struct iwl_rb_status);

	spin_lock_init(&rxq->lock);
	if (trans->trans_cfg->mq_rx_supported)
		rxq->queue_size = trans->cfg->num_rbds;
	else
		rxq->queue_size = RX_QUEUE_SIZE;

	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	/*
	 * Allocate the circular buffer of Read Buffer Descriptors
	 * (RBDs)
	 */
	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err;

	if (trans->trans_cfg->mq_rx_supported) {
		rxq->used_bd = dma_alloc_coherent(dev,
						  (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
						  &rxq->used_bd_dma,
						  GFP_KERNEL);
		if (!rxq->used_bd)
			goto err;
	}

	rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
	rxq->rb_stts_dma =
		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

	if (!use_rx_td)
		return 0;

	/* Allocate the driver's pointer to TR tail */
	rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->tr_tail_dma, GFP_KERNEL);
	if (!rxq->tr_tail)
		goto err;

	/* Allocate the driver's pointer to CR tail */
	rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->cr_tail_dma, GFP_KERNEL);
	if (!rxq->cr_tail)
		goto err;

	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);
	}

	return -ENOMEM;
}
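
/*
 * Illustrative sketch, not driver code: how the per-queue rb_stts pointers
 * above slice one contiguous DMA-coherent allocation (made in
 * iwl_pcie_rx_alloc() below). The element size matches this file
 * (sizeof(__le16) on AX210+); the base address and queue count are
 * hypothetical.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long base = 0x1000;	/* hypothetical base_rb_stts_dma */
	unsigned int rb_stts_size = 2;	/* sizeof(__le16) on AX210+ */
	unsigned int id;

	for (id = 0; id < 4; id++)	/* assuming four RX queues */
		printf("rxq %u status at 0x%lx\n",
		       id, base + id * rb_stts_size);
	return 0;
}
#endif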

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, ret;
	size_t rb_stts_size = trans->trans_cfg->device_family >=
			      IWL_DEVICE_FAMILY_AX210 ?
			      sizeof(__le16) : sizeof(struct iwl_rb_status);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
				      sizeof(trans_pcie->rx_pool[0]),
				      GFP_KERNEL);
	trans_pcie->global_table =
		kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
			sizeof(trans_pcie->global_table[0]),
			GFP_KERNEL);
	if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
	    !trans_pcie->global_table) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock_init(&rba->lock);

	/*
	 * Allocate the driver's pointer to receive buffer status.
	 * Allocate for all queues continuously (HW requirement).
	 */
	trans_pcie->base_rb_stts =
		dma_alloc_coherent(trans->dev,
				   rb_stts_size * trans->num_rx_queues,
				   &trans_pcie->base_rb_stts_dma,
				   GFP_KERNEL);
	if (!trans_pcie->base_rb_stts) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;
		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			goto err;
	}
	return 0;

err:
	if (trans_pcie->base_rb_stts) {
		dma_free_coherent(trans->dev,
				  rb_stts_size * trans->num_rx_queues,
				  trans_pcie->base_rb_stts,
				  trans_pcie->base_rb_stts_dma);
		trans_pcie->base_rb_stts = NULL;
		trans_pcie->base_rb_stts_dma = 0;
	}
	kfree(trans_pcie->rx_pool);
	kfree(trans_pcie->global_table);
	kfree(trans_pcie->rxq);

	return ret;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA
	 * Rx buffer size 4 or 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
	 * Default queue is 0
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->trans_cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
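
/*
 * Illustrative sketch, not driver code: the queue-enable mask built by the
 * loop in iwl_pcie_rx_mq_hw_init() above, which sets both BIT(i) and
 * BIT(i + 16) for each queue i before writing RFH_RXF_RXQ_ACTIVE. Shown for
 * a hypothetical two-queue setup.
 */
#if 0
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int enabled = 0;
	unsigned int i;

	for (i = 0; i < 2; i++)
		enabled |= BIT(i) | BIT(i + 16);

	printf("RFH_RXF_RXQ_ACTIVE = 0x%08x\n", enabled);	/* 0x00030003 */
	return 0;
}
#endif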
10408c2ecf20Sopenharmony_ci * Activate DMA snooping. 10418c2ecf20Sopenharmony_ci * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe 10428c2ecf20Sopenharmony_ci * Default queue is 0 10438c2ecf20Sopenharmony_ci */ 10448c2ecf20Sopenharmony_ci iwl_write_prph_no_grab(trans, RFH_GEN_CFG, 10458c2ecf20Sopenharmony_ci RFH_GEN_CFG_RFH_DMA_SNOOP | 10468c2ecf20Sopenharmony_ci RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) | 10478c2ecf20Sopenharmony_ci RFH_GEN_CFG_SERVICE_DMA_SNOOP | 10488c2ecf20Sopenharmony_ci RFH_GEN_CFG_VAL(RB_CHUNK_SIZE, 10498c2ecf20Sopenharmony_ci trans->trans_cfg->integrated ? 10508c2ecf20Sopenharmony_ci RFH_GEN_CFG_RB_CHUNK_SIZE_64 : 10518c2ecf20Sopenharmony_ci RFH_GEN_CFG_RB_CHUNK_SIZE_128)); 10528c2ecf20Sopenharmony_ci /* Enable the relevant rx queues */ 10538c2ecf20Sopenharmony_ci iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled); 10548c2ecf20Sopenharmony_ci 10558c2ecf20Sopenharmony_ci iwl_trans_release_nic_access(trans, &flags); 10568c2ecf20Sopenharmony_ci 10578c2ecf20Sopenharmony_ci /* Set interrupt coalescing timer to default (2048 usecs) */ 10588c2ecf20Sopenharmony_ci iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 10598c2ecf20Sopenharmony_ci} 10608c2ecf20Sopenharmony_ci 10618c2ecf20Sopenharmony_civoid iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) 10628c2ecf20Sopenharmony_ci{ 10638c2ecf20Sopenharmony_ci lockdep_assert_held(&rxq->lock); 10648c2ecf20Sopenharmony_ci 10658c2ecf20Sopenharmony_ci INIT_LIST_HEAD(&rxq->rx_free); 10668c2ecf20Sopenharmony_ci INIT_LIST_HEAD(&rxq->rx_used); 10678c2ecf20Sopenharmony_ci rxq->free_count = 0; 10688c2ecf20Sopenharmony_ci rxq->used_count = 0; 10698c2ecf20Sopenharmony_ci} 10708c2ecf20Sopenharmony_ci 10718c2ecf20Sopenharmony_ciint iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) 10728c2ecf20Sopenharmony_ci{ 10738c2ecf20Sopenharmony_ci WARN_ON(1); 10748c2ecf20Sopenharmony_ci return 0; 10758c2ecf20Sopenharmony_ci} 10768c2ecf20Sopenharmony_ci 10778c2ecf20Sopenharmony_cistatic int _iwl_pcie_rx_init(struct iwl_trans *trans) 10788c2ecf20Sopenharmony_ci{ 10798c2ecf20Sopenharmony_ci struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 10808c2ecf20Sopenharmony_ci struct iwl_rxq *def_rxq; 10818c2ecf20Sopenharmony_ci struct iwl_rb_allocator *rba = &trans_pcie->rba; 10828c2ecf20Sopenharmony_ci int i, err, queue_size, allocator_pool_size, num_alloc; 10838c2ecf20Sopenharmony_ci 10848c2ecf20Sopenharmony_ci if (!trans_pcie->rxq) { 10858c2ecf20Sopenharmony_ci err = iwl_pcie_rx_alloc(trans); 10868c2ecf20Sopenharmony_ci if (err) 10878c2ecf20Sopenharmony_ci return err; 10888c2ecf20Sopenharmony_ci } 10898c2ecf20Sopenharmony_ci def_rxq = trans_pcie->rxq; 10908c2ecf20Sopenharmony_ci 10918c2ecf20Sopenharmony_ci cancel_work_sync(&rba->rx_alloc); 10928c2ecf20Sopenharmony_ci 10938c2ecf20Sopenharmony_ci spin_lock(&rba->lock); 10948c2ecf20Sopenharmony_ci atomic_set(&rba->req_pending, 0); 10958c2ecf20Sopenharmony_ci atomic_set(&rba->req_ready, 0); 10968c2ecf20Sopenharmony_ci INIT_LIST_HEAD(&rba->rbd_allocated); 10978c2ecf20Sopenharmony_ci INIT_LIST_HEAD(&rba->rbd_empty); 10988c2ecf20Sopenharmony_ci spin_unlock(&rba->lock); 10998c2ecf20Sopenharmony_ci 11008c2ecf20Sopenharmony_ci /* free all first - we overwrite everything here */ 11018c2ecf20Sopenharmony_ci iwl_pcie_free_rbs_pool(trans); 11028c2ecf20Sopenharmony_ci 11038c2ecf20Sopenharmony_ci for (i = 0; i < RX_QUEUE_SIZE; i++) 11048c2ecf20Sopenharmony_ci def_rxq->queue[i] = NULL; 11058c2ecf20Sopenharmony_ci 11068c2ecf20Sopenharmony_ci for (i = 0; i < 
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0,
		       (trans->trans_cfg->device_family >=
			IWL_DEVICE_FAMILY_AX210) ?
		       sizeof(__le16) : sizeof(struct iwl_rb_status));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->trans_cfg->mq_rx_supported ?
			trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	num_alloc = queue_size + allocator_pool_size;

	for (i = 0; i < num_alloc; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)(i + 1);
		rxb->invalid = true;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

	return 0;
}
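/*
 * Illustrative sizing note (editor's sketch, not driver code): assuming the
 * driver's usual constants -- RX_QUEUE_SIZE = 256, RX_CLAIM_REQ_ALLOC = 8
 * and RX_POST_REQ_ALLOC = 2, defined elsewhere in this driver -- a single
 * legacy (non-MQ) queue partitions the pool as:
 *
 *	queue_size          = 256;
 *	allocator_pool_size = 1 * (8 - 2);      // 6
 *	num_alloc           = 256 + 6;          // 262
 *
 * The 6 extra buffers per queue are what the comment in
 * iwl_pcie_rx_reuse_rbd() below refers to as "another 6 from the pool".
 */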
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = _iwl_pcie_rx_init(trans);

	if (ret)
		return ret;

	if (trans->trans_cfg->mq_rx_supported)
		iwl_pcie_rx_mq_hw_init(trans);
	else
		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);

	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);

	spin_lock(&trans_pcie->rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
	spin_unlock(&trans_pcie->rxq->lock);

	return 0;
}

int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH.
	 * Restock will be done at alive, after the firmware has configured
	 * the RFH.
	 */
	return _iwl_pcie_rx_init(trans);
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;
	size_t rb_stts_size = trans->trans_cfg->device_family >=
				IWL_DEVICE_FAMILY_AX210 ?
			      sizeof(__le16) : sizeof(struct iwl_rb_status);

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * exit now
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);

	iwl_pcie_free_rbs_pool(trans);

	if (trans_pcie->base_rb_stts) {
		dma_free_coherent(trans->dev,
				  rb_stts_size * trans->num_rx_queues,
				  trans_pcie->base_rb_stts,
				  trans_pcie->base_rb_stts_dma);
		trans_pcie->base_rb_stts = NULL;
		trans_pcie->base_rb_stts_dma = 0;
	}

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);

		if (rxq->napi.poll)
			netif_napi_del(&rxq->napi);
	}
	kfree(trans_pcie->rx_pool);
	kfree(trans_pcie->global_table);
	kfree(trans_pcie->rxq);

	if (trans_pcie->alloc_page)
		__free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
}

static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
					  struct iwl_rb_allocator *rba)
{
	spin_lock(&rba->lock);
	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}

/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs, a request for allocation is posted.
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list; it will be moved to the allocator
	 * in batches before claiming or posting a request. */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC newly released rx buffers,
	 * issue a request to the allocator. The modulo with
	 * RX_CLAIM_REQ_ALLOC handles the case where we failed to claim
	 * RX_CLAIM_REQ_ALLOC buffers but still need to post another
	 * request afterwards.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		 * The allocator has another 6 from the pool for the
		 * request completion. */
		iwl_pcie_rx_move_to_allocator(rxq, rba);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
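/*
 * Worked example for the batching above (editor's note, assuming the
 * driver's usual RX_POST_REQ_ALLOC = 2 and RX_CLAIM_REQ_ALLOC = 8):
 * used_count values of 2, 10, 18, ... satisfy (used_count % 8) == 2, so a
 * request is posted as soon as the first 2 reusable RBDs are handed over,
 * and again every 8 reused RBDs after that; the allocator then completes
 * each request with RX_CLAIM_REQ_ALLOC freshly allocated buffers at once.
 */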
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rxq *rxq,
				  struct iwl_rx_mem_buffer *rxb,
				  bool emergency,
				  int i)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	bool page_stolen = false;
	int max_len = trans_pcie->rx_buf_bytes;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = rxb->offset + offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
			IWL_DEBUG_RX(trans,
				     "Q %d: RB end marker at offset %d\n",
				     rxq->id, offset);
			break;
		}

		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS != rxq->id,
		     "frame on invalid queue - is on %d and indicates %d\n",
		     rxq->id,
		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS);

		IWL_DEBUG_RX(trans,
			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
			     rxq->id, offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.group_id, pkt->hdr.cmd,
			     le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 * to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 * there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 * but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim && !pkt->hdr.group_id) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = iwl_txq_get_cmd_index(txq, index);

		if (rxq->id == trans_pcie->def_rx_queue)
			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
				       &rxcb);
		else
			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
					   &rxcb, rxq->id);

		if (reclaim) {
			kfree_sensitive(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen;
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
			break;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, rxb->offset,
				     trans_pcie->rx_buf_bytes,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}

static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
						  struct iwl_rxq *rxq, int i,
						  bool *join)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	u16 vid;

	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);

	if (!trans->trans_cfg->mq_rx_supported) {
		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;
		return rxb;
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		vid = le16_to_cpu(rxq->cd[i].rbid);
		*join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
	} else {
		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
	}

	if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
		goto out_err;

	rxb = trans_pcie->global_table[vid - 1];
	if (rxb->invalid)
		goto out_err;

	IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);

	rxb->invalid = true;

	return rxb;

out_err:
	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
	iwl_force_nmi(trans);
	return NULL;
}
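/*
 * Illustrative sketch of the VID round trip (editor's note, simplified
 * model): on restock the driver publishes rxb->vid together with the page
 * DMA address in the free BD, and the hardware echoes that VID back in the
 * used BD / completion descriptor. For the pre-AX210 32-bit layout above:
 *
 *	vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;   // HW -> driver
 *	rxb = trans_pcie->global_table[vid - 1];     // VID is 1-based
 *
 * Keeping VIDs 1-based leaves 0 free to act as the "invalid" marker that
 * the checks above rely on.
 */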
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct napi_struct *napi;
	struct iwl_rxq *rxq;
	u32 r, i, count = 0;
	bool emergency = false;

	if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
		return;

	rxq = &trans_pcie->rxq[queue];

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode).
	 */
	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
	i = rxq->read;

	/* W/A 9000 device step A0 wrap-around bug */
	r &= (rxq->queue_size - 1);

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);

	while (i != r) {
		struct iwl_rb_allocator *rba = &trans_pcie->rba;
		struct iwl_rx_mem_buffer *rxb;
		/* number of RBDs still waiting for page allocation */
		u32 rb_pending_alloc =
			atomic_read(&trans_pcie->rba.req_pending) *
			RX_CLAIM_REQ_ALLOC;
		bool join = false;

		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
			     !emergency)) {
			iwl_pcie_rx_move_to_allocator(rxq, rba);
			emergency = true;
			IWL_DEBUG_TPT(trans,
				      "RX path is in emergency. Pending allocations %d\n",
				      rb_pending_alloc);
		}

		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);

		rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
		if (!rxb)
			goto out;

		if (unlikely(join || rxq->next_rb_is_fragment)) {
			rxq->next_rb_is_fragment = join;
			/*
			 * We can only get a multi-RB in the following cases:
			 *  - firmware issue, sending a notification that is
			 *    too big
			 *  - sniffer mode with a large A-MSDU
			 *  - large MTU frames (>2k)
			 * since the multi-RB functionality is limited to newer
			 * hardware that cannot put multiple entries into a
			 * single RB.
			 *
			 * Right now, the higher layers aren't set up to deal
			 * with that, so discard all of these.
			 */
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else {
			iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
		}

		i = (i + 1) & (rxq->queue_size - 1);
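		/*
		 * Editor's note (worked example): queue_size is a power of
		 * two, so the AND above wraps the index without a divide,
		 * e.g. for a 256-entry ring, (255 + 1) & 0xFF == 0. The
		 * emergency checks around this loop form a hysteresis band:
		 * entered above at queue_size / 2 pending allocations, and
		 * left (further down) only once fewer than queue_size / 3
		 * remain, so the path does not flap between the two modes.
		 */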
		/*
		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers,
		 * try to claim the pre-allocated buffers from the allocator.
		 * If not ready, we will try to reclaim next time.
		 * There is no need to reschedule work - the allocator exits
		 * only on success.
		 */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
			iwl_pcie_rx_allocator_get(trans, rxq);

		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
			/* Add the remaining empty RBDs for allocator use */
			iwl_pcie_rx_move_to_allocator(rxq, rba);
		} else if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rb_pending_alloc < rxq->queue_size / 3) {
					IWL_DEBUG_TPT(trans,
						      "RX path exited emergency. Pending allocations %d\n",
						      rb_pending_alloc);
					emergency = false;
				}

				rxq->read = i;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
				iwl_pcie_rxq_restock(trans, rxq);
				goto restart;
			}
		}
	}
out:
	/* Backtrack one entry */
	rxq->read = i;
	/* update cr tail with the rxq read pointer */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		*rxq->cr_tail = cpu_to_le16(r);
	spin_unlock(&rxq->lock);

	/*
	 * Handle a case where in emergency there are some unallocated RBDs.
	 * Those RBDs are in the used list, but are not tracked by the queue's
	 * used_count, which only counts allocator-owned RBDs.
	 * Unallocated emergency RBDs must be allocated on exit, otherwise,
	 * when called again, the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed
	 * back by the queue.
	 * By allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);

	napi = &rxq->napi;
	if (napi->poll) {
		napi_gro_flush(napi, false);

		if (napi->rx_count) {
			netif_receive_skb_list(&napi->rx_list);
			INIT_LIST_HEAD(&napi->rx_list);
			napi->rx_count = 0;
		}
	}

	iwl_pcie_rxq_restock(trans, rxq);
}

static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
{
	u8 queue = entry->entry;
	struct msix_entry *entries = entry - queue;

	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}
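/*
 * Editor's note (illustrative, not driver code): msix_entries[] is an array
 * embedded in struct iwl_trans_pcie, and entry->entry holds the index of
 * this entry within it, so plain pointer arithmetic recovers the array base
 * before container_of() walks back to the enclosing structure:
 *
 *	base       = entry - entry->entry;       // &msix_entries[0]
 *	trans_pcie = container_of(base, struct iwl_trans_pcie,
 *				  msix_entries[0]);
 */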
/*
 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving
 * responses from fw.
 * This interrupt handler should be used with RSS queues only.
 */
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);

	if (WARN_ON(entry->entry >= trans->num_rx_queues))
		return IRQ_NONE;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	local_bh_disable();
	iwl_pcie_rx_handle(trans, entry->entry);
	local_bh_enable();

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		if (!trans->txqs.txq[i])
			continue;
		del_timer(&trans->txqs.txq[i]->stuck_timer);
	}

	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}

static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

/*
 * Interrupt handling using the ICT table. With ICT, the driver stops
 * reading the INTA register to discover the device's interrupt causes,
 * since reading that register is expensive. Instead, the device writes
 * each cause into the ICT table in DRAM, increments its index, and then
 * fires an interrupt. The driver ORs all ICT table entries from the
 * current index up to the first zero-valued entry; the result is the set
 * of interrupts to service. The driver then clears those entries back to
 * zero and updates its index.
 */
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}
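/*
 * Worked example for the expansion above (editor's note): an ICT entry
 * packs CSR_INT bits 0-7 into byte 0 and bits 24-31 into byte 1. Take
 * val = 0x40001 (bit 18 set by the HW plus one low cause bit):
 *
 *	val |= 0x8000;                                 // w/a: 0x48001
 *	inta = (0xff & val) | ((0xff00 & val) << 16);  // 0x80000001
 *
 * The restored bit 15 lands on CSR_INT bit 31 (the Rx bit), while bits
 * 18/19 themselves fall outside both masks and are dropped from the final
 * cause word.
 */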
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	bool hw_rfkill, prev, report;

	mutex_lock(&trans_pcie->mutex);
	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
		set_bit(STATUS_RFKILL_HW, &trans->status);
	}
	if (trans_pcie->opmode_down)
		report = hw_rfkill;
	else
		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
		 hw_rfkill ? "disable radio" : "enable radio");

	isr_stats->rfkill++;

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);
	mutex_unlock(&trans_pcie->mutex);

	if (hw_rfkill) {
		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
				       &trans->status))
			IWL_DEBUG_RF_KILL(trans,
					  "Rfkill while SYNC HCMD in flight\n");
		wake_up(&trans_pcie->wait_command_queue);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);

	/* If the DRAM interrupt table is not set yet,
	 * fall back to the legacy interrupt cause read.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			_iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock(&trans_pcie->irq_lock);
		goto out;
	}

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 * There is a hardware bug in the interrupt mask function: some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	spin_unlock(&trans_pcie->irq_lock);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	/* NIC fires this, but we don't use it, redundant with WAKEUP */
	if (inta & CSR_INT_BIT_SCD) {
		IWL_DEBUG_ISR(trans,
			      "Scheduler finished to transmit the frame/frames.\n");
		isr_stats->sch++;
	}

	/* Alive notification via Rx interrupt will do the real work */
	if (inta & CSR_INT_BIT_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->trans_cfg->gen2) {
			/*
			 * We can restock, since firmware configured
			 * the RFH
			 */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}

		handled |= CSR_INT_BIT_ALIVE;
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		iwl_pcie_handle_rfkill_irq(trans);
		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive the
		 * RX interrupt while the shared data changes do not yet
		 * reflect it; the periodic interrupt will detect any dangling
		 * Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * a real RX interrupt (instead of just the periodic int), to
		 * catch any dangling Rx interrupt. If it was just the
		 * periodic interrupt, there was no dangling Rx activity, and
		 * no need to extend the periodic interrupt; one-shot is
		 * enough.
		 */
20018c2ecf20Sopenharmony_ci */ 20028c2ecf20Sopenharmony_ci if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) 20038c2ecf20Sopenharmony_ci iwl_write8(trans, CSR_INT_PERIODIC_REG, 20048c2ecf20Sopenharmony_ci CSR_INT_PERIODIC_ENA); 20058c2ecf20Sopenharmony_ci 20068c2ecf20Sopenharmony_ci isr_stats->rx++; 20078c2ecf20Sopenharmony_ci 20088c2ecf20Sopenharmony_ci local_bh_disable(); 20098c2ecf20Sopenharmony_ci iwl_pcie_rx_handle(trans, 0); 20108c2ecf20Sopenharmony_ci local_bh_enable(); 20118c2ecf20Sopenharmony_ci } 20128c2ecf20Sopenharmony_ci 20138c2ecf20Sopenharmony_ci /* This "Tx" DMA channel is used only for loading uCode */ 20148c2ecf20Sopenharmony_ci if (inta & CSR_INT_BIT_FH_TX) { 20158c2ecf20Sopenharmony_ci iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); 20168c2ecf20Sopenharmony_ci IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); 20178c2ecf20Sopenharmony_ci isr_stats->tx++; 20188c2ecf20Sopenharmony_ci handled |= CSR_INT_BIT_FH_TX; 20198c2ecf20Sopenharmony_ci /* Wake up uCode load routine, now that load is complete */ 20208c2ecf20Sopenharmony_ci trans_pcie->ucode_write_complete = true; 20218c2ecf20Sopenharmony_ci wake_up(&trans_pcie->ucode_write_waitq); 20228c2ecf20Sopenharmony_ci } 20238c2ecf20Sopenharmony_ci 20248c2ecf20Sopenharmony_ci if (inta & ~handled) { 20258c2ecf20Sopenharmony_ci IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); 20268c2ecf20Sopenharmony_ci isr_stats->unhandled++; 20278c2ecf20Sopenharmony_ci } 20288c2ecf20Sopenharmony_ci 20298c2ecf20Sopenharmony_ci if (inta & ~(trans_pcie->inta_mask)) { 20308c2ecf20Sopenharmony_ci IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", 20318c2ecf20Sopenharmony_ci inta & ~trans_pcie->inta_mask); 20328c2ecf20Sopenharmony_ci } 20338c2ecf20Sopenharmony_ci 20348c2ecf20Sopenharmony_ci spin_lock(&trans_pcie->irq_lock); 20358c2ecf20Sopenharmony_ci /* only Re-enable all interrupt if disabled by irq */ 20368c2ecf20Sopenharmony_ci if (test_bit(STATUS_INT_ENABLED, &trans->status)) 20378c2ecf20Sopenharmony_ci _iwl_enable_interrupts(trans); 20388c2ecf20Sopenharmony_ci /* we are loading the firmware, enable FH_TX interrupt only */ 20398c2ecf20Sopenharmony_ci else if (handled & CSR_INT_BIT_FH_TX) 20408c2ecf20Sopenharmony_ci iwl_enable_fw_load_int(trans); 20418c2ecf20Sopenharmony_ci /* Re-enable RF_KILL if it occurred */ 20428c2ecf20Sopenharmony_ci else if (handled & CSR_INT_BIT_RF_KILL) 20438c2ecf20Sopenharmony_ci iwl_enable_rfkill_int(trans); 20448c2ecf20Sopenharmony_ci /* Re-enable the ALIVE / Rx interrupt if it occurred */ 20458c2ecf20Sopenharmony_ci else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX)) 20468c2ecf20Sopenharmony_ci iwl_enable_fw_load_int_ctx_info(trans); 20478c2ecf20Sopenharmony_ci spin_unlock(&trans_pcie->irq_lock); 20488c2ecf20Sopenharmony_ci 20498c2ecf20Sopenharmony_ciout: 20508c2ecf20Sopenharmony_ci lock_map_release(&trans->sync_cmd_lockdep_map); 20518c2ecf20Sopenharmony_ci return IRQ_HANDLED; 20528c2ecf20Sopenharmony_ci} 20538c2ecf20Sopenharmony_ci 20548c2ecf20Sopenharmony_ci/****************************************************************************** 20558c2ecf20Sopenharmony_ci * 20568c2ecf20Sopenharmony_ci * ICT functions 20578c2ecf20Sopenharmony_ci * 20588c2ecf20Sopenharmony_ci ******************************************************************************/ 20598c2ecf20Sopenharmony_ci 20608c2ecf20Sopenharmony_ci/* Free dram table */ 20618c2ecf20Sopenharmony_civoid iwl_pcie_free_ict(struct iwl_trans *trans) 20628c2ecf20Sopenharmony_ci{ 20638c2ecf20Sopenharmony_ci struct iwl_trans_pcie 
*trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 20648c2ecf20Sopenharmony_ci 20658c2ecf20Sopenharmony_ci if (trans_pcie->ict_tbl) { 20668c2ecf20Sopenharmony_ci dma_free_coherent(trans->dev, ICT_SIZE, 20678c2ecf20Sopenharmony_ci trans_pcie->ict_tbl, 20688c2ecf20Sopenharmony_ci trans_pcie->ict_tbl_dma); 20698c2ecf20Sopenharmony_ci trans_pcie->ict_tbl = NULL; 20708c2ecf20Sopenharmony_ci trans_pcie->ict_tbl_dma = 0; 20718c2ecf20Sopenharmony_ci } 20728c2ecf20Sopenharmony_ci} 20738c2ecf20Sopenharmony_ci 20748c2ecf20Sopenharmony_ci/* 20758c2ecf20Sopenharmony_ci * allocate dram shared table, it is an aligned memory 20768c2ecf20Sopenharmony_ci * block of ICT_SIZE. 20778c2ecf20Sopenharmony_ci * also reset all data related to ICT table interrupt. 20788c2ecf20Sopenharmony_ci */ 20798c2ecf20Sopenharmony_ciint iwl_pcie_alloc_ict(struct iwl_trans *trans) 20808c2ecf20Sopenharmony_ci{ 20818c2ecf20Sopenharmony_ci struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 20828c2ecf20Sopenharmony_ci 20838c2ecf20Sopenharmony_ci trans_pcie->ict_tbl = 20848c2ecf20Sopenharmony_ci dma_alloc_coherent(trans->dev, ICT_SIZE, 20858c2ecf20Sopenharmony_ci &trans_pcie->ict_tbl_dma, GFP_KERNEL); 20868c2ecf20Sopenharmony_ci if (!trans_pcie->ict_tbl) 20878c2ecf20Sopenharmony_ci return -ENOMEM; 20888c2ecf20Sopenharmony_ci 20898c2ecf20Sopenharmony_ci /* just an API sanity check ... it is guaranteed to be aligned */ 20908c2ecf20Sopenharmony_ci if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { 20918c2ecf20Sopenharmony_ci iwl_pcie_free_ict(trans); 20928c2ecf20Sopenharmony_ci return -EINVAL; 20938c2ecf20Sopenharmony_ci } 20948c2ecf20Sopenharmony_ci 20958c2ecf20Sopenharmony_ci return 0; 20968c2ecf20Sopenharmony_ci} 20978c2ecf20Sopenharmony_ci 20988c2ecf20Sopenharmony_ci/* Device is going up inform it about using ICT interrupt table, 20998c2ecf20Sopenharmony_ci * also we need to tell the driver to start using ICT interrupt. 
/* The device is going up: inform it that we are using the ICT interrupt
 * table, and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG = 0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

/* The device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}
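/*
 * The two functions below are the hard-IRQ halves of the interrupt
 * handling: they run in hard interrupt context, do the minimum work
 * possible (for legacy INTA, masking further interrupts via
 * CSR_INT_MASK), and return IRQ_WAKE_THREAD so that the matching
 * threaded handler performs the actual servicing (see the registration
 * sketch at the end of this file).
 */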
irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}

irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}
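/*
 * With MSI-X, the device reports interrupt causes in two registers, one
 * for FH (DMA) causes and one for HW causes.  The handler below reads
 * both under irq_lock, writes the raw values straight back to
 * acknowledge them (so the same cause is not handled twice), and then
 * dispatches only on the bits enabled in fh_mask/hw_mask.
 */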
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh, inta_hw;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);
	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the cause registers to avoid handling the same cause twice.
	 */
	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      inta_fh, trans_pcie->fh_mask,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
		if (inta_fh & ~trans_pcie->fh_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta_fh & ~trans_pcie->fh_mask);
	}

	inta_fh &= trans_pcie->fh_mask;

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 0);
		local_bh_enable();
	}

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 1);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		/*
		 * Wake up the uCode load routine,
		 * now that the load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
	}

	/* After checking the FH register, check the HW register */
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      inta_hw, trans_pcie->hw_mask,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
		if (inta_hw & ~trans_pcie->hw_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt 0x%08x\n",
				      inta_hw & ~trans_pcie->hw_mask);
	}

	inta_hw &= trans_pcie->hw_mask;

	/* The Alive notification via the Rx interrupt will do the real work */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->trans_cfg->gen2) {
			/* We can restock, since the firmware configured the RFH */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}
	}
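	/*
	 * The WAKEUP cause below is shared between two flows: during D3
	 * (Sx) transitions the firmware publishes a sleep notification
	 * through prph_info, which is what lets the handler tell a
	 * suspend/resume completion apart from an ordinary power-save
	 * wakeup.
	 */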
	/*
	 * In some rare cases when the HW is in a bad state, we may
	 * get this interrupt too early, when prph_info is still NULL.
	 * So make sure that it's not NULL to prevent crashing.
	 */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
		u32 sleep_notif =
			le32_to_cpu(trans_pcie->prph_info->sleep_notif);

		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
			IWL_DEBUG_ISR(trans,
				      "Sx interrupt: sleep notification = 0x%x\n",
				      sleep_notif);
			trans_pcie->sx_complete = true;
			wake_up(&trans_pcie->sx_waitq);
		} else {
			/* uCode wakes up after power-down sleep */
			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
			iwl_pcie_rxq_check_wrptr(trans);
			iwl_pcie_txq_check_wrptrs(trans);

			isr_stats->wakeup++;
		}
	}

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) {
		/* Reflect the IML transfer status */
		int res = iwl_read32(trans, CSR_IML_RESP_ADDR);

		IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
		if (res == IWL_IMAGE_RESP_FAIL) {
			isr_stats->sw++;
			iwl_pcie_irq_handle_error(trans);
		}
	}

	/* The chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}

	/* The HW RF-kill switch was toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
		iwl_pcie_handle_rfkill_irq(trans);

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		isr_stats->hw++;
		trans->dbg.hw_error = true;
		iwl_pcie_irq_handle_error(trans);
	}

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}
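/*
 * For context (an illustrative sketch, not code from this file): the
 * hard-IRQ and threaded halves above are expected to be paired at
 * registration time roughly like this, with iwl_pcie_isr() masking
 * interrupts and IRQ_WAKE_THREAD handing control to the thread:
 *
 *	ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
 *					iwl_pcie_isr,
 *					iwl_pcie_irq_handler,
 *					IRQF_SHARED, DRV_NAME, trans);
 *
 * For MSI-X, each vector is instead bound to iwl_pcie_msix_isr() plus
 * iwl_pcie_irq_msix_handler(), with its msix_entry passed as dev_id.
 */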