/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT 558c2ecf20Sopenharmony_ci * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 568c2ecf20Sopenharmony_ci * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 578c2ecf20Sopenharmony_ci * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 588c2ecf20Sopenharmony_ci * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 598c2ecf20Sopenharmony_ci * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 608c2ecf20Sopenharmony_ci * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 618c2ecf20Sopenharmony_ci * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 628c2ecf20Sopenharmony_ci * 638c2ecf20Sopenharmony_ci *****************************************************************************/ 648c2ecf20Sopenharmony_ci#include <linux/etherdevice.h> 658c2ecf20Sopenharmony_ci#include <linux/ieee80211.h> 668c2ecf20Sopenharmony_ci#include <linux/slab.h> 678c2ecf20Sopenharmony_ci#include <linux/sched.h> 688c2ecf20Sopenharmony_ci#include <net/ip6_checksum.h> 698c2ecf20Sopenharmony_ci#include <net/tso.h> 708c2ecf20Sopenharmony_ci 718c2ecf20Sopenharmony_ci#include "iwl-debug.h" 728c2ecf20Sopenharmony_ci#include "iwl-csr.h" 738c2ecf20Sopenharmony_ci#include "iwl-prph.h" 748c2ecf20Sopenharmony_ci#include "iwl-io.h" 758c2ecf20Sopenharmony_ci#include "iwl-scd.h" 768c2ecf20Sopenharmony_ci#include "iwl-op-mode.h" 778c2ecf20Sopenharmony_ci#include "internal.h" 788c2ecf20Sopenharmony_ci#include "fw/api/tx.h" 798c2ecf20Sopenharmony_ci 808c2ecf20Sopenharmony_ci/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 818c2ecf20Sopenharmony_ci * DMA services 828c2ecf20Sopenharmony_ci * 838c2ecf20Sopenharmony_ci * Theory of operation 848c2ecf20Sopenharmony_ci * 858c2ecf20Sopenharmony_ci * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer 868c2ecf20Sopenharmony_ci * of buffer descriptors, each of which points to one or more data 
buffers for 878c2ecf20Sopenharmony_ci * the device to read from or fill. Driver and device exchange status of each 888c2ecf20Sopenharmony_ci * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty 898c2ecf20Sopenharmony_ci * entries in each circular buffer, to protect against confusing empty and full 908c2ecf20Sopenharmony_ci * queue states. 918c2ecf20Sopenharmony_ci * 928c2ecf20Sopenharmony_ci * The device reads or writes the data in the queues via the device's several 938c2ecf20Sopenharmony_ci * DMA/FIFO channels. Each queue is mapped to a single DMA channel. 948c2ecf20Sopenharmony_ci * 958c2ecf20Sopenharmony_ci * For Tx queue, there are low mark and high mark limits. If, after queuing 968c2ecf20Sopenharmony_ci * the packet for Tx, free space become < low mark, Tx queue stopped. When 978c2ecf20Sopenharmony_ci * reclaiming packets (on 'tx done IRQ), if free space become > high mark, 988c2ecf20Sopenharmony_ci * Tx queue resumed. 998c2ecf20Sopenharmony_ci * 1008c2ecf20Sopenharmony_ci ***************************************************/ 1018c2ecf20Sopenharmony_ci 1028c2ecf20Sopenharmony_ci 1038c2ecf20Sopenharmony_ciint iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, 1048c2ecf20Sopenharmony_ci struct iwl_dma_ptr *ptr, size_t size) 1058c2ecf20Sopenharmony_ci{ 1068c2ecf20Sopenharmony_ci if (WARN_ON(ptr->addr)) 1078c2ecf20Sopenharmony_ci return -EINVAL; 1088c2ecf20Sopenharmony_ci 1098c2ecf20Sopenharmony_ci ptr->addr = dma_alloc_coherent(trans->dev, size, 1108c2ecf20Sopenharmony_ci &ptr->dma, GFP_KERNEL); 1118c2ecf20Sopenharmony_ci if (!ptr->addr) 1128c2ecf20Sopenharmony_ci return -ENOMEM; 1138c2ecf20Sopenharmony_ci ptr->size = size; 1148c2ecf20Sopenharmony_ci return 0; 1158c2ecf20Sopenharmony_ci} 1168c2ecf20Sopenharmony_ci 1178c2ecf20Sopenharmony_civoid iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) 1188c2ecf20Sopenharmony_ci{ 1198c2ecf20Sopenharmony_ci if (unlikely(!ptr->addr)) 1208c2ecf20Sopenharmony_ci return; 
1218c2ecf20Sopenharmony_ci 1228c2ecf20Sopenharmony_ci dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); 1238c2ecf20Sopenharmony_ci memset(ptr, 0, sizeof(*ptr)); 1248c2ecf20Sopenharmony_ci} 1258c2ecf20Sopenharmony_ci 1268c2ecf20Sopenharmony_ci/* 1278c2ecf20Sopenharmony_ci * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware 1288c2ecf20Sopenharmony_ci */ 1298c2ecf20Sopenharmony_cistatic void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, 1308c2ecf20Sopenharmony_ci struct iwl_txq *txq) 1318c2ecf20Sopenharmony_ci{ 1328c2ecf20Sopenharmony_ci u32 reg = 0; 1338c2ecf20Sopenharmony_ci int txq_id = txq->id; 1348c2ecf20Sopenharmony_ci 1358c2ecf20Sopenharmony_ci lockdep_assert_held(&txq->lock); 1368c2ecf20Sopenharmony_ci 1378c2ecf20Sopenharmony_ci /* 1388c2ecf20Sopenharmony_ci * explicitly wake up the NIC if: 1398c2ecf20Sopenharmony_ci * 1. shadow registers aren't enabled 1408c2ecf20Sopenharmony_ci * 2. NIC is woken up for CMD regardless of shadow outside this function 1418c2ecf20Sopenharmony_ci * 3. there is a chance that the NIC is asleep 1428c2ecf20Sopenharmony_ci */ 1438c2ecf20Sopenharmony_ci if (!trans->trans_cfg->base_params->shadow_reg_enable && 1448c2ecf20Sopenharmony_ci txq_id != trans->txqs.cmd.q_id && 1458c2ecf20Sopenharmony_ci test_bit(STATUS_TPOWER_PMI, &trans->status)) { 1468c2ecf20Sopenharmony_ci /* 1478c2ecf20Sopenharmony_ci * wake up nic if it's powered down ... 1488c2ecf20Sopenharmony_ci * uCode will wake up, and interrupt us again, so next 1498c2ecf20Sopenharmony_ci * time we'll skip this part. 
1508c2ecf20Sopenharmony_ci */ 1518c2ecf20Sopenharmony_ci reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 1528c2ecf20Sopenharmony_ci 1538c2ecf20Sopenharmony_ci if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 1548c2ecf20Sopenharmony_ci IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", 1558c2ecf20Sopenharmony_ci txq_id, reg); 1568c2ecf20Sopenharmony_ci iwl_set_bit(trans, CSR_GP_CNTRL, 1578c2ecf20Sopenharmony_ci CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1588c2ecf20Sopenharmony_ci txq->need_update = true; 1598c2ecf20Sopenharmony_ci return; 1608c2ecf20Sopenharmony_ci } 1618c2ecf20Sopenharmony_ci } 1628c2ecf20Sopenharmony_ci 1638c2ecf20Sopenharmony_ci /* 1648c2ecf20Sopenharmony_ci * if not in power-save mode, uCode will never sleep when we're 1658c2ecf20Sopenharmony_ci * trying to tx (during RFKILL, we're not trying to tx). 1668c2ecf20Sopenharmony_ci */ 1678c2ecf20Sopenharmony_ci IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); 1688c2ecf20Sopenharmony_ci if (!txq->block) 1698c2ecf20Sopenharmony_ci iwl_write32(trans, HBUS_TARG_WRPTR, 1708c2ecf20Sopenharmony_ci txq->write_ptr | (txq_id << 8)); 1718c2ecf20Sopenharmony_ci} 1728c2ecf20Sopenharmony_ci 1738c2ecf20Sopenharmony_civoid iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) 1748c2ecf20Sopenharmony_ci{ 1758c2ecf20Sopenharmony_ci int i; 1768c2ecf20Sopenharmony_ci 1778c2ecf20Sopenharmony_ci for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { 1788c2ecf20Sopenharmony_ci struct iwl_txq *txq = trans->txqs.txq[i]; 1798c2ecf20Sopenharmony_ci 1808c2ecf20Sopenharmony_ci if (!test_bit(i, trans->txqs.queue_used)) 1818c2ecf20Sopenharmony_ci continue; 1828c2ecf20Sopenharmony_ci 1838c2ecf20Sopenharmony_ci spin_lock_bh(&txq->lock); 1848c2ecf20Sopenharmony_ci if (txq->need_update) { 1858c2ecf20Sopenharmony_ci iwl_pcie_txq_inc_wr_ptr(trans, txq); 1868c2ecf20Sopenharmony_ci txq->need_update = false; 1878c2ecf20Sopenharmony_ci } 1888c2ecf20Sopenharmony_ci spin_unlock_bh(&txq->lock); 
1898c2ecf20Sopenharmony_ci } 1908c2ecf20Sopenharmony_ci} 1918c2ecf20Sopenharmony_ci 1928c2ecf20Sopenharmony_cistatic inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, 1938c2ecf20Sopenharmony_ci u8 idx, dma_addr_t addr, u16 len) 1948c2ecf20Sopenharmony_ci{ 1958c2ecf20Sopenharmony_ci struct iwl_tfd *tfd_fh = (void *)tfd; 1968c2ecf20Sopenharmony_ci struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; 1978c2ecf20Sopenharmony_ci 1988c2ecf20Sopenharmony_ci u16 hi_n_len = len << 4; 1998c2ecf20Sopenharmony_ci 2008c2ecf20Sopenharmony_ci put_unaligned_le32(addr, &tb->lo); 2018c2ecf20Sopenharmony_ci hi_n_len |= iwl_get_dma_hi_addr(addr); 2028c2ecf20Sopenharmony_ci 2038c2ecf20Sopenharmony_ci tb->hi_n_len = cpu_to_le16(hi_n_len); 2048c2ecf20Sopenharmony_ci 2058c2ecf20Sopenharmony_ci tfd_fh->num_tbs = idx + 1; 2068c2ecf20Sopenharmony_ci} 2078c2ecf20Sopenharmony_ci 2088c2ecf20Sopenharmony_ci/* 2098c2ecf20Sopenharmony_ci * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 2108c2ecf20Sopenharmony_ci * @trans - transport private data 2118c2ecf20Sopenharmony_ci * @txq - tx queue 2128c2ecf20Sopenharmony_ci * @dma_dir - the direction of the DMA mapping 2138c2ecf20Sopenharmony_ci * 2148c2ecf20Sopenharmony_ci * Does NOT advance any TFD circular buffer read/write indexes 2158c2ecf20Sopenharmony_ci * Does NOT free the TFD itself (which is within circular buffer) 2168c2ecf20Sopenharmony_ci */ 2178c2ecf20Sopenharmony_civoid iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 2188c2ecf20Sopenharmony_ci{ 2198c2ecf20Sopenharmony_ci /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 2208c2ecf20Sopenharmony_ci * idx is bounded by n_window 2218c2ecf20Sopenharmony_ci */ 2228c2ecf20Sopenharmony_ci int rd_ptr = txq->read_ptr; 2238c2ecf20Sopenharmony_ci int idx = iwl_txq_get_cmd_index(txq, rd_ptr); 2248c2ecf20Sopenharmony_ci 2258c2ecf20Sopenharmony_ci lockdep_assert_held(&txq->lock); 2268c2ecf20Sopenharmony_ci 2278c2ecf20Sopenharmony_ci /* We have only 
q->n_window txq->entries, but we use 2288c2ecf20Sopenharmony_ci * TFD_QUEUE_SIZE_MAX tfds 2298c2ecf20Sopenharmony_ci */ 2308c2ecf20Sopenharmony_ci iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); 2318c2ecf20Sopenharmony_ci 2328c2ecf20Sopenharmony_ci /* free SKB */ 2338c2ecf20Sopenharmony_ci if (txq->entries) { 2348c2ecf20Sopenharmony_ci struct sk_buff *skb; 2358c2ecf20Sopenharmony_ci 2368c2ecf20Sopenharmony_ci skb = txq->entries[idx].skb; 2378c2ecf20Sopenharmony_ci 2388c2ecf20Sopenharmony_ci /* Can be called from irqs-disabled context 2398c2ecf20Sopenharmony_ci * If skb is not NULL, it means that the whole queue is being 2408c2ecf20Sopenharmony_ci * freed and that the queue is not empty - free the skb 2418c2ecf20Sopenharmony_ci */ 2428c2ecf20Sopenharmony_ci if (skb) { 2438c2ecf20Sopenharmony_ci iwl_op_mode_free_skb(trans->op_mode, skb); 2448c2ecf20Sopenharmony_ci txq->entries[idx].skb = NULL; 2458c2ecf20Sopenharmony_ci } 2468c2ecf20Sopenharmony_ci } 2478c2ecf20Sopenharmony_ci} 2488c2ecf20Sopenharmony_ci 2498c2ecf20Sopenharmony_cistatic int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 2508c2ecf20Sopenharmony_ci dma_addr_t addr, u16 len, bool reset) 2518c2ecf20Sopenharmony_ci{ 2528c2ecf20Sopenharmony_ci void *tfd; 2538c2ecf20Sopenharmony_ci u32 num_tbs; 2548c2ecf20Sopenharmony_ci 2558c2ecf20Sopenharmony_ci tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr; 2568c2ecf20Sopenharmony_ci 2578c2ecf20Sopenharmony_ci if (reset) 2588c2ecf20Sopenharmony_ci memset(tfd, 0, trans->txqs.tfd.size); 2598c2ecf20Sopenharmony_ci 2608c2ecf20Sopenharmony_ci num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); 2618c2ecf20Sopenharmony_ci 2628c2ecf20Sopenharmony_ci /* Each TFD can point to a maximum max_tbs Tx buffers */ 2638c2ecf20Sopenharmony_ci if (num_tbs >= trans->txqs.tfd.max_tbs) { 2648c2ecf20Sopenharmony_ci IWL_ERR(trans, "Error can not send more than %d chunks\n", 2658c2ecf20Sopenharmony_ci trans->txqs.tfd.max_tbs); 
2668c2ecf20Sopenharmony_ci return -EINVAL; 2678c2ecf20Sopenharmony_ci } 2688c2ecf20Sopenharmony_ci 2698c2ecf20Sopenharmony_ci if (WARN(addr & ~IWL_TX_DMA_MASK, 2708c2ecf20Sopenharmony_ci "Unaligned address = %llx\n", (unsigned long long)addr)) 2718c2ecf20Sopenharmony_ci return -EINVAL; 2728c2ecf20Sopenharmony_ci 2738c2ecf20Sopenharmony_ci iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len); 2748c2ecf20Sopenharmony_ci 2758c2ecf20Sopenharmony_ci return num_tbs; 2768c2ecf20Sopenharmony_ci} 2778c2ecf20Sopenharmony_ci 2788c2ecf20Sopenharmony_cistatic void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) 2798c2ecf20Sopenharmony_ci{ 2808c2ecf20Sopenharmony_ci struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2818c2ecf20Sopenharmony_ci 2828c2ecf20Sopenharmony_ci lockdep_assert_held(&trans_pcie->reg_lock); 2838c2ecf20Sopenharmony_ci 2848c2ecf20Sopenharmony_ci if (!trans->trans_cfg->base_params->apmg_wake_up_wa) 2858c2ecf20Sopenharmony_ci return; 2868c2ecf20Sopenharmony_ci if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) 2878c2ecf20Sopenharmony_ci return; 2888c2ecf20Sopenharmony_ci 2898c2ecf20Sopenharmony_ci trans_pcie->cmd_hold_nic_awake = false; 2908c2ecf20Sopenharmony_ci __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 2918c2ecf20Sopenharmony_ci CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2928c2ecf20Sopenharmony_ci} 2938c2ecf20Sopenharmony_ci 2948c2ecf20Sopenharmony_ci/* 2958c2ecf20Sopenharmony_ci * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's 2968c2ecf20Sopenharmony_ci */ 2978c2ecf20Sopenharmony_cistatic void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) 2988c2ecf20Sopenharmony_ci{ 2998c2ecf20Sopenharmony_ci struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3008c2ecf20Sopenharmony_ci struct iwl_txq *txq = trans->txqs.txq[txq_id]; 3018c2ecf20Sopenharmony_ci 3028c2ecf20Sopenharmony_ci if (!txq) { 3038c2ecf20Sopenharmony_ci IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); 
3048c2ecf20Sopenharmony_ci return; 3058c2ecf20Sopenharmony_ci } 3068c2ecf20Sopenharmony_ci 3078c2ecf20Sopenharmony_ci spin_lock_bh(&txq->lock); 3088c2ecf20Sopenharmony_ci while (txq->write_ptr != txq->read_ptr) { 3098c2ecf20Sopenharmony_ci IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", 3108c2ecf20Sopenharmony_ci txq_id, txq->read_ptr); 3118c2ecf20Sopenharmony_ci 3128c2ecf20Sopenharmony_ci if (txq_id != trans->txqs.cmd.q_id) { 3138c2ecf20Sopenharmony_ci struct sk_buff *skb = txq->entries[txq->read_ptr].skb; 3148c2ecf20Sopenharmony_ci 3158c2ecf20Sopenharmony_ci if (WARN_ON_ONCE(!skb)) 3168c2ecf20Sopenharmony_ci continue; 3178c2ecf20Sopenharmony_ci 3188c2ecf20Sopenharmony_ci iwl_txq_free_tso_page(trans, skb); 3198c2ecf20Sopenharmony_ci } 3208c2ecf20Sopenharmony_ci iwl_pcie_txq_free_tfd(trans, txq); 3218c2ecf20Sopenharmony_ci txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); 3228c2ecf20Sopenharmony_ci 3238c2ecf20Sopenharmony_ci if (txq->read_ptr == txq->write_ptr) { 3248c2ecf20Sopenharmony_ci spin_lock(&trans_pcie->reg_lock); 3258c2ecf20Sopenharmony_ci if (txq_id == trans->txqs.cmd.q_id) 3268c2ecf20Sopenharmony_ci iwl_pcie_clear_cmd_in_flight(trans); 3278c2ecf20Sopenharmony_ci spin_unlock(&trans_pcie->reg_lock); 3288c2ecf20Sopenharmony_ci } 3298c2ecf20Sopenharmony_ci } 3308c2ecf20Sopenharmony_ci 3318c2ecf20Sopenharmony_ci while (!skb_queue_empty(&txq->overflow_q)) { 3328c2ecf20Sopenharmony_ci struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); 3338c2ecf20Sopenharmony_ci 3348c2ecf20Sopenharmony_ci iwl_op_mode_free_skb(trans->op_mode, skb); 3358c2ecf20Sopenharmony_ci } 3368c2ecf20Sopenharmony_ci 3378c2ecf20Sopenharmony_ci spin_unlock_bh(&txq->lock); 3388c2ecf20Sopenharmony_ci 3398c2ecf20Sopenharmony_ci /* just in case - this queue may have been stopped */ 3408c2ecf20Sopenharmony_ci iwl_wake_queue(trans, txq); 3418c2ecf20Sopenharmony_ci} 3428c2ecf20Sopenharmony_ci 3438c2ecf20Sopenharmony_ci/* 3448c2ecf20Sopenharmony_ci * iwl_pcie_txq_free - Deallocate DMA queue. 
3458c2ecf20Sopenharmony_ci * @txq: Transmit queue to deallocate. 3468c2ecf20Sopenharmony_ci * 3478c2ecf20Sopenharmony_ci * Empty queue by removing and destroying all BD's. 3488c2ecf20Sopenharmony_ci * Free all buffers. 3498c2ecf20Sopenharmony_ci * 0-fill, but do not free "txq" descriptor structure. 3508c2ecf20Sopenharmony_ci */ 3518c2ecf20Sopenharmony_cistatic void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) 3528c2ecf20Sopenharmony_ci{ 3538c2ecf20Sopenharmony_ci struct iwl_txq *txq = trans->txqs.txq[txq_id]; 3548c2ecf20Sopenharmony_ci struct device *dev = trans->dev; 3558c2ecf20Sopenharmony_ci int i; 3568c2ecf20Sopenharmony_ci 3578c2ecf20Sopenharmony_ci if (WARN_ON(!txq)) 3588c2ecf20Sopenharmony_ci return; 3598c2ecf20Sopenharmony_ci 3608c2ecf20Sopenharmony_ci iwl_pcie_txq_unmap(trans, txq_id); 3618c2ecf20Sopenharmony_ci 3628c2ecf20Sopenharmony_ci /* De-alloc array of command/tx buffers */ 3638c2ecf20Sopenharmony_ci if (txq_id == trans->txqs.cmd.q_id) 3648c2ecf20Sopenharmony_ci for (i = 0; i < txq->n_window; i++) { 3658c2ecf20Sopenharmony_ci kfree_sensitive(txq->entries[i].cmd); 3668c2ecf20Sopenharmony_ci kfree_sensitive(txq->entries[i].free_buf); 3678c2ecf20Sopenharmony_ci } 3688c2ecf20Sopenharmony_ci 3698c2ecf20Sopenharmony_ci /* De-alloc circular buffer of TFDs */ 3708c2ecf20Sopenharmony_ci if (txq->tfds) { 3718c2ecf20Sopenharmony_ci dma_free_coherent(dev, 3728c2ecf20Sopenharmony_ci trans->txqs.tfd.size * 3738c2ecf20Sopenharmony_ci trans->trans_cfg->base_params->max_tfd_queue_size, 3748c2ecf20Sopenharmony_ci txq->tfds, txq->dma_addr); 3758c2ecf20Sopenharmony_ci txq->dma_addr = 0; 3768c2ecf20Sopenharmony_ci txq->tfds = NULL; 3778c2ecf20Sopenharmony_ci 3788c2ecf20Sopenharmony_ci dma_free_coherent(dev, 3798c2ecf20Sopenharmony_ci sizeof(*txq->first_tb_bufs) * txq->n_window, 3808c2ecf20Sopenharmony_ci txq->first_tb_bufs, txq->first_tb_dma); 3818c2ecf20Sopenharmony_ci } 3828c2ecf20Sopenharmony_ci 3838c2ecf20Sopenharmony_ci kfree(txq->entries); 
3848c2ecf20Sopenharmony_ci txq->entries = NULL; 3858c2ecf20Sopenharmony_ci 3868c2ecf20Sopenharmony_ci del_timer_sync(&txq->stuck_timer); 3878c2ecf20Sopenharmony_ci 3888c2ecf20Sopenharmony_ci /* 0-fill queue descriptor structure */ 3898c2ecf20Sopenharmony_ci memset(txq, 0, sizeof(*txq)); 3908c2ecf20Sopenharmony_ci} 3918c2ecf20Sopenharmony_ci 3928c2ecf20Sopenharmony_civoid iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) 3938c2ecf20Sopenharmony_ci{ 3948c2ecf20Sopenharmony_ci struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3958c2ecf20Sopenharmony_ci int nq = trans->trans_cfg->base_params->num_of_queues; 3968c2ecf20Sopenharmony_ci int chan; 3978c2ecf20Sopenharmony_ci u32 reg_val; 3988c2ecf20Sopenharmony_ci int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - 3998c2ecf20Sopenharmony_ci SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); 4008c2ecf20Sopenharmony_ci 4018c2ecf20Sopenharmony_ci /* make sure all queue are not stopped/used */ 4028c2ecf20Sopenharmony_ci memset(trans->txqs.queue_stopped, 0, 4038c2ecf20Sopenharmony_ci sizeof(trans->txqs.queue_stopped)); 4048c2ecf20Sopenharmony_ci memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); 4058c2ecf20Sopenharmony_ci 4068c2ecf20Sopenharmony_ci trans_pcie->scd_base_addr = 4078c2ecf20Sopenharmony_ci iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); 4088c2ecf20Sopenharmony_ci 4098c2ecf20Sopenharmony_ci WARN_ON(scd_base_addr != 0 && 4108c2ecf20Sopenharmony_ci scd_base_addr != trans_pcie->scd_base_addr); 4118c2ecf20Sopenharmony_ci 4128c2ecf20Sopenharmony_ci /* reset context data, TX status and translation data */ 4138c2ecf20Sopenharmony_ci iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + 4148c2ecf20Sopenharmony_ci SCD_CONTEXT_MEM_LOWER_BOUND, 4158c2ecf20Sopenharmony_ci NULL, clear_dwords); 4168c2ecf20Sopenharmony_ci 4178c2ecf20Sopenharmony_ci iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 4188c2ecf20Sopenharmony_ci trans->txqs.scd_bc_tbls.dma >> 10); 4198c2ecf20Sopenharmony_ci 
4208c2ecf20Sopenharmony_ci /* The chain extension of the SCD doesn't work well. This feature is 4218c2ecf20Sopenharmony_ci * enabled by default by the HW, so we need to disable it manually. 4228c2ecf20Sopenharmony_ci */ 4238c2ecf20Sopenharmony_ci if (trans->trans_cfg->base_params->scd_chain_ext_wa) 4248c2ecf20Sopenharmony_ci iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); 4258c2ecf20Sopenharmony_ci 4268c2ecf20Sopenharmony_ci iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id, 4278c2ecf20Sopenharmony_ci trans->txqs.cmd.fifo, 4288c2ecf20Sopenharmony_ci trans->txqs.cmd.wdg_timeout); 4298c2ecf20Sopenharmony_ci 4308c2ecf20Sopenharmony_ci /* Activate all Tx DMA/FIFO channels */ 4318c2ecf20Sopenharmony_ci iwl_scd_activate_fifos(trans); 4328c2ecf20Sopenharmony_ci 4338c2ecf20Sopenharmony_ci /* Enable DMA channel */ 4348c2ecf20Sopenharmony_ci for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) 4358c2ecf20Sopenharmony_ci iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 4368c2ecf20Sopenharmony_ci FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 4378c2ecf20Sopenharmony_ci FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); 4388c2ecf20Sopenharmony_ci 4398c2ecf20Sopenharmony_ci /* Update FH chicken bits */ 4408c2ecf20Sopenharmony_ci reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); 4418c2ecf20Sopenharmony_ci iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, 4428c2ecf20Sopenharmony_ci reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 4438c2ecf20Sopenharmony_ci 4448c2ecf20Sopenharmony_ci /* Enable L1-Active */ 4458c2ecf20Sopenharmony_ci if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) 4468c2ecf20Sopenharmony_ci iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 4478c2ecf20Sopenharmony_ci APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 4488c2ecf20Sopenharmony_ci} 4498c2ecf20Sopenharmony_ci 4508c2ecf20Sopenharmony_civoid iwl_trans_pcie_tx_reset(struct iwl_trans *trans) 4518c2ecf20Sopenharmony_ci{ 4528c2ecf20Sopenharmony_ci struct iwl_trans_pcie *trans_pcie = 
IWL_TRANS_GET_PCIE_TRANS(trans); 4538c2ecf20Sopenharmony_ci int txq_id; 4548c2ecf20Sopenharmony_ci 4558c2ecf20Sopenharmony_ci /* 4568c2ecf20Sopenharmony_ci * we should never get here in gen2 trans mode return early to avoid 4578c2ecf20Sopenharmony_ci * having invalid accesses 4588c2ecf20Sopenharmony_ci */ 4598c2ecf20Sopenharmony_ci if (WARN_ON_ONCE(trans->trans_cfg->gen2)) 4608c2ecf20Sopenharmony_ci return; 4618c2ecf20Sopenharmony_ci 4628c2ecf20Sopenharmony_ci for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; 4638c2ecf20Sopenharmony_ci txq_id++) { 4648c2ecf20Sopenharmony_ci struct iwl_txq *txq = trans->txqs.txq[txq_id]; 4658c2ecf20Sopenharmony_ci if (trans->trans_cfg->use_tfh) 4668c2ecf20Sopenharmony_ci iwl_write_direct64(trans, 4678c2ecf20Sopenharmony_ci FH_MEM_CBBC_QUEUE(trans, txq_id), 4688c2ecf20Sopenharmony_ci txq->dma_addr); 4698c2ecf20Sopenharmony_ci else 4708c2ecf20Sopenharmony_ci iwl_write_direct32(trans, 4718c2ecf20Sopenharmony_ci FH_MEM_CBBC_QUEUE(trans, txq_id), 4728c2ecf20Sopenharmony_ci txq->dma_addr >> 8); 4738c2ecf20Sopenharmony_ci iwl_pcie_txq_unmap(trans, txq_id); 4748c2ecf20Sopenharmony_ci txq->read_ptr = 0; 4758c2ecf20Sopenharmony_ci txq->write_ptr = 0; 4768c2ecf20Sopenharmony_ci } 4778c2ecf20Sopenharmony_ci 4788c2ecf20Sopenharmony_ci /* Tell NIC where to find the "keep warm" buffer */ 4798c2ecf20Sopenharmony_ci iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 4808c2ecf20Sopenharmony_ci trans_pcie->kw.dma >> 4); 4818c2ecf20Sopenharmony_ci 4828c2ecf20Sopenharmony_ci /* 4838c2ecf20Sopenharmony_ci * Send 0 as the scd_base_addr since the device may have be reset 4848c2ecf20Sopenharmony_ci * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will 4858c2ecf20Sopenharmony_ci * contain garbage. 
4868c2ecf20Sopenharmony_ci */ 4878c2ecf20Sopenharmony_ci iwl_pcie_tx_start(trans, 0); 4888c2ecf20Sopenharmony_ci} 4898c2ecf20Sopenharmony_ci 4908c2ecf20Sopenharmony_cistatic void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) 4918c2ecf20Sopenharmony_ci{ 4928c2ecf20Sopenharmony_ci struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 4938c2ecf20Sopenharmony_ci unsigned long flags; 4948c2ecf20Sopenharmony_ci int ch, ret; 4958c2ecf20Sopenharmony_ci u32 mask = 0; 4968c2ecf20Sopenharmony_ci 4978c2ecf20Sopenharmony_ci spin_lock(&trans_pcie->irq_lock); 4988c2ecf20Sopenharmony_ci 4998c2ecf20Sopenharmony_ci if (!iwl_trans_grab_nic_access(trans, &flags)) 5008c2ecf20Sopenharmony_ci goto out; 5018c2ecf20Sopenharmony_ci 5028c2ecf20Sopenharmony_ci /* Stop each Tx DMA channel */ 5038c2ecf20Sopenharmony_ci for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { 5048c2ecf20Sopenharmony_ci iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 5058c2ecf20Sopenharmony_ci mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); 5068c2ecf20Sopenharmony_ci } 5078c2ecf20Sopenharmony_ci 5088c2ecf20Sopenharmony_ci /* Wait for DMA channels to be idle */ 5098c2ecf20Sopenharmony_ci ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); 5108c2ecf20Sopenharmony_ci if (ret < 0) 5118c2ecf20Sopenharmony_ci IWL_ERR(trans, 5128c2ecf20Sopenharmony_ci "Failing on timeout while stopping DMA channel %d [0x%08x]\n", 5138c2ecf20Sopenharmony_ci ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); 5148c2ecf20Sopenharmony_ci 5158c2ecf20Sopenharmony_ci iwl_trans_release_nic_access(trans, &flags); 5168c2ecf20Sopenharmony_ci 5178c2ecf20Sopenharmony_ciout: 5188c2ecf20Sopenharmony_ci spin_unlock(&trans_pcie->irq_lock); 5198c2ecf20Sopenharmony_ci} 5208c2ecf20Sopenharmony_ci 5218c2ecf20Sopenharmony_ci/* 5228c2ecf20Sopenharmony_ci * iwl_pcie_tx_stop - Stop all Tx DMA channels 5238c2ecf20Sopenharmony_ci */ 5248c2ecf20Sopenharmony_ciint iwl_pcie_tx_stop(struct iwl_trans *trans) 5258c2ecf20Sopenharmony_ci{ 
5268c2ecf20Sopenharmony_ci struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 5278c2ecf20Sopenharmony_ci int txq_id; 5288c2ecf20Sopenharmony_ci 5298c2ecf20Sopenharmony_ci /* Turn off all Tx DMA fifos */ 5308c2ecf20Sopenharmony_ci iwl_scd_deactivate_fifos(trans); 5318c2ecf20Sopenharmony_ci 5328c2ecf20Sopenharmony_ci /* Turn off all Tx DMA channels */ 5338c2ecf20Sopenharmony_ci iwl_pcie_tx_stop_fh(trans); 5348c2ecf20Sopenharmony_ci 5358c2ecf20Sopenharmony_ci /* 5368c2ecf20Sopenharmony_ci * This function can be called before the op_mode disabled the 5378c2ecf20Sopenharmony_ci * queues. This happens when we have an rfkill interrupt. 5388c2ecf20Sopenharmony_ci * Since we stop Tx altogether - mark the queues as stopped. 5398c2ecf20Sopenharmony_ci */ 5408c2ecf20Sopenharmony_ci memset(trans->txqs.queue_stopped, 0, 5418c2ecf20Sopenharmony_ci sizeof(trans->txqs.queue_stopped)); 5428c2ecf20Sopenharmony_ci memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); 5438c2ecf20Sopenharmony_ci 5448c2ecf20Sopenharmony_ci /* This can happen: start_hw, stop_device */ 5458c2ecf20Sopenharmony_ci if (!trans_pcie->txq_memory) 5468c2ecf20Sopenharmony_ci return 0; 5478c2ecf20Sopenharmony_ci 5488c2ecf20Sopenharmony_ci /* Unmap DMA from host system and free skb's */ 5498c2ecf20Sopenharmony_ci for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; 5508c2ecf20Sopenharmony_ci txq_id++) 5518c2ecf20Sopenharmony_ci iwl_pcie_txq_unmap(trans, txq_id); 5528c2ecf20Sopenharmony_ci 5538c2ecf20Sopenharmony_ci return 0; 5548c2ecf20Sopenharmony_ci} 5558c2ecf20Sopenharmony_ci 5568c2ecf20Sopenharmony_ci/* 5578c2ecf20Sopenharmony_ci * iwl_trans_tx_free - Free TXQ Context 5588c2ecf20Sopenharmony_ci * 5598c2ecf20Sopenharmony_ci * Destroy all TX DMA queues and structures 5608c2ecf20Sopenharmony_ci */ 5618c2ecf20Sopenharmony_civoid iwl_pcie_tx_free(struct iwl_trans *trans) 5628c2ecf20Sopenharmony_ci{ 5638c2ecf20Sopenharmony_ci int txq_id; 5648c2ecf20Sopenharmony_ci 
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 5658c2ecf20Sopenharmony_ci 5668c2ecf20Sopenharmony_ci memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); 5678c2ecf20Sopenharmony_ci 5688c2ecf20Sopenharmony_ci /* Tx queues */ 5698c2ecf20Sopenharmony_ci if (trans_pcie->txq_memory) { 5708c2ecf20Sopenharmony_ci for (txq_id = 0; 5718c2ecf20Sopenharmony_ci txq_id < trans->trans_cfg->base_params->num_of_queues; 5728c2ecf20Sopenharmony_ci txq_id++) { 5738c2ecf20Sopenharmony_ci iwl_pcie_txq_free(trans, txq_id); 5748c2ecf20Sopenharmony_ci trans->txqs.txq[txq_id] = NULL; 5758c2ecf20Sopenharmony_ci } 5768c2ecf20Sopenharmony_ci } 5778c2ecf20Sopenharmony_ci 5788c2ecf20Sopenharmony_ci kfree(trans_pcie->txq_memory); 5798c2ecf20Sopenharmony_ci trans_pcie->txq_memory = NULL; 5808c2ecf20Sopenharmony_ci 5818c2ecf20Sopenharmony_ci iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); 5828c2ecf20Sopenharmony_ci 5838c2ecf20Sopenharmony_ci iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls); 5848c2ecf20Sopenharmony_ci} 5858c2ecf20Sopenharmony_ci 5868c2ecf20Sopenharmony_ci/* 5878c2ecf20Sopenharmony_ci * iwl_pcie_tx_alloc - allocate TX context 5888c2ecf20Sopenharmony_ci * Allocate all Tx DMA structures and initialize them 5898c2ecf20Sopenharmony_ci */ 5908c2ecf20Sopenharmony_cistatic int iwl_pcie_tx_alloc(struct iwl_trans *trans) 5918c2ecf20Sopenharmony_ci{ 5928c2ecf20Sopenharmony_ci int ret; 5938c2ecf20Sopenharmony_ci int txq_id, slots_num; 5948c2ecf20Sopenharmony_ci struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 5958c2ecf20Sopenharmony_ci u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues; 5968c2ecf20Sopenharmony_ci 5978c2ecf20Sopenharmony_ci if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)) 5988c2ecf20Sopenharmony_ci return -EINVAL; 5998c2ecf20Sopenharmony_ci 6008c2ecf20Sopenharmony_ci bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl); 6018c2ecf20Sopenharmony_ci 6028c2ecf20Sopenharmony_ci /*It is not 
allowed to alloc twice, so warn when this happens. 6038c2ecf20Sopenharmony_ci * We cannot rely on the previous allocation, so free and fail */ 6048c2ecf20Sopenharmony_ci if (WARN_ON(trans_pcie->txq_memory)) { 6058c2ecf20Sopenharmony_ci ret = -EINVAL; 6068c2ecf20Sopenharmony_ci goto error; 6078c2ecf20Sopenharmony_ci } 6088c2ecf20Sopenharmony_ci 6098c2ecf20Sopenharmony_ci ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls, 6108c2ecf20Sopenharmony_ci bc_tbls_size); 6118c2ecf20Sopenharmony_ci if (ret) { 6128c2ecf20Sopenharmony_ci IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 6138c2ecf20Sopenharmony_ci goto error; 6148c2ecf20Sopenharmony_ci } 6158c2ecf20Sopenharmony_ci 6168c2ecf20Sopenharmony_ci /* Alloc keep-warm buffer */ 6178c2ecf20Sopenharmony_ci ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); 6188c2ecf20Sopenharmony_ci if (ret) { 6198c2ecf20Sopenharmony_ci IWL_ERR(trans, "Keep Warm allocation failed\n"); 6208c2ecf20Sopenharmony_ci goto error; 6218c2ecf20Sopenharmony_ci } 6228c2ecf20Sopenharmony_ci 6238c2ecf20Sopenharmony_ci trans_pcie->txq_memory = 6248c2ecf20Sopenharmony_ci kcalloc(trans->trans_cfg->base_params->num_of_queues, 6258c2ecf20Sopenharmony_ci sizeof(struct iwl_txq), GFP_KERNEL); 6268c2ecf20Sopenharmony_ci if (!trans_pcie->txq_memory) { 6278c2ecf20Sopenharmony_ci IWL_ERR(trans, "Not enough memory for txq\n"); 6288c2ecf20Sopenharmony_ci ret = -ENOMEM; 6298c2ecf20Sopenharmony_ci goto error; 6308c2ecf20Sopenharmony_ci } 6318c2ecf20Sopenharmony_ci 6328c2ecf20Sopenharmony_ci /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 6338c2ecf20Sopenharmony_ci for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; 6348c2ecf20Sopenharmony_ci txq_id++) { 6358c2ecf20Sopenharmony_ci bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); 6368c2ecf20Sopenharmony_ci 6378c2ecf20Sopenharmony_ci if (cmd_queue) 6388c2ecf20Sopenharmony_ci slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, 
6398c2ecf20Sopenharmony_ci trans->cfg->min_txq_size); 6408c2ecf20Sopenharmony_ci else 6418c2ecf20Sopenharmony_ci slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, 6428c2ecf20Sopenharmony_ci trans->cfg->min_256_ba_txq_size); 6438c2ecf20Sopenharmony_ci trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; 6448c2ecf20Sopenharmony_ci ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num, 6458c2ecf20Sopenharmony_ci cmd_queue); 6468c2ecf20Sopenharmony_ci if (ret) { 6478c2ecf20Sopenharmony_ci IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 6488c2ecf20Sopenharmony_ci goto error; 6498c2ecf20Sopenharmony_ci } 6508c2ecf20Sopenharmony_ci trans->txqs.txq[txq_id]->id = txq_id; 6518c2ecf20Sopenharmony_ci } 6528c2ecf20Sopenharmony_ci 6538c2ecf20Sopenharmony_ci return 0; 6548c2ecf20Sopenharmony_ci 6558c2ecf20Sopenharmony_cierror: 6568c2ecf20Sopenharmony_ci iwl_pcie_tx_free(trans); 6578c2ecf20Sopenharmony_ci 6588c2ecf20Sopenharmony_ci return ret; 6598c2ecf20Sopenharmony_ci} 6608c2ecf20Sopenharmony_ci 6618c2ecf20Sopenharmony_ciint iwl_pcie_tx_init(struct iwl_trans *trans) 6628c2ecf20Sopenharmony_ci{ 6638c2ecf20Sopenharmony_ci struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 6648c2ecf20Sopenharmony_ci int ret; 6658c2ecf20Sopenharmony_ci int txq_id, slots_num; 6668c2ecf20Sopenharmony_ci bool alloc = false; 6678c2ecf20Sopenharmony_ci 6688c2ecf20Sopenharmony_ci if (!trans_pcie->txq_memory) { 6698c2ecf20Sopenharmony_ci ret = iwl_pcie_tx_alloc(trans); 6708c2ecf20Sopenharmony_ci if (ret) 6718c2ecf20Sopenharmony_ci goto error; 6728c2ecf20Sopenharmony_ci alloc = true; 6738c2ecf20Sopenharmony_ci } 6748c2ecf20Sopenharmony_ci 6758c2ecf20Sopenharmony_ci spin_lock(&trans_pcie->irq_lock); 6768c2ecf20Sopenharmony_ci 6778c2ecf20Sopenharmony_ci /* Turn off all Tx DMA fifos */ 6788c2ecf20Sopenharmony_ci iwl_scd_deactivate_fifos(trans); 6798c2ecf20Sopenharmony_ci 6808c2ecf20Sopenharmony_ci /* Tell NIC where to find the "keep warm" buffer */ 6818c2ecf20Sopenharmony_ci 
iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 6828c2ecf20Sopenharmony_ci trans_pcie->kw.dma >> 4); 6838c2ecf20Sopenharmony_ci 6848c2ecf20Sopenharmony_ci spin_unlock(&trans_pcie->irq_lock); 6858c2ecf20Sopenharmony_ci 6868c2ecf20Sopenharmony_ci /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 6878c2ecf20Sopenharmony_ci for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; 6888c2ecf20Sopenharmony_ci txq_id++) { 6898c2ecf20Sopenharmony_ci bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); 6908c2ecf20Sopenharmony_ci 6918c2ecf20Sopenharmony_ci if (cmd_queue) 6928c2ecf20Sopenharmony_ci slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, 6938c2ecf20Sopenharmony_ci trans->cfg->min_txq_size); 6948c2ecf20Sopenharmony_ci else 6958c2ecf20Sopenharmony_ci slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, 6968c2ecf20Sopenharmony_ci trans->cfg->min_256_ba_txq_size); 6978c2ecf20Sopenharmony_ci ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num, 6988c2ecf20Sopenharmony_ci cmd_queue); 6998c2ecf20Sopenharmony_ci if (ret) { 7008c2ecf20Sopenharmony_ci IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 7018c2ecf20Sopenharmony_ci goto error; 7028c2ecf20Sopenharmony_ci } 7038c2ecf20Sopenharmony_ci 7048c2ecf20Sopenharmony_ci /* 7058c2ecf20Sopenharmony_ci * Tell nic where to find circular buffer of TFDs for a 7068c2ecf20Sopenharmony_ci * given Tx queue, and enable the DMA channel used for that 7078c2ecf20Sopenharmony_ci * queue. 
7088c2ecf20Sopenharmony_ci * Circular buffer (TFD queue in DRAM) physical base address 7098c2ecf20Sopenharmony_ci */ 7108c2ecf20Sopenharmony_ci iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), 7118c2ecf20Sopenharmony_ci trans->txqs.txq[txq_id]->dma_addr >> 8); 7128c2ecf20Sopenharmony_ci } 7138c2ecf20Sopenharmony_ci 7148c2ecf20Sopenharmony_ci iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); 7158c2ecf20Sopenharmony_ci if (trans->trans_cfg->base_params->num_of_queues > 20) 7168c2ecf20Sopenharmony_ci iwl_set_bits_prph(trans, SCD_GP_CTRL, 7178c2ecf20Sopenharmony_ci SCD_GP_CTRL_ENABLE_31_QUEUES); 7188c2ecf20Sopenharmony_ci 7198c2ecf20Sopenharmony_ci return 0; 7208c2ecf20Sopenharmony_cierror: 7218c2ecf20Sopenharmony_ci /*Upon error, free only if we allocated something */ 7228c2ecf20Sopenharmony_ci if (alloc) 7238c2ecf20Sopenharmony_ci iwl_pcie_tx_free(trans); 7248c2ecf20Sopenharmony_ci return ret; 7258c2ecf20Sopenharmony_ci} 7268c2ecf20Sopenharmony_ci 7278c2ecf20Sopenharmony_cistatic inline void iwl_pcie_txq_progress(struct iwl_txq *txq) 7288c2ecf20Sopenharmony_ci{ 7298c2ecf20Sopenharmony_ci lockdep_assert_held(&txq->lock); 7308c2ecf20Sopenharmony_ci 7318c2ecf20Sopenharmony_ci if (!txq->wd_timeout) 7328c2ecf20Sopenharmony_ci return; 7338c2ecf20Sopenharmony_ci 7348c2ecf20Sopenharmony_ci /* 7358c2ecf20Sopenharmony_ci * station is asleep and we send data - that must 7368c2ecf20Sopenharmony_ci * be uAPSD or PS-Poll. Don't rearm the timer. 
7378c2ecf20Sopenharmony_ci */ 7388c2ecf20Sopenharmony_ci if (txq->frozen) 7398c2ecf20Sopenharmony_ci return; 7408c2ecf20Sopenharmony_ci 7418c2ecf20Sopenharmony_ci /* 7428c2ecf20Sopenharmony_ci * if empty delete timer, otherwise move timer forward 7438c2ecf20Sopenharmony_ci * since we're making progress on this queue 7448c2ecf20Sopenharmony_ci */ 7458c2ecf20Sopenharmony_ci if (txq->read_ptr == txq->write_ptr) 7468c2ecf20Sopenharmony_ci del_timer(&txq->stuck_timer); 7478c2ecf20Sopenharmony_ci else 7488c2ecf20Sopenharmony_ci mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 7498c2ecf20Sopenharmony_ci} 7508c2ecf20Sopenharmony_ci 7518c2ecf20Sopenharmony_ci/* Frees buffers until index _not_ inclusive */ 7528c2ecf20Sopenharmony_civoid iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, 7538c2ecf20Sopenharmony_ci struct sk_buff_head *skbs) 7548c2ecf20Sopenharmony_ci{ 7558c2ecf20Sopenharmony_ci struct iwl_txq *txq = trans->txqs.txq[txq_id]; 7568c2ecf20Sopenharmony_ci int tfd_num = iwl_txq_get_cmd_index(txq, ssn); 7578c2ecf20Sopenharmony_ci int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); 7588c2ecf20Sopenharmony_ci int last_to_free; 7598c2ecf20Sopenharmony_ci 7608c2ecf20Sopenharmony_ci /* This function is not meant to release cmd queue*/ 7618c2ecf20Sopenharmony_ci if (WARN_ON(txq_id == trans->txqs.cmd.q_id)) 7628c2ecf20Sopenharmony_ci return; 7638c2ecf20Sopenharmony_ci 7648c2ecf20Sopenharmony_ci spin_lock_bh(&txq->lock); 7658c2ecf20Sopenharmony_ci 7668c2ecf20Sopenharmony_ci if (!test_bit(txq_id, trans->txqs.queue_used)) { 7678c2ecf20Sopenharmony_ci IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", 7688c2ecf20Sopenharmony_ci txq_id, ssn); 7698c2ecf20Sopenharmony_ci goto out; 7708c2ecf20Sopenharmony_ci } 7718c2ecf20Sopenharmony_ci 7728c2ecf20Sopenharmony_ci if (read_ptr == tfd_num) 7738c2ecf20Sopenharmony_ci goto out; 7748c2ecf20Sopenharmony_ci 7758c2ecf20Sopenharmony_ci IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", 
7768c2ecf20Sopenharmony_ci txq_id, txq->read_ptr, tfd_num, ssn); 7778c2ecf20Sopenharmony_ci 7788c2ecf20Sopenharmony_ci /*Since we free until index _not_ inclusive, the one before index is 7798c2ecf20Sopenharmony_ci * the last we will free. This one must be used */ 7808c2ecf20Sopenharmony_ci last_to_free = iwl_txq_dec_wrap(trans, tfd_num); 7818c2ecf20Sopenharmony_ci 7828c2ecf20Sopenharmony_ci if (!iwl_txq_used(txq, last_to_free)) { 7838c2ecf20Sopenharmony_ci IWL_ERR(trans, 7848c2ecf20Sopenharmony_ci "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", 7858c2ecf20Sopenharmony_ci __func__, txq_id, last_to_free, 7868c2ecf20Sopenharmony_ci trans->trans_cfg->base_params->max_tfd_queue_size, 7878c2ecf20Sopenharmony_ci txq->write_ptr, txq->read_ptr); 7888c2ecf20Sopenharmony_ci goto out; 7898c2ecf20Sopenharmony_ci } 7908c2ecf20Sopenharmony_ci 7918c2ecf20Sopenharmony_ci if (WARN_ON(!skb_queue_empty(skbs))) 7928c2ecf20Sopenharmony_ci goto out; 7938c2ecf20Sopenharmony_ci 7948c2ecf20Sopenharmony_ci for (; 7958c2ecf20Sopenharmony_ci read_ptr != tfd_num; 7968c2ecf20Sopenharmony_ci txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), 7978c2ecf20Sopenharmony_ci read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { 7988c2ecf20Sopenharmony_ci struct sk_buff *skb = txq->entries[read_ptr].skb; 7998c2ecf20Sopenharmony_ci 8008c2ecf20Sopenharmony_ci if (WARN_ON_ONCE(!skb)) 8018c2ecf20Sopenharmony_ci continue; 8028c2ecf20Sopenharmony_ci 8038c2ecf20Sopenharmony_ci iwl_txq_free_tso_page(trans, skb); 8048c2ecf20Sopenharmony_ci 8058c2ecf20Sopenharmony_ci __skb_queue_tail(skbs, skb); 8068c2ecf20Sopenharmony_ci 8078c2ecf20Sopenharmony_ci txq->entries[read_ptr].skb = NULL; 8088c2ecf20Sopenharmony_ci 8098c2ecf20Sopenharmony_ci if (!trans->trans_cfg->use_tfh) 8108c2ecf20Sopenharmony_ci iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); 8118c2ecf20Sopenharmony_ci 8128c2ecf20Sopenharmony_ci iwl_pcie_txq_free_tfd(trans, txq); 8138c2ecf20Sopenharmony_ci } 
8148c2ecf20Sopenharmony_ci 8158c2ecf20Sopenharmony_ci iwl_pcie_txq_progress(txq); 8168c2ecf20Sopenharmony_ci 8178c2ecf20Sopenharmony_ci if (iwl_txq_space(trans, txq) > txq->low_mark && 8188c2ecf20Sopenharmony_ci test_bit(txq_id, trans->txqs.queue_stopped)) { 8198c2ecf20Sopenharmony_ci struct sk_buff_head overflow_skbs; 8208c2ecf20Sopenharmony_ci 8218c2ecf20Sopenharmony_ci __skb_queue_head_init(&overflow_skbs); 8228c2ecf20Sopenharmony_ci skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); 8238c2ecf20Sopenharmony_ci 8248c2ecf20Sopenharmony_ci /* 8258c2ecf20Sopenharmony_ci * We are going to transmit from the overflow queue. 8268c2ecf20Sopenharmony_ci * Remember this state so that wait_for_txq_empty will know we 8278c2ecf20Sopenharmony_ci * are adding more packets to the TFD queue. It cannot rely on 8288c2ecf20Sopenharmony_ci * the state of &txq->overflow_q, as we just emptied it, but 8298c2ecf20Sopenharmony_ci * haven't TXed the content yet. 8308c2ecf20Sopenharmony_ci */ 8318c2ecf20Sopenharmony_ci txq->overflow_tx = true; 8328c2ecf20Sopenharmony_ci 8338c2ecf20Sopenharmony_ci /* 8348c2ecf20Sopenharmony_ci * This is tricky: we are in reclaim path which is non 8358c2ecf20Sopenharmony_ci * re-entrant, so noone will try to take the access the 8368c2ecf20Sopenharmony_ci * txq data from that path. We stopped tx, so we can't 8378c2ecf20Sopenharmony_ci * have tx as well. Bottom line, we can unlock and re-lock 8388c2ecf20Sopenharmony_ci * later. 
8398c2ecf20Sopenharmony_ci */ 8408c2ecf20Sopenharmony_ci spin_unlock_bh(&txq->lock); 8418c2ecf20Sopenharmony_ci 8428c2ecf20Sopenharmony_ci while (!skb_queue_empty(&overflow_skbs)) { 8438c2ecf20Sopenharmony_ci struct sk_buff *skb = __skb_dequeue(&overflow_skbs); 8448c2ecf20Sopenharmony_ci struct iwl_device_tx_cmd *dev_cmd_ptr; 8458c2ecf20Sopenharmony_ci 8468c2ecf20Sopenharmony_ci dev_cmd_ptr = *(void **)((u8 *)skb->cb + 8478c2ecf20Sopenharmony_ci trans->txqs.dev_cmd_offs); 8488c2ecf20Sopenharmony_ci 8498c2ecf20Sopenharmony_ci /* 8508c2ecf20Sopenharmony_ci * Note that we can very well be overflowing again. 8518c2ecf20Sopenharmony_ci * In that case, iwl_txq_space will be small again 8528c2ecf20Sopenharmony_ci * and we won't wake mac80211's queue. 8538c2ecf20Sopenharmony_ci */ 8548c2ecf20Sopenharmony_ci iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); 8558c2ecf20Sopenharmony_ci } 8568c2ecf20Sopenharmony_ci 8578c2ecf20Sopenharmony_ci if (iwl_txq_space(trans, txq) > txq->low_mark) 8588c2ecf20Sopenharmony_ci iwl_wake_queue(trans, txq); 8598c2ecf20Sopenharmony_ci 8608c2ecf20Sopenharmony_ci spin_lock_bh(&txq->lock); 8618c2ecf20Sopenharmony_ci txq->overflow_tx = false; 8628c2ecf20Sopenharmony_ci } 8638c2ecf20Sopenharmony_ci 8648c2ecf20Sopenharmony_ciout: 8658c2ecf20Sopenharmony_ci spin_unlock_bh(&txq->lock); 8668c2ecf20Sopenharmony_ci} 8678c2ecf20Sopenharmony_ci 8688c2ecf20Sopenharmony_ci/* Set wr_ptr of specific device and txq */ 8698c2ecf20Sopenharmony_civoid iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) 8708c2ecf20Sopenharmony_ci{ 8718c2ecf20Sopenharmony_ci struct iwl_txq *txq = trans->txqs.txq[txq_id]; 8728c2ecf20Sopenharmony_ci 8738c2ecf20Sopenharmony_ci spin_lock_bh(&txq->lock); 8748c2ecf20Sopenharmony_ci 8758c2ecf20Sopenharmony_ci txq->write_ptr = ptr; 8768c2ecf20Sopenharmony_ci txq->read_ptr = txq->write_ptr; 8778c2ecf20Sopenharmony_ci 8788c2ecf20Sopenharmony_ci spin_unlock_bh(&txq->lock); 8798c2ecf20Sopenharmony_ci} 8808c2ecf20Sopenharmony_ci 
/*
 * iwl_pcie_set_cmd_in_flight - keep the NIC awake while a host command runs
 *
 * Must be called with trans_pcie->reg_lock held. Returns 0 on success,
 * -ENODEV if the device already dropped off the bus, -EIO if the MAC
 * failed to wake within the 15 ms poll budget.
 *
 * NOTE(review): @cmd is not referenced in this body - presumably kept
 * for signature symmetry with other transports; confirm before removing.
 */
static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		/* request MAC access, then poll until the clock is ready */
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			/* undo the access request so the NIC may sleep again */
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		/* cleared again once the last in-flight command completes */
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 *
 * Must be called with txq->lock held. Under normal operation exactly one
 * entry is reclaimed; reclaiming more than one means the firmware skipped
 * a host command, which triggers an NMI to capture firmware state.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int nfreed = 0;
	u16 r;

	lockdep_assert_held(&txq->lock);

	idx = iwl_txq_get_cmd_index(txq, idx);
	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	/* reject indices outside the ring or not currently in use */
	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
	    (!iwl_txq_used(txq, idx))) {
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
			  trans->trans_cfg->base_params->max_tfd_queue_size,
			  txq->write_ptr, txq->read_ptr);
		return;
	}

	/* walk read_ptr up to (and including) idx */
	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
	     r = iwl_txq_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

	/* command queue drained - allow the NIC to sleep again */
	if (txq->read_ptr == txq->write_ptr) {
		/* BHs are also disabled due to txq->lock */
		spin_lock(&trans_pcie->reg_lock);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock(&trans_pcie->reg_lock);
	}

	iwl_pcie_txq_progress(txq);
}

/*
 * iwl_pcie_txq_set_ratid_map - map an RA/TID pair to a Tx queue
 *
 * Each 32-bit word of the scheduler's translation table holds two
 * 16-bit entries; odd queue ids occupy the high half, even ids the low
 * half, so read-modify-write preserves the neighbouring entry.
 * Always returns 0.
 */
static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

/*
 * iwl_trans_pcie_txq_enable - configure and activate a Tx queue
 *
 * With @cfg: programs the scheduler (FIFO mapping, aggregation state,
 * window/frame limits) and activates the queue. Without @cfg (DQA/mvm
 * path): only moves the pointers, working around the pre-9000 SCD
 * write-pointer bug described inline.
 *
 * Returns true when the SCD-bug workaround bumped @ssn by one, so the
 * op_mode knows the queue starts one entry later than requested;
 * false otherwise.
 */
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior configuring the cmd queue */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans->txqs.cmd.q_id)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			/* non-AGG: continue from the current read pointer */
			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoids this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
			SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	return scd_bug;
}

/*
 * iwl_trans_pcie_txq_set_shared_mode - mark a queue as shared or not
 *
 * A shared queue cannot do AMPDU, hence ampdu = !shared_mode.
 */
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	txq->ampdu = !shared_mode;
}

/*
 * iwl_trans_pcie_txq_disable - deactivate a Tx queue
 *
 * Clears frozen state, marks the queue unused, optionally deactivates it
 * in the scheduler (zeroing its SRAM status area) and unmaps all of its
 * buffers. Tolerates being called on an already-disabled queue (only
 * WARNs if the device is still enabled) - see the rfkill note below.
 */
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
	trans->txqs.txq[txq_id]->frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* zero the queue's status area in scheduler SRAM */
		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans->txqs.txq[txq_id]->ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data point
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	unsigned long flags;

	/* wide (grouped) commands need firmware/driver support for them */
	if (WARN(!trans->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	/* start sizing with the header that matches the command format */
	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	/*
	 * First pass: compute copy_size (bytes that go into the command
	 * buffer) and cmd_size (total logical command length), adjusting
	 * NOCOPY/DUP chunks so that at least IWL_FIRST_TB_SIZE bytes are
	 * always copied for TB0.
	 */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			/* NOCOPY and DUP on the same chunk are exclusive */
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_irqsave(&txq->lock, flags);

	/* a sync command leaves one extra slot free as headroom */
	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&txq->lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_txq_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			/* unmap everything mapped for this TFD so far */
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - tb0_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	/* ownership of dup_buf moves to the queue entry here */
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock(&trans_pcie->reg_lock);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		goto unlock_reg;
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

 unlock_reg:
	spin_unlock(&trans_pcie->reg_lock);
 out:
	spin_unlock_irqrestore(&txq->lock, flags);
 free_dup_buf:
	/* on any error path, dup_buf was not handed to the queue - free it */
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id;
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->txqs.cmd.q_id,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
		 txq->write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = iwl_txq_get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	group_id = cmd->hdr.group_id;
	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);

	/* DMA buffers of the completed command can be unmapped now */
	iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		/* hand the response page over to the waiting caller */
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		/* unblock the sync sender waiting in iwl_pcie_send_hcmd_sync() */
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

/* how long a sync host command may take before we NMI the firmware */
#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

/*
 * Send a host command without waiting for its completion; the response
 * (if any) is delivered through the async callback path.
 */
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}
	return 0;
}

/*
 * Send a host command and sleep until the firmware completes it (or
 * HOST_COMPLETE_TIMEOUT expires, or the firmware/RF-kill state aborts it).
 * Returns 0 on success or a negative errno.
 */
static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	/* only one sync command may be in flight at any time */
	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 iwl_get_cmd_string(trans, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}

	/* STATUS_SYNC_HCMD_ACTIVE is cleared by iwl_pcie_hcmd_complete() */
	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			iwl_get_cmd_string(trans, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd->id));
		ret = -ETIMEDOUT;

		/* kick the firmware so its state shows up in the error dump */
		iwl_trans_pcie_sync_nmi(trans);
		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_trans_pcie_dump_regs(trans);
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			iwl_get_cmd_string(trans, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			iwl_get_cmd_string(trans, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

/*
 * Transport entry point for sending a host command; dispatches to the
 * sync or async path after checking device and RF-kill state.
 */
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}

/*
 * Map the skb head remainder (past the 802.11 header) and all page
 * fragments into TFD transfer buffers for a data frame.
 * Returns 0 on success or a negative errno on DMA mapping failure.
 */
static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta)
{
	u16 head_tb_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	head_tb_len = skb_headlen(skb) - hdr_len;

	if (head_tb_len > 0) {
		dma_addr_t tb_phys = dma_map_single(trans->dev,
						    skb->data + hdr_len,
						    head_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
					tb_phys, head_tb_len);
		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
					tb_phys, skb_frag_size(frag));
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);
		if (tb_idx < 0)
			return tb_idx;

		/* remember which TBs hold frag mappings for later unmap */
		out_meta->tbs |= BIT(tb_idx);
	}

	return 0;
}

#ifdef CONFIG_INET
/*
 * Compute the TCP pseudo-header checksum (IPv4 or IPv6) and store its
 * complement in tcph->check; for IPv4 also refresh the IP header checksum.
 */
static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
					bool ipv6, unsigned int len)
{
	if (ipv6) {
		struct ipv6hdr *iphv6 = iph;

		tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr,
					       len + tcph->doff * 4,
					       IPPROTO_TCP, 0);
	} else {
		struct iphdr *iphv4 = iph;

		ip_send_check(iphv4);
		tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr,
						 len + tcph->doff * 4,
						 IPPROTO_TCP, 0);
	}
}

/*
 * Build and map an A-MSDU frame from a GSO skb: split the TSO payload
 * into subframes, constructing per-subframe headers in a dedicated
 * header page and mapping each piece into the TFD.
 */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(txq->trans);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		struct tcphdr *tcph;
		u8 *iph, *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		/* pad the previous subframe to a 4-byte boundary */
		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		/* build the A-MSDU subframe header: DA, SA, length */
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
		iph = hdr_page->pos + 8;
		tcph = (void *)(iph + ip_hdrlen);

		/* For testing on current hardware only */
		if (trans_pcie->sw_csum_tx) {
			csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
					     GFP_ATOMIC);
			if (!csum_skb)
				return -ENOMEM;

			iwl_compute_pseudo_hdr_csum(iph, tcph,
						    skb->protocol ==
							htons(ETH_P_IPV6),
						    data_left);

			skb_put_data(csum_skb, tcph, tcp_hdrlen(skb));
			skb_reset_transport_header(csum_skb);
			csum_skb->csum_start =
				(unsigned char *)tcp_hdr(csum_skb) -
						 csum_skb->head;
		}

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys =
dma_map_single(trans->dev, start_hdr, 18328c2ecf20Sopenharmony_ci hdr_tb_len, DMA_TO_DEVICE); 18338c2ecf20Sopenharmony_ci if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) { 18348c2ecf20Sopenharmony_ci dev_kfree_skb(csum_skb); 18358c2ecf20Sopenharmony_ci return -EINVAL; 18368c2ecf20Sopenharmony_ci } 18378c2ecf20Sopenharmony_ci iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, 18388c2ecf20Sopenharmony_ci hdr_tb_len, false); 18398c2ecf20Sopenharmony_ci trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, 18408c2ecf20Sopenharmony_ci hdr_tb_phys, hdr_tb_len); 18418c2ecf20Sopenharmony_ci /* add this subframe's headers' length to the tx_cmd */ 18428c2ecf20Sopenharmony_ci le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); 18438c2ecf20Sopenharmony_ci 18448c2ecf20Sopenharmony_ci /* prepare the start_hdr for the next subframe */ 18458c2ecf20Sopenharmony_ci start_hdr = hdr_page->pos; 18468c2ecf20Sopenharmony_ci 18478c2ecf20Sopenharmony_ci /* put the payload */ 18488c2ecf20Sopenharmony_ci while (data_left) { 18498c2ecf20Sopenharmony_ci unsigned int size = min_t(unsigned int, tso.size, 18508c2ecf20Sopenharmony_ci data_left); 18518c2ecf20Sopenharmony_ci dma_addr_t tb_phys; 18528c2ecf20Sopenharmony_ci 18538c2ecf20Sopenharmony_ci if (trans_pcie->sw_csum_tx) 18548c2ecf20Sopenharmony_ci skb_put_data(csum_skb, tso.data, size); 18558c2ecf20Sopenharmony_ci 18568c2ecf20Sopenharmony_ci tb_phys = dma_map_single(trans->dev, tso.data, 18578c2ecf20Sopenharmony_ci size, DMA_TO_DEVICE); 18588c2ecf20Sopenharmony_ci if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { 18598c2ecf20Sopenharmony_ci dev_kfree_skb(csum_skb); 18608c2ecf20Sopenharmony_ci return -EINVAL; 18618c2ecf20Sopenharmony_ci } 18628c2ecf20Sopenharmony_ci 18638c2ecf20Sopenharmony_ci iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 18648c2ecf20Sopenharmony_ci size, false); 18658c2ecf20Sopenharmony_ci trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, 18668c2ecf20Sopenharmony_ci tb_phys, size); 
18678c2ecf20Sopenharmony_ci 18688c2ecf20Sopenharmony_ci data_left -= size; 18698c2ecf20Sopenharmony_ci tso_build_data(skb, &tso, size); 18708c2ecf20Sopenharmony_ci } 18718c2ecf20Sopenharmony_ci 18728c2ecf20Sopenharmony_ci /* For testing on early hardware only */ 18738c2ecf20Sopenharmony_ci if (trans_pcie->sw_csum_tx) { 18748c2ecf20Sopenharmony_ci __wsum csum; 18758c2ecf20Sopenharmony_ci 18768c2ecf20Sopenharmony_ci csum = skb_checksum(csum_skb, 18778c2ecf20Sopenharmony_ci skb_checksum_start_offset(csum_skb), 18788c2ecf20Sopenharmony_ci csum_skb->len - 18798c2ecf20Sopenharmony_ci skb_checksum_start_offset(csum_skb), 18808c2ecf20Sopenharmony_ci 0); 18818c2ecf20Sopenharmony_ci dev_kfree_skb(csum_skb); 18828c2ecf20Sopenharmony_ci dma_sync_single_for_cpu(trans->dev, hdr_tb_phys, 18838c2ecf20Sopenharmony_ci hdr_tb_len, DMA_TO_DEVICE); 18848c2ecf20Sopenharmony_ci tcph->check = csum_fold(csum); 18858c2ecf20Sopenharmony_ci dma_sync_single_for_device(trans->dev, hdr_tb_phys, 18868c2ecf20Sopenharmony_ci hdr_tb_len, DMA_TO_DEVICE); 18878c2ecf20Sopenharmony_ci } 18888c2ecf20Sopenharmony_ci } 18898c2ecf20Sopenharmony_ci 18908c2ecf20Sopenharmony_ci /* re -add the WiFi header and IV */ 18918c2ecf20Sopenharmony_ci skb_push(skb, hdr_len + iv_len); 18928c2ecf20Sopenharmony_ci 18938c2ecf20Sopenharmony_ci return 0; 18948c2ecf20Sopenharmony_ci} 18958c2ecf20Sopenharmony_ci#else /* CONFIG_INET */ 18968c2ecf20Sopenharmony_cistatic int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, 18978c2ecf20Sopenharmony_ci struct iwl_txq *txq, u8 hdr_len, 18988c2ecf20Sopenharmony_ci struct iwl_cmd_meta *out_meta, 18998c2ecf20Sopenharmony_ci struct iwl_device_tx_cmd *dev_cmd, 19008c2ecf20Sopenharmony_ci u16 tb1_len) 19018c2ecf20Sopenharmony_ci{ 19028c2ecf20Sopenharmony_ci /* No A-MSDU without CONFIG_INET */ 19038c2ecf20Sopenharmony_ci WARN_ON(1); 19048c2ecf20Sopenharmony_ci 19058c2ecf20Sopenharmony_ci return -1; 19068c2ecf20Sopenharmony_ci} 19078c2ecf20Sopenharmony_ci#endif /* 
CONFIG_INET */

/*
 * iwl_trans_pcie_tx - enqueue one frame on a gen1 PCIe TX queue.
 *
 * Builds the TFD for @skb on queue @txq_id:
 *  - TB0: first IWL_FIRST_TB_SIZE bytes of the device command
 *    (bi-directional DMA buffer, memcpy'd at the end),
 *  - TB1: remainder of the TX command + 802.11 header,
 *  - further TBs: frame payload (linear data, frags, or A-MSDU
 *    subframes via iwl_fill_data_tbs_amsdu()).
 * Then updates the byte-count table, arms the stuck-queue timer if the
 * queue was empty, and advances the write pointer.
 *
 * @trans:   transport
 * @skb:     frame to transmit (mac80211 guarantees the full 802.11
 *           header is in the skb head)
 * @dev_cmd: prepared device TX command for this frame
 * @txq_id:  destination HW queue index
 *
 * Returns 0 on success (including the overflow-queue path, where the
 * frame is queued for later), -EINVAL for an unused queue, -ENOMEM if
 * linearization fails, and -1 on build/mapping errors (after unmapping
 * any TBs already built for this TFD).
 */
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	/*
	 * SW checksum offload (test hardware only): finish the partial
	 * checksum in place before the frame is handed to the device.
	 */
	if (unlikely(trans_pcie->sw_csum_tx &&
		     skb->ip_summed == CHECKSUM_PARTIAL)) {
		int offs = skb_checksum_start_offset(skb);
		int csum_offs = offs + skb->csum_offset;
		__wsum csum;

		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
			return -1;

		csum = skb_checksum(skb, offs, skb->len - offs, 0);
		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* too many frags for the TFD's TB slots - linearize instead */
	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			/* stash dev_cmd in skb->cb so the overflow path
			 * can resubmit the frame later
			 */
			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirements to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	/* scratch area lives inside TB0's bi-directional buffer */
	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (trans_pcie->sw_csum_tx || !amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
	} else {
		tb1_len = len;
	}

	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);

	/*
	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
	 * (adding subframes, etc.).
	 * This can happen in some testing flows when the amsdu was already
	 * pre-built, and we just need to send the resulting skb.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_txq_gen1_tfd_get_num_tbs(trans,
								      tfd));

	/* fragmented frames: delay the write-pointer update until the
	 * last fragment is queued
	 */
	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer, if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station will
		 * wake up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	/* unmap any TBs already built for this TFD before bailing out */
	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}