162306a36Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 262306a36Sopenharmony_ci/* 362306a36Sopenharmony_ci * Copyright (C) 2003-2014, 2018-2021, 2023 Intel Corporation 462306a36Sopenharmony_ci * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 562306a36Sopenharmony_ci * Copyright (C) 2016-2017 Intel Deutschland GmbH 662306a36Sopenharmony_ci */ 762306a36Sopenharmony_ci#include <linux/etherdevice.h> 862306a36Sopenharmony_ci#include <linux/ieee80211.h> 962306a36Sopenharmony_ci#include <linux/slab.h> 1062306a36Sopenharmony_ci#include <linux/sched.h> 1162306a36Sopenharmony_ci#include <net/ip6_checksum.h> 1262306a36Sopenharmony_ci#include <net/tso.h> 1362306a36Sopenharmony_ci 1462306a36Sopenharmony_ci#include "iwl-debug.h" 1562306a36Sopenharmony_ci#include "iwl-csr.h" 1662306a36Sopenharmony_ci#include "iwl-prph.h" 1762306a36Sopenharmony_ci#include "iwl-io.h" 1862306a36Sopenharmony_ci#include "iwl-scd.h" 1962306a36Sopenharmony_ci#include "iwl-op-mode.h" 2062306a36Sopenharmony_ci#include "internal.h" 2162306a36Sopenharmony_ci#include "fw/api/tx.h" 2262306a36Sopenharmony_ci 2362306a36Sopenharmony_ci/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 2462306a36Sopenharmony_ci * DMA services 2562306a36Sopenharmony_ci * 2662306a36Sopenharmony_ci * Theory of operation 2762306a36Sopenharmony_ci * 2862306a36Sopenharmony_ci * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer 2962306a36Sopenharmony_ci * of buffer descriptors, each of which points to one or more data buffers for 3062306a36Sopenharmony_ci * the device to read from or fill. Driver and device exchange status of each 3162306a36Sopenharmony_ci * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty 3262306a36Sopenharmony_ci * entries in each circular buffer, to protect against confusing empty and full 3362306a36Sopenharmony_ci * queue states. 
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space become < low mark, Tx queue stopped. When
 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
 * Tx queue resumed.
 *
 ***************************************************/


/*
 * iwl_pcie_alloc_dma_ptr - allocate a coherent DMA buffer and record it
 * @ptr: descriptor to fill in; must be free (addr == NULL), a non-NULL
 *	 addr indicates a double allocation and triggers the WARN below
 * @size: number of bytes to allocate
 *
 * Returns 0 on success, -EINVAL if @ptr already holds an allocation,
 * -ENOMEM if the coherent allocation fails.
 */
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	/* catch double allocation - the descriptor must be unused */
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

/*
 * iwl_pcie_free_dma_ptr - release a buffer from iwl_pcie_alloc_dma_ptr()
 *
 * Safe to call on a descriptor that was never allocated or was already
 * freed (addr == NULL); the descriptor is zeroed afterwards so it may be
 * reused or freed again without harm.
 */
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	/* zero so a later free/alloc on this descriptor behaves sanely */
	memset(ptr, 0, sizeof(*ptr));
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			/*
			 * defer the register write; it is retried from
			 * iwl_pcie_txq_check_wrptrs() once need_update is seen
			 */
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

/*
 * iwl_pcie_txq_check_wrptrs - push out any deferred write-pointer updates
 *
 * Walks every used Tx queue and, for each queue whose need_update flag was
 * set by iwl_pcie_txq_inc_wr_ptr() (because the NIC had to be woken first),
 * retries the hardware write-pointer update under the queue lock.
 */
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (!test_bit(i, trans->txqs.queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

/*
 * iwl_pcie_txq_build_tfd - append one TB (addr/len pair) to the current TFD
 * @addr: DMA address of the buffer; must satisfy IWL_TX_DMA_MASK alignment
 * @len: length of the buffer in bytes
 * @reset: if true, zero the TFD first (i.e. this is the first TB of the TFD)
 *
 * Returns the index of the TB that was just added, or -EINVAL if the TFD
 * already holds max_tbs buffers or @addr is misaligned.
 */
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	void *tfd;
	u32 num_tbs;

	/* TFD for the current write position in the circular buffer */
	tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans->txqs.tfd.size);

	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (num_tbs >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_gen1_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}

/*
 * iwl_pcie_clear_cmd_in_flight - drop the "keep NIC awake for cmd" hold
 *
 * Counterpart of iwl_pcie_set_cmd_in_flight(); no-op on NICs without the
 * APMG wake-up workaround. Clears cmd_hold_nic_awake and releases
 * MAC_ACCESS_REQ under reg_lock.
 */
static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return;

	spin_lock(&trans_pcie->reg_lock);

	/* must only be called while a command actually holds the NIC awake */
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
		spin_unlock(&trans_pcie->reg_lock);
		return;
	}

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	spin_unlock(&trans_pcie->reg_lock);
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	if (!txq) {
		IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
		return;
	}

	spin_lock_bh(&txq->lock);
	/* drain every entry between read and write pointers */
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			/*
			 * NOTE(review): this 'continue' re-tests the same
			 * entry without advancing read_ptr, so it relies on
			 * skb never actually being NULL here (WARN_ON_ONCE
			 * documents the invariant) - otherwise it would spin.
			 */
			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		/* last command reclaimed: let the NIC sleep again */
		if (txq->read_ptr == txq->write_ptr &&
		    txq_id == trans->txqs.cmd.q_id)
			iwl_pcie_clear_cmd_in_flight(trans);
	}

	/* frames that never made it into the ring are freed to the op_mode */
	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}
/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	/* unmap DMA and free skbs first so the rings below are empty */
	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			/* host commands may hold key material: wipe on free */
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size *
				  trans->trans_cfg->base_params->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*
 * iwl_pcie_tx_start - program scheduler/FH and activate Tx DMA channels
 * @scd_base_addr: expected SCD SRAM base address, or 0 to skip the
 *	consistency WARN (used after WoWLAN resets where the register
 *	may contain garbage - see iwl_trans_pcie_tx_reset()).
 */
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->trans_cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
			    SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queue are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	/* point the scheduler at the byte-count tables (1KB aligned) */
	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans->txqs.scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
				trans->txqs.cmd.fifo,
				trans->txqs.cmd.wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/*
 * iwl_trans_pcie_tx_reset - re-point the HW at all Tx rings and restart Tx
 *
 * Used on resume paths: rewrites the per-queue circular-buffer base
 * addresses, drains each queue, resets its pointers and restarts the
 * scheduler. Gen2 devices must never reach this code.
 */
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode return early to avoid
	 * having invalid accesses
	 */
	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans->txqs.txq[txq_id];
		/*
		 * NOTE(review): the gen2 branch below is unreachable - the
		 * WARN_ON_ONCE above already returned for gen2. Kept as-is.
		 */
		if (trans->trans_cfg->gen2)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have be reset
	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

/*
 * iwl_pcie_tx_stop_fh - stop all FH Tx DMA channels and wait for idle
 *
 * Best effort: if NIC access cannot be grabbed the channels are left
 * alone; a poll timeout is logged but not propagated.
 */
static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, ret;
	u32 mask = 0;

	spin_lock_bh(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans);

out:
	spin_unlock_bh(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->trans_cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans->txqs.txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	/* keep-warm buffer and scheduler byte-count tables */
	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * Returns 0 on success or a negative errno; on any failure everything
 * allocated so far is torn down via iwl_pcie_tx_free().
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;

	/* AX210+ use the gen2 path, never this legacy allocation */
	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
		return -EINVAL;

	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);

	/*It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory =
		kcalloc(trans->trans_cfg->base_params->num_of_queues,
			sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_ba_txq_size);
		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
				    cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans->txqs.txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

/*
 * iwl_pcie_tx_init - (re)initialize the Tx path
 *
 * Allocates the Tx context on first use, then initializes every queue and
 * programs the HW with each ring's base address. On error, frees the
 * context only if it was allocated by this call.
 */
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_bh(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_bh(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_ba_txq_size);
		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
				   cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell nic where to find circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans->txqs.txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->trans_cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/*Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

/*
 * iwl_pcie_set_cmd_in_flight - keep the NIC awake while a host command
 * is in flight
 * @cmd: the command about to be sent (currently unused here)
 *
 * Returns 0 on success, -ENODEV if the NIC died on the bus, -EIO if NIC
 * access could not be grabbed. On NICs with apmg_wake_up_wa, leaves
 * cmd_hold_nic_awake set; released by iwl_pcie_clear_cmd_in_flight().
 */
static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return 0;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set (see above.)
	 */
	if (!_iwl_trans_pcie_grab_nic_access(trans))
		return -EIO;

	/*
	 * In iwl_trans_grab_nic_access(), we've acquired the reg_lock.
	 * There, we also returned immediately if cmd_hold_nic_awake is
	 * already true, so it's OK to unconditionally set it to true.
	 */
	trans_pcie->cmd_hold_nic_awake = true;
	spin_unlock(&trans_pcie->reg_lock);

	return 0;
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int nfreed = 0;
	u16 r;

	/* Caller must hold the queue lock */
	lockdep_assert_held(&txq->lock);

	/* Work with wrapped command indices */
	idx = iwl_txq_get_cmd_index(txq, idx);
	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	/* Reject an index outside the ring or one that was never used */
	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
	    (!iwl_txq_used(txq, idx))) {
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
			  trans->trans_cfg->base_params->max_tfd_queue_size,
			  txq->write_ptr, txq->read_ptr);
		return;
	}

	/*
	 * Advance read_ptr up to (and including) idx.  Freeing more than
	 * one entry here means the firmware skipped a host command -
	 * report it and force an NMI to get firmware debug data.
	 */
	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
	     r = iwl_txq_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

	/* Queue fully drained - allow the NIC to sleep again */
	if (txq->read_ptr == txq->write_ptr)
		iwl_pcie_clear_cmd_in_flight(trans);

	iwl_txq_progress(txq);
}

/*
 * iwl_pcie_txq_set_ratid_map - map a RA/TID pair to a Tx queue in the
 *	scheduler's translation table
 *
 * Each 32-bit table entry holds two 16-bit mappings: odd queue IDs use the
 * upper half-word, even ones the lower, so the other half is preserved via
 * a read-modify-write.  Always returns 0.
 */
static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

/*
 * iwl_trans_pcie_txq_enable - configure and activate a Tx queue in the
 *	scheduler
 *
 * Returns true when the caller must work around the pre-9000 SCD
 * write-pointer bug (see the comment in the !cfg branch), false otherwise.
 */
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct
iwl_txq *txq = trans->txqs.txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* stuck-queue watchdog interval, converted to jiffies */
	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior configuring the cmd queue */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans->txqs.cmd.q_id)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoids this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
			SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	return scd_bug;
}

/*
 * iwl_trans_pcie_txq_set_shared_mode - update a queue's sharing state
 *
 * A queue in shared mode is not used for aggregation, so simply mirror
 * the inverted flag into txq->ampdu.
 */
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	txq->ampdu = !shared_mode;
}

/*
 * iwl_trans_pcie_txq_disable - deactivate a Tx queue and unmap its buffers
 * @configure_scd: when true, also deactivate the queue in the scheduler
 *	and clear its status area in SRAM
 */
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
	trans->txqs.txq[txq_id]->frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		/*
		 * NOTE(review): the length passed is ARRAY_SIZE (4), which
		 * appears to be a dword count rather than bytes - confirm
		 * against iwl_trans_write_mem()'s contract.
		 */
		iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans->txqs.txq[txq_id]->ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data point
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd)
{
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	unsigned long flags;

	if (WARN(!trans->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	/* copy_size/cmd_size start out as just the (wide or short) header */
	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	/*
	 * First pass: compute sizes, and for NOCOPY/DUP chunks adjust
	 * cmddata[]/cmdlen[] so the first IWL_FIRST_TB_SIZE bytes always
	 * come from the copied part.
	 */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			/* nothing allocated yet, so plain return is fine */
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_irqsave(&txq->lock, flags);

	/* ASYNC commands require one extra free slot */
	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&txq->lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_txq_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - tb0_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		void *data = (void *)(uintptr_t)cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	/* dup_buf ownership moves to the queue entry; freed on reclaim */
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		goto out;
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&txq->lock, flags);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id;
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->txqs.cmd.q_id,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
		 txq->write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = iwl_txq_get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	group_id = cmd->hdr.group_id;
	cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);

	/* Unmap the command's DMA buffers (gen2 vs. gen1 TFD layout) */
	if (trans->trans_cfg->gen2)
		iwl_txq_gen2_tfd_unmap(trans, meta,
				       iwl_txq_get_tfd(trans, txq, index));
	else
		iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		/* hand the response page over to the waiting caller */
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	/* For sync commands, wake the waiter in iwl_trans_send_cmd() */
	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		wake_up(&trans->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

/*
 * iwl_fill_data_tbs - map an skb's head remainder and page fragments into
 *	TFD transfer buffers
 *
 * Returns 0 on success, a negative errno on DMA-mapping or TFD-build
 * failure.
 */
static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct
			     iwl_cmd_meta *out_meta)
{
	u16 head_tb_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	head_tb_len = skb_headlen(skb) - hdr_len;

	if (head_tb_len > 0) {
		/* map the linear data that follows the 802.11 header */
		dma_addr_t tb_phys = dma_map_single(trans->dev,
						    skb->data + hdr_len,
						    head_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
					tb_phys, head_tb_len);
		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		/* zero-length fragments need no TB */
		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
					tb_phys, skb_frag_size(frag));
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);
		if (tb_idx < 0)
			return tb_idx;

		/* remember which TB slots hold fragments (for unmap) */
		out_meta->tbs |= BIT(tb_idx);
	}

	return 0;
}

#ifdef CONFIG_INET
/*
 * iwl_fill_data_tbs_amsdu - split a GSO skb into A-MSDU subframes using
 * the TSO core and map the replicated headers and payload chunks as TBs
 * of this TFD.
 *
 * Returns 0 on success, -ENOMEM if no header page is available, or
 * -EINVAL if a DMA mapping fails.
 */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

	/* SNAP (8 bytes) + IP header + TCP header get replicated in front
	 * of every subframe, so account for them per MSDU.
	 */
	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	/* Build one A-MSDU subframe per iteration: pad, DA/SA/length
	 * subframe header, replicated SNAP/IP/TCP headers, then payload.
	 */
	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		/* pad the previous subframe out to a 4-byte boundary */
		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		/* NOTE(review): on mapping failure the TBs already built
		 * stay mapped here; the caller's out_err path unmaps them.
		 */
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys)))
			return -EINVAL;
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					hdr_tb_phys, hdr_tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
				return -EINVAL;

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						tb_phys, size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;
}
#else /* CONFIG_INET */
/* Stub: A-MSDU TX uses the TSO helpers, which require CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */

/*
 * iwl_trans_pcie_tx - build a TFD for one frame and queue it for TX
 *
 * Maps the TX command and the frame data into transfer buffers, updates
 * the byte-count table and advances the queue write pointer.
 *
 * Returns 0 on success (including when the frame is parked on the
 * overflow queue), -EINVAL for a TX on an unused queue, -ENOMEM if
 * linearizing an over-fragmented skb fails, or -1 if DMA mapping fails.
 */
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	/* too many fragments for the hardware TFD - flatten the skb */
	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			/* stash the command pointer in skb->cb and park the
			 * frame on the overflow queue instead of the ring
			 */
			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	/* DMA address of the TX command's scratch field inside the first
	 * TB, handed to the device via dram_{lsb,msb}_ptr below
	 */
	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (!amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
	} else {
		tb1_len = len;
	}

	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
		     offsetofend(struct iwl_tx_cmd, scratch) >
		     IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);

	/*
	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
	 * (adding subframes, etc.).
	 * This can happen in some testing flows when the amsdu was already
	 * pre-built, and we just need to send the resulting skb.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		/* also map any chained fragment skbs (no 802.11 header) */
		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_txq_gen1_tfd_get_num_tbs(trans,
								      tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer, if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station will
		 * wake up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	/* unmap everything that was attached to this TFD so far */
	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}