// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */
#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "fw/api/commands.h"
#include "fw/api/tx.h"
#include "fw/api/datapath.h"
#include "fw/api/debug.h"
#include "queue/tx.h"
#include "iwl-fh.h"
#include "iwl-scd.h"
#include <linux/dmapool.h>

/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
                                          struct iwl_txq *txq, u16 byte_cnt,
                                          int num_tbs)
{
        int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
        u8 filled_tfd_size, num_fetch_chunks;
        u16 len = byte_cnt;
        __le16 bc_ent;

        if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
                return;

        filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
                          num_tbs * sizeof(struct iwl_tfh_tb);
        /*
         * filled_tfd_size contains the number of filled bytes in the TFD.
         * Dividing it by 64 will give the number of chunks to fetch
         * to SRAM - 0 for one chunk, 1 for 2 and so on.
         * If, for example, TFD contains only 3 TBs then 32 bytes
         * of the TFD are used, and only one chunk of 64 bytes should
         * be fetched
         */
        num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;

                /* Starting from AX210, the HW expects bytes */
                WARN_ON(trans->txqs.bc_table_dword);
                WARN_ON(len > 0x3FFF);
                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
                scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
        } else {
                struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;

                /* Before AX210, the HW expects DW */
                WARN_ON(!trans->txqs.bc_table_dword);
                len = DIV_ROUND_UP(len, 4);
                WARN_ON(len > 0xFFF);
                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
                scd_bc_tbl->tfd_offset[idx] = bc_ent;
        }
}

/*
 * iwl_txq_inc_wr_ptr - Send new write index to hardware
 */
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
        lockdep_assert_held(&txq->lock);

        IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

        /*
         * if not in power-save mode, uCode will never sleep when we're
         * trying to tx (during RFKILL, we're not trying to tx).
         */
        iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
                                   struct iwl_tfh_tfd *tfd)
{
        return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

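/*
 * iwl_txq_gen2_set_tb - add one transfer buffer to a gen2 TFD
 *
 * Stores @addr/@len in the next free TB slot and increments the TFD's
 * TB count. Returns the index of the new TB on success, or -EINVAL if
 * the TFD already holds the maximum number of TBs.
 */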
int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
                        dma_addr_t addr, u16 len)
{
        int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
        struct iwl_tfh_tb *tb;

        /* Only WARN here so we know about the issue, but we mess up our
         * unmap path because not every place currently checks for errors
         * returned from this function - it can only return an error if
         * there's no more space, and so when we know there is enough we
         * don't always check ...
         */
        WARN(iwl_txq_crosses_4g_boundary(addr, len),
             "possible DMA problem with iova:0x%llx, len:%d\n",
             (unsigned long long)addr, len);

        if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
                return -EINVAL;
        tb = &tfd->tbs[idx];

        /* Each TFD can point to a maximum max_tbs Tx buffers */
        if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
                IWL_ERR(trans, "Error can not send more than %d chunks\n",
                        trans->txqs.tfd.max_tbs);
                return -EINVAL;
        }

        put_unaligned_le64(addr, &tb->addr);
        tb->tb_len = cpu_to_le16(len);

        tfd->num_tbs = cpu_to_le16(idx + 1);

        return idx;
}

static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
                                         struct iwl_tfh_tfd *tfd)
{
        tfd->num_tbs = 0;

        iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
                            trans->invalid_tx_cmd.size);
}

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
                            struct iwl_tfh_tfd *tfd)
{
        int i, num_tbs;

        /* Sanity check on number of chunks */
        num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);

        if (num_tbs > trans->txqs.tfd.max_tbs) {
                IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
                return;
        }

        /* first TB is never freed - it's the bidirectional DMA data */
        for (i = 1; i < num_tbs; i++) {
                if (meta->tbs & BIT(i))
                        dma_unmap_page(trans->dev,
                                       le64_to_cpu(tfd->tbs[i].addr),
                                       le16_to_cpu(tfd->tbs[i].tb_len),
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(trans->dev,
                                         le64_to_cpu(tfd->tbs[i].addr),
                                         le16_to_cpu(tfd->tbs[i].tb_len),
                                         DMA_TO_DEVICE);
        }

        iwl_txq_set_tfd_invalid_gen2(trans, tfd);
}

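/*
 * iwl_txq_gen2_free_tfd - unmap and free the TFD at the queue's read pointer
 *
 * Unmaps every TB of the TFD (via iwl_txq_gen2_tfd_unmap()) and frees the
 * attached skb, if any, handing it back to the op-mode.
 */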
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
        /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
         * idx is bounded by n_window
         */
        int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
        struct sk_buff *skb;

        lockdep_assert_held(&txq->lock);

        if (!txq->entries)
                return;

        iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
                               iwl_txq_get_tfd(trans, txq, idx));

        skb = txq->entries[idx].skb;

        /* Can be called from irqs-disabled context
         * If skb is not NULL, it means that the whole queue is being
         * freed and that the queue is not empty - free the skb
         */
        if (skb) {
                iwl_op_mode_free_skb(trans->op_mode, skb);
                txq->entries[idx].skb = NULL;
        }
}

static struct page *get_workaround_page(struct iwl_trans *trans,
                                        struct sk_buff *skb)
{
        struct page **page_ptr;
        struct page *ret;

        page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

        ret = alloc_page(GFP_ATOMIC);
        if (!ret)
                return NULL;

        /* set the chaining pointer to the previous page if there */
        *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
        *page_ptr = ret;

        return ret;
}

/*
 * Add a TB and if needed apply the FH HW bug workaround;
 * meta != NULL indicates that it's a page mapping and we
 * need to dma_unmap_page() and set the meta->tbs bit in
 * this case.
 */
static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
                                       struct sk_buff *skb,
                                       struct iwl_tfh_tfd *tfd,
                                       dma_addr_t phys, void *virt,
                                       u16 len, struct iwl_cmd_meta *meta)
{
        dma_addr_t oldphys = phys;
        struct page *page;
        int ret;

        if (unlikely(dma_mapping_error(trans->dev, phys)))
                return -ENOMEM;

        if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
                ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);

                if (ret < 0)
                        goto unmap;

                if (meta)
                        meta->tbs |= BIT(ret);

                ret = 0;
                goto trace;
        }

        /*
         * Work around a hardware bug.
         * If (as expressed in the
         * condition above) the TB ends on a 32-bit boundary,
         * then the next TB may be accessed with the wrong
         * address.
         * To work around it, copy the data elsewhere and make
         * a new mapping for it so the device will not fail.
         */

        if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
                ret = -ENOBUFS;
                goto unmap;
        }

        page = get_workaround_page(trans, skb);
        if (!page) {
                ret = -ENOMEM;
                goto unmap;
        }

        memcpy(page_address(page), virt, len);

        phys = dma_map_single(trans->dev, page_address(page), len,
                              DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, phys)))
                return -ENOMEM;
        ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
        if (ret < 0) {
                /* unmap the new allocation as single */
                oldphys = phys;
                meta = NULL;
                goto unmap;
        }
        IWL_WARN(trans,
                 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
                 len, (unsigned long long)oldphys, (unsigned long long)phys);

        ret = 0;
unmap:
        if (meta)
                dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
        else
                dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
trace:
        trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);

        return ret;
}

#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
                                      struct sk_buff *skb)
{
        struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
        struct page **page_ptr;

        page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);

        if (WARN_ON(*page_ptr))
                return NULL;

        if (!p->page)
                goto alloc;

        /*
         * Check if there's enough room on this page
         *
         * Note that we put a page chaining pointer *last* in the
         * page - we need it somewhere, and if it's there then we
         * avoid DMA mapping the last bits of the page which may
         * trigger the 32-bit boundary hardware
         * bug.
         *
         * (see also get_workaround_page() in tx-gen2.c)
         */
        if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
                           sizeof(void *))
                goto out;

        /* We don't have enough room on this page, get a new one. */
        __free_page(p->page);

alloc:
        p->page = alloc_page(GFP_ATOMIC);
        if (!p->page)
                return NULL;
        p->pos = page_address(p->page);
        /* set the chaining pointer to NULL */
        *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
        *page_ptr = p->page;
        get_page(p->page);
        return p;
}
#endif

static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
                                    struct sk_buff *skb,
                                    struct iwl_tfh_tfd *tfd, int start_len,
                                    u8 hdr_len,
                                    struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
        struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
        struct ieee80211_hdr *hdr = (void *)skb->data;
        unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        u16 length, amsdu_pad;
        u8 *start_hdr;
        struct iwl_tso_hdr_page *hdr_page;
        struct tso_t tso;

        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
                             &dev_cmd->hdr, start_len, 0);

        ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
        snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
        total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
        amsdu_pad = 0;

        /* total amount of header we may need for this A-MSDU */
        hdr_room = DIV_ROUND_UP(total_len, mss) *
                (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

        /* Our device supports 9 segments at most, it will fit in 1 page */
        hdr_page = get_page_hdr(trans, hdr_room, skb);
        if (!hdr_page)
                return -ENOMEM;

        start_hdr = hdr_page->pos;

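        /*
         * Each A-MSDU subframe built in the loop below has the layout
         *   [pad][DA][SA][length][SNAP + IP + TCP header][payload]
         * The subframe headers are written into the TSO header page and
         * mapped as a single TB; the payload is mapped straight from the
         * skb data via iwl_txq_gen2_set_tb_with_wa().
         */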
        /*
         * Pull the ieee80211 header to be able to use TSO core,
         * we will restore it for the tx_status flow.
         */
        skb_pull(skb, hdr_len);

        /*
         * Remove the length of all the headers that we don't actually
         * have in the MPDU by themselves, but that we duplicate into
         * all the different MSDUs inside the A-MSDU.
         */
        le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

        tso_start(skb, &tso);

        while (total_len) {
                /* this is the data left for this subframe */
                unsigned int data_left = min_t(unsigned int, mss, total_len);
                unsigned int tb_len;
                dma_addr_t tb_phys;
                u8 *subf_hdrs_start = hdr_page->pos;

                total_len -= data_left;

                memset(hdr_page->pos, 0, amsdu_pad);
                hdr_page->pos += amsdu_pad;
                amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
                                  data_left)) & 0x3;
                ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
                hdr_page->pos += ETH_ALEN;
                ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
                hdr_page->pos += ETH_ALEN;

                length = snap_ip_tcp_hdrlen + data_left;
                *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
                hdr_page->pos += sizeof(length);

                /*
                 * This will copy the SNAP as well which will be considered
                 * as MAC header.
                 */
                tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

                hdr_page->pos += snap_ip_tcp_hdrlen;

                tb_len = hdr_page->pos - start_hdr;
                tb_phys = dma_map_single(trans->dev, start_hdr,
                                         tb_len, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                        goto out_err;
                /*
                 * No need for _with_wa, this is from the TSO page and
                 * we leave some space at the end of it so can't hit
                 * the buggy scenario.
                 */
                iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
                trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
                                        tb_phys, tb_len);
                /* add this subframe's headers' length to the tx_cmd */
                le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

                /* prepare the start_hdr for the next subframe */
                start_hdr = hdr_page->pos;

                /* put the payload */
                while (data_left) {
                        int ret;

                        tb_len = min_t(unsigned int, tso.size, data_left);
                        tb_phys = dma_map_single(trans->dev, tso.data,
                                                 tb_len, DMA_TO_DEVICE);
                        ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
                                                          tb_phys, tso.data,
                                                          tb_len, NULL);
                        if (ret)
                                goto out_err;

                        data_left -= tb_len;
                        tso_build_data(skb, &tso, tb_len);
                }
        }

        /* re-add the WiFi header */
        skb_push(skb, hdr_len);

        return 0;

out_err:
#endif
        return -EINVAL;
}

static struct iwl_tfh_tfd *
iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
                            struct iwl_txq *txq,
                            struct iwl_device_tx_cmd *dev_cmd,
                            struct sk_buff *skb,
                            struct iwl_cmd_meta *out_meta,
                            int hdr_len,
                            int tx_cmd_len)
{
        int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
        struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
        dma_addr_t tb_phys;
        int len;
        void *tb1_addr;

        tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

        /*
         * No need for _with_wa, the first TB allocation is aligned up
         * to a 64-byte boundary and thus can't be at the end or cross
         * a page boundary (much less a 2^32 boundary).
         */
        iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

        /*
         * The second TB (tb1) points to the remainder of the TX command
         * and the 802.11 header - dword aligned size
         * (This calculation modifies the TX command, so do it before the
         * setup of the first TB)
         */
        len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
              IWL_FIRST_TB_SIZE;

        /* do not align A-MSDU to dword as the subframe header aligns it */

        /* map the data for TB1 */
        tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
        tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                goto out_err;
        /*
         * No need for _with_wa(), we ensure (via alignment) that the data
         * here can never cross or end at a page boundary.
         */
        iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);

        if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
                                     hdr_len, dev_cmd))
                goto out_err;

        /* building the A-MSDU might have changed this data, memcpy it now */
        memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
        return tfd;

out_err:
        iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
        return NULL;
}

static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
                                     struct sk_buff *skb,
                                     struct iwl_tfh_tfd *tfd,
                                     struct iwl_cmd_meta *out_meta)
{
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                dma_addr_t tb_phys;
                unsigned int fragsz = skb_frag_size(frag);
                int ret;

                if (!fragsz)
                        continue;

                tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
                                           fragsz, DMA_TO_DEVICE);
                ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
                                                  skb_frag_address(frag),
                                                  fragsz, out_meta);
                if (ret)
                        return ret;
        }

        return 0;
}

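/*
 * Build a TFD for a regular (non-GSO) frame. The resulting TB layout is:
 *   TB0 - first IWL_FIRST_TB_SIZE bytes of the TX command (the
 *         bi-directional DMA buffer), TB1 - remainder of the TX command
 *         plus the 802.11 header, TB2 - rest of the skb head (if any),
 *   then one TB per paged fragment and per frag-list skb.
 */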
static struct iwl_tfh_tfd *
iwl_txq_gen2_build_tx(struct iwl_trans *trans,
                      struct iwl_txq *txq,
                      struct iwl_device_tx_cmd *dev_cmd,
                      struct sk_buff *skb,
                      struct iwl_cmd_meta *out_meta,
                      int hdr_len,
                      int tx_cmd_len,
                      bool pad)
{
        int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
        struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
        dma_addr_t tb_phys;
        int len, tb1_len, tb2_len;
        void *tb1_addr;
        struct sk_buff *frag;

        tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

        /* The first TB points to bi-directional DMA data */
        memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);

        /*
         * No need for _with_wa, the first TB allocation is aligned up
         * to a 64-byte boundary and thus can't be at the end or cross
         * a page boundary (much less a 2^32 boundary).
         */
        iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

        /*
         * The second TB (tb1) points to the remainder of the TX command
         * and the 802.11 header - dword aligned size
         * (This calculation modifies the TX command, so do it before the
         * setup of the first TB)
         */
        len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
              IWL_FIRST_TB_SIZE;

        if (pad)
                tb1_len = ALIGN(len, 4);
        else
                tb1_len = len;

        /* map the data for TB1 */
        tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
        tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                goto out_err;
        /*
         * No need for _with_wa(), we ensure (via alignment) that the data
         * here can never cross or end at a page boundary.
         */
        iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
                             IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

        /* set up TFD's third entry to point to remainder of skb's head */
        tb2_len = skb_headlen(skb) - hdr_len;

        if (tb2_len > 0) {
                int ret;

                tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
                                         tb2_len, DMA_TO_DEVICE);
                ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
                                                  skb->data + hdr_len, tb2_len,
                                                  NULL);
                if (ret)
                        goto out_err;
        }

        if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
                goto out_err;

        skb_walk_frags(skb, frag) {
                int ret;

                tb_phys = dma_map_single(trans->dev, frag->data,
                                         skb_headlen(frag), DMA_TO_DEVICE);
                ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
                                                  frag->data,
                                                  skb_headlen(frag), NULL);
                if (ret)
                        goto out_err;
                if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
                        goto out_err;
        }

        return tfd;

out_err:
        iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
        return NULL;
}

static struct iwl_tfh_tfd *
iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
                       struct iwl_txq *txq,
                       struct iwl_device_tx_cmd *dev_cmd,
                       struct sk_buff *skb,
                       struct iwl_cmd_meta *out_meta)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
        struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
        int len, hdr_len;
        bool amsdu;

        /* There must be data left over for TB1 or this code must be changed */
        BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
        BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
                     offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
                     IWL_FIRST_TB_SIZE);
        BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
        BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
                     offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
                     IWL_FIRST_TB_SIZE);

        memset(tfd, 0, sizeof(*tfd));

        if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
                len = sizeof(struct iwl_tx_cmd_gen2);
        else
                len = sizeof(struct iwl_tx_cmd_gen3);

        amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
                (*ieee80211_get_qos_ctl(hdr) &
                 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

        hdr_len = ieee80211_hdrlen(hdr->frame_control);

        /*
         * Only build A-MSDUs here if doing so by GSO, otherwise it may be
         * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
         * built in the higher layers already.
         */
        if (amsdu && skb_shinfo(skb)->gso_size)
                return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
                                                   out_meta, hdr_len, len);
        return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
                                     hdr_len, len, !amsdu);
}

int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
        unsigned int max;
        unsigned int used;

        /*
         * To avoid ambiguity between empty and completely full queues, there
         * should always be less than max_tfd_queue_size elements in the queue.
         * If q->n_window is smaller than max_tfd_queue_size, there is no need
         * to reserve any queue entries for this purpose.
         */
        if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
                max = q->n_window;
        else
                max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;

        /*
         * max_tfd_queue_size is a power of 2, so the following is equivalent to
         * modulo by max_tfd_queue_size and is well defined.
         */
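        /*
         * For example, with max_tfd_queue_size == 256, write_ptr == 3 and
         * read_ptr == 250, the masked difference below gives
         * (3 - 250) & 255 == 9 used entries (250..255 plus 0..2).
         */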
        used = (q->write_ptr - q->read_ptr) &
                (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

        if (WARN_ON(used > max))
                return 0;

        return max - used;
}

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
                    struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
        struct iwl_cmd_meta *out_meta;
        struct iwl_txq *txq = trans->txqs.txq[txq_id];
        u16 cmd_len;
        int idx;
        void *tfd;

        if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
                      "queue %d out of range", txq_id))
                return -EINVAL;

        if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
                      "TX on unused queue %d\n", txq_id))
                return -EINVAL;

        if (skb_is_nonlinear(skb) &&
            skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
            __skb_linearize(skb))
                return -ENOMEM;

        spin_lock(&txq->lock);

        if (iwl_txq_space(trans, txq) < txq->high_mark) {
                iwl_txq_stop(trans, txq);

                /* don't put the packet on the ring, if there is no room */
                if (unlikely(iwl_txq_space(trans, txq) < 3)) {
                        struct iwl_device_tx_cmd **dev_cmd_ptr;

                        dev_cmd_ptr = (void *)((u8 *)skb->cb +
                                               trans->txqs.dev_cmd_offs);

                        *dev_cmd_ptr = dev_cmd;
                        __skb_queue_tail(&txq->overflow_q, skb);
                        spin_unlock(&txq->lock);
                        return 0;
                }
        }

        idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);

        /* Set up driver data for this TFD */
        txq->entries[idx].skb = skb;
        txq->entries[idx].cmd = dev_cmd;

        dev_cmd->hdr.sequence =
                cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                            INDEX_TO_SEQ(idx)));

        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_meta = &txq->entries[idx].meta;
        out_meta->flags = 0;

        tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
        if (!tfd) {
                spin_unlock(&txq->lock);
                return -1;
        }

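        /*
         * The TX command layout differs between device families: the byte
         * count for the scheduler table is read from the gen3 command on
         * AX210 and later, and from the gen2 command on older devices.
         */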
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
                        (void *)dev_cmd->payload;

                cmd_len = le16_to_cpu(tx_cmd_gen3->len);
        } else {
                struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
                        (void *)dev_cmd->payload;

                cmd_len = le16_to_cpu(tx_cmd_gen2->len);
        }

        /* Set up entry for this TFD in Tx byte-count array */
        iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
                                      iwl_txq_gen2_get_num_tbs(trans, tfd));

        /* start timer if queue currently empty */
        if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
                mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

        /* Tell device the write index *just past* this latest filled TFD */
        txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
        iwl_txq_inc_wr_ptr(trans, txq);
        /*
         * At this point the frame is "transmitted" successfully
         * and we will get a TX status notification eventually.
         */
        spin_unlock(&txq->lock);
        return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
{
        struct iwl_txq *txq = trans->txqs.txq[txq_id];

        spin_lock_bh(&txq->lock);
        while (txq->write_ptr != txq->read_ptr) {
                IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
                                   txq_id, txq->read_ptr);

                if (txq_id != trans->txqs.cmd.q_id) {
                        int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
                        struct sk_buff *skb = txq->entries[idx].skb;

                        if (!WARN_ON_ONCE(!skb))
                                iwl_txq_free_tso_page(trans, skb);
                }
                iwl_txq_gen2_free_tfd(trans, txq);
                txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
        }

        while (!skb_queue_empty(&txq->overflow_q)) {
                struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

                iwl_op_mode_free_skb(trans->op_mode, skb);
        }

        spin_unlock_bh(&txq->lock);

        /* just in case - this queue may have been stopped */
        iwl_wake_queue(trans, txq);
}

static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
                                     struct iwl_txq *txq)
{
        struct device *dev = trans->dev;

        /* De-alloc circular buffer of TFDs */
        if (txq->tfds) {
                dma_free_coherent(dev,
                                  trans->txqs.tfd.size * txq->n_window,
                                  txq->tfds, txq->dma_addr);
                dma_free_coherent(dev,
                                  sizeof(*txq->first_tb_bufs) * txq->n_window,
                                  txq->first_tb_bufs, txq->first_tb_dma);
        }

        kfree(txq->entries);
        if (txq->bc_tbl.addr)
                dma_pool_free(trans->txqs.bc_pool,
                              txq->bc_tbl.addr, txq->bc_tbl.dma);
        kfree(txq);
}

/*
 * iwl_txq_gen2_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
{
        struct iwl_txq *txq;
        int i;

        if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
                      "queue %d out of range", txq_id))
                return;

        txq = trans->txqs.txq[txq_id];

        if (WARN_ON(!txq))
                return;

        iwl_txq_gen2_unmap(trans, txq_id);

        /* De-alloc array of command/tx buffers */
        if (txq_id == trans->txqs.cmd.q_id)
                for (i = 0; i < txq->n_window; i++) {
                        kfree_sensitive(txq->entries[i].cmd);
                        kfree_sensitive(txq->entries[i].free_buf);
                }
        del_timer_sync(&txq->stuck_timer);

        iwl_txq_gen2_free_memory(trans, txq);

        trans->txqs.txq[txq_id] = NULL;

        clear_bit(txq_id, trans->txqs.queue_used);
}

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
        q->n_window = slots_num;

        /* slots_num must be power-of-two size, otherwise
         * iwl_txq_get_cmd_index is
         * broken. */
        if (WARN_ON(!is_power_of_2(slots_num)))
                return -EINVAL;

        q->low_mark = q->n_window / 4;
        if (q->low_mark < 4)
                q->low_mark = 4;

        q->high_mark = q->n_window / 8;
        if (q->high_mark < 2)
                q->high_mark = 2;

        q->write_ptr = 0;
        q->read_ptr = 0;

        return 0;
}

int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
                 bool cmd_queue)
{
        int ret;
        u32 tfd_queue_max_size =
                trans->trans_cfg->base_params->max_tfd_queue_size;

        txq->need_update = false;

        /* max_tfd_queue_size must be power-of-two size, otherwise
         * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
        if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
                      "Max tfd queue size must be a power of two, but is %d",
                      tfd_queue_max_size))
                return -EINVAL;

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        ret = iwl_queue_init(txq, slots_num);
        if (ret)
                return ret;

        spin_lock_init(&txq->lock);

        if (cmd_queue) {
                static struct lock_class_key iwl_txq_cmd_queue_lock_class;

                lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
        }

        __skb_queue_head_init(&txq->overflow_q);

        return 0;
}

void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
{
        struct page **page_ptr;
        struct page *next;

        page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
        next = *page_ptr;
        *page_ptr = NULL;

        while (next) {
                struct page *tmp = next;

                next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
                                  sizeof(void *));
                __free_page(tmp);
        }
}

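/*
 * iwl_txq_log_scd_error - dump scheduler state for a stuck queue
 *
 * On gen2 devices only the software read/write pointers are printed;
 * on earlier devices the SCD status bits, FIFO number and hardware
 * read/write pointers are also read from the periphery registers.
 */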
void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
{
        u32 txq_id = txq->id;
        u32 status;
        bool active;
        u8 fifo;

        if (trans->trans_cfg->gen2) {
                IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
                        txq->read_ptr, txq->write_ptr);
                /* TODO: access new SCD registers and dump them */
                return;
        }

        status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
        fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
        active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));

        IWL_ERR(trans,
                "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
                txq_id, active ? "" : "in", fifo,
                jiffies_to_msecs(txq->wd_timeout),
                txq->read_ptr, txq->write_ptr,
                iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
                        (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
                iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
                        (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
                iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}

static void iwl_txq_stuck_timer(struct timer_list *t)
{
        struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
        struct iwl_trans *trans = txq->trans;

        spin_lock(&txq->lock);
        /* check if triggered erroneously */
        if (txq->read_ptr == txq->write_ptr) {
                spin_unlock(&txq->lock);
                return;
        }
        spin_unlock(&txq->lock);

        iwl_txq_log_scd_error(trans, txq);

        iwl_force_nmi(trans);
}

static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
                                         struct iwl_tfd *tfd)
{
        tfd->num_tbs = 0;

        iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma,
                                 trans->invalid_tx_cmd.size);
}

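/*
 * iwl_txq_alloc - allocate the host-side resources of a TX queue
 *
 * Allocates the per-entry bookkeeping array, the circular buffer of TFDs
 * and the first-TB buffers (both shared with the device via coherent DMA),
 * and marks every TFD as invalid until it is actually used.
 */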
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
                  bool cmd_queue)
{
        size_t num_entries = trans->trans_cfg->gen2 ?
                slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
        size_t tfd_sz;
        size_t tb0_buf_sz;
        int i;

        if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
                return -EINVAL;

        if (WARN_ON(txq->entries || txq->tfds))
                return -EINVAL;

        tfd_sz = trans->txqs.tfd.size * num_entries;

        timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
        txq->trans = trans;

        txq->n_window = slots_num;

        txq->entries = kcalloc(slots_num,
                               sizeof(struct iwl_pcie_txq_entry),
                               GFP_KERNEL);

        if (!txq->entries)
                goto error;

        if (cmd_queue)
                for (i = 0; i < slots_num; i++) {
                        txq->entries[i].cmd =
                                kmalloc(sizeof(struct iwl_device_cmd),
                                        GFP_KERNEL);
                        if (!txq->entries[i].cmd)
                                goto error;
                }

        /* Circular buffer of transmit frame descriptors (TFDs),
         * shared with device */
        txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
                                       &txq->dma_addr, GFP_KERNEL);
        if (!txq->tfds)
                goto error;

        BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);

        tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

        txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
                                                &txq->first_tb_dma,
                                                GFP_KERNEL);
        if (!txq->first_tb_bufs)
                goto err_free_tfds;

        for (i = 0; i < num_entries; i++) {
                void *tfd = iwl_txq_get_tfd(trans, txq, i);

                if (trans->trans_cfg->gen2)
                        iwl_txq_set_tfd_invalid_gen2(trans, tfd);
                else
                        iwl_txq_set_tfd_invalid_gen1(trans, tfd);
        }

        return 0;
err_free_tfds:
        dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
        txq->tfds = NULL;
error:
        if (txq->entries && cmd_queue)
                for (i = 0; i < slots_num; i++)
                        kfree(txq->entries[i].cmd);
        kfree(txq->entries);
        txq->entries = NULL;

        return -ENOMEM;
}

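/*
 * iwl_txq_dyn_alloc_dma - allocate a dynamically sized (TVQM) TX queue
 *
 * Reserves a byte-count table from the DMA pool and allocates and
 * initializes the queue itself; the queue only becomes usable once the
 * firmware's response is processed in iwl_txq_alloc_response().
 */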
static struct iwl_txq *
iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
{
        size_t bc_tbl_size, bc_tbl_entries;
        struct iwl_txq *txq;
        int ret;

        WARN_ON(!trans->txqs.bc_tbl_size);

        bc_tbl_size = trans->txqs.bc_tbl_size;
        bc_tbl_entries = bc_tbl_size / sizeof(u16);

        if (WARN_ON(size > bc_tbl_entries))
                return ERR_PTR(-EINVAL);

        txq = kzalloc(sizeof(*txq), GFP_KERNEL);
        if (!txq)
                return ERR_PTR(-ENOMEM);

        txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
                                          &txq->bc_tbl.dma);
        if (!txq->bc_tbl.addr) {
                IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
                kfree(txq);
                return ERR_PTR(-ENOMEM);
        }

        ret = iwl_txq_alloc(trans, txq, size, false);
        if (ret) {
                IWL_ERR(trans, "Tx queue alloc failed\n");
                goto error;
        }
        ret = iwl_txq_init(trans, txq, size, false);
        if (ret) {
                IWL_ERR(trans, "Tx queue init failed\n");
                goto error;
        }

        txq->wd_timeout = msecs_to_jiffies(timeout);

        return txq;

error:
        iwl_txq_gen2_free_memory(trans, txq);
        return ERR_PTR(ret);
}

static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
                                  struct iwl_host_cmd *hcmd)
{
        struct iwl_tx_queue_cfg_rsp *rsp;
        int ret, qid;
        u32 wr_ptr;

        if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
                    sizeof(*rsp))) {
                ret = -EINVAL;
                goto error_free_resp;
        }

        rsp = (void *)hcmd->resp_pkt->data;
        qid = le16_to_cpu(rsp->queue_number);
        wr_ptr = le16_to_cpu(rsp->write_pointer);

        if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
                WARN_ONCE(1, "queue index %d unsupported", qid);
                ret = -EIO;
                goto error_free_resp;
        }

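        /*
         * Claim the queue index in the transport's bitmap; if it is already
         * marked used, or a queue pointer already exists for it, the
         * firmware response is out of sync with our bookkeeping, so bail.
         */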
        if (test_and_set_bit(qid, trans->txqs.queue_used)) {
                WARN_ONCE(1, "queue %d already used", qid);
                ret = -EIO;
                goto error_free_resp;
        }

        if (WARN_ONCE(trans->txqs.txq[qid],
                      "queue %d already allocated\n", qid)) {
                ret = -EIO;
                goto error_free_resp;
        }

        txq->id = qid;
        trans->txqs.txq[qid] = txq;
        wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

        /* Place first TFD at index corresponding to start sequence number */
        txq->read_ptr = wr_ptr;
        txq->write_ptr = wr_ptr;

        IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

        iwl_free_resp(hcmd);
        return qid;

error_free_resp:
        iwl_free_resp(hcmd);
        iwl_txq_gen2_free_memory(trans, txq);
        return ret;
}

int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
                      u8 tid, int size, unsigned int timeout)
{
        struct iwl_txq *txq;
        union {
                struct iwl_tx_queue_cfg_cmd old;
                struct iwl_scd_queue_cfg_cmd new;
        } cmd;
        struct iwl_host_cmd hcmd = {
                .flags = CMD_WANT_SKB,
        };
        int ret;

        if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
            trans->hw_rev_step == SILICON_A_STEP)
                size = 4096;

        txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
        if (IS_ERR(txq))
                return PTR_ERR(txq);

        if (trans->txqs.queue_alloc_cmd_ver == 0) {
                memset(&cmd.old, 0, sizeof(cmd.old));
                cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
                cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
                cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
                cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
                cmd.old.tid = tid;

                if (hweight32(sta_mask) != 1) {
                        ret = -EINVAL;
                        goto error;
                }
                cmd.old.sta_id = ffs(sta_mask) - 1;

                hcmd.id = SCD_QUEUE_CFG;
                hcmd.len[0] = sizeof(cmd.old);
                hcmd.data[0] = &cmd.old;
        } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
                memset(&cmd.new, 0, sizeof(cmd.new));
                cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
                cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
                cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
                cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
                cmd.new.u.add.flags = cpu_to_le32(flags);
                cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
                cmd.new.u.add.tid = tid;

                hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
                hcmd.len[0] = sizeof(cmd.new);
                hcmd.data[0] = &cmd.new;
        } else {
                ret = -EOPNOTSUPP;
                goto error;
        }

        ret = iwl_trans_send_cmd(trans, &hcmd);
        if (ret)
                goto error;

        return iwl_txq_alloc_response(trans, txq, &hcmd);

error:
        iwl_txq_gen2_free_memory(trans, txq);
        return ret;
}

void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
{
        if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
                 "queue %d out of range", queue))
                return;

        /*
         * Upon HW Rfkill - we stop the device, and then stop the queues
         * in the op_mode. Just for the sake of the simplicity of the op_mode,
         * allow the op_mode to call txq_disable after it already called
         * stop_device.
         */
        if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
                WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
                          "queue %d not used", queue);
                return;
        }

        iwl_txq_gen2_free(trans, queue);

        IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}

void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
{
        int i;

        memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

        /* Free all TX queues */
        for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
                if (!trans->txqs.txq[i])
                        continue;

                iwl_txq_gen2_free(trans, i);
        }
}

int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
        struct iwl_txq *queue;
        int ret;

        /* alloc and init the tx queue */
        if (!trans->txqs.txq[txq_id]) {
                queue = kzalloc(sizeof(*queue), GFP_KERNEL);
                if (!queue) {
                        IWL_ERR(trans, "Not enough memory for tx queue\n");
                        return -ENOMEM;
                }
                trans->txqs.txq[txq_id] = queue;
                ret = iwl_txq_alloc(trans, queue, queue_size, true);
                if (ret) {
                        IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
                        goto error;
                }
        } else {
                queue = trans->txqs.txq[txq_id];
        }

        ret = iwl_txq_init(trans, queue, queue_size,
                           (txq_id == trans->txqs.cmd.q_id));
        if (ret) {
                IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
                goto error;
        }
        trans->txqs.txq[txq_id]->id = txq_id;
        set_bit(txq_id, trans->txqs.queue_used);

        return 0;

error:
        iwl_txq_gen2_tx_free(trans);
        return ret;
}

static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
                                                      struct iwl_tfd *tfd, u8 idx)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];
        dma_addr_t addr;
        dma_addr_t hi_len;

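        /*
         * Legacy TFDs store a TB address as a 32-bit low part plus four
         * extra address bits packed into the low nibble of hi_n_len,
         * giving a 36-bit DMA address; reassemble it here.
         */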
        addr = get_unaligned_le32(&tb->lo);

        if (sizeof(dma_addr_t) <= sizeof(u32))
                return addr;

        hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;

        /*
         * shift by 16 twice to avoid warnings on 32-bit
         * (where this code never runs anyway due to the
         * if statement above)
         */
        return addr | ((hi_len << 16) << 16);
}

void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
                            struct iwl_cmd_meta *meta,
                            struct iwl_txq *txq, int index)
{
        int i, num_tbs;
        struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);

        /* Sanity check on number of chunks */
        num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

        if (num_tbs > trans->txqs.tfd.max_tbs) {
                IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
                /* @todo issue fatal error, this is quite a serious situation */
                return;
        }

        /* first TB is never freed - it's the bidirectional DMA data */

        for (i = 1; i < num_tbs; i++) {
                if (meta->tbs & BIT(i))
                        dma_unmap_page(trans->dev,
                                       iwl_txq_gen1_tfd_tb_get_addr(trans, tfd, i),
                                       iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i),
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(trans->dev,
                                         iwl_txq_gen1_tfd_tb_get_addr(trans, tfd, i),
                                         iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i),
                                         DMA_TO_DEVICE);
        }

        meta->tbs = 0;

        iwl_txq_set_tfd_invalid_gen1(trans, tfd);
}

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*
 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
                                      struct iwl_txq *txq, u16 byte_cnt,
                                      int num_tbs)
{
        struct iwlagn_scd_bc_tbl *scd_bc_tbl;
        int write_ptr = txq->write_ptr;
        int txq_id = txq->id;
        u8 sec_ctl = 0;
        u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
        __le16 bc_ent;
        struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
        struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
        u8 sta_id = tx_cmd->sta_id;

        scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;

        sec_ctl = tx_cmd->sec_ctl;

        switch (sec_ctl & TX_CMD_SEC_MSK) {
        case TX_CMD_SEC_CCM:
                len += IEEE80211_CCMP_MIC_LEN;
                break;
        case TX_CMD_SEC_TKIP:
                len += IEEE80211_TKIP_ICV_LEN;
                break;
        case TX_CMD_SEC_WEP:
                len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
                break;
        }
        if (trans->txqs.bc_table_dword)
                len = DIV_ROUND_UP(len, 4);

        if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
                return;

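        /*
         * Entry layout used here: bits 0-11 carry the length (in bytes, or
         * in DWs when bc_table_dword is set), bits 12-15 carry the
         * station id.
         */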
        bc_ent = cpu_to_le16(len | (sta_id << 12));

        scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

        if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
                scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
                        bc_ent;
}

void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
                                     struct iwl_txq *txq)
{
        struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
        int txq_id = txq->id;
        int read_ptr = txq->read_ptr;
        u8 sta_id = 0;
        __le16 bc_ent;
        struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
        struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

        WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

        if (txq_id != trans->txqs.cmd.q_id)
                sta_id = tx_cmd->sta_id;

        bc_ent = cpu_to_le16(1 | (sta_id << 12));

        scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

        if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
                scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
                        bc_ent;
}

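/*
 * Note on the two byte-count helpers above: entries below
 * TFD_QUEUE_SIZE_BC_DUP are mirrored past TFD_QUEUE_SIZE_MAX, presumably so
 * that the scheduler still reads consistent values when it prefetches past
 * the ring wrap point.
 */
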
/*
 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
        /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
         * idx is bounded by n_window
         */
        int rd_ptr = txq->read_ptr;
        int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
        struct sk_buff *skb;

        lockdep_assert_held(&txq->lock);

        if (!txq->entries)
                return;

        /* We have only q->n_window txq->entries, but we use
         * TFD_QUEUE_SIZE_MAX tfds
         */
        if (trans->trans_cfg->gen2)
                iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
                                       iwl_txq_get_tfd(trans, txq, rd_ptr));
        else
                iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
                                       txq, rd_ptr);

        /* free SKB */
        skb = txq->entries[idx].skb;

        /* Can be called from irqs-disabled context
         * If skb is not NULL, it means that the whole queue is being
         * freed and that the queue is not empty - free the skb
         */
        if (skb) {
                iwl_op_mode_free_skb(trans->op_mode, skb);
                txq->entries[idx].skb = NULL;
        }
}

void iwl_txq_progress(struct iwl_txq *txq)
{
        lockdep_assert_held(&txq->lock);

        if (!txq->wd_timeout)
                return;

        /*
         * station is asleep and we send data - that must
         * be uAPSD or PS-Poll. Don't rearm the timer.
         */
        if (txq->frozen)
                return;

        /*
         * if empty delete timer, otherwise move timer forward
         * since we're making progress on this queue
         */
        if (txq->read_ptr == txq->write_ptr)
                del_timer(&txq->stuck_timer);
        else
                mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
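/*
 * Note: ssn maps to the first entry that is not reclaimed; everything from
 * the current read pointer up to (but excluding) it is unmapped and handed
 * back to the op_mode through @skbs, and the overflow queue is drained
 * afterwards if the queue had been stopped and space opened up.
 */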
void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                     struct sk_buff_head *skbs, bool is_flush)
{
        struct iwl_txq *txq = trans->txqs.txq[txq_id];
        int tfd_num, read_ptr, last_to_free;

        /* This function is not meant to release cmd queue */
        if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
                return;

        if (WARN_ON(!txq))
                return;

        tfd_num = iwl_txq_get_cmd_index(txq, ssn);
        read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);

        spin_lock_bh(&txq->lock);

        if (!test_bit(txq_id, trans->txqs.queue_used)) {
                IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
                                    txq_id, ssn);
                goto out;
        }

        if (read_ptr == tfd_num)
                goto out;

        IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
                           txq_id, txq->read_ptr, tfd_num, ssn);

        /* Since we free until index _not_ inclusive, the one before index is
         * the last we will free. This one must be used */
        last_to_free = iwl_txq_dec_wrap(trans, tfd_num);

        if (!iwl_txq_used(txq, last_to_free)) {
                IWL_ERR(trans,
                        "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
                        __func__, txq_id, last_to_free,
                        trans->trans_cfg->base_params->max_tfd_queue_size,
                        txq->write_ptr, txq->read_ptr);

                iwl_op_mode_time_point(trans->op_mode,
                                       IWL_FW_INI_TIME_POINT_FAKE_TX,
                                       NULL);
                goto out;
        }

        if (WARN_ON(!skb_queue_empty(skbs)))
                goto out;

        for (;
             read_ptr != tfd_num;
             txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
             read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
                struct sk_buff *skb = txq->entries[read_ptr].skb;

                if (WARN_ON_ONCE(!skb))
                        continue;

                iwl_txq_free_tso_page(trans, skb);

                __skb_queue_tail(skbs, skb);

                txq->entries[read_ptr].skb = NULL;

                if (!trans->trans_cfg->gen2)
                        iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);

                iwl_txq_free_tfd(trans, txq);
        }

        iwl_txq_progress(txq);

        if (iwl_txq_space(trans, txq) > txq->low_mark &&
            test_bit(txq_id, trans->txqs.queue_stopped)) {
                struct sk_buff_head overflow_skbs;
                struct sk_buff *skb;

                __skb_queue_head_init(&overflow_skbs);
                skb_queue_splice_init(&txq->overflow_q,
                                      is_flush ? skbs : &overflow_skbs);

                /*
                 * We are going to transmit from the overflow queue.
                 * Remember this state so that wait_for_txq_empty will know we
                 * are adding more packets to the TFD queue. It cannot rely on
                 * the state of &txq->overflow_q, as we just emptied it, but
                 * haven't TXed the content yet.
                 */
                txq->overflow_tx = true;

                /*
                 * This is tricky: we are in the reclaim path, which is not
                 * re-entrant, so no one else will try to access the txq data
                 * from that path. Tx on this queue is stopped, so new frames
                 * can't race with us either. Bottom line, we can unlock and
                 * re-lock later.
                 */
                spin_unlock_bh(&txq->lock);

                while ((skb = __skb_dequeue(&overflow_skbs))) {
                        struct iwl_device_tx_cmd *dev_cmd_ptr;

                        dev_cmd_ptr = *(void **)((u8 *)skb->cb +
                                                 trans->txqs.dev_cmd_offs);

                        /*
                         * Note that we can very well be overflowing again.
                         * In that case, iwl_txq_space will be small again
                         * and we won't wake mac80211's queue.
                         */
                        iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
                }

                if (iwl_txq_space(trans, txq) > txq->low_mark)
                        iwl_wake_queue(trans, txq);

                spin_lock_bh(&txq->lock);
                txq->overflow_tx = false;
        }

out:
        spin_unlock_bh(&txq->lock);
}

/* Set wr_ptr of specific device and txq */
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
        struct iwl_txq *txq = trans->txqs.txq[txq_id];

        spin_lock_bh(&txq->lock);

        txq->write_ptr = ptr;
        txq->read_ptr = txq->write_ptr;

        spin_unlock_bh(&txq->lock);
}

void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
                                bool freeze)
{
        int queue;

        for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
                struct iwl_txq *txq = trans->txqs.txq[queue];
                unsigned long now;

                spin_lock_bh(&txq->lock);

                now = jiffies;

                if (txq->frozen == freeze)
                        goto next_queue;

                IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
                                    freeze ? "Freezing" : "Waking", queue);

                txq->frozen = freeze;

                if (txq->read_ptr == txq->write_ptr)
                        goto next_queue;

                if (freeze) {
                        if (unlikely(time_after(now,
                                                txq->stuck_timer.expires))) {
                                /*
                                 * The timer should have fired, maybe it is
                                 * spinning right now on the lock.
                                 */
                                goto next_queue;
                        }
                        /* remember how long until the timer fires */
                        txq->frozen_expiry_remainder =
                                txq->stuck_timer.expires - now;
                        del_timer(&txq->stuck_timer);
                        goto next_queue;
                }

                /*
                 * Wake a non-empty queue -> arm timer with the
                 * remainder before it froze
                 */
                mod_timer(&txq->stuck_timer,
                          now + txq->frozen_expiry_remainder);

next_queue:
                spin_unlock_bh(&txq->lock);
        }
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
                                        struct iwl_host_cmd *cmd)
{
        const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
        struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
        int cmd_idx;
        int ret;

        IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

        if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
                                  &trans->status),
                 "Command %s: a command is already active!\n", cmd_str))
                return -EIO;

        IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

        cmd_idx = trans->ops->send_cmd(trans, cmd);
        if (cmd_idx < 0) {
                ret = cmd_idx;
                clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
                IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
                        cmd_str, ret);
                return ret;
        }

        ret = wait_event_timeout(trans->wait_command_queue,
                                 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
                                           &trans->status),
                                 HOST_COMPLETE_TIMEOUT);
        if (!ret) {
                IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
                        cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

                IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
                        txq->read_ptr, txq->write_ptr);

                clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
                IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
                               cmd_str);
                ret = -ETIMEDOUT;

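                /*
                 * A sync command timing out usually means the firmware is
                 * stuck; fire an NMI at it (and wait for it to be handled)
                 * so that error information can be collected before we give
                 * up on this command.
                 */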
                iwl_trans_sync_nmi(trans);
                goto cancel;
        }

        if (test_bit(STATUS_FW_ERROR, &trans->status)) {
                if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
                                        &trans->status)) {
                        IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
                        dump_stack();
                }
                ret = -EIO;
                goto cancel;
        }

        if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
            test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
                IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
                ret = -ERFKILL;
                goto cancel;
        }

        if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
                IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
                ret = -EIO;
                goto cancel;
        }

        return 0;

cancel:
        if (cmd->flags & CMD_WANT_SKB) {
                /*
                 * Cancel the CMD_WANT_SKB flag for the cmd in the
                 * TX cmd queue. Otherwise in case the cmd comes
                 * in later, it will possibly set an invalid
                 * address (cmd->meta.source).
                 */
                txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
        }

        if (cmd->resp_pkt) {
                iwl_free_resp(cmd);
                cmd->resp_pkt = NULL;
        }

        return ret;
}

int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
                            struct iwl_host_cmd *cmd)
{
        /* Make sure the NIC is still alive on the bus */
        if (test_bit(STATUS_TRANS_DEAD, &trans->status))
                return -ENODEV;

        if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
            test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
                IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
                                  cmd->id);
                return -ERFKILL;
        }

        if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
                     !(cmd->flags & CMD_SEND_IN_D3))) {
                IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
                return -EHOSTDOWN;
        }

        if (cmd->flags & CMD_ASYNC) {
                int ret;

                /* An asynchronous command cannot expect an SKB to be set. */
                if (WARN_ON(cmd->flags & CMD_WANT_SKB))
                        return -EINVAL;

                ret = trans->ops->send_cmd(trans, cmd);
                if (ret < 0) {
                        IWL_ERR(trans,
                                "Error sending %s: enqueue_hcmd failed: %d\n",
                                iwl_get_cmd_string(trans, cmd->id), ret);
                        return ret;
                }
                return 0;
        }

        return iwl_trans_txq_send_hcmd_sync(trans, cmd);
}