18c2ecf20Sopenharmony_ci/****************************************************************************** 28c2ecf20Sopenharmony_ci * 38c2ecf20Sopenharmony_ci * This file is provided under a dual BSD/GPLv2 license. When using or 48c2ecf20Sopenharmony_ci * redistributing this file, you may do so under either license. 58c2ecf20Sopenharmony_ci * 68c2ecf20Sopenharmony_ci * GPL LICENSE SUMMARY 78c2ecf20Sopenharmony_ci * 88c2ecf20Sopenharmony_ci * Copyright(c) 2020 Intel Corporation 98c2ecf20Sopenharmony_ci * 108c2ecf20Sopenharmony_ci * This program is free software; you can redistribute it and/or modify 118c2ecf20Sopenharmony_ci * it under the terms of version 2 of the GNU General Public License as 128c2ecf20Sopenharmony_ci * published by the Free Software Foundation. 138c2ecf20Sopenharmony_ci * 148c2ecf20Sopenharmony_ci * This program is distributed in the hope that it will be useful, but 158c2ecf20Sopenharmony_ci * WITHOUT ANY WARRANTY; without even the implied warranty of 168c2ecf20Sopenharmony_ci * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 178c2ecf20Sopenharmony_ci * General Public License for more details. 188c2ecf20Sopenharmony_ci * 198c2ecf20Sopenharmony_ci * BSD LICENSE 208c2ecf20Sopenharmony_ci * 218c2ecf20Sopenharmony_ci * Copyright(c) 2020 Intel Corporation 228c2ecf20Sopenharmony_ci * All rights reserved. 238c2ecf20Sopenharmony_ci * 248c2ecf20Sopenharmony_ci * Redistribution and use in source and binary forms, with or without 258c2ecf20Sopenharmony_ci * modification, are permitted provided that the following conditions 268c2ecf20Sopenharmony_ci * are met: 278c2ecf20Sopenharmony_ci * 288c2ecf20Sopenharmony_ci * * Redistributions of source code must retain the above copyright 298c2ecf20Sopenharmony_ci * notice, this list of conditions and the following disclaimer. 308c2ecf20Sopenharmony_ci * * Redistributions in binary form must reproduce the above copyright 318c2ecf20Sopenharmony_ci * notice, this list of conditions and the following disclaimer in 328c2ecf20Sopenharmony_ci * the documentation and/or other materials provided with the 338c2ecf20Sopenharmony_ci * distribution. 348c2ecf20Sopenharmony_ci * * Neither the name Intel Corporation nor the names of its 358c2ecf20Sopenharmony_ci * contributors may be used to endorse or promote products derived 368c2ecf20Sopenharmony_ci * from this software without specific prior written permission. 378c2ecf20Sopenharmony_ci * 388c2ecf20Sopenharmony_ci * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 398c2ecf20Sopenharmony_ci * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 408c2ecf20Sopenharmony_ci * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 418c2ecf20Sopenharmony_ci * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 428c2ecf20Sopenharmony_ci * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 438c2ecf20Sopenharmony_ci * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 448c2ecf20Sopenharmony_ci * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 458c2ecf20Sopenharmony_ci * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 468c2ecf20Sopenharmony_ci * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 478c2ecf20Sopenharmony_ci * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 488c2ecf20Sopenharmony_ci * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 498c2ecf20Sopenharmony_ci * 508c2ecf20Sopenharmony_ci *****************************************************************************/ 518c2ecf20Sopenharmony_ci#include <net/tso.h> 528c2ecf20Sopenharmony_ci#include <linux/tcp.h> 538c2ecf20Sopenharmony_ci 548c2ecf20Sopenharmony_ci#include "iwl-debug.h" 558c2ecf20Sopenharmony_ci#include "iwl-io.h" 568c2ecf20Sopenharmony_ci#include "fw/api/tx.h" 578c2ecf20Sopenharmony_ci#include "queue/tx.h" 588c2ecf20Sopenharmony_ci#include "iwl-fh.h" 598c2ecf20Sopenharmony_ci#include "iwl-scd.h" 608c2ecf20Sopenharmony_ci#include <linux/dmapool.h> 618c2ecf20Sopenharmony_ci 628c2ecf20Sopenharmony_ci/* 638c2ecf20Sopenharmony_ci * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels 648c2ecf20Sopenharmony_ci */ 658c2ecf20Sopenharmony_civoid iwl_txq_gen2_tx_stop(struct iwl_trans *trans) 668c2ecf20Sopenharmony_ci{ 678c2ecf20Sopenharmony_ci int txq_id; 688c2ecf20Sopenharmony_ci 698c2ecf20Sopenharmony_ci /* 708c2ecf20Sopenharmony_ci * This function can be called before the op_mode disabled the 718c2ecf20Sopenharmony_ci * queues. This happens when we have an rfkill interrupt. 728c2ecf20Sopenharmony_ci * Since we stop Tx altogether - mark the queues as stopped. 738c2ecf20Sopenharmony_ci */ 748c2ecf20Sopenharmony_ci memset(trans->txqs.queue_stopped, 0, 758c2ecf20Sopenharmony_ci sizeof(trans->txqs.queue_stopped)); 768c2ecf20Sopenharmony_ci memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); 778c2ecf20Sopenharmony_ci 788c2ecf20Sopenharmony_ci /* Unmap DMA from host system and free skb's */ 798c2ecf20Sopenharmony_ci for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) { 808c2ecf20Sopenharmony_ci if (!trans->txqs.txq[txq_id]) 818c2ecf20Sopenharmony_ci continue; 828c2ecf20Sopenharmony_ci iwl_txq_gen2_unmap(trans, txq_id); 838c2ecf20Sopenharmony_ci } 848c2ecf20Sopenharmony_ci} 858c2ecf20Sopenharmony_ci 868c2ecf20Sopenharmony_ci/* 878c2ecf20Sopenharmony_ci * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array 888c2ecf20Sopenharmony_ci */ 898c2ecf20Sopenharmony_cistatic void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, 908c2ecf20Sopenharmony_ci struct iwl_txq *txq, u16 byte_cnt, 918c2ecf20Sopenharmony_ci int num_tbs) 928c2ecf20Sopenharmony_ci{ 938c2ecf20Sopenharmony_ci int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 948c2ecf20Sopenharmony_ci u8 filled_tfd_size, num_fetch_chunks; 958c2ecf20Sopenharmony_ci u16 len = byte_cnt; 968c2ecf20Sopenharmony_ci __le16 bc_ent; 978c2ecf20Sopenharmony_ci 988c2ecf20Sopenharmony_ci if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) 998c2ecf20Sopenharmony_ci return; 1008c2ecf20Sopenharmony_ci 1018c2ecf20Sopenharmony_ci filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + 1028c2ecf20Sopenharmony_ci num_tbs * sizeof(struct iwl_tfh_tb); 1038c2ecf20Sopenharmony_ci /* 1048c2ecf20Sopenharmony_ci * filled_tfd_size contains the number of filled bytes in the TFD. 1058c2ecf20Sopenharmony_ci * Dividing it by 64 will give the number of chunks to fetch 1068c2ecf20Sopenharmony_ci * to SRAM- 0 for one chunk, 1 for 2 and so on. 1078c2ecf20Sopenharmony_ci * If, for example, TFD contains only 3 TBs then 32 bytes 1088c2ecf20Sopenharmony_ci * of the TFD are used, and only one chunk of 64 bytes should 1098c2ecf20Sopenharmony_ci * be fetched 1108c2ecf20Sopenharmony_ci */ 1118c2ecf20Sopenharmony_ci num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; 1128c2ecf20Sopenharmony_ci 1138c2ecf20Sopenharmony_ci if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 1148c2ecf20Sopenharmony_ci struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr; 1158c2ecf20Sopenharmony_ci 1168c2ecf20Sopenharmony_ci /* Starting from AX210, the HW expects bytes */ 1178c2ecf20Sopenharmony_ci WARN_ON(trans->txqs.bc_table_dword); 1188c2ecf20Sopenharmony_ci WARN_ON(len > 0x3FFF); 1198c2ecf20Sopenharmony_ci bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); 1208c2ecf20Sopenharmony_ci scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent; 1218c2ecf20Sopenharmony_ci } else { 1228c2ecf20Sopenharmony_ci struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; 1238c2ecf20Sopenharmony_ci 1248c2ecf20Sopenharmony_ci /* Before AX210, the HW expects DW */ 1258c2ecf20Sopenharmony_ci WARN_ON(!trans->txqs.bc_table_dword); 1268c2ecf20Sopenharmony_ci len = DIV_ROUND_UP(len, 4); 1278c2ecf20Sopenharmony_ci WARN_ON(len > 0xFFF); 1288c2ecf20Sopenharmony_ci bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); 1298c2ecf20Sopenharmony_ci scd_bc_tbl->tfd_offset[idx] = bc_ent; 1308c2ecf20Sopenharmony_ci } 1318c2ecf20Sopenharmony_ci} 1328c2ecf20Sopenharmony_ci 1338c2ecf20Sopenharmony_ci/* 1348c2ecf20Sopenharmony_ci * iwl_txq_inc_wr_ptr - Send new write index to hardware 1358c2ecf20Sopenharmony_ci */ 1368c2ecf20Sopenharmony_civoid iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) 1378c2ecf20Sopenharmony_ci{ 1388c2ecf20Sopenharmony_ci lockdep_assert_held(&txq->lock); 1398c2ecf20Sopenharmony_ci 1408c2ecf20Sopenharmony_ci IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); 1418c2ecf20Sopenharmony_ci 1428c2ecf20Sopenharmony_ci /* 1438c2ecf20Sopenharmony_ci * if not in power-save mode, uCode will never sleep when we're 1448c2ecf20Sopenharmony_ci * trying to tx (during RFKILL, we're not trying to tx). 1458c2ecf20Sopenharmony_ci */ 1468c2ecf20Sopenharmony_ci iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); 1478c2ecf20Sopenharmony_ci} 1488c2ecf20Sopenharmony_ci 1498c2ecf20Sopenharmony_cistatic u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans, 1508c2ecf20Sopenharmony_ci struct iwl_tfh_tfd *tfd) 1518c2ecf20Sopenharmony_ci{ 1528c2ecf20Sopenharmony_ci return le16_to_cpu(tfd->num_tbs) & 0x1f; 1538c2ecf20Sopenharmony_ci} 1548c2ecf20Sopenharmony_ci 1558c2ecf20Sopenharmony_civoid iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, 1568c2ecf20Sopenharmony_ci struct iwl_tfh_tfd *tfd) 1578c2ecf20Sopenharmony_ci{ 1588c2ecf20Sopenharmony_ci int i, num_tbs; 1598c2ecf20Sopenharmony_ci 1608c2ecf20Sopenharmony_ci /* Sanity check on number of chunks */ 1618c2ecf20Sopenharmony_ci num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd); 1628c2ecf20Sopenharmony_ci 1638c2ecf20Sopenharmony_ci if (num_tbs > trans->txqs.tfd.max_tbs) { 1648c2ecf20Sopenharmony_ci IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 1658c2ecf20Sopenharmony_ci return; 1668c2ecf20Sopenharmony_ci } 1678c2ecf20Sopenharmony_ci 1688c2ecf20Sopenharmony_ci /* first TB is never freed - it's the bidirectional DMA data */ 1698c2ecf20Sopenharmony_ci for (i = 1; i < num_tbs; i++) { 1708c2ecf20Sopenharmony_ci if (meta->tbs & BIT(i)) 1718c2ecf20Sopenharmony_ci dma_unmap_page(trans->dev, 1728c2ecf20Sopenharmony_ci le64_to_cpu(tfd->tbs[i].addr), 1738c2ecf20Sopenharmony_ci le16_to_cpu(tfd->tbs[i].tb_len), 1748c2ecf20Sopenharmony_ci DMA_TO_DEVICE); 1758c2ecf20Sopenharmony_ci else 1768c2ecf20Sopenharmony_ci dma_unmap_single(trans->dev, 1778c2ecf20Sopenharmony_ci le64_to_cpu(tfd->tbs[i].addr), 1788c2ecf20Sopenharmony_ci le16_to_cpu(tfd->tbs[i].tb_len), 1798c2ecf20Sopenharmony_ci DMA_TO_DEVICE); 1808c2ecf20Sopenharmony_ci } 1818c2ecf20Sopenharmony_ci 1828c2ecf20Sopenharmony_ci tfd->num_tbs = 0; 1838c2ecf20Sopenharmony_ci} 1848c2ecf20Sopenharmony_ci 1858c2ecf20Sopenharmony_civoid iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) 1868c2ecf20Sopenharmony_ci{ 1878c2ecf20Sopenharmony_ci /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 1888c2ecf20Sopenharmony_ci * idx is bounded by n_window 1898c2ecf20Sopenharmony_ci */ 1908c2ecf20Sopenharmony_ci int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); 1918c2ecf20Sopenharmony_ci 1928c2ecf20Sopenharmony_ci lockdep_assert_held(&txq->lock); 1938c2ecf20Sopenharmony_ci 1948c2ecf20Sopenharmony_ci iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, 1958c2ecf20Sopenharmony_ci iwl_txq_get_tfd(trans, txq, idx)); 1968c2ecf20Sopenharmony_ci 1978c2ecf20Sopenharmony_ci /* free SKB */ 1988c2ecf20Sopenharmony_ci if (txq->entries) { 1998c2ecf20Sopenharmony_ci struct sk_buff *skb; 2008c2ecf20Sopenharmony_ci 2018c2ecf20Sopenharmony_ci skb = txq->entries[idx].skb; 2028c2ecf20Sopenharmony_ci 2038c2ecf20Sopenharmony_ci /* Can be called from irqs-disabled context 2048c2ecf20Sopenharmony_ci * If skb is not NULL, it means that the whole queue is being 2058c2ecf20Sopenharmony_ci * freed and that the queue is not empty - free the skb 2068c2ecf20Sopenharmony_ci */ 2078c2ecf20Sopenharmony_ci if (skb) { 2088c2ecf20Sopenharmony_ci iwl_op_mode_free_skb(trans->op_mode, skb); 2098c2ecf20Sopenharmony_ci txq->entries[idx].skb = NULL; 2108c2ecf20Sopenharmony_ci } 2118c2ecf20Sopenharmony_ci } 2128c2ecf20Sopenharmony_ci} 2138c2ecf20Sopenharmony_ci 2148c2ecf20Sopenharmony_ciint iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, 2158c2ecf20Sopenharmony_ci dma_addr_t addr, u16 len) 2168c2ecf20Sopenharmony_ci{ 2178c2ecf20Sopenharmony_ci int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); 2188c2ecf20Sopenharmony_ci struct iwl_tfh_tb *tb; 2198c2ecf20Sopenharmony_ci 2208c2ecf20Sopenharmony_ci /* 2218c2ecf20Sopenharmony_ci * Only WARN here so we know about the issue, but we mess up our 2228c2ecf20Sopenharmony_ci * unmap path because not every place currently checks for errors 2238c2ecf20Sopenharmony_ci * returned from this function - it can only return an error if 2248c2ecf20Sopenharmony_ci * there's no more space, and so when we know there is enough we 2258c2ecf20Sopenharmony_ci * don't always check ... 2268c2ecf20Sopenharmony_ci */ 2278c2ecf20Sopenharmony_ci WARN(iwl_txq_crosses_4g_boundary(addr, len), 2288c2ecf20Sopenharmony_ci "possible DMA problem with iova:0x%llx, len:%d\n", 2298c2ecf20Sopenharmony_ci (unsigned long long)addr, len); 2308c2ecf20Sopenharmony_ci 2318c2ecf20Sopenharmony_ci if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) 2328c2ecf20Sopenharmony_ci return -EINVAL; 2338c2ecf20Sopenharmony_ci tb = &tfd->tbs[idx]; 2348c2ecf20Sopenharmony_ci 2358c2ecf20Sopenharmony_ci /* Each TFD can point to a maximum max_tbs Tx buffers */ 2368c2ecf20Sopenharmony_ci if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { 2378c2ecf20Sopenharmony_ci IWL_ERR(trans, "Error can not send more than %d chunks\n", 2388c2ecf20Sopenharmony_ci trans->txqs.tfd.max_tbs); 2398c2ecf20Sopenharmony_ci return -EINVAL; 2408c2ecf20Sopenharmony_ci } 2418c2ecf20Sopenharmony_ci 2428c2ecf20Sopenharmony_ci put_unaligned_le64(addr, &tb->addr); 2438c2ecf20Sopenharmony_ci tb->tb_len = cpu_to_le16(len); 2448c2ecf20Sopenharmony_ci 2458c2ecf20Sopenharmony_ci tfd->num_tbs = cpu_to_le16(idx + 1); 2468c2ecf20Sopenharmony_ci 2478c2ecf20Sopenharmony_ci return idx; 2488c2ecf20Sopenharmony_ci} 2498c2ecf20Sopenharmony_ci 2508c2ecf20Sopenharmony_cistatic struct page *get_workaround_page(struct iwl_trans *trans, 2518c2ecf20Sopenharmony_ci struct sk_buff *skb) 2528c2ecf20Sopenharmony_ci{ 2538c2ecf20Sopenharmony_ci struct page **page_ptr; 2548c2ecf20Sopenharmony_ci struct page *ret; 2558c2ecf20Sopenharmony_ci 2568c2ecf20Sopenharmony_ci page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); 2578c2ecf20Sopenharmony_ci 2588c2ecf20Sopenharmony_ci ret = alloc_page(GFP_ATOMIC); 2598c2ecf20Sopenharmony_ci if (!ret) 2608c2ecf20Sopenharmony_ci return NULL; 2618c2ecf20Sopenharmony_ci 2628c2ecf20Sopenharmony_ci /* set the chaining pointer to the previous page if there */ 2638c2ecf20Sopenharmony_ci *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; 2648c2ecf20Sopenharmony_ci *page_ptr = ret; 2658c2ecf20Sopenharmony_ci 2668c2ecf20Sopenharmony_ci return ret; 2678c2ecf20Sopenharmony_ci} 2688c2ecf20Sopenharmony_ci 2698c2ecf20Sopenharmony_ci/* 2708c2ecf20Sopenharmony_ci * Add a TB and if needed apply the FH HW bug workaround; 2718c2ecf20Sopenharmony_ci * meta != NULL indicates that it's a page mapping and we 2728c2ecf20Sopenharmony_ci * need to dma_unmap_page() and set the meta->tbs bit in 2738c2ecf20Sopenharmony_ci * this case. 2748c2ecf20Sopenharmony_ci */ 2758c2ecf20Sopenharmony_cistatic int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, 2768c2ecf20Sopenharmony_ci struct sk_buff *skb, 2778c2ecf20Sopenharmony_ci struct iwl_tfh_tfd *tfd, 2788c2ecf20Sopenharmony_ci dma_addr_t phys, void *virt, 2798c2ecf20Sopenharmony_ci u16 len, struct iwl_cmd_meta *meta) 2808c2ecf20Sopenharmony_ci{ 2818c2ecf20Sopenharmony_ci dma_addr_t oldphys = phys; 2828c2ecf20Sopenharmony_ci struct page *page; 2838c2ecf20Sopenharmony_ci int ret; 2848c2ecf20Sopenharmony_ci 2858c2ecf20Sopenharmony_ci if (unlikely(dma_mapping_error(trans->dev, phys))) 2868c2ecf20Sopenharmony_ci return -ENOMEM; 2878c2ecf20Sopenharmony_ci 2888c2ecf20Sopenharmony_ci if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) { 2898c2ecf20Sopenharmony_ci ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); 2908c2ecf20Sopenharmony_ci 2918c2ecf20Sopenharmony_ci if (ret < 0) 2928c2ecf20Sopenharmony_ci goto unmap; 2938c2ecf20Sopenharmony_ci 2948c2ecf20Sopenharmony_ci if (meta) 2958c2ecf20Sopenharmony_ci meta->tbs |= BIT(ret); 2968c2ecf20Sopenharmony_ci 2978c2ecf20Sopenharmony_ci ret = 0; 2988c2ecf20Sopenharmony_ci goto trace; 2998c2ecf20Sopenharmony_ci } 3008c2ecf20Sopenharmony_ci 3018c2ecf20Sopenharmony_ci /* 3028c2ecf20Sopenharmony_ci * Work around a hardware bug. If (as expressed in the 3038c2ecf20Sopenharmony_ci * condition above) the TB ends on a 32-bit boundary, 3048c2ecf20Sopenharmony_ci * then the next TB may be accessed with the wrong 3058c2ecf20Sopenharmony_ci * address. 3068c2ecf20Sopenharmony_ci * To work around it, copy the data elsewhere and make 3078c2ecf20Sopenharmony_ci * a new mapping for it so the device will not fail. 3088c2ecf20Sopenharmony_ci */ 3098c2ecf20Sopenharmony_ci 3108c2ecf20Sopenharmony_ci if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { 3118c2ecf20Sopenharmony_ci ret = -ENOBUFS; 3128c2ecf20Sopenharmony_ci goto unmap; 3138c2ecf20Sopenharmony_ci } 3148c2ecf20Sopenharmony_ci 3158c2ecf20Sopenharmony_ci page = get_workaround_page(trans, skb); 3168c2ecf20Sopenharmony_ci if (!page) { 3178c2ecf20Sopenharmony_ci ret = -ENOMEM; 3188c2ecf20Sopenharmony_ci goto unmap; 3198c2ecf20Sopenharmony_ci } 3208c2ecf20Sopenharmony_ci 3218c2ecf20Sopenharmony_ci memcpy(page_address(page), virt, len); 3228c2ecf20Sopenharmony_ci 3238c2ecf20Sopenharmony_ci phys = dma_map_single(trans->dev, page_address(page), len, 3248c2ecf20Sopenharmony_ci DMA_TO_DEVICE); 3258c2ecf20Sopenharmony_ci if (unlikely(dma_mapping_error(trans->dev, phys))) 3268c2ecf20Sopenharmony_ci return -ENOMEM; 3278c2ecf20Sopenharmony_ci ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); 3288c2ecf20Sopenharmony_ci if (ret < 0) { 3298c2ecf20Sopenharmony_ci /* unmap the new allocation as single */ 3308c2ecf20Sopenharmony_ci oldphys = phys; 3318c2ecf20Sopenharmony_ci meta = NULL; 3328c2ecf20Sopenharmony_ci goto unmap; 3338c2ecf20Sopenharmony_ci } 3348c2ecf20Sopenharmony_ci IWL_WARN(trans, 3358c2ecf20Sopenharmony_ci "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", 3368c2ecf20Sopenharmony_ci len, (unsigned long long)oldphys, (unsigned long long)phys); 3378c2ecf20Sopenharmony_ci 3388c2ecf20Sopenharmony_ci ret = 0; 3398c2ecf20Sopenharmony_ciunmap: 3408c2ecf20Sopenharmony_ci if (meta) 3418c2ecf20Sopenharmony_ci dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); 3428c2ecf20Sopenharmony_ci else 3438c2ecf20Sopenharmony_ci dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); 3448c2ecf20Sopenharmony_citrace: 3458c2ecf20Sopenharmony_ci trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); 3468c2ecf20Sopenharmony_ci 3478c2ecf20Sopenharmony_ci return ret; 3488c2ecf20Sopenharmony_ci} 3498c2ecf20Sopenharmony_ci 3508c2ecf20Sopenharmony_ci#ifdef CONFIG_INET 3518c2ecf20Sopenharmony_cistruct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, 3528c2ecf20Sopenharmony_ci struct sk_buff *skb) 3538c2ecf20Sopenharmony_ci{ 3548c2ecf20Sopenharmony_ci struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page); 3558c2ecf20Sopenharmony_ci struct page **page_ptr; 3568c2ecf20Sopenharmony_ci 3578c2ecf20Sopenharmony_ci page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); 3588c2ecf20Sopenharmony_ci 3598c2ecf20Sopenharmony_ci if (WARN_ON(*page_ptr)) 3608c2ecf20Sopenharmony_ci return NULL; 3618c2ecf20Sopenharmony_ci 3628c2ecf20Sopenharmony_ci if (!p->page) 3638c2ecf20Sopenharmony_ci goto alloc; 3648c2ecf20Sopenharmony_ci 3658c2ecf20Sopenharmony_ci /* 3668c2ecf20Sopenharmony_ci * Check if there's enough room on this page 3678c2ecf20Sopenharmony_ci * 3688c2ecf20Sopenharmony_ci * Note that we put a page chaining pointer *last* in the 3698c2ecf20Sopenharmony_ci * page - we need it somewhere, and if it's there then we 3708c2ecf20Sopenharmony_ci * avoid DMA mapping the last bits of the page which may 3718c2ecf20Sopenharmony_ci * trigger the 32-bit boundary hardware bug. 3728c2ecf20Sopenharmony_ci * 3738c2ecf20Sopenharmony_ci * (see also get_workaround_page() in tx-gen2.c) 3748c2ecf20Sopenharmony_ci */ 3758c2ecf20Sopenharmony_ci if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - 3768c2ecf20Sopenharmony_ci sizeof(void *)) 3778c2ecf20Sopenharmony_ci goto out; 3788c2ecf20Sopenharmony_ci 3798c2ecf20Sopenharmony_ci /* We don't have enough room on this page, get a new one. */ 3808c2ecf20Sopenharmony_ci __free_page(p->page); 3818c2ecf20Sopenharmony_ci 3828c2ecf20Sopenharmony_cialloc: 3838c2ecf20Sopenharmony_ci p->page = alloc_page(GFP_ATOMIC); 3848c2ecf20Sopenharmony_ci if (!p->page) 3858c2ecf20Sopenharmony_ci return NULL; 3868c2ecf20Sopenharmony_ci p->pos = page_address(p->page); 3878c2ecf20Sopenharmony_ci /* set the chaining pointer to NULL */ 3888c2ecf20Sopenharmony_ci *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; 3898c2ecf20Sopenharmony_ciout: 3908c2ecf20Sopenharmony_ci *page_ptr = p->page; 3918c2ecf20Sopenharmony_ci get_page(p->page); 3928c2ecf20Sopenharmony_ci return p; 3938c2ecf20Sopenharmony_ci} 3948c2ecf20Sopenharmony_ci#endif 3958c2ecf20Sopenharmony_ci 3968c2ecf20Sopenharmony_cistatic int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, 3978c2ecf20Sopenharmony_ci struct sk_buff *skb, 3988c2ecf20Sopenharmony_ci struct iwl_tfh_tfd *tfd, int start_len, 3998c2ecf20Sopenharmony_ci u8 hdr_len, 4008c2ecf20Sopenharmony_ci struct iwl_device_tx_cmd *dev_cmd) 4018c2ecf20Sopenharmony_ci{ 4028c2ecf20Sopenharmony_ci#ifdef CONFIG_INET 4038c2ecf20Sopenharmony_ci struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; 4048c2ecf20Sopenharmony_ci struct ieee80211_hdr *hdr = (void *)skb->data; 4058c2ecf20Sopenharmony_ci unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; 4068c2ecf20Sopenharmony_ci unsigned int mss = skb_shinfo(skb)->gso_size; 4078c2ecf20Sopenharmony_ci u16 length, amsdu_pad; 4088c2ecf20Sopenharmony_ci u8 *start_hdr; 4098c2ecf20Sopenharmony_ci struct iwl_tso_hdr_page *hdr_page; 4108c2ecf20Sopenharmony_ci struct tso_t tso; 4118c2ecf20Sopenharmony_ci 4128c2ecf20Sopenharmony_ci trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), 4138c2ecf20Sopenharmony_ci &dev_cmd->hdr, start_len, 0); 4148c2ecf20Sopenharmony_ci 4158c2ecf20Sopenharmony_ci ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); 4168c2ecf20Sopenharmony_ci snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); 4178c2ecf20Sopenharmony_ci total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; 4188c2ecf20Sopenharmony_ci amsdu_pad = 0; 4198c2ecf20Sopenharmony_ci 4208c2ecf20Sopenharmony_ci /* total amount of header we may need for this A-MSDU */ 4218c2ecf20Sopenharmony_ci hdr_room = DIV_ROUND_UP(total_len, mss) * 4228c2ecf20Sopenharmony_ci (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); 4238c2ecf20Sopenharmony_ci 4248c2ecf20Sopenharmony_ci /* Our device supports 9 segments at most, it will fit in 1 page */ 4258c2ecf20Sopenharmony_ci hdr_page = get_page_hdr(trans, hdr_room, skb); 4268c2ecf20Sopenharmony_ci if (!hdr_page) 4278c2ecf20Sopenharmony_ci return -ENOMEM; 4288c2ecf20Sopenharmony_ci 4298c2ecf20Sopenharmony_ci start_hdr = hdr_page->pos; 4308c2ecf20Sopenharmony_ci 4318c2ecf20Sopenharmony_ci /* 4328c2ecf20Sopenharmony_ci * Pull the ieee80211 header to be able to use TSO core, 4338c2ecf20Sopenharmony_ci * we will restore it for the tx_status flow. 4348c2ecf20Sopenharmony_ci */ 4358c2ecf20Sopenharmony_ci skb_pull(skb, hdr_len); 4368c2ecf20Sopenharmony_ci 4378c2ecf20Sopenharmony_ci /* 4388c2ecf20Sopenharmony_ci * Remove the length of all the headers that we don't actually 4398c2ecf20Sopenharmony_ci * have in the MPDU by themselves, but that we duplicate into 4408c2ecf20Sopenharmony_ci * all the different MSDUs inside the A-MSDU. 4418c2ecf20Sopenharmony_ci */ 4428c2ecf20Sopenharmony_ci le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); 4438c2ecf20Sopenharmony_ci 4448c2ecf20Sopenharmony_ci tso_start(skb, &tso); 4458c2ecf20Sopenharmony_ci 4468c2ecf20Sopenharmony_ci while (total_len) { 4478c2ecf20Sopenharmony_ci /* this is the data left for this subframe */ 4488c2ecf20Sopenharmony_ci unsigned int data_left = min_t(unsigned int, mss, total_len); 4498c2ecf20Sopenharmony_ci struct sk_buff *csum_skb = NULL; 4508c2ecf20Sopenharmony_ci unsigned int tb_len; 4518c2ecf20Sopenharmony_ci dma_addr_t tb_phys; 4528c2ecf20Sopenharmony_ci u8 *subf_hdrs_start = hdr_page->pos; 4538c2ecf20Sopenharmony_ci 4548c2ecf20Sopenharmony_ci total_len -= data_left; 4558c2ecf20Sopenharmony_ci 4568c2ecf20Sopenharmony_ci memset(hdr_page->pos, 0, amsdu_pad); 4578c2ecf20Sopenharmony_ci hdr_page->pos += amsdu_pad; 4588c2ecf20Sopenharmony_ci amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + 4598c2ecf20Sopenharmony_ci data_left)) & 0x3; 4608c2ecf20Sopenharmony_ci ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); 4618c2ecf20Sopenharmony_ci hdr_page->pos += ETH_ALEN; 4628c2ecf20Sopenharmony_ci ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); 4638c2ecf20Sopenharmony_ci hdr_page->pos += ETH_ALEN; 4648c2ecf20Sopenharmony_ci 4658c2ecf20Sopenharmony_ci length = snap_ip_tcp_hdrlen + data_left; 4668c2ecf20Sopenharmony_ci *((__be16 *)hdr_page->pos) = cpu_to_be16(length); 4678c2ecf20Sopenharmony_ci hdr_page->pos += sizeof(length); 4688c2ecf20Sopenharmony_ci 4698c2ecf20Sopenharmony_ci /* 4708c2ecf20Sopenharmony_ci * This will copy the SNAP as well which will be considered 4718c2ecf20Sopenharmony_ci * as MAC header. 4728c2ecf20Sopenharmony_ci */ 4738c2ecf20Sopenharmony_ci tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); 4748c2ecf20Sopenharmony_ci 4758c2ecf20Sopenharmony_ci hdr_page->pos += snap_ip_tcp_hdrlen; 4768c2ecf20Sopenharmony_ci 4778c2ecf20Sopenharmony_ci tb_len = hdr_page->pos - start_hdr; 4788c2ecf20Sopenharmony_ci tb_phys = dma_map_single(trans->dev, start_hdr, 4798c2ecf20Sopenharmony_ci tb_len, DMA_TO_DEVICE); 4808c2ecf20Sopenharmony_ci if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { 4818c2ecf20Sopenharmony_ci dev_kfree_skb(csum_skb); 4828c2ecf20Sopenharmony_ci goto out_err; 4838c2ecf20Sopenharmony_ci } 4848c2ecf20Sopenharmony_ci /* 4858c2ecf20Sopenharmony_ci * No need for _with_wa, this is from the TSO page and 4868c2ecf20Sopenharmony_ci * we leave some space at the end of it so can't hit 4878c2ecf20Sopenharmony_ci * the buggy scenario. 4888c2ecf20Sopenharmony_ci */ 4898c2ecf20Sopenharmony_ci iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len); 4908c2ecf20Sopenharmony_ci trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, 4918c2ecf20Sopenharmony_ci tb_phys, tb_len); 4928c2ecf20Sopenharmony_ci /* add this subframe's headers' length to the tx_cmd */ 4938c2ecf20Sopenharmony_ci le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); 4948c2ecf20Sopenharmony_ci 4958c2ecf20Sopenharmony_ci /* prepare the start_hdr for the next subframe */ 4968c2ecf20Sopenharmony_ci start_hdr = hdr_page->pos; 4978c2ecf20Sopenharmony_ci 4988c2ecf20Sopenharmony_ci /* put the payload */ 4998c2ecf20Sopenharmony_ci while (data_left) { 5008c2ecf20Sopenharmony_ci int ret; 5018c2ecf20Sopenharmony_ci 5028c2ecf20Sopenharmony_ci tb_len = min_t(unsigned int, tso.size, data_left); 5038c2ecf20Sopenharmony_ci tb_phys = dma_map_single(trans->dev, tso.data, 5048c2ecf20Sopenharmony_ci tb_len, DMA_TO_DEVICE); 5058c2ecf20Sopenharmony_ci ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, 5068c2ecf20Sopenharmony_ci tb_phys, tso.data, 5078c2ecf20Sopenharmony_ci tb_len, NULL); 5088c2ecf20Sopenharmony_ci if (ret) { 5098c2ecf20Sopenharmony_ci dev_kfree_skb(csum_skb); 5108c2ecf20Sopenharmony_ci goto out_err; 5118c2ecf20Sopenharmony_ci } 5128c2ecf20Sopenharmony_ci 5138c2ecf20Sopenharmony_ci data_left -= tb_len; 5148c2ecf20Sopenharmony_ci tso_build_data(skb, &tso, tb_len); 5158c2ecf20Sopenharmony_ci } 5168c2ecf20Sopenharmony_ci } 5178c2ecf20Sopenharmony_ci 5188c2ecf20Sopenharmony_ci /* re -add the WiFi header */ 5198c2ecf20Sopenharmony_ci skb_push(skb, hdr_len); 5208c2ecf20Sopenharmony_ci 5218c2ecf20Sopenharmony_ci return 0; 5228c2ecf20Sopenharmony_ci 5238c2ecf20Sopenharmony_ciout_err: 5248c2ecf20Sopenharmony_ci#endif 5258c2ecf20Sopenharmony_ci return -EINVAL; 5268c2ecf20Sopenharmony_ci} 5278c2ecf20Sopenharmony_ci 5288c2ecf20Sopenharmony_cistatic struct 5298c2ecf20Sopenharmony_ciiwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, 5308c2ecf20Sopenharmony_ci struct iwl_txq *txq, 5318c2ecf20Sopenharmony_ci struct iwl_device_tx_cmd *dev_cmd, 5328c2ecf20Sopenharmony_ci struct sk_buff *skb, 5338c2ecf20Sopenharmony_ci struct iwl_cmd_meta *out_meta, 5348c2ecf20Sopenharmony_ci int hdr_len, 5358c2ecf20Sopenharmony_ci int tx_cmd_len) 5368c2ecf20Sopenharmony_ci{ 5378c2ecf20Sopenharmony_ci int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 5388c2ecf20Sopenharmony_ci struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); 5398c2ecf20Sopenharmony_ci dma_addr_t tb_phys; 5408c2ecf20Sopenharmony_ci int len; 5418c2ecf20Sopenharmony_ci void *tb1_addr; 5428c2ecf20Sopenharmony_ci 5438c2ecf20Sopenharmony_ci tb_phys = iwl_txq_get_first_tb_dma(txq, idx); 5448c2ecf20Sopenharmony_ci 5458c2ecf20Sopenharmony_ci /* 5468c2ecf20Sopenharmony_ci * No need for _with_wa, the first TB allocation is aligned up 5478c2ecf20Sopenharmony_ci * to a 64-byte boundary and thus can't be at the end or cross 5488c2ecf20Sopenharmony_ci * a page boundary (much less a 2^32 boundary). 5498c2ecf20Sopenharmony_ci */ 5508c2ecf20Sopenharmony_ci iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); 5518c2ecf20Sopenharmony_ci 5528c2ecf20Sopenharmony_ci /* 5538c2ecf20Sopenharmony_ci * The second TB (tb1) points to the remainder of the TX command 5548c2ecf20Sopenharmony_ci * and the 802.11 header - dword aligned size 5558c2ecf20Sopenharmony_ci * (This calculation modifies the TX command, so do it before the 5568c2ecf20Sopenharmony_ci * setup of the first TB) 5578c2ecf20Sopenharmony_ci */ 5588c2ecf20Sopenharmony_ci len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - 5598c2ecf20Sopenharmony_ci IWL_FIRST_TB_SIZE; 5608c2ecf20Sopenharmony_ci 5618c2ecf20Sopenharmony_ci /* do not align A-MSDU to dword as the subframe header aligns it */ 5628c2ecf20Sopenharmony_ci 5638c2ecf20Sopenharmony_ci /* map the data for TB1 */ 5648c2ecf20Sopenharmony_ci tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; 5658c2ecf20Sopenharmony_ci tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); 5668c2ecf20Sopenharmony_ci if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 5678c2ecf20Sopenharmony_ci goto out_err; 5688c2ecf20Sopenharmony_ci /* 5698c2ecf20Sopenharmony_ci * No need for _with_wa(), we ensure (via alignment) that the data 5708c2ecf20Sopenharmony_ci * here can never cross or end at a page boundary. 5718c2ecf20Sopenharmony_ci */ 5728c2ecf20Sopenharmony_ci iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len); 5738c2ecf20Sopenharmony_ci 5748c2ecf20Sopenharmony_ci if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE, 5758c2ecf20Sopenharmony_ci hdr_len, dev_cmd)) 5768c2ecf20Sopenharmony_ci goto out_err; 5778c2ecf20Sopenharmony_ci 5788c2ecf20Sopenharmony_ci /* building the A-MSDU might have changed this data, memcpy it now */ 5798c2ecf20Sopenharmony_ci memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); 5808c2ecf20Sopenharmony_ci return tfd; 5818c2ecf20Sopenharmony_ci 5828c2ecf20Sopenharmony_ciout_err: 5838c2ecf20Sopenharmony_ci iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); 5848c2ecf20Sopenharmony_ci return NULL; 5858c2ecf20Sopenharmony_ci} 5868c2ecf20Sopenharmony_ci 5878c2ecf20Sopenharmony_cistatic int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, 5888c2ecf20Sopenharmony_ci struct sk_buff *skb, 5898c2ecf20Sopenharmony_ci struct iwl_tfh_tfd *tfd, 5908c2ecf20Sopenharmony_ci struct iwl_cmd_meta *out_meta) 5918c2ecf20Sopenharmony_ci{ 5928c2ecf20Sopenharmony_ci int i; 5938c2ecf20Sopenharmony_ci 5948c2ecf20Sopenharmony_ci for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 5958c2ecf20Sopenharmony_ci const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 5968c2ecf20Sopenharmony_ci dma_addr_t tb_phys; 5978c2ecf20Sopenharmony_ci unsigned int fragsz = skb_frag_size(frag); 5988c2ecf20Sopenharmony_ci int ret; 5998c2ecf20Sopenharmony_ci 6008c2ecf20Sopenharmony_ci if (!fragsz) 6018c2ecf20Sopenharmony_ci continue; 6028c2ecf20Sopenharmony_ci 6038c2ecf20Sopenharmony_ci tb_phys = skb_frag_dma_map(trans->dev, frag, 0, 6048c2ecf20Sopenharmony_ci fragsz, DMA_TO_DEVICE); 6058c2ecf20Sopenharmony_ci ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, 6068c2ecf20Sopenharmony_ci skb_frag_address(frag), 6078c2ecf20Sopenharmony_ci fragsz, out_meta); 6088c2ecf20Sopenharmony_ci if (ret) 6098c2ecf20Sopenharmony_ci return ret; 6108c2ecf20Sopenharmony_ci } 6118c2ecf20Sopenharmony_ci 6128c2ecf20Sopenharmony_ci return 0; 6138c2ecf20Sopenharmony_ci} 6148c2ecf20Sopenharmony_ci 6158c2ecf20Sopenharmony_cistatic struct 6168c2ecf20Sopenharmony_ciiwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, 6178c2ecf20Sopenharmony_ci struct iwl_txq *txq, 6188c2ecf20Sopenharmony_ci struct iwl_device_tx_cmd *dev_cmd, 6198c2ecf20Sopenharmony_ci struct sk_buff *skb, 6208c2ecf20Sopenharmony_ci struct iwl_cmd_meta *out_meta, 6218c2ecf20Sopenharmony_ci int hdr_len, 6228c2ecf20Sopenharmony_ci int tx_cmd_len, 6238c2ecf20Sopenharmony_ci bool pad) 6248c2ecf20Sopenharmony_ci{ 6258c2ecf20Sopenharmony_ci int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 6268c2ecf20Sopenharmony_ci struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); 6278c2ecf20Sopenharmony_ci dma_addr_t tb_phys; 6288c2ecf20Sopenharmony_ci int len, tb1_len, tb2_len; 6298c2ecf20Sopenharmony_ci void *tb1_addr; 6308c2ecf20Sopenharmony_ci struct sk_buff *frag; 6318c2ecf20Sopenharmony_ci 6328c2ecf20Sopenharmony_ci tb_phys = iwl_txq_get_first_tb_dma(txq, idx); 6338c2ecf20Sopenharmony_ci 6348c2ecf20Sopenharmony_ci /* The first TB points to bi-directional DMA data */ 6358c2ecf20Sopenharmony_ci memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); 6368c2ecf20Sopenharmony_ci 6378c2ecf20Sopenharmony_ci /* 6388c2ecf20Sopenharmony_ci * No need for _with_wa, the first TB allocation is aligned up 6398c2ecf20Sopenharmony_ci * to a 64-byte boundary and thus can't be at the end or cross 6408c2ecf20Sopenharmony_ci * a page boundary (much less a 2^32 boundary). 6418c2ecf20Sopenharmony_ci */ 6428c2ecf20Sopenharmony_ci iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); 6438c2ecf20Sopenharmony_ci 6448c2ecf20Sopenharmony_ci /* 6458c2ecf20Sopenharmony_ci * The second TB (tb1) points to the remainder of the TX command 6468c2ecf20Sopenharmony_ci * and the 802.11 header - dword aligned size 6478c2ecf20Sopenharmony_ci * (This calculation modifies the TX command, so do it before the 6488c2ecf20Sopenharmony_ci * setup of the first TB) 6498c2ecf20Sopenharmony_ci */ 6508c2ecf20Sopenharmony_ci len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - 6518c2ecf20Sopenharmony_ci IWL_FIRST_TB_SIZE; 6528c2ecf20Sopenharmony_ci 6538c2ecf20Sopenharmony_ci if (pad) 6548c2ecf20Sopenharmony_ci tb1_len = ALIGN(len, 4); 6558c2ecf20Sopenharmony_ci else 6568c2ecf20Sopenharmony_ci tb1_len = len; 6578c2ecf20Sopenharmony_ci 6588c2ecf20Sopenharmony_ci /* map the data for TB1 */ 6598c2ecf20Sopenharmony_ci tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; 6608c2ecf20Sopenharmony_ci tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); 6618c2ecf20Sopenharmony_ci if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 6628c2ecf20Sopenharmony_ci goto out_err; 6638c2ecf20Sopenharmony_ci /* 6648c2ecf20Sopenharmony_ci * No need for _with_wa(), we ensure (via alignment) that the data 6658c2ecf20Sopenharmony_ci * here can never cross or end at a page boundary. 6668c2ecf20Sopenharmony_ci */ 6678c2ecf20Sopenharmony_ci iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len); 6688c2ecf20Sopenharmony_ci trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, 6698c2ecf20Sopenharmony_ci IWL_FIRST_TB_SIZE + tb1_len, hdr_len); 6708c2ecf20Sopenharmony_ci 6718c2ecf20Sopenharmony_ci /* set up TFD's third entry to point to remainder of skb's head */ 6728c2ecf20Sopenharmony_ci tb2_len = skb_headlen(skb) - hdr_len; 6738c2ecf20Sopenharmony_ci 6748c2ecf20Sopenharmony_ci if (tb2_len > 0) { 6758c2ecf20Sopenharmony_ci int ret; 6768c2ecf20Sopenharmony_ci 6778c2ecf20Sopenharmony_ci tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, 6788c2ecf20Sopenharmony_ci tb2_len, DMA_TO_DEVICE); 6798c2ecf20Sopenharmony_ci ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, 6808c2ecf20Sopenharmony_ci skb->data + hdr_len, tb2_len, 6818c2ecf20Sopenharmony_ci NULL); 6828c2ecf20Sopenharmony_ci if (ret) 6838c2ecf20Sopenharmony_ci goto out_err; 6848c2ecf20Sopenharmony_ci } 6858c2ecf20Sopenharmony_ci 6868c2ecf20Sopenharmony_ci if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta)) 6878c2ecf20Sopenharmony_ci goto out_err; 6888c2ecf20Sopenharmony_ci 6898c2ecf20Sopenharmony_ci skb_walk_frags(skb, frag) { 6908c2ecf20Sopenharmony_ci int ret; 6918c2ecf20Sopenharmony_ci 6928c2ecf20Sopenharmony_ci tb_phys = dma_map_single(trans->dev, frag->data, 6938c2ecf20Sopenharmony_ci skb_headlen(frag), DMA_TO_DEVICE); 6948c2ecf20Sopenharmony_ci ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, 6958c2ecf20Sopenharmony_ci frag->data, 6968c2ecf20Sopenharmony_ci skb_headlen(frag), NULL); 6978c2ecf20Sopenharmony_ci if (ret) 6988c2ecf20Sopenharmony_ci goto out_err; 6998c2ecf20Sopenharmony_ci if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta)) 7008c2ecf20Sopenharmony_ci goto out_err; 7018c2ecf20Sopenharmony_ci } 7028c2ecf20Sopenharmony_ci 7038c2ecf20Sopenharmony_ci return tfd; 7048c2ecf20Sopenharmony_ci 7058c2ecf20Sopenharmony_ciout_err: 7068c2ecf20Sopenharmony_ci iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); 7078c2ecf20Sopenharmony_ci return NULL; 7088c2ecf20Sopenharmony_ci} 7098c2ecf20Sopenharmony_ci 7108c2ecf20Sopenharmony_cistatic 7118c2ecf20Sopenharmony_cistruct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans, 7128c2ecf20Sopenharmony_ci struct iwl_txq *txq, 7138c2ecf20Sopenharmony_ci struct iwl_device_tx_cmd *dev_cmd, 7148c2ecf20Sopenharmony_ci struct sk_buff *skb, 7158c2ecf20Sopenharmony_ci struct iwl_cmd_meta *out_meta) 7168c2ecf20Sopenharmony_ci{ 7178c2ecf20Sopenharmony_ci struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 7188c2ecf20Sopenharmony_ci int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 7198c2ecf20Sopenharmony_ci struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); 7208c2ecf20Sopenharmony_ci int len, hdr_len; 7218c2ecf20Sopenharmony_ci bool amsdu; 7228c2ecf20Sopenharmony_ci 7238c2ecf20Sopenharmony_ci /* There must be data left over for TB1 or this code must be changed */ 7248c2ecf20Sopenharmony_ci BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); 7258c2ecf20Sopenharmony_ci 7268c2ecf20Sopenharmony_ci memset(tfd, 0, sizeof(*tfd)); 7278c2ecf20Sopenharmony_ci 7288c2ecf20Sopenharmony_ci if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) 7298c2ecf20Sopenharmony_ci len = sizeof(struct iwl_tx_cmd_gen2); 7308c2ecf20Sopenharmony_ci else 7318c2ecf20Sopenharmony_ci len = sizeof(struct iwl_tx_cmd_gen3); 7328c2ecf20Sopenharmony_ci 7338c2ecf20Sopenharmony_ci amsdu = ieee80211_is_data_qos(hdr->frame_control) && 7348c2ecf20Sopenharmony_ci (*ieee80211_get_qos_ctl(hdr) & 7358c2ecf20Sopenharmony_ci IEEE80211_QOS_CTL_A_MSDU_PRESENT); 7368c2ecf20Sopenharmony_ci 7378c2ecf20Sopenharmony_ci hdr_len = ieee80211_hdrlen(hdr->frame_control); 7388c2ecf20Sopenharmony_ci 7398c2ecf20Sopenharmony_ci /* 7408c2ecf20Sopenharmony_ci * Only build A-MSDUs here if doing so by GSO, otherwise it may be 7418c2ecf20Sopenharmony_ci * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been 7428c2ecf20Sopenharmony_ci * built in the higher layers already. 7438c2ecf20Sopenharmony_ci */ 7448c2ecf20Sopenharmony_ci if (amsdu && skb_shinfo(skb)->gso_size) 7458c2ecf20Sopenharmony_ci return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, 7468c2ecf20Sopenharmony_ci out_meta, hdr_len, len); 7478c2ecf20Sopenharmony_ci return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, 7488c2ecf20Sopenharmony_ci hdr_len, len, !amsdu); 7498c2ecf20Sopenharmony_ci} 7508c2ecf20Sopenharmony_ci 7518c2ecf20Sopenharmony_ciint iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q) 7528c2ecf20Sopenharmony_ci{ 7538c2ecf20Sopenharmony_ci unsigned int max; 7548c2ecf20Sopenharmony_ci unsigned int used; 7558c2ecf20Sopenharmony_ci 7568c2ecf20Sopenharmony_ci /* 7578c2ecf20Sopenharmony_ci * To avoid ambiguity between empty and completely full queues, there 7588c2ecf20Sopenharmony_ci * should always be less than max_tfd_queue_size elements in the queue. 7598c2ecf20Sopenharmony_ci * If q->n_window is smaller than max_tfd_queue_size, there is no need 7608c2ecf20Sopenharmony_ci * to reserve any queue entries for this purpose. 7618c2ecf20Sopenharmony_ci */ 7628c2ecf20Sopenharmony_ci if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) 7638c2ecf20Sopenharmony_ci max = q->n_window; 7648c2ecf20Sopenharmony_ci else 7658c2ecf20Sopenharmony_ci max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; 7668c2ecf20Sopenharmony_ci 7678c2ecf20Sopenharmony_ci /* 7688c2ecf20Sopenharmony_ci * max_tfd_queue_size is a power of 2, so the following is equivalent to 7698c2ecf20Sopenharmony_ci * modulo by max_tfd_queue_size and is well defined. 7708c2ecf20Sopenharmony_ci */ 7718c2ecf20Sopenharmony_ci used = (q->write_ptr - q->read_ptr) & 7728c2ecf20Sopenharmony_ci (trans->trans_cfg->base_params->max_tfd_queue_size - 1); 7738c2ecf20Sopenharmony_ci 7748c2ecf20Sopenharmony_ci if (WARN_ON(used > max)) 7758c2ecf20Sopenharmony_ci return 0; 7768c2ecf20Sopenharmony_ci 7778c2ecf20Sopenharmony_ci return max - used; 7788c2ecf20Sopenharmony_ci} 7798c2ecf20Sopenharmony_ci 7808c2ecf20Sopenharmony_ciint iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, 7818c2ecf20Sopenharmony_ci struct iwl_device_tx_cmd *dev_cmd, int txq_id) 7828c2ecf20Sopenharmony_ci{ 7838c2ecf20Sopenharmony_ci struct iwl_cmd_meta *out_meta; 7848c2ecf20Sopenharmony_ci struct iwl_txq *txq = trans->txqs.txq[txq_id]; 7858c2ecf20Sopenharmony_ci u16 cmd_len; 7868c2ecf20Sopenharmony_ci int idx; 7878c2ecf20Sopenharmony_ci void *tfd; 7888c2ecf20Sopenharmony_ci 7898c2ecf20Sopenharmony_ci if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, 7908c2ecf20Sopenharmony_ci "queue %d out of range", txq_id)) 7918c2ecf20Sopenharmony_ci return -EINVAL; 7928c2ecf20Sopenharmony_ci 7938c2ecf20Sopenharmony_ci if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), 7948c2ecf20Sopenharmony_ci "TX on unused queue %d\n", txq_id)) 7958c2ecf20Sopenharmony_ci return -EINVAL; 7968c2ecf20Sopenharmony_ci 7978c2ecf20Sopenharmony_ci if (skb_is_nonlinear(skb) && 7988c2ecf20Sopenharmony_ci skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && 7998c2ecf20Sopenharmony_ci __skb_linearize(skb)) 8008c2ecf20Sopenharmony_ci return -ENOMEM; 8018c2ecf20Sopenharmony_ci 8028c2ecf20Sopenharmony_ci spin_lock(&txq->lock); 8038c2ecf20Sopenharmony_ci 8048c2ecf20Sopenharmony_ci if (iwl_txq_space(trans, txq) < txq->high_mark) { 8058c2ecf20Sopenharmony_ci iwl_txq_stop(trans, txq); 8068c2ecf20Sopenharmony_ci 8078c2ecf20Sopenharmony_ci /* don't put the packet on the ring, if there is no room */ 8088c2ecf20Sopenharmony_ci if (unlikely(iwl_txq_space(trans, txq) < 3)) { 8098c2ecf20Sopenharmony_ci struct iwl_device_tx_cmd **dev_cmd_ptr; 8108c2ecf20Sopenharmony_ci 8118c2ecf20Sopenharmony_ci dev_cmd_ptr = (void *)((u8 *)skb->cb + 8128c2ecf20Sopenharmony_ci trans->txqs.dev_cmd_offs); 8138c2ecf20Sopenharmony_ci 8148c2ecf20Sopenharmony_ci *dev_cmd_ptr = dev_cmd; 8158c2ecf20Sopenharmony_ci __skb_queue_tail(&txq->overflow_q, skb); 8168c2ecf20Sopenharmony_ci spin_unlock(&txq->lock); 8178c2ecf20Sopenharmony_ci return 0; 8188c2ecf20Sopenharmony_ci } 8198c2ecf20Sopenharmony_ci } 8208c2ecf20Sopenharmony_ci 8218c2ecf20Sopenharmony_ci idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 8228c2ecf20Sopenharmony_ci 8238c2ecf20Sopenharmony_ci /* Set up driver data for this TFD */ 8248c2ecf20Sopenharmony_ci txq->entries[idx].skb = skb; 8258c2ecf20Sopenharmony_ci txq->entries[idx].cmd = dev_cmd; 8268c2ecf20Sopenharmony_ci 8278c2ecf20Sopenharmony_ci dev_cmd->hdr.sequence = 8288c2ecf20Sopenharmony_ci cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 8298c2ecf20Sopenharmony_ci INDEX_TO_SEQ(idx))); 8308c2ecf20Sopenharmony_ci 8318c2ecf20Sopenharmony_ci /* Set up first empty entry in queue's array of Tx/cmd buffers */ 8328c2ecf20Sopenharmony_ci out_meta = &txq->entries[idx].meta; 8338c2ecf20Sopenharmony_ci out_meta->flags = 0; 8348c2ecf20Sopenharmony_ci 8358c2ecf20Sopenharmony_ci tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); 8368c2ecf20Sopenharmony_ci if (!tfd) { 8378c2ecf20Sopenharmony_ci spin_unlock(&txq->lock); 8388c2ecf20Sopenharmony_ci return -1; 8398c2ecf20Sopenharmony_ci } 8408c2ecf20Sopenharmony_ci 8418c2ecf20Sopenharmony_ci if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 8428c2ecf20Sopenharmony_ci struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = 8438c2ecf20Sopenharmony_ci (void *)dev_cmd->payload; 8448c2ecf20Sopenharmony_ci 8458c2ecf20Sopenharmony_ci cmd_len = le16_to_cpu(tx_cmd_gen3->len); 8468c2ecf20Sopenharmony_ci } else { 8478c2ecf20Sopenharmony_ci struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = 8488c2ecf20Sopenharmony_ci (void *)dev_cmd->payload; 8498c2ecf20Sopenharmony_ci 8508c2ecf20Sopenharmony_ci cmd_len = le16_to_cpu(tx_cmd_gen2->len); 8518c2ecf20Sopenharmony_ci } 8528c2ecf20Sopenharmony_ci 8538c2ecf20Sopenharmony_ci /* Set up entry for this TFD in Tx byte-count array */ 8548c2ecf20Sopenharmony_ci iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len, 8558c2ecf20Sopenharmony_ci iwl_txq_gen2_get_num_tbs(trans, tfd)); 8568c2ecf20Sopenharmony_ci 8578c2ecf20Sopenharmony_ci /* start timer if queue currently empty */ 8588c2ecf20Sopenharmony_ci if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) 8598c2ecf20Sopenharmony_ci mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 8608c2ecf20Sopenharmony_ci 8618c2ecf20Sopenharmony_ci /* Tell device the write index *just past* this latest filled TFD */ 8628c2ecf20Sopenharmony_ci txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); 8638c2ecf20Sopenharmony_ci iwl_txq_inc_wr_ptr(trans, txq); 8648c2ecf20Sopenharmony_ci /* 8658c2ecf20Sopenharmony_ci * At this point the frame is "transmitted" successfully 8668c2ecf20Sopenharmony_ci * and we will get a TX status notification eventually. 8678c2ecf20Sopenharmony_ci */ 8688c2ecf20Sopenharmony_ci spin_unlock(&txq->lock); 8698c2ecf20Sopenharmony_ci return 0; 8708c2ecf20Sopenharmony_ci} 8718c2ecf20Sopenharmony_ci 8728c2ecf20Sopenharmony_ci/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 8738c2ecf20Sopenharmony_ci 8748c2ecf20Sopenharmony_ci/* 8758c2ecf20Sopenharmony_ci * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's 8768c2ecf20Sopenharmony_ci */ 8778c2ecf20Sopenharmony_civoid iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id) 8788c2ecf20Sopenharmony_ci{ 8798c2ecf20Sopenharmony_ci struct iwl_txq *txq = trans->txqs.txq[txq_id]; 8808c2ecf20Sopenharmony_ci 8818c2ecf20Sopenharmony_ci spin_lock_bh(&txq->lock); 8828c2ecf20Sopenharmony_ci while (txq->write_ptr != txq->read_ptr) { 8838c2ecf20Sopenharmony_ci IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", 8848c2ecf20Sopenharmony_ci txq_id, txq->read_ptr); 8858c2ecf20Sopenharmony_ci 8868c2ecf20Sopenharmony_ci if (txq_id != trans->txqs.cmd.q_id) { 8878c2ecf20Sopenharmony_ci int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); 8888c2ecf20Sopenharmony_ci struct sk_buff *skb = txq->entries[idx].skb; 8898c2ecf20Sopenharmony_ci 8908c2ecf20Sopenharmony_ci if (!WARN_ON_ONCE(!skb)) 8918c2ecf20Sopenharmony_ci iwl_txq_free_tso_page(trans, skb); 8928c2ecf20Sopenharmony_ci } 8938c2ecf20Sopenharmony_ci iwl_txq_gen2_free_tfd(trans, txq); 8948c2ecf20Sopenharmony_ci txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); 8958c2ecf20Sopenharmony_ci } 8968c2ecf20Sopenharmony_ci 8978c2ecf20Sopenharmony_ci while (!skb_queue_empty(&txq->overflow_q)) { 8988c2ecf20Sopenharmony_ci struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); 8998c2ecf20Sopenharmony_ci 9008c2ecf20Sopenharmony_ci iwl_op_mode_free_skb(trans->op_mode, skb); 9018c2ecf20Sopenharmony_ci } 9028c2ecf20Sopenharmony_ci 9038c2ecf20Sopenharmony_ci spin_unlock_bh(&txq->lock); 9048c2ecf20Sopenharmony_ci 9058c2ecf20Sopenharmony_ci /* just in case - this queue may have been stopped */ 9068c2ecf20Sopenharmony_ci iwl_wake_queue(trans, txq); 9078c2ecf20Sopenharmony_ci} 9088c2ecf20Sopenharmony_ci 9098c2ecf20Sopenharmony_cistatic void iwl_txq_gen2_free_memory(struct iwl_trans *trans, 9108c2ecf20Sopenharmony_ci struct iwl_txq *txq) 9118c2ecf20Sopenharmony_ci{ 9128c2ecf20Sopenharmony_ci struct device *dev = trans->dev; 9138c2ecf20Sopenharmony_ci 9148c2ecf20Sopenharmony_ci /* De-alloc circular buffer of TFDs */ 9158c2ecf20Sopenharmony_ci if (txq->tfds) { 9168c2ecf20Sopenharmony_ci dma_free_coherent(dev, 9178c2ecf20Sopenharmony_ci trans->txqs.tfd.size * txq->n_window, 9188c2ecf20Sopenharmony_ci txq->tfds, txq->dma_addr); 9198c2ecf20Sopenharmony_ci dma_free_coherent(dev, 9208c2ecf20Sopenharmony_ci sizeof(*txq->first_tb_bufs) * txq->n_window, 9218c2ecf20Sopenharmony_ci txq->first_tb_bufs, txq->first_tb_dma); 9228c2ecf20Sopenharmony_ci } 9238c2ecf20Sopenharmony_ci 9248c2ecf20Sopenharmony_ci kfree(txq->entries); 9258c2ecf20Sopenharmony_ci if (txq->bc_tbl.addr) 9268c2ecf20Sopenharmony_ci dma_pool_free(trans->txqs.bc_pool, 9278c2ecf20Sopenharmony_ci txq->bc_tbl.addr, txq->bc_tbl.dma); 9288c2ecf20Sopenharmony_ci kfree(txq); 9298c2ecf20Sopenharmony_ci} 9308c2ecf20Sopenharmony_ci 9318c2ecf20Sopenharmony_ci/* 9328c2ecf20Sopenharmony_ci * iwl_pcie_txq_free - Deallocate DMA queue. 9338c2ecf20Sopenharmony_ci * @txq: Transmit queue to deallocate. 9348c2ecf20Sopenharmony_ci * 9358c2ecf20Sopenharmony_ci * Empty queue by removing and destroying all BD's. 9368c2ecf20Sopenharmony_ci * Free all buffers. 9378c2ecf20Sopenharmony_ci * 0-fill, but do not free "txq" descriptor structure. 9388c2ecf20Sopenharmony_ci */ 9398c2ecf20Sopenharmony_cistatic void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id) 9408c2ecf20Sopenharmony_ci{ 9418c2ecf20Sopenharmony_ci struct iwl_txq *txq; 9428c2ecf20Sopenharmony_ci int i; 9438c2ecf20Sopenharmony_ci 9448c2ecf20Sopenharmony_ci if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, 9458c2ecf20Sopenharmony_ci "queue %d out of range", txq_id)) 9468c2ecf20Sopenharmony_ci return; 9478c2ecf20Sopenharmony_ci 9488c2ecf20Sopenharmony_ci txq = trans->txqs.txq[txq_id]; 9498c2ecf20Sopenharmony_ci 9508c2ecf20Sopenharmony_ci if (WARN_ON(!txq)) 9518c2ecf20Sopenharmony_ci return; 9528c2ecf20Sopenharmony_ci 9538c2ecf20Sopenharmony_ci iwl_txq_gen2_unmap(trans, txq_id); 9548c2ecf20Sopenharmony_ci 9558c2ecf20Sopenharmony_ci /* De-alloc array of command/tx buffers */ 9568c2ecf20Sopenharmony_ci if (txq_id == trans->txqs.cmd.q_id) 9578c2ecf20Sopenharmony_ci for (i = 0; i < txq->n_window; i++) { 9588c2ecf20Sopenharmony_ci kfree_sensitive(txq->entries[i].cmd); 9598c2ecf20Sopenharmony_ci kfree_sensitive(txq->entries[i].free_buf); 9608c2ecf20Sopenharmony_ci } 9618c2ecf20Sopenharmony_ci del_timer_sync(&txq->stuck_timer); 9628c2ecf20Sopenharmony_ci 9638c2ecf20Sopenharmony_ci iwl_txq_gen2_free_memory(trans, txq); 9648c2ecf20Sopenharmony_ci 9658c2ecf20Sopenharmony_ci trans->txqs.txq[txq_id] = NULL; 9668c2ecf20Sopenharmony_ci 9678c2ecf20Sopenharmony_ci clear_bit(txq_id, trans->txqs.queue_used); 9688c2ecf20Sopenharmony_ci} 9698c2ecf20Sopenharmony_ci 9708c2ecf20Sopenharmony_ci/* 9718c2ecf20Sopenharmony_ci * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 9728c2ecf20Sopenharmony_ci */ 9738c2ecf20Sopenharmony_cistatic int iwl_queue_init(struct iwl_txq *q, int slots_num) 9748c2ecf20Sopenharmony_ci{ 9758c2ecf20Sopenharmony_ci q->n_window = slots_num; 9768c2ecf20Sopenharmony_ci 9778c2ecf20Sopenharmony_ci /* slots_num must be power-of-two size, otherwise 9788c2ecf20Sopenharmony_ci * iwl_txq_get_cmd_index is broken. */ 9798c2ecf20Sopenharmony_ci if (WARN_ON(!is_power_of_2(slots_num))) 9808c2ecf20Sopenharmony_ci return -EINVAL; 9818c2ecf20Sopenharmony_ci 9828c2ecf20Sopenharmony_ci q->low_mark = q->n_window / 4; 9838c2ecf20Sopenharmony_ci if (q->low_mark < 4) 9848c2ecf20Sopenharmony_ci q->low_mark = 4; 9858c2ecf20Sopenharmony_ci 9868c2ecf20Sopenharmony_ci q->high_mark = q->n_window / 8; 9878c2ecf20Sopenharmony_ci if (q->high_mark < 2) 9888c2ecf20Sopenharmony_ci q->high_mark = 2; 9898c2ecf20Sopenharmony_ci 9908c2ecf20Sopenharmony_ci q->write_ptr = 0; 9918c2ecf20Sopenharmony_ci q->read_ptr = 0; 9928c2ecf20Sopenharmony_ci 9938c2ecf20Sopenharmony_ci return 0; 9948c2ecf20Sopenharmony_ci} 9958c2ecf20Sopenharmony_ci 9968c2ecf20Sopenharmony_ciint iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, 9978c2ecf20Sopenharmony_ci bool cmd_queue) 9988c2ecf20Sopenharmony_ci{ 9998c2ecf20Sopenharmony_ci int ret; 10008c2ecf20Sopenharmony_ci u32 tfd_queue_max_size = 10018c2ecf20Sopenharmony_ci trans->trans_cfg->base_params->max_tfd_queue_size; 10028c2ecf20Sopenharmony_ci 10038c2ecf20Sopenharmony_ci txq->need_update = false; 10048c2ecf20Sopenharmony_ci 10058c2ecf20Sopenharmony_ci /* max_tfd_queue_size must be power-of-two size, otherwise 10068c2ecf20Sopenharmony_ci * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */ 10078c2ecf20Sopenharmony_ci if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), 10088c2ecf20Sopenharmony_ci "Max tfd queue size must be a power of two, but is %d", 10098c2ecf20Sopenharmony_ci tfd_queue_max_size)) 10108c2ecf20Sopenharmony_ci return -EINVAL; 10118c2ecf20Sopenharmony_ci 10128c2ecf20Sopenharmony_ci /* Initialize queue's high/low-water marks, and head/tail indexes */ 10138c2ecf20Sopenharmony_ci ret = iwl_queue_init(txq, slots_num); 10148c2ecf20Sopenharmony_ci if (ret) 10158c2ecf20Sopenharmony_ci return ret; 10168c2ecf20Sopenharmony_ci 10178c2ecf20Sopenharmony_ci spin_lock_init(&txq->lock); 10188c2ecf20Sopenharmony_ci 10198c2ecf20Sopenharmony_ci if (cmd_queue) { 10208c2ecf20Sopenharmony_ci static struct lock_class_key iwl_txq_cmd_queue_lock_class; 10218c2ecf20Sopenharmony_ci 10228c2ecf20Sopenharmony_ci lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); 10238c2ecf20Sopenharmony_ci } 10248c2ecf20Sopenharmony_ci 10258c2ecf20Sopenharmony_ci __skb_queue_head_init(&txq->overflow_q); 10268c2ecf20Sopenharmony_ci 10278c2ecf20Sopenharmony_ci return 0; 10288c2ecf20Sopenharmony_ci} 10298c2ecf20Sopenharmony_ci 10308c2ecf20Sopenharmony_civoid iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb) 10318c2ecf20Sopenharmony_ci{ 10328c2ecf20Sopenharmony_ci struct page **page_ptr; 10338c2ecf20Sopenharmony_ci struct page *next; 10348c2ecf20Sopenharmony_ci 10358c2ecf20Sopenharmony_ci page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); 10368c2ecf20Sopenharmony_ci next = *page_ptr; 10378c2ecf20Sopenharmony_ci *page_ptr = NULL; 10388c2ecf20Sopenharmony_ci 10398c2ecf20Sopenharmony_ci while (next) { 10408c2ecf20Sopenharmony_ci struct page *tmp = next; 10418c2ecf20Sopenharmony_ci 10428c2ecf20Sopenharmony_ci next = *(void **)(page_address(next) + PAGE_SIZE - 10438c2ecf20Sopenharmony_ci sizeof(void *)); 10448c2ecf20Sopenharmony_ci __free_page(tmp); 10458c2ecf20Sopenharmony_ci } 10468c2ecf20Sopenharmony_ci} 10478c2ecf20Sopenharmony_ci 10488c2ecf20Sopenharmony_civoid iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) 10498c2ecf20Sopenharmony_ci{ 10508c2ecf20Sopenharmony_ci u32 txq_id = txq->id; 10518c2ecf20Sopenharmony_ci u32 status; 10528c2ecf20Sopenharmony_ci bool active; 10538c2ecf20Sopenharmony_ci u8 fifo; 10548c2ecf20Sopenharmony_ci 10558c2ecf20Sopenharmony_ci if (trans->trans_cfg->use_tfh) { 10568c2ecf20Sopenharmony_ci IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, 10578c2ecf20Sopenharmony_ci txq->read_ptr, txq->write_ptr); 10588c2ecf20Sopenharmony_ci /* TODO: access new SCD registers and dump them */ 10598c2ecf20Sopenharmony_ci return; 10608c2ecf20Sopenharmony_ci } 10618c2ecf20Sopenharmony_ci 10628c2ecf20Sopenharmony_ci status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); 10638c2ecf20Sopenharmony_ci fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; 10648c2ecf20Sopenharmony_ci active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); 10658c2ecf20Sopenharmony_ci 10668c2ecf20Sopenharmony_ci IWL_ERR(trans, 10678c2ecf20Sopenharmony_ci "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", 10688c2ecf20Sopenharmony_ci txq_id, active ? "" : "in", fifo, 10698c2ecf20Sopenharmony_ci jiffies_to_msecs(txq->wd_timeout), 10708c2ecf20Sopenharmony_ci txq->read_ptr, txq->write_ptr, 10718c2ecf20Sopenharmony_ci iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & 10728c2ecf20Sopenharmony_ci (trans->trans_cfg->base_params->max_tfd_queue_size - 1), 10738c2ecf20Sopenharmony_ci iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & 10748c2ecf20Sopenharmony_ci (trans->trans_cfg->base_params->max_tfd_queue_size - 1), 10758c2ecf20Sopenharmony_ci iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); 10768c2ecf20Sopenharmony_ci} 10778c2ecf20Sopenharmony_ci 10788c2ecf20Sopenharmony_cistatic void iwl_txq_stuck_timer(struct timer_list *t) 10798c2ecf20Sopenharmony_ci{ 10808c2ecf20Sopenharmony_ci struct iwl_txq *txq = from_timer(txq, t, stuck_timer); 10818c2ecf20Sopenharmony_ci struct iwl_trans *trans = txq->trans; 10828c2ecf20Sopenharmony_ci 10838c2ecf20Sopenharmony_ci spin_lock(&txq->lock); 10848c2ecf20Sopenharmony_ci /* check if triggered erroneously */ 10858c2ecf20Sopenharmony_ci if (txq->read_ptr == txq->write_ptr) { 10868c2ecf20Sopenharmony_ci spin_unlock(&txq->lock); 10878c2ecf20Sopenharmony_ci return; 10888c2ecf20Sopenharmony_ci } 10898c2ecf20Sopenharmony_ci spin_unlock(&txq->lock); 10908c2ecf20Sopenharmony_ci 10918c2ecf20Sopenharmony_ci iwl_txq_log_scd_error(trans, txq); 10928c2ecf20Sopenharmony_ci 10938c2ecf20Sopenharmony_ci iwl_force_nmi(trans); 10948c2ecf20Sopenharmony_ci} 10958c2ecf20Sopenharmony_ci 10968c2ecf20Sopenharmony_ciint iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, 10978c2ecf20Sopenharmony_ci bool cmd_queue) 10988c2ecf20Sopenharmony_ci{ 10998c2ecf20Sopenharmony_ci size_t tfd_sz = trans->txqs.tfd.size * 11008c2ecf20Sopenharmony_ci trans->trans_cfg->base_params->max_tfd_queue_size; 11018c2ecf20Sopenharmony_ci size_t tb0_buf_sz; 11028c2ecf20Sopenharmony_ci int i; 11038c2ecf20Sopenharmony_ci 11048c2ecf20Sopenharmony_ci if (WARN_ON(txq->entries || txq->tfds)) 11058c2ecf20Sopenharmony_ci return -EINVAL; 11068c2ecf20Sopenharmony_ci 11078c2ecf20Sopenharmony_ci if (trans->trans_cfg->use_tfh) 11088c2ecf20Sopenharmony_ci tfd_sz = trans->txqs.tfd.size * slots_num; 11098c2ecf20Sopenharmony_ci 11108c2ecf20Sopenharmony_ci timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); 11118c2ecf20Sopenharmony_ci txq->trans = trans; 11128c2ecf20Sopenharmony_ci 11138c2ecf20Sopenharmony_ci txq->n_window = slots_num; 11148c2ecf20Sopenharmony_ci 11158c2ecf20Sopenharmony_ci txq->entries = kcalloc(slots_num, 11168c2ecf20Sopenharmony_ci sizeof(struct iwl_pcie_txq_entry), 11178c2ecf20Sopenharmony_ci GFP_KERNEL); 11188c2ecf20Sopenharmony_ci 11198c2ecf20Sopenharmony_ci if (!txq->entries) 11208c2ecf20Sopenharmony_ci goto error; 11218c2ecf20Sopenharmony_ci 11228c2ecf20Sopenharmony_ci if (cmd_queue) 11238c2ecf20Sopenharmony_ci for (i = 0; i < slots_num; i++) { 11248c2ecf20Sopenharmony_ci txq->entries[i].cmd = 11258c2ecf20Sopenharmony_ci kmalloc(sizeof(struct iwl_device_cmd), 11268c2ecf20Sopenharmony_ci GFP_KERNEL); 11278c2ecf20Sopenharmony_ci if (!txq->entries[i].cmd) 11288c2ecf20Sopenharmony_ci goto error; 11298c2ecf20Sopenharmony_ci } 11308c2ecf20Sopenharmony_ci 11318c2ecf20Sopenharmony_ci /* Circular buffer of transmit frame descriptors (TFDs), 11328c2ecf20Sopenharmony_ci * shared with device */ 11338c2ecf20Sopenharmony_ci txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 11348c2ecf20Sopenharmony_ci &txq->dma_addr, GFP_KERNEL); 11358c2ecf20Sopenharmony_ci if (!txq->tfds) 11368c2ecf20Sopenharmony_ci goto error; 11378c2ecf20Sopenharmony_ci 11388c2ecf20Sopenharmony_ci BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN); 11398c2ecf20Sopenharmony_ci 11408c2ecf20Sopenharmony_ci tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; 11418c2ecf20Sopenharmony_ci 11428c2ecf20Sopenharmony_ci txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, 11438c2ecf20Sopenharmony_ci &txq->first_tb_dma, 11448c2ecf20Sopenharmony_ci GFP_KERNEL); 11458c2ecf20Sopenharmony_ci if (!txq->first_tb_bufs) 11468c2ecf20Sopenharmony_ci goto err_free_tfds; 11478c2ecf20Sopenharmony_ci 11488c2ecf20Sopenharmony_ci return 0; 11498c2ecf20Sopenharmony_cierr_free_tfds: 11508c2ecf20Sopenharmony_ci dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); 11518c2ecf20Sopenharmony_ci txq->tfds = NULL; 11528c2ecf20Sopenharmony_cierror: 11538c2ecf20Sopenharmony_ci if (txq->entries && cmd_queue) 11548c2ecf20Sopenharmony_ci for (i = 0; i < slots_num; i++) 11558c2ecf20Sopenharmony_ci kfree(txq->entries[i].cmd); 11568c2ecf20Sopenharmony_ci kfree(txq->entries); 11578c2ecf20Sopenharmony_ci txq->entries = NULL; 11588c2ecf20Sopenharmony_ci 11598c2ecf20Sopenharmony_ci return -ENOMEM; 11608c2ecf20Sopenharmony_ci} 11618c2ecf20Sopenharmony_ci 11628c2ecf20Sopenharmony_cistatic int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, 11638c2ecf20Sopenharmony_ci struct iwl_txq **intxq, int size, 11648c2ecf20Sopenharmony_ci unsigned int timeout) 11658c2ecf20Sopenharmony_ci{ 11668c2ecf20Sopenharmony_ci size_t bc_tbl_size, bc_tbl_entries; 11678c2ecf20Sopenharmony_ci struct iwl_txq *txq; 11688c2ecf20Sopenharmony_ci int ret; 11698c2ecf20Sopenharmony_ci 11708c2ecf20Sopenharmony_ci WARN_ON(!trans->txqs.bc_tbl_size); 11718c2ecf20Sopenharmony_ci 11728c2ecf20Sopenharmony_ci bc_tbl_size = trans->txqs.bc_tbl_size; 11738c2ecf20Sopenharmony_ci bc_tbl_entries = bc_tbl_size / sizeof(u16); 11748c2ecf20Sopenharmony_ci 11758c2ecf20Sopenharmony_ci if (WARN_ON(size > bc_tbl_entries)) 11768c2ecf20Sopenharmony_ci return -EINVAL; 11778c2ecf20Sopenharmony_ci 11788c2ecf20Sopenharmony_ci txq = kzalloc(sizeof(*txq), GFP_KERNEL); 11798c2ecf20Sopenharmony_ci if (!txq) 11808c2ecf20Sopenharmony_ci return -ENOMEM; 11818c2ecf20Sopenharmony_ci 11828c2ecf20Sopenharmony_ci txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL, 11838c2ecf20Sopenharmony_ci &txq->bc_tbl.dma); 11848c2ecf20Sopenharmony_ci if (!txq->bc_tbl.addr) { 11858c2ecf20Sopenharmony_ci IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 11868c2ecf20Sopenharmony_ci kfree(txq); 11878c2ecf20Sopenharmony_ci return -ENOMEM; 11888c2ecf20Sopenharmony_ci } 11898c2ecf20Sopenharmony_ci 11908c2ecf20Sopenharmony_ci ret = iwl_txq_alloc(trans, txq, size, false); 11918c2ecf20Sopenharmony_ci if (ret) { 11928c2ecf20Sopenharmony_ci IWL_ERR(trans, "Tx queue alloc failed\n"); 11938c2ecf20Sopenharmony_ci goto error; 11948c2ecf20Sopenharmony_ci } 11958c2ecf20Sopenharmony_ci ret = iwl_txq_init(trans, txq, size, false); 11968c2ecf20Sopenharmony_ci if (ret) { 11978c2ecf20Sopenharmony_ci IWL_ERR(trans, "Tx queue init failed\n"); 11988c2ecf20Sopenharmony_ci goto error; 11998c2ecf20Sopenharmony_ci } 12008c2ecf20Sopenharmony_ci 12018c2ecf20Sopenharmony_ci txq->wd_timeout = msecs_to_jiffies(timeout); 12028c2ecf20Sopenharmony_ci 12038c2ecf20Sopenharmony_ci *intxq = txq; 12048c2ecf20Sopenharmony_ci return 0; 12058c2ecf20Sopenharmony_ci 12068c2ecf20Sopenharmony_cierror: 12078c2ecf20Sopenharmony_ci iwl_txq_gen2_free_memory(trans, txq); 12088c2ecf20Sopenharmony_ci return ret; 12098c2ecf20Sopenharmony_ci} 12108c2ecf20Sopenharmony_ci 12118c2ecf20Sopenharmony_cistatic int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, 12128c2ecf20Sopenharmony_ci struct iwl_host_cmd *hcmd) 12138c2ecf20Sopenharmony_ci{ 12148c2ecf20Sopenharmony_ci struct iwl_tx_queue_cfg_rsp *rsp; 12158c2ecf20Sopenharmony_ci int ret, qid; 12168c2ecf20Sopenharmony_ci u32 wr_ptr; 12178c2ecf20Sopenharmony_ci 12188c2ecf20Sopenharmony_ci if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != 12198c2ecf20Sopenharmony_ci sizeof(*rsp))) { 12208c2ecf20Sopenharmony_ci ret = -EINVAL; 12218c2ecf20Sopenharmony_ci goto error_free_resp; 12228c2ecf20Sopenharmony_ci } 12238c2ecf20Sopenharmony_ci 12248c2ecf20Sopenharmony_ci rsp = (void *)hcmd->resp_pkt->data; 12258c2ecf20Sopenharmony_ci qid = le16_to_cpu(rsp->queue_number); 12268c2ecf20Sopenharmony_ci wr_ptr = le16_to_cpu(rsp->write_pointer); 12278c2ecf20Sopenharmony_ci 12288c2ecf20Sopenharmony_ci if (qid >= ARRAY_SIZE(trans->txqs.txq)) { 12298c2ecf20Sopenharmony_ci WARN_ONCE(1, "queue index %d unsupported", qid); 12308c2ecf20Sopenharmony_ci ret = -EIO; 12318c2ecf20Sopenharmony_ci goto error_free_resp; 12328c2ecf20Sopenharmony_ci } 12338c2ecf20Sopenharmony_ci 12348c2ecf20Sopenharmony_ci if (test_and_set_bit(qid, trans->txqs.queue_used)) { 12358c2ecf20Sopenharmony_ci WARN_ONCE(1, "queue %d already used", qid); 12368c2ecf20Sopenharmony_ci ret = -EIO; 12378c2ecf20Sopenharmony_ci goto error_free_resp; 12388c2ecf20Sopenharmony_ci } 12398c2ecf20Sopenharmony_ci 12408c2ecf20Sopenharmony_ci txq->id = qid; 12418c2ecf20Sopenharmony_ci trans->txqs.txq[qid] = txq; 12428c2ecf20Sopenharmony_ci wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); 12438c2ecf20Sopenharmony_ci 12448c2ecf20Sopenharmony_ci /* Place first TFD at index corresponding to start sequence number */ 12458c2ecf20Sopenharmony_ci txq->read_ptr = wr_ptr; 12468c2ecf20Sopenharmony_ci txq->write_ptr = wr_ptr; 12478c2ecf20Sopenharmony_ci 12488c2ecf20Sopenharmony_ci IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); 12498c2ecf20Sopenharmony_ci 12508c2ecf20Sopenharmony_ci iwl_free_resp(hcmd); 12518c2ecf20Sopenharmony_ci return qid; 12528c2ecf20Sopenharmony_ci 12538c2ecf20Sopenharmony_cierror_free_resp: 12548c2ecf20Sopenharmony_ci iwl_free_resp(hcmd); 12558c2ecf20Sopenharmony_ci iwl_txq_gen2_free_memory(trans, txq); 12568c2ecf20Sopenharmony_ci return ret; 12578c2ecf20Sopenharmony_ci} 12588c2ecf20Sopenharmony_ci 12598c2ecf20Sopenharmony_ciint iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid, 12608c2ecf20Sopenharmony_ci int cmd_id, int size, unsigned int timeout) 12618c2ecf20Sopenharmony_ci{ 12628c2ecf20Sopenharmony_ci struct iwl_txq *txq = NULL; 12638c2ecf20Sopenharmony_ci struct iwl_tx_queue_cfg_cmd cmd = { 12648c2ecf20Sopenharmony_ci .flags = flags, 12658c2ecf20Sopenharmony_ci .sta_id = sta_id, 12668c2ecf20Sopenharmony_ci .tid = tid, 12678c2ecf20Sopenharmony_ci }; 12688c2ecf20Sopenharmony_ci struct iwl_host_cmd hcmd = { 12698c2ecf20Sopenharmony_ci .id = cmd_id, 12708c2ecf20Sopenharmony_ci .len = { sizeof(cmd) }, 12718c2ecf20Sopenharmony_ci .data = { &cmd, }, 12728c2ecf20Sopenharmony_ci .flags = CMD_WANT_SKB, 12738c2ecf20Sopenharmony_ci }; 12748c2ecf20Sopenharmony_ci int ret; 12758c2ecf20Sopenharmony_ci 12768c2ecf20Sopenharmony_ci ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout); 12778c2ecf20Sopenharmony_ci if (ret) 12788c2ecf20Sopenharmony_ci return ret; 12798c2ecf20Sopenharmony_ci 12808c2ecf20Sopenharmony_ci cmd.tfdq_addr = cpu_to_le64(txq->dma_addr); 12818c2ecf20Sopenharmony_ci cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); 12828c2ecf20Sopenharmony_ci cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); 12838c2ecf20Sopenharmony_ci 12848c2ecf20Sopenharmony_ci ret = iwl_trans_send_cmd(trans, &hcmd); 12858c2ecf20Sopenharmony_ci if (ret) 12868c2ecf20Sopenharmony_ci goto error; 12878c2ecf20Sopenharmony_ci 12888c2ecf20Sopenharmony_ci return iwl_txq_alloc_response(trans, txq, &hcmd); 12898c2ecf20Sopenharmony_ci 12908c2ecf20Sopenharmony_cierror: 12918c2ecf20Sopenharmony_ci iwl_txq_gen2_free_memory(trans, txq); 12928c2ecf20Sopenharmony_ci return ret; 12938c2ecf20Sopenharmony_ci} 12948c2ecf20Sopenharmony_ci 12958c2ecf20Sopenharmony_civoid iwl_txq_dyn_free(struct iwl_trans *trans, int queue) 12968c2ecf20Sopenharmony_ci{ 12978c2ecf20Sopenharmony_ci if (WARN(queue >= IWL_MAX_TVQM_QUEUES, 12988c2ecf20Sopenharmony_ci "queue %d out of range", queue)) 12998c2ecf20Sopenharmony_ci return; 13008c2ecf20Sopenharmony_ci 13018c2ecf20Sopenharmony_ci /* 13028c2ecf20Sopenharmony_ci * Upon HW Rfkill - we stop the device, and then stop the queues 13038c2ecf20Sopenharmony_ci * in the op_mode. Just for the sake of the simplicity of the op_mode, 13048c2ecf20Sopenharmony_ci * allow the op_mode to call txq_disable after it already called 13058c2ecf20Sopenharmony_ci * stop_device. 13068c2ecf20Sopenharmony_ci */ 13078c2ecf20Sopenharmony_ci if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { 13088c2ecf20Sopenharmony_ci WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), 13098c2ecf20Sopenharmony_ci "queue %d not used", queue); 13108c2ecf20Sopenharmony_ci return; 13118c2ecf20Sopenharmony_ci } 13128c2ecf20Sopenharmony_ci 13138c2ecf20Sopenharmony_ci iwl_txq_gen2_unmap(trans, queue); 13148c2ecf20Sopenharmony_ci 13158c2ecf20Sopenharmony_ci iwl_txq_gen2_free_memory(trans, trans->txqs.txq[queue]); 13168c2ecf20Sopenharmony_ci 13178c2ecf20Sopenharmony_ci trans->txqs.txq[queue] = NULL; 13188c2ecf20Sopenharmony_ci 13198c2ecf20Sopenharmony_ci IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); 13208c2ecf20Sopenharmony_ci} 13218c2ecf20Sopenharmony_ci 13228c2ecf20Sopenharmony_civoid iwl_txq_gen2_tx_free(struct iwl_trans *trans) 13238c2ecf20Sopenharmony_ci{ 13248c2ecf20Sopenharmony_ci int i; 13258c2ecf20Sopenharmony_ci 13268c2ecf20Sopenharmony_ci memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); 13278c2ecf20Sopenharmony_ci 13288c2ecf20Sopenharmony_ci /* Free all TX queues */ 13298c2ecf20Sopenharmony_ci for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { 13308c2ecf20Sopenharmony_ci if (!trans->txqs.txq[i]) 13318c2ecf20Sopenharmony_ci continue; 13328c2ecf20Sopenharmony_ci 13338c2ecf20Sopenharmony_ci iwl_txq_gen2_free(trans, i); 13348c2ecf20Sopenharmony_ci } 13358c2ecf20Sopenharmony_ci} 13368c2ecf20Sopenharmony_ci 13378c2ecf20Sopenharmony_ciint iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) 13388c2ecf20Sopenharmony_ci{ 13398c2ecf20Sopenharmony_ci struct iwl_txq *queue; 13408c2ecf20Sopenharmony_ci int ret; 13418c2ecf20Sopenharmony_ci 13428c2ecf20Sopenharmony_ci /* alloc and init the tx queue */ 13438c2ecf20Sopenharmony_ci if (!trans->txqs.txq[txq_id]) { 13448c2ecf20Sopenharmony_ci queue = kzalloc(sizeof(*queue), GFP_KERNEL); 13458c2ecf20Sopenharmony_ci if (!queue) { 13468c2ecf20Sopenharmony_ci IWL_ERR(trans, "Not enough memory for tx queue\n"); 13478c2ecf20Sopenharmony_ci return -ENOMEM; 13488c2ecf20Sopenharmony_ci } 13498c2ecf20Sopenharmony_ci trans->txqs.txq[txq_id] = queue; 13508c2ecf20Sopenharmony_ci ret = iwl_txq_alloc(trans, queue, queue_size, true); 13518c2ecf20Sopenharmony_ci if (ret) { 13528c2ecf20Sopenharmony_ci IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 13538c2ecf20Sopenharmony_ci goto error; 13548c2ecf20Sopenharmony_ci } 13558c2ecf20Sopenharmony_ci } else { 13568c2ecf20Sopenharmony_ci queue = trans->txqs.txq[txq_id]; 13578c2ecf20Sopenharmony_ci } 13588c2ecf20Sopenharmony_ci 13598c2ecf20Sopenharmony_ci ret = iwl_txq_init(trans, queue, queue_size, 13608c2ecf20Sopenharmony_ci (txq_id == trans->txqs.cmd.q_id)); 13618c2ecf20Sopenharmony_ci if (ret) { 13628c2ecf20Sopenharmony_ci IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 13638c2ecf20Sopenharmony_ci goto error; 13648c2ecf20Sopenharmony_ci } 13658c2ecf20Sopenharmony_ci trans->txqs.txq[txq_id]->id = txq_id; 13668c2ecf20Sopenharmony_ci set_bit(txq_id, trans->txqs.queue_used); 13678c2ecf20Sopenharmony_ci 13688c2ecf20Sopenharmony_ci return 0; 13698c2ecf20Sopenharmony_ci 13708c2ecf20Sopenharmony_cierror: 13718c2ecf20Sopenharmony_ci iwl_txq_gen2_tx_free(trans); 13728c2ecf20Sopenharmony_ci return ret; 13738c2ecf20Sopenharmony_ci} 13748c2ecf20Sopenharmony_ci 13758c2ecf20Sopenharmony_cistatic inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, 13768c2ecf20Sopenharmony_ci void *_tfd, u8 idx) 13778c2ecf20Sopenharmony_ci{ 13788c2ecf20Sopenharmony_ci struct iwl_tfd *tfd; 13798c2ecf20Sopenharmony_ci struct iwl_tfd_tb *tb; 13808c2ecf20Sopenharmony_ci dma_addr_t addr; 13818c2ecf20Sopenharmony_ci dma_addr_t hi_len; 13828c2ecf20Sopenharmony_ci 13838c2ecf20Sopenharmony_ci if (trans->trans_cfg->use_tfh) { 13848c2ecf20Sopenharmony_ci struct iwl_tfh_tfd *tfd = _tfd; 13858c2ecf20Sopenharmony_ci struct iwl_tfh_tb *tb = &tfd->tbs[idx]; 13868c2ecf20Sopenharmony_ci 13878c2ecf20Sopenharmony_ci return (dma_addr_t)(le64_to_cpu(tb->addr)); 13888c2ecf20Sopenharmony_ci } 13898c2ecf20Sopenharmony_ci 13908c2ecf20Sopenharmony_ci tfd = _tfd; 13918c2ecf20Sopenharmony_ci tb = &tfd->tbs[idx]; 13928c2ecf20Sopenharmony_ci addr = get_unaligned_le32(&tb->lo); 13938c2ecf20Sopenharmony_ci 13948c2ecf20Sopenharmony_ci if (sizeof(dma_addr_t) <= sizeof(u32)) 13958c2ecf20Sopenharmony_ci return addr; 13968c2ecf20Sopenharmony_ci 13978c2ecf20Sopenharmony_ci hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; 13988c2ecf20Sopenharmony_ci 13998c2ecf20Sopenharmony_ci /* 14008c2ecf20Sopenharmony_ci * shift by 16 twice to avoid warnings on 32-bit 14018c2ecf20Sopenharmony_ci * (where this code never runs anyway due to the 14028c2ecf20Sopenharmony_ci * if statement above) 14038c2ecf20Sopenharmony_ci */ 14048c2ecf20Sopenharmony_ci return addr | ((hi_len << 16) << 16); 14058c2ecf20Sopenharmony_ci} 14068c2ecf20Sopenharmony_ci 14078c2ecf20Sopenharmony_civoid iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, 14088c2ecf20Sopenharmony_ci struct iwl_cmd_meta *meta, 14098c2ecf20Sopenharmony_ci struct iwl_txq *txq, int index) 14108c2ecf20Sopenharmony_ci{ 14118c2ecf20Sopenharmony_ci int i, num_tbs; 14128c2ecf20Sopenharmony_ci void *tfd = iwl_txq_get_tfd(trans, txq, index); 14138c2ecf20Sopenharmony_ci 14148c2ecf20Sopenharmony_ci /* Sanity check on number of chunks */ 14158c2ecf20Sopenharmony_ci num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); 14168c2ecf20Sopenharmony_ci 14178c2ecf20Sopenharmony_ci if (num_tbs > trans->txqs.tfd.max_tbs) { 14188c2ecf20Sopenharmony_ci IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 14198c2ecf20Sopenharmony_ci /* @todo issue fatal error, it is quite serious situation */ 14208c2ecf20Sopenharmony_ci return; 14218c2ecf20Sopenharmony_ci } 14228c2ecf20Sopenharmony_ci 14238c2ecf20Sopenharmony_ci /* first TB is never freed - it's the bidirectional DMA data */ 14248c2ecf20Sopenharmony_ci 14258c2ecf20Sopenharmony_ci for (i = 1; i < num_tbs; i++) { 14268c2ecf20Sopenharmony_ci if (meta->tbs & BIT(i)) 14278c2ecf20Sopenharmony_ci dma_unmap_page(trans->dev, 14288c2ecf20Sopenharmony_ci iwl_txq_gen1_tfd_tb_get_addr(trans, 14298c2ecf20Sopenharmony_ci tfd, i), 14308c2ecf20Sopenharmony_ci iwl_txq_gen1_tfd_tb_get_len(trans, 14318c2ecf20Sopenharmony_ci tfd, i), 14328c2ecf20Sopenharmony_ci DMA_TO_DEVICE); 14338c2ecf20Sopenharmony_ci else 14348c2ecf20Sopenharmony_ci dma_unmap_single(trans->dev, 14358c2ecf20Sopenharmony_ci iwl_txq_gen1_tfd_tb_get_addr(trans, 14368c2ecf20Sopenharmony_ci tfd, i), 14378c2ecf20Sopenharmony_ci iwl_txq_gen1_tfd_tb_get_len(trans, 14388c2ecf20Sopenharmony_ci tfd, i), 14398c2ecf20Sopenharmony_ci DMA_TO_DEVICE); 14408c2ecf20Sopenharmony_ci } 14418c2ecf20Sopenharmony_ci 14428c2ecf20Sopenharmony_ci meta->tbs = 0; 14438c2ecf20Sopenharmony_ci 14448c2ecf20Sopenharmony_ci if (trans->trans_cfg->use_tfh) { 14458c2ecf20Sopenharmony_ci struct iwl_tfh_tfd *tfd_fh = (void *)tfd; 14468c2ecf20Sopenharmony_ci 14478c2ecf20Sopenharmony_ci tfd_fh->num_tbs = 0; 14488c2ecf20Sopenharmony_ci } else { 14498c2ecf20Sopenharmony_ci struct iwl_tfd *tfd_fh = (void *)tfd; 14508c2ecf20Sopenharmony_ci 14518c2ecf20Sopenharmony_ci tfd_fh->num_tbs = 0; 14528c2ecf20Sopenharmony_ci } 14538c2ecf20Sopenharmony_ci} 14548c2ecf20Sopenharmony_ci 14558c2ecf20Sopenharmony_ci#define IWL_TX_CRC_SIZE 4 14568c2ecf20Sopenharmony_ci#define IWL_TX_DELIMITER_SIZE 4 14578c2ecf20Sopenharmony_ci 14588c2ecf20Sopenharmony_ci/* 14598c2ecf20Sopenharmony_ci * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array 14608c2ecf20Sopenharmony_ci */ 14618c2ecf20Sopenharmony_civoid iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, 14628c2ecf20Sopenharmony_ci struct iwl_txq *txq, u16 byte_cnt, 14638c2ecf20Sopenharmony_ci int num_tbs) 14648c2ecf20Sopenharmony_ci{ 14658c2ecf20Sopenharmony_ci struct iwlagn_scd_bc_tbl *scd_bc_tbl; 14668c2ecf20Sopenharmony_ci int write_ptr = txq->write_ptr; 14678c2ecf20Sopenharmony_ci int txq_id = txq->id; 14688c2ecf20Sopenharmony_ci u8 sec_ctl = 0; 14698c2ecf20Sopenharmony_ci u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 14708c2ecf20Sopenharmony_ci __le16 bc_ent; 14718c2ecf20Sopenharmony_ci struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; 14728c2ecf20Sopenharmony_ci struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 14738c2ecf20Sopenharmony_ci u8 sta_id = tx_cmd->sta_id; 14748c2ecf20Sopenharmony_ci 14758c2ecf20Sopenharmony_ci scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; 14768c2ecf20Sopenharmony_ci 14778c2ecf20Sopenharmony_ci sec_ctl = tx_cmd->sec_ctl; 14788c2ecf20Sopenharmony_ci 14798c2ecf20Sopenharmony_ci switch (sec_ctl & TX_CMD_SEC_MSK) { 14808c2ecf20Sopenharmony_ci case TX_CMD_SEC_CCM: 14818c2ecf20Sopenharmony_ci len += IEEE80211_CCMP_MIC_LEN; 14828c2ecf20Sopenharmony_ci break; 14838c2ecf20Sopenharmony_ci case TX_CMD_SEC_TKIP: 14848c2ecf20Sopenharmony_ci len += IEEE80211_TKIP_ICV_LEN; 14858c2ecf20Sopenharmony_ci break; 14868c2ecf20Sopenharmony_ci case TX_CMD_SEC_WEP: 14878c2ecf20Sopenharmony_ci len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; 14888c2ecf20Sopenharmony_ci break; 14898c2ecf20Sopenharmony_ci } 14908c2ecf20Sopenharmony_ci if (trans->txqs.bc_table_dword) 14918c2ecf20Sopenharmony_ci len = DIV_ROUND_UP(len, 4); 14928c2ecf20Sopenharmony_ci 14938c2ecf20Sopenharmony_ci if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) 14948c2ecf20Sopenharmony_ci return; 14958c2ecf20Sopenharmony_ci 14968c2ecf20Sopenharmony_ci bc_ent = cpu_to_le16(len | (sta_id << 12)); 14978c2ecf20Sopenharmony_ci 14988c2ecf20Sopenharmony_ci scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; 14998c2ecf20Sopenharmony_ci 15008c2ecf20Sopenharmony_ci if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) 15018c2ecf20Sopenharmony_ci scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = 15028c2ecf20Sopenharmony_ci bc_ent; 15038c2ecf20Sopenharmony_ci} 15048c2ecf20Sopenharmony_ci 15058c2ecf20Sopenharmony_civoid iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, 15068c2ecf20Sopenharmony_ci struct iwl_txq *txq) 15078c2ecf20Sopenharmony_ci{ 15088c2ecf20Sopenharmony_ci struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; 15098c2ecf20Sopenharmony_ci int txq_id = txq->id; 15108c2ecf20Sopenharmony_ci int read_ptr = txq->read_ptr; 15118c2ecf20Sopenharmony_ci u8 sta_id = 0; 15128c2ecf20Sopenharmony_ci __le16 bc_ent; 15138c2ecf20Sopenharmony_ci struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; 15148c2ecf20Sopenharmony_ci struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 15158c2ecf20Sopenharmony_ci 15168c2ecf20Sopenharmony_ci WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 15178c2ecf20Sopenharmony_ci 15188c2ecf20Sopenharmony_ci if (txq_id != trans->txqs.cmd.q_id) 15198c2ecf20Sopenharmony_ci sta_id = tx_cmd->sta_id; 15208c2ecf20Sopenharmony_ci 15218c2ecf20Sopenharmony_ci bc_ent = cpu_to_le16(1 | (sta_id << 12)); 15228c2ecf20Sopenharmony_ci 15238c2ecf20Sopenharmony_ci scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; 15248c2ecf20Sopenharmony_ci 15258c2ecf20Sopenharmony_ci if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) 15268c2ecf20Sopenharmony_ci scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = 15278c2ecf20Sopenharmony_ci bc_ent; 15288c2ecf20Sopenharmony_ci} 1529