// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"

#define ICE_RX_HDR_SIZE		256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
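
	/* Note: a filter is programmed as a two-descriptor pair: the filter
	 * descriptor copied in above and the data descriptor set up below,
	 * which carries the dummy packet.  The "(i < count) ? i : 0" pattern
	 * is a branch-based ring wrap that avoids a modulo; e.g. with
	 * count = 512, an incremented index of 512 wraps back to entry 0.
	 */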

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
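
/* The completion routine below walks the ring with an index offset by
 * -count: "i" starts at next_to_clean - count and counts up toward zero,
 * so the wrap check is a cheap "if (unlikely(!i))" instead of a compare
 * against the ring size.  Worked example: with count = 256 and
 * next_to_clean = 10, i starts at -246; reaching 0 means the walk just
 * wrapped to ring entry 0, and i is rewound by another -256.  The final
 * "i += tx_ring->count" converts back to a normal ring index.
 */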

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		if (ice_ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			/* free the skb */
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	if (ice_ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;
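
	/* Illustration: each ice_tx_desc is 16 bytes (two __le64 words), so
	 * a 1024-entry ring needs 16 KB of descriptor memory; the ALIGN()
	 * below rounds the total up to a whole number of pages for the
	 * coherent DMA allocation.
	 */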
	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index))
			goto err;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}
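
/* The helper below reports the "truesize" charged for one Rx buffer: with
 * 4 KB pages the driver always splits a page into two 2 KB halves, so
 * truesize is a constant power of two; with PAGE_SIZE >= 8192 it is
 * computed from the actual headroom, frame length and skb_shared_info
 * footprint.
 */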
static unsigned int
ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = ice_rx_offset(rx_ring) ?
		SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog)
{
	struct ice_ring *xdp_ring;
	int err, result;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICE_XDP_PASS;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		return result;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return ICE_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return ICE_XDP_CONSUMED;
	}
}
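
/* XDP_TX frames are sent from the per-CPU XDP Tx ring selected via
 * smp_processor_id() above, so no ring lock is needed while there is one
 * XDP ring per CPU.  To exercise this path, a program can be attached in
 * native mode from userspace, e.g. (illustrative iproute2 invocation):
 *   ip link set dev eth0 xdpdrv obj xdp_prog.o sec xdp
 */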

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Frames that fail are
 * freed via the XDP return API.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *xdp_ring;
	int drops = 0, i;

	if (test_bit(__ICE_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	return n - drops;
}
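
/* ice_xdp_xmit() above serves as the driver's .ndo_xdp_xmit callback: the
 * core calls it when an XDP program on some device redirects frames to
 * this one.  Note the tail bump is batched: it happens once per call when
 * XDP_XMIT_FLUSH is set, not once per frame.
 */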

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ice_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}
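
/* Reference-counting scheme used above: a fresh page is taken with its one
 * real reference plus USHRT_MAX - 1 extra references up front, and
 * pagecnt_bias tracks how many of those the driver still owns.  Giving a
 * buffer to the stack then only decrements the local bias, so the hot path
 * avoids an atomic page_ref operation per packet; references are restocked
 * in bulk only when the bias is nearly drained (see
 * ice_can_reuse_rx_page()).
 */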

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail.
 * Returning true signals to the caller that we didn't replace
 * cleaned_count buffers and there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
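
/* Worked example for the offset adjustment below, assuming a 4 KB page
 * split into two 2 KB buffers: page_offset alternates 0 -> 2048 -> 0 by
 * XOR with truesize (2048), so the half just handed to the stack rests
 * while the other half is posted to hardware.  On PAGE_SIZE >= 8192
 * systems the offset instead walks forward through the page in truesize
 * steps.
 */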

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 * @rx_buf_pgcnt: rx_buf page refcount
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size, int *rx_buf_pgcnt)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly as xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* we must record the Rx queue, otherwise OS features such as
	 * symmetric queues won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}
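
/* ice_build_skb() above is the zero-copy path: the skb header is carved
 * out of the headroom already present in the Rx buffer, which is why it
 * is only used when the ring reserves ICE_SKB_PAD / XDP_PACKET_HEADROOM
 * in front of the data.  ice_construct_skb() below is the fallback that
 * allocates a fresh skb and copies up to ICE_RX_HDR_SIZE (256) header
 * bytes into its linear area, attaching any remainder as a page frag.
 */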

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */
static void
ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	       int rx_buf_pgcnt)
{
	u16 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}
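
/* Per-descriptor flow implemented by ice_clean_rx_irq() below:
 *   1. stop at the first descriptor whose DD (done) bit is not set;
 *   2. read the packet length and sync the buffer for CPU access;
 *   3. run the XDP program, if one is attached (TX/REDIRECT verdicts
 *      bypass the stack entirely);
 *   4. otherwise build or construct an skb, chaining non-EOP buffers;
 *   5. recycle or free each buffer, then refill the ring once at the end.
 */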

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	struct bpf_prog *xdp_prog = NULL;
	struct xdp_buff xdp;
	bool failure;

	xdp.rxq = &rx_ring->xdp_rxq;
	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		int rx_buf_pgcnt;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used; if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
11328c2ecf20Sopenharmony_ci */ 11338c2ecf20Sopenharmony_ci dma_rmb(); 11348c2ecf20Sopenharmony_ci 11358c2ecf20Sopenharmony_ci if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { 11368c2ecf20Sopenharmony_ci ice_put_rx_buf(rx_ring, NULL, 0); 11378c2ecf20Sopenharmony_ci cleaned_count++; 11388c2ecf20Sopenharmony_ci continue; 11398c2ecf20Sopenharmony_ci } 11408c2ecf20Sopenharmony_ci 11418c2ecf20Sopenharmony_ci size = le16_to_cpu(rx_desc->wb.pkt_len) & 11428c2ecf20Sopenharmony_ci ICE_RX_FLX_DESC_PKT_LEN_M; 11438c2ecf20Sopenharmony_ci 11448c2ecf20Sopenharmony_ci /* retrieve a buffer from the ring */ 11458c2ecf20Sopenharmony_ci rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt); 11468c2ecf20Sopenharmony_ci 11478c2ecf20Sopenharmony_ci if (!size) { 11488c2ecf20Sopenharmony_ci xdp.data = NULL; 11498c2ecf20Sopenharmony_ci xdp.data_end = NULL; 11508c2ecf20Sopenharmony_ci xdp.data_hard_start = NULL; 11518c2ecf20Sopenharmony_ci xdp.data_meta = NULL; 11528c2ecf20Sopenharmony_ci goto construct_skb; 11538c2ecf20Sopenharmony_ci } 11548c2ecf20Sopenharmony_ci 11558c2ecf20Sopenharmony_ci xdp.data = page_address(rx_buf->page) + rx_buf->page_offset; 11568c2ecf20Sopenharmony_ci xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring); 11578c2ecf20Sopenharmony_ci xdp.data_meta = xdp.data; 11588c2ecf20Sopenharmony_ci xdp.data_end = xdp.data + size; 11598c2ecf20Sopenharmony_ci#if (PAGE_SIZE > 4096) 11608c2ecf20Sopenharmony_ci /* At larger PAGE_SIZE, frame_sz depend on len size */ 11618c2ecf20Sopenharmony_ci xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size); 11628c2ecf20Sopenharmony_ci#endif 11638c2ecf20Sopenharmony_ci 11648c2ecf20Sopenharmony_ci rcu_read_lock(); 11658c2ecf20Sopenharmony_ci xdp_prog = READ_ONCE(rx_ring->xdp_prog); 11668c2ecf20Sopenharmony_ci if (!xdp_prog) { 11678c2ecf20Sopenharmony_ci rcu_read_unlock(); 11688c2ecf20Sopenharmony_ci goto construct_skb; 11698c2ecf20Sopenharmony_ci } 11708c2ecf20Sopenharmony_ci 11718c2ecf20Sopenharmony_ci xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog); 11728c2ecf20Sopenharmony_ci rcu_read_unlock(); 11738c2ecf20Sopenharmony_ci if (!xdp_res) 11748c2ecf20Sopenharmony_ci goto construct_skb; 11758c2ecf20Sopenharmony_ci if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { 11768c2ecf20Sopenharmony_ci xdp_xmit |= xdp_res; 11778c2ecf20Sopenharmony_ci ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz); 11788c2ecf20Sopenharmony_ci } else { 11798c2ecf20Sopenharmony_ci rx_buf->pagecnt_bias++; 11808c2ecf20Sopenharmony_ci } 11818c2ecf20Sopenharmony_ci total_rx_bytes += size; 11828c2ecf20Sopenharmony_ci total_rx_pkts++; 11838c2ecf20Sopenharmony_ci 11848c2ecf20Sopenharmony_ci cleaned_count++; 11858c2ecf20Sopenharmony_ci ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 11868c2ecf20Sopenharmony_ci continue; 11878c2ecf20Sopenharmony_ciconstruct_skb: 11888c2ecf20Sopenharmony_ci if (skb) { 11898c2ecf20Sopenharmony_ci ice_add_rx_frag(rx_ring, rx_buf, skb, size); 11908c2ecf20Sopenharmony_ci } else if (likely(xdp.data)) { 11918c2ecf20Sopenharmony_ci if (ice_ring_uses_build_skb(rx_ring)) 11928c2ecf20Sopenharmony_ci skb = ice_build_skb(rx_ring, rx_buf, &xdp); 11938c2ecf20Sopenharmony_ci else 11948c2ecf20Sopenharmony_ci skb = ice_construct_skb(rx_ring, rx_buf, &xdp); 11958c2ecf20Sopenharmony_ci } 11968c2ecf20Sopenharmony_ci /* exit if we failed to retrieve a buffer */ 11978c2ecf20Sopenharmony_ci if (!skb) { 11988c2ecf20Sopenharmony_ci rx_ring->rx_stats.alloc_buf_failed++; 11998c2ecf20Sopenharmony_ci if (rx_buf) 12008c2ecf20Sopenharmony_ci rx_buf->pagecnt_bias++; 
12018c2ecf20Sopenharmony_ci break; 12028c2ecf20Sopenharmony_ci } 12038c2ecf20Sopenharmony_ci 12048c2ecf20Sopenharmony_ci ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); 12058c2ecf20Sopenharmony_ci cleaned_count++; 12068c2ecf20Sopenharmony_ci 12078c2ecf20Sopenharmony_ci /* skip if it is NOP desc */ 12088c2ecf20Sopenharmony_ci if (ice_is_non_eop(rx_ring, rx_desc, skb)) 12098c2ecf20Sopenharmony_ci continue; 12108c2ecf20Sopenharmony_ci 12118c2ecf20Sopenharmony_ci stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 12128c2ecf20Sopenharmony_ci if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) { 12138c2ecf20Sopenharmony_ci dev_kfree_skb_any(skb); 12148c2ecf20Sopenharmony_ci continue; 12158c2ecf20Sopenharmony_ci } 12168c2ecf20Sopenharmony_ci 12178c2ecf20Sopenharmony_ci stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); 12188c2ecf20Sopenharmony_ci if (ice_test_staterr(rx_desc, stat_err_bits)) 12198c2ecf20Sopenharmony_ci vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); 12208c2ecf20Sopenharmony_ci 12218c2ecf20Sopenharmony_ci /* pad the skb if needed, to make a valid ethernet frame */ 12228c2ecf20Sopenharmony_ci if (eth_skb_pad(skb)) { 12238c2ecf20Sopenharmony_ci skb = NULL; 12248c2ecf20Sopenharmony_ci continue; 12258c2ecf20Sopenharmony_ci } 12268c2ecf20Sopenharmony_ci 12278c2ecf20Sopenharmony_ci /* probably a little skewed due to removing CRC */ 12288c2ecf20Sopenharmony_ci total_rx_bytes += skb->len; 12298c2ecf20Sopenharmony_ci 12308c2ecf20Sopenharmony_ci /* populate checksum, VLAN, and protocol */ 12318c2ecf20Sopenharmony_ci rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 12328c2ecf20Sopenharmony_ci ICE_RX_FLEX_DESC_PTYPE_M; 12338c2ecf20Sopenharmony_ci 12348c2ecf20Sopenharmony_ci ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 12358c2ecf20Sopenharmony_ci 12368c2ecf20Sopenharmony_ci /* send completed skb up the stack */ 12378c2ecf20Sopenharmony_ci ice_receive_skb(rx_ring, skb, vlan_tag); 12388c2ecf20Sopenharmony_ci 12398c2ecf20Sopenharmony_ci /* update budget accounting */ 12408c2ecf20Sopenharmony_ci total_rx_pkts++; 12418c2ecf20Sopenharmony_ci } 12428c2ecf20Sopenharmony_ci 12438c2ecf20Sopenharmony_ci /* return up to cleaned_count buffers to hardware */ 12448c2ecf20Sopenharmony_ci failure = ice_alloc_rx_bufs(rx_ring, cleaned_count); 12458c2ecf20Sopenharmony_ci 12468c2ecf20Sopenharmony_ci if (xdp_prog) 12478c2ecf20Sopenharmony_ci ice_finalize_xdp_rx(rx_ring, xdp_xmit); 12488c2ecf20Sopenharmony_ci 12498c2ecf20Sopenharmony_ci ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); 12508c2ecf20Sopenharmony_ci 12518c2ecf20Sopenharmony_ci /* guarantee a trip back through this routine if there was a failure */ 12528c2ecf20Sopenharmony_ci return failure ? budget : (int)total_rx_pkts; 12538c2ecf20Sopenharmony_ci} 12548c2ecf20Sopenharmony_ci 12558c2ecf20Sopenharmony_ci/** 12568c2ecf20Sopenharmony_ci * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic 12578c2ecf20Sopenharmony_ci * @port_info: port_info structure containing the current link speed 12588c2ecf20Sopenharmony_ci * @avg_pkt_size: average size of Tx or Rx packets based on clean routine 12598c2ecf20Sopenharmony_ci * @itr: ITR value to update 12608c2ecf20Sopenharmony_ci * 12618c2ecf20Sopenharmony_ci * Calculate how big of an increment should be applied to the ITR value passed 12628c2ecf20Sopenharmony_ci * in based on wmem_default, SKB overhead, ethernet overhead, and the current 12638c2ecf20Sopenharmony_ci * link speed. 
12648c2ecf20Sopenharmony_ci * 12658c2ecf20Sopenharmony_ci * The following is a calculation derived from: 12668c2ecf20Sopenharmony_ci * wmem_default / (size + overhead) = desired_pkts_per_int 12678c2ecf20Sopenharmony_ci * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate 12688c2ecf20Sopenharmony_ci * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value 12698c2ecf20Sopenharmony_ci * 12708c2ecf20Sopenharmony_ci * Assuming wmem_default is 212992 and overhead is 640 bytes per 12718c2ecf20Sopenharmony_ci * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the 12728c2ecf20Sopenharmony_ci * formula down to: 12738c2ecf20Sopenharmony_ci * 12748c2ecf20Sopenharmony_ci * wmem_default * bits_per_byte * usecs_per_sec pkt_size + 24 12758c2ecf20Sopenharmony_ci * ITR = -------------------------------------------- * -------------- 12768c2ecf20Sopenharmony_ci * rate pkt_size + 640 12778c2ecf20Sopenharmony_ci */ 12788c2ecf20Sopenharmony_cistatic unsigned int 12798c2ecf20Sopenharmony_ciice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info, 12808c2ecf20Sopenharmony_ci unsigned int avg_pkt_size, 12818c2ecf20Sopenharmony_ci unsigned int itr) 12828c2ecf20Sopenharmony_ci{ 12838c2ecf20Sopenharmony_ci switch (port_info->phy.link_info.link_speed) { 12848c2ecf20Sopenharmony_ci case ICE_AQ_LINK_SPEED_100GB: 12858c2ecf20Sopenharmony_ci itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24), 12868c2ecf20Sopenharmony_ci avg_pkt_size + 640); 12878c2ecf20Sopenharmony_ci break; 12888c2ecf20Sopenharmony_ci case ICE_AQ_LINK_SPEED_50GB: 12898c2ecf20Sopenharmony_ci itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24), 12908c2ecf20Sopenharmony_ci avg_pkt_size + 640); 12918c2ecf20Sopenharmony_ci break; 12928c2ecf20Sopenharmony_ci case ICE_AQ_LINK_SPEED_40GB: 12938c2ecf20Sopenharmony_ci itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24), 12948c2ecf20Sopenharmony_ci avg_pkt_size + 640); 12958c2ecf20Sopenharmony_ci break; 12968c2ecf20Sopenharmony_ci case ICE_AQ_LINK_SPEED_25GB: 12978c2ecf20Sopenharmony_ci itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24), 12988c2ecf20Sopenharmony_ci avg_pkt_size + 640); 12998c2ecf20Sopenharmony_ci break; 13008c2ecf20Sopenharmony_ci case ICE_AQ_LINK_SPEED_20GB: 13018c2ecf20Sopenharmony_ci itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24), 13028c2ecf20Sopenharmony_ci avg_pkt_size + 640); 13038c2ecf20Sopenharmony_ci break; 13048c2ecf20Sopenharmony_ci case ICE_AQ_LINK_SPEED_10GB: 13058c2ecf20Sopenharmony_ci default: 13068c2ecf20Sopenharmony_ci itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24), 13078c2ecf20Sopenharmony_ci avg_pkt_size + 640); 13088c2ecf20Sopenharmony_ci break; 13098c2ecf20Sopenharmony_ci } 13108c2ecf20Sopenharmony_ci 13118c2ecf20Sopenharmony_ci if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) { 13128c2ecf20Sopenharmony_ci itr &= ICE_ITR_ADAPTIVE_LATENCY; 13138c2ecf20Sopenharmony_ci itr += ICE_ITR_ADAPTIVE_MAX_USECS; 13148c2ecf20Sopenharmony_ci } 13158c2ecf20Sopenharmony_ci 13168c2ecf20Sopenharmony_ci return itr; 13178c2ecf20Sopenharmony_ci} 13188c2ecf20Sopenharmony_ci 13198c2ecf20Sopenharmony_ci/** 13208c2ecf20Sopenharmony_ci * ice_update_itr - update the adaptive ITR value based on statistics 13218c2ecf20Sopenharmony_ci * @q_vector: structure containing interrupt and ring information 13228c2ecf20Sopenharmony_ci * @rc: structure containing ring performance data 13238c2ecf20Sopenharmony_ci * 13248c2ecf20Sopenharmony_ci * Stores a new ITR value based on packets and byte 13258c2ecf20Sopenharmony_ci * counts during the last interrupt. 
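/* Worked example (editorial illustration, not from the original source):
 * the per-speed constants above are wmem_default * bits_per_byte / rate
 * expressed in microseconds.  With wmem_default = 212992, that is
 * 212992 * 8 = 1,703,936 bits; at 100 Gb/s this drains in ~17.04 us,
 * giving the constant 17, while at 10 Gb/s it takes ~170.4 us, giving
 * 170 (50G -> ~34, 40G -> ~43, 25G -> ~68, 20G -> ~85).  The
 * (pkt_size + 24) / (pkt_size + 640) factor then scales the increment by
 * how much of the per-packet 640-byte bookkeeping overhead is actually
 * on the wire, the 24 presumably being Ethernet framing overhead
 * (preamble, FCS, inter-frame gap).
 */
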
/**
 * ice_update_itr - update the adaptive ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 */
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
        unsigned long next_update = jiffies;
        unsigned int packets, bytes, itr;
        bool container_is_rx;

        if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
                return;

        /* If itr_countdown is set it means we programmed an ITR within
         * the last 4 interrupt cycles. This has a side effect of us
         * potentially firing an early interrupt. In order to work around
         * this we need to throw out any data received for a few
         * interrupts following the update.
         */
        if (q_vector->itr_countdown) {
                itr = rc->target_itr;
                goto clear_counts;
        }

        container_is_rx = (&q_vector->rx == rc);
        /* For Rx we want to push the delay up and default to low latency.
         * For Tx we want to pull the delay down and default to high latency.
         */
        itr = container_is_rx ?
                ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
                ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;

        /* If we didn't update within up to 1 - 2 jiffies we can assume
         * that either packets are coming in so slow there hasn't been
         * any work, or that there is so much work that NAPI is dealing
         * with interrupt moderation and we don't need to do anything.
         */
        if (time_after(next_update, rc->next_update))
                goto clear_counts;

        prefetch(q_vector->vsi->port_info);

        packets = rc->total_pkts;
        bytes = rc->total_bytes;

        if (container_is_rx) {
                /* If this is Rx and there are 1 to 4 packets and bytes are
                 * less than 9000, assume insufficient data to use the bulk
                 * rate limiting approach unless Tx is already in bulk rate
                 * limiting. We are likely latency driven.
                 */
                if (packets && packets < 4 && bytes < 9000 &&
                    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
                        itr = ICE_ITR_ADAPTIVE_LATENCY;
                        goto adjust_by_size_and_speed;
                }
        } else if (packets < 4) {
                /* If we have Tx and Rx ITR maxed and Tx ITR is running in
                 * bulk mode and we are receiving 4 or fewer packets just
                 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
                 * that the Rx can relax.
                 */
                if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
                    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
                    ICE_ITR_ADAPTIVE_MAX_USECS)
                        goto clear_counts;
        } else if (packets > 32) {
                /* If we have processed over 32 packets in a single interrupt
                 * for Tx assume we need to switch over to "bulk" mode.
                 */
                rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
        }

        /* We have no packets to actually measure against. This means
         * either one of the other queues on this vector is active or
         * we are a Tx queue doing TSO with too high of an interrupt rate.
         *
         * Between 4 and 56 we can assume that our current interrupt delay
         * is only slightly too low. As such we should increase it by a small
         * fixed amount.
         */
        if (packets < 56) {
                itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
                if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
                        itr &= ICE_ITR_ADAPTIVE_LATENCY;
                        itr += ICE_ITR_ADAPTIVE_MAX_USECS;
                }
                goto clear_counts;
        }

        if (packets <= 256) {
                itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
                itr &= ICE_ITR_MASK;

                /* Between 56 and 112 is our "goldilocks" zone where we are
                 * working out "just right". Just report that our current
                 * ITR is good for us.
                 */
                if (packets <= 112)
                        goto clear_counts;

                /* If packet count is 128 or greater we are likely looking
                 * at a slight overrun of the delay we want. Try halving
                 * our delay to see if that will cut the number of packets
                 * in half per interrupt.
                 */
                itr >>= 1;
                itr &= ICE_ITR_MASK;
                if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
                        itr = ICE_ITR_ADAPTIVE_MIN_USECS;

                goto clear_counts;
        }

        /* The paths below assume we are dealing with a bulk ITR since
         * number of packets is greater than 256. We are just going to have
         * to compute a value and try to bring the count under control,
         * though for smaller packet sizes there isn't much we can do as
         * NAPI polling will likely be kicking in sooner rather than later.
         */
        itr = ICE_ITR_ADAPTIVE_BULK;

adjust_by_size_and_speed:

        /* based on checks above packets cannot be 0 so division is safe */
        itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
                                               bytes / packets, itr);

clear_counts:
        /* write back value */
        rc->target_itr = itr;

        /* next update should occur within next jiffy */
        rc->next_update = next_update + 1;

        rc->total_bytes = 0;
        rc->total_pkts = 0;
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
        /* The ITR value is reported in microseconds, and the register value is
         * recorded in 2 microsecond units. For this reason we only need to
         * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
         * granularity as a shift instead of division. The mask makes sure the
         * ITR value is never odd so we don't accidentally write into the field
         * prior to the ITR field.
         */
        itr &= ICE_ITR_MASK;

        return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
                (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
                (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}

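/* Worked example (editorial illustration, not from the original source;
 * assumes the INTERVAL field starts at bit 5 and ICE_ITR_GRAN_S is 1,
 * i.e. 2 us granularity): a request for itr = 50 us is first masked to an
 * even value, then 50 << (5 - 1) == 25 << 5, so the single left shift both
 * divides the microsecond value by the 2 us register granularity and
 * places the result (25 hardware units) in the INTERVAL field, alongside
 * the INTENA and CLEARPBA bits that re-arm the interrupt.
 */
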
/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3

/**
 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
 */
static void ice_update_ena_itr(struct ice_q_vector *q_vector)
{
        struct ice_ring_container *tx = &q_vector->tx;
        struct ice_ring_container *rx = &q_vector->rx;
        struct ice_vsi *vsi = q_vector->vsi;
        u32 itr_val;

        /* when exiting WB_ON_ITR let's set a low ITR value and trigger
         * interrupts to expire right away in case we have more work ready to go
         * already
         */
        if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
                itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
                wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
                /* set target back to last user set value */
                rx->target_itr = rx->itr_setting;
                /* set current to what we just wrote and dynamic if needed */
                rx->current_itr = ICE_WB_ON_ITR_USECS |
                        (rx->itr_setting & ICE_ITR_DYNAMIC);
                /* allow normal interrupt flow to start */
                q_vector->itr_countdown = 0;
                return;
        }

        /* This will do nothing if dynamic updates are not enabled */
        ice_update_itr(q_vector, tx);
        ice_update_itr(q_vector, rx);

        /* This block of logic allows us to get away with only updating
         * one ITR value with each interrupt. The idea is to perform a
         * pseudo-lazy update with the following criteria.
         *
         * 1. Rx is given higher priority than Tx if both are in same state
         * 2. If we must reduce an ITR, that is given highest priority.
         * 3. We then give priority to increasing ITR based on amount.
         */
        if (rx->target_itr < rx->current_itr) {
                /* Rx ITR needs to be reduced, this is highest priority */
                itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
                rx->current_itr = rx->target_itr;
                q_vector->itr_countdown = ITR_COUNTDOWN_START;
        } else if ((tx->target_itr < tx->current_itr) ||
                   ((rx->target_itr - rx->current_itr) <
                    (tx->target_itr - tx->current_itr))) {
                /* Tx ITR needs to be reduced, this is second priority
                 * Tx ITR needs to be increased more than Rx, fourth priority
                 */
                itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
                tx->current_itr = tx->target_itr;
                q_vector->itr_countdown = ITR_COUNTDOWN_START;
        } else if (rx->current_itr != rx->target_itr) {
                /* Rx ITR needs to be increased, third priority */
                itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
                rx->current_itr = rx->target_itr;
                q_vector->itr_countdown = ITR_COUNTDOWN_START;
        } else {
                /* Still have to re-enable the interrupts */
                itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
                if (q_vector->itr_countdown)
                        q_vector->itr_countdown--;
        }

        if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
                wr32(&q_vector->vsi->back->hw,
                     GLINT_DYN_CTL(q_vector->reg_idx),
                     itr_val);
}

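/* Worked example (editorial illustration, not from the original source):
 * suppose ice_update_itr() asks to raise Rx from 50 to 62 us (+12) and Tx
 * from 50 to 122 us (+72) in the same pass.  Neither is a reduction, so the
 * first branch is skipped, but the Rx delta (12) is smaller than the Tx
 * delta (72), so the Tx increase is written this interrupt (fourth
 * priority) and the Rx increase waits for a later pass through the
 * else-if chain (third priority).
 */
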
/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR,
 * descriptors may not be written back if they don't fill a cache line until
 * the next interrupt.
 *
 * This sets the write-back frequency to 2 microseconds as that is the minimum
 * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
 * make sure hardware knows we aren't meddling with the INTENA_M bit.
 */
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
        struct ice_vsi *vsi = q_vector->vsi;

        /* already in WB_ON_ITR mode no need to change it */
        if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
                return;

        if (q_vector->num_ring_rx)
                wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
                     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
                                                 ICE_RX_ITR));

        if (q_vector->num_ring_tx)
                wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
                     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
                                                 ICE_TX_ITR));

        q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
        struct ice_q_vector *q_vector =
                container_of(napi, struct ice_q_vector, napi);
        bool clean_complete = true;
        struct ice_ring *ring;
        int budget_per_ring;
        int work_done = 0;

        /* Since the actual Tx work is minimal, we can give the Tx a larger
         * budget and be more aggressive about cleaning up the Tx descriptors.
         */
        ice_for_each_ring(ring, q_vector->tx) {
                bool wd = ring->xsk_pool ?
                          ice_clean_tx_irq_zc(ring, budget) :
                          ice_clean_tx_irq(ring, budget);

                if (!wd)
                        clean_complete = false;
        }

        /* Handle case where we are called by netpoll with a budget of 0 */
        if (unlikely(budget <= 0))
                return budget;

        /* normally we have 1 Rx ring per q_vector */
        if (unlikely(q_vector->num_ring_rx > 1))
                /* We attempt to distribute budget to each Rx queue fairly, but
                 * don't allow the budget to go below 1 because that would exit
                 * polling early.
                 */
                budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
        else
                /* Max of 1 Rx ring in this q_vector so give it the budget */
                budget_per_ring = budget;

        ice_for_each_ring(ring, q_vector->rx) {
                int cleaned;

                /* A dedicated path for zero-copy allows making a single
                 * comparison in the irq context instead of many inside the
                 * ice_clean_rx_irq function and makes the codebase cleaner.
                 */
                cleaned = ring->xsk_pool ?
                          ice_clean_rx_irq_zc(ring, budget_per_ring) :
                          ice_clean_rx_irq(ring, budget_per_ring);
                work_done += cleaned;
                /* if we clean as many as budgeted, we must not be done */
                if (cleaned >= budget_per_ring)
                        clean_complete = false;
        }

        /* If work not completed, return budget and polling will return */
        if (!clean_complete)
                return budget;

        /* Exit the polling mode, but don't re-enable interrupts if stack might
         * poll us due to busy-polling
         */
        if (likely(napi_complete_done(napi, work_done)))
                ice_update_ena_itr(q_vector);
        else
                ice_set_wb_on_itr(q_vector);

        return min_t(int, work_done, budget - 1);
}

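/* Worked example (editorial illustration, not from the original source):
 * with the common NAPI budget of 64 and three Rx rings on one q_vector,
 * budget_per_ring = max(64 / 3, 1) = 21, so each ring may clean at most
 * 21 packets per poll.  The min_t(int, work_done, budget - 1) on return
 * matters for the same reason: handing the full budget back to the NAPI
 * core would signal that more work is pending and keep the vector in
 * polling mode.
 */
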
/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
        netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
        /* Memory barrier before checking head and tail */
        smp_mb();

        /* Check again in case another CPU has just made room available. */
        if (likely(ICE_DESC_UNUSED(tx_ring) < size))
                return -EBUSY;

        /* A reprieve! - use start_subqueue because it doesn't call schedule */
        netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
        ++tx_ring->tx_stats.restart_q;
        return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
        if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
                return 0;

        return __ice_maybe_stop_tx(tx_ring, size);
}

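/* Illustrative scenario (editorial note, not from the original source):
 * the stop-then-recheck dance above closes a race with the completion
 * path.  If the queue were stopped only after a failed space check, a
 * concurrent Tx clean could free descriptors and test the not-yet-stopped
 * queue state in between, and the wake-up would be lost.  Stopping first,
 * issuing smp_mb(), and re-reading ICE_DESC_UNUSED() guarantees either
 * this CPU sees the freed space or the cleaner sees the stopped queue.
 */
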
/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
           struct ice_tx_offload_params *off)
{
        u64 td_offset, td_tag, td_cmd;
        u16 i = tx_ring->next_to_use;
        unsigned int data_len, size;
        struct ice_tx_desc *tx_desc;
        struct ice_tx_buf *tx_buf;
        struct sk_buff *skb;
        skb_frag_t *frag;
        dma_addr_t dma;

        td_tag = off->td_l2tag1;
        td_cmd = off->td_cmd;
        td_offset = off->td_offset;
        skb = first->skb;

        data_len = skb->data_len;
        size = skb_headlen(skb);

        tx_desc = ICE_TX_DESC(tx_ring, i);

        if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
                td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
                td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
                         ICE_TX_FLAGS_VLAN_S;
        }

        dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

        tx_buf = first;

        for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
                unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

                if (dma_mapping_error(tx_ring->dev, dma))
                        goto dma_error;

                /* record length, and DMA address */
                dma_unmap_len_set(tx_buf, len, size);
                dma_unmap_addr_set(tx_buf, dma, dma);

                /* align size to end of page */
                max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
                tx_desc->buf_addr = cpu_to_le64(dma);

                /* account for data chunks larger than the hardware
                 * can handle
                 */
                while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
                        tx_desc->cmd_type_offset_bsz =
                                ice_build_ctob(td_cmd, td_offset, max_data,
                                               td_tag);

                        tx_desc++;
                        i++;

                        if (i == tx_ring->count) {
                                tx_desc = ICE_TX_DESC(tx_ring, 0);
                                i = 0;
                        }

                        dma += max_data;
                        size -= max_data;

                        max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
                        tx_desc->buf_addr = cpu_to_le64(dma);
                }

                if (likely(!data_len))
                        break;

                tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
                                                              size, td_tag);

                tx_desc++;
                i++;

                if (i == tx_ring->count) {
                        tx_desc = ICE_TX_DESC(tx_ring, 0);
                        i = 0;
                }

                size = skb_frag_size(frag);
                data_len -= size;

                dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
                                       DMA_TO_DEVICE);

                tx_buf = &tx_ring->tx_buf[i];
        }

        /* record bytecount for BQL */
        netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

        /* record SW timestamp if HW timestamp is not available */
        skb_tx_timestamp(first->skb);

        i++;
        if (i == tx_ring->count)
                i = 0;

        /* write last descriptor with RS and EOP bits */
        td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
        tx_desc->cmd_type_offset_bsz =
                ice_build_ctob(td_cmd, td_offset, size, td_tag);

        /* Force memory writes to complete before letting h/w know there
         * are new descriptors to fetch.
         *
         * We also use this memory barrier to make certain all of the
         * status bits have been updated before next_to_watch is written.
         */
        wmb();

        /* set next_to_watch value indicating a packet is present */
        first->next_to_watch = tx_desc;

        tx_ring->next_to_use = i;

        ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

        /* notify HW of packet */
        if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
                writel(i, tx_ring->tail);

        return;

dma_error:
        /* clear DMA mappings for failed tx_buf map */
        for (;;) {
                tx_buf = &tx_ring->tx_buf[i];
                ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
                if (tx_buf == first)
                        break;
                if (i == 0)
                        i = tx_ring->count;
                i--;
        }

        tx_ring->next_to_use = i;
}

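/* Worked example (editorial illustration, not from the original source;
 * assumes ICE_MAX_READ_REQ_SIZE is 4K): the "align size to end of page"
 * step grows only the first chunk so that later chunks start on 4K
 * boundaries.  For a mapping starting at dma = ...0x1400,
 * -dma & 0xfff = 0xc00, so the first chunk may carry
 * ICE_MAX_DATA_PER_TXD_ALIGNED + 3K bytes and every subsequent
 * dma += max_data lands on a 4K-aligned address, which keeps device read
 * requests from straddling alignment boundaries.
 */
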
/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
        u32 l4_len = 0, l3_len = 0, l2_len = 0;
        struct sk_buff *skb = first->skb;
        union {
                struct iphdr *v4;
                struct ipv6hdr *v6;
                unsigned char *hdr;
        } ip;
        union {
                struct tcphdr *tcp;
                unsigned char *hdr;
        } l4;
        __be16 frag_off, protocol;
        unsigned char *exthdr;
        u32 offset, cmd = 0;
        u8 l4_proto = 0;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        ip.hdr = skb_network_header(skb);
        l4.hdr = skb_transport_header(skb);

        /* compute outer L2 header size */
        l2_len = ip.hdr - skb->data;
        offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

        protocol = vlan_get_protocol(skb);

        if (protocol == htons(ETH_P_IP))
                first->tx_flags |= ICE_TX_FLAGS_IPV4;
        else if (protocol == htons(ETH_P_IPV6))
                first->tx_flags |= ICE_TX_FLAGS_IPV6;

        if (skb->encapsulation) {
                bool gso_ena = false;
                u32 tunnel = 0;

                /* define outer network header type */
                if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
                        tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
                                  ICE_TX_CTX_EIPT_IPV4 :
                                  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
                        l4_proto = ip.v4->protocol;
                } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
                        int ret;

                        tunnel |= ICE_TX_CTX_EIPT_IPV6;
                        exthdr = ip.hdr + sizeof(*ip.v6);
                        l4_proto = ip.v6->nexthdr;
                        ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
                                               &l4_proto, &frag_off);
                        if (ret < 0)
                                return -1;
                }

                /* define outer transport */
                switch (l4_proto) {
                case IPPROTO_UDP:
                        tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
                        break;
                case IPPROTO_GRE:
                        tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
                        break;
                case IPPROTO_IPIP:
                case IPPROTO_IPV6:
                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
                        l4.hdr = skb_inner_network_header(skb);
                        break;
                default:
                        if (first->tx_flags & ICE_TX_FLAGS_TSO)
                                return -1;

                        skb_checksum_help(skb);
                        return 0;
                }

                /* compute outer L3 header size */
                tunnel |= ((l4.hdr - ip.hdr) / 4) <<
                          ICE_TXD_CTX_QW0_EIPLEN_S;

                /* switch IP header pointer from outer to inner header */
                ip.hdr = skb_inner_network_header(skb);

                /* compute tunnel header size */
                tunnel |= ((ip.hdr - l4.hdr) / 2) <<
                          ICE_TXD_CTX_QW0_NATLEN_S;

                gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
                /* indicate if we need to offload outer UDP header */
                if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
                    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
                        tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;

                /* record tunnel offload values */
                off->cd_tunnel_params |= tunnel;

                /* set DTYP=1 to indicate that it's a Tx context descriptor
                 * in IPsec tunnel mode with Tx offloads in Quad word 1
                 */
                off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;

                /* switch L4 header pointer from outer to inner */
                l4.hdr = skb_inner_transport_header(skb);
                l4_proto = 0;

                /* reset type as we transition from outer to inner headers */
                first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
                if (ip.v4->version == 4)
                        first->tx_flags |= ICE_TX_FLAGS_IPV4;
                if (ip.v6->version == 6)
                        first->tx_flags |= ICE_TX_FLAGS_IPV6;
        }

        /* Enable IP checksum offloads */
        if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
                l4_proto = ip.v4->protocol;
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
                if (first->tx_flags & ICE_TX_FLAGS_TSO)
                        cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
                else
                        cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

        } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
                cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
                exthdr = ip.hdr + sizeof(*ip.v6);
                l4_proto = ip.v6->nexthdr;
                if (l4.hdr != exthdr)
                        ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
                                         &frag_off);
        } else {
                return -1;
        }

        /* compute inner L3 header size */
        l3_len = l4.hdr - ip.hdr;
        offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

        /* Enable L4 checksum offloads */
        switch (l4_proto) {
        case IPPROTO_TCP:
                /* enable checksum offloads */
                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
                l4_len = l4.tcp->doff;
                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
                break;
        case IPPROTO_UDP:
                /* enable UDP checksum offload */
                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
                l4_len = (sizeof(struct udphdr) >> 2);
                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
                break;
        case IPPROTO_SCTP:
                /* enable SCTP checksum offload */
                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
                l4_len = sizeof(struct sctphdr) >> 2;
                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
                break;

        default:
                if (first->tx_flags & ICE_TX_FLAGS_TSO)
                        return -1;
                skb_checksum_help(skb);
                return 0;
        }

        off->td_cmd |= cmd;
        off->td_offset |= offset;
        return 1;
}

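/* Worked example (editorial illustration, not from the original source):
 * for a plain TCP/IPv4 frame the offset word packs header lengths in
 * hardware units.  A 14-byte Ethernet header gives MACLEN = 14 / 2 = 7
 * (2-byte words), a 20-byte IPv4 header without options gives
 * IPLEN = 20 / 4 = 5 (4-byte words), and a TCP header without options has
 * doff = 5, so L4LEN = 5.  Each value is shifted to its ICE_TX_DESC_LEN_*
 * position and OR'd into off->td_offset.
 */
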
20558c2ecf20Sopenharmony_ci} 20568c2ecf20Sopenharmony_ci 20578c2ecf20Sopenharmony_ci/** 20588c2ecf20Sopenharmony_ci * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW 20598c2ecf20Sopenharmony_ci * @tx_ring: ring to send buffer on 20608c2ecf20Sopenharmony_ci * @first: pointer to struct ice_tx_buf 20618c2ecf20Sopenharmony_ci * 20628c2ecf20Sopenharmony_ci * Checks the skb and set up correspondingly several generic transmit flags 20638c2ecf20Sopenharmony_ci * related to VLAN tagging for the HW, such as VLAN, DCB, etc. 20648c2ecf20Sopenharmony_ci */ 20658c2ecf20Sopenharmony_cistatic void 20668c2ecf20Sopenharmony_ciice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first) 20678c2ecf20Sopenharmony_ci{ 20688c2ecf20Sopenharmony_ci struct sk_buff *skb = first->skb; 20698c2ecf20Sopenharmony_ci 20708c2ecf20Sopenharmony_ci /* nothing left to do, software offloaded VLAN */ 20718c2ecf20Sopenharmony_ci if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) 20728c2ecf20Sopenharmony_ci return; 20738c2ecf20Sopenharmony_ci 20748c2ecf20Sopenharmony_ci /* currently, we always assume 802.1Q for VLAN insertion as VLAN 20758c2ecf20Sopenharmony_ci * insertion for 802.1AD is not supported 20768c2ecf20Sopenharmony_ci */ 20778c2ecf20Sopenharmony_ci if (skb_vlan_tag_present(skb)) { 20788c2ecf20Sopenharmony_ci first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S; 20798c2ecf20Sopenharmony_ci first->tx_flags |= ICE_TX_FLAGS_HW_VLAN; 20808c2ecf20Sopenharmony_ci } 20818c2ecf20Sopenharmony_ci 20828c2ecf20Sopenharmony_ci ice_tx_prepare_vlan_flags_dcb(tx_ring, first); 20838c2ecf20Sopenharmony_ci} 20848c2ecf20Sopenharmony_ci 20858c2ecf20Sopenharmony_ci/** 20868c2ecf20Sopenharmony_ci * ice_tso - computes mss and TSO length to prepare for TSO 20878c2ecf20Sopenharmony_ci * @first: pointer to struct ice_tx_buf 20888c2ecf20Sopenharmony_ci * @off: pointer to struct that holds offload parameters 20898c2ecf20Sopenharmony_ci * 20908c2ecf20Sopenharmony_ci * Returns 0 or error (negative) if TSO can't happen, 1 otherwise. 
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen;
	u8 l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* cppcheck-suppress unreadVariable */
	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_start = (u8)(l4.hdr - skb->data);

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_start;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */

		/* cppcheck-suppress unreadVariable */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of transport header */
	l4_start = (u8)(l4.hdr - skb->data);

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of UDP segmentation header; use
		 * sizeof(struct udphdr) rather than sizeof(l4.udp), which
		 * is the size of a pointer and only matches the header
		 * length by accident on 64-bit builds
		 */
		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
	} else {
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of TCP segmentation header */
		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
	}

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}
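/* Editor's illustrative sketch, not driver code: what the
 * csum_replace_by_diff() calls above accomplish arithmetically. Internet
 * checksums store the ones' complement of a ones'-complement sum
 * (RFC 1071/1624), so "removing" the payload length from the stored
 * pseudo-header checksum is an incremental update: recover the sum,
 * subtract the value in ones'-complement arithmetic, and complement the
 * result. Byte-order handling (the htonl() above) is glossed over, and
 * both helper names below are invented for this example.
 */
#if 0	/* standalone sketch, not built with the driver */
static unsigned int example_fold32(unsigned int sum)
{
	/* fold carries out of the low 16 bits back in (end-around carry) */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

static unsigned short example_csum_remove(unsigned short check,
					  unsigned int value)
{
	unsigned int sum = (unsigned short)~check;	/* field holds ~sum */

	/* ones'-complement subtraction is addition of the complement */
	sum += (unsigned short)~example_fold32(value);
	return (unsigned short)~example_fold32(sum);
}
#endif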
/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead, we
 * decompose the operation into shifts and one relatively cheap multiply
 * operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M, which is orders
 * of magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
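/* Editor's illustrative check, not driver code: a standalone userspace
 * program comparing the fast estimate against a naive
 * DIV_ROUND_UP(size, 12K), assuming ICE_DESCS_FOR_SKB_DATA_PTR == 1 as
 * defined by this driver. It prints only the sizes just past each
 * multiple of 12K (e.g. 12289..12336), where the estimate is one
 * descriptor lower; per the comment above, those cases are still safe
 * because a descriptor can really hold up to 16K - 1 bytes.
 */
#if 0	/* standalone sketch, not built with the driver */
#include <stdio.h>

int main(void)
{
	unsigned int size;

	for (size = 1; size <= 1 << 20; size++) {
		unsigned int fast = ((size * 85) >> 20) + 1;
		unsigned int naive = (size + 12287) / 12288;	/* ceil */

		if (fast < naive)
			printf("size %u: fast %u vs naive %u\n",
			       size, fast, naive);
	}
	return 0;
}
#endif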
/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more, as we
 * can potentially require 9 DMA transactions: 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte, which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > ICE_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(ICE_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > ICE_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
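/* Editor's illustrative model, not driver code: the sliding-window test
 * above with the descriptor-alignment correction omitted for clarity.
 * The invariant being checked is that any six consecutive fragments
 * total at least gso_size - 1 bytes; if some window falls short, a
 * single segment could span more than 8 buffers and the skb must be
 * linearized. needs_linearize() and its parameters are invented for
 * this example.
 */
#if 0	/* standalone sketch, not built with the driver */
#include <stdbool.h>

static bool needs_linearize(const int *frag_size, int nr_frags,
			    int gso_size)
{
	int sum, i;

	if (nr_frags < 7)	/* mirrors ICE_MAX_BUF_TXD - 1 */
		return false;

	/* worst case: the fragment ahead contributes a single byte */
	sum = 1 - gso_size;

	/* fragments 0 through 4 seed the window */
	for (i = 0; i < 5; i++)
		sum += frag_size[i];

	/* slide: add fragment i, test, then drop stale fragment i - 5 */
	for (i = 5; i < nr_frags; i++) {
		sum += frag_size[i];
		if (sum < 0)
			return true;
		sum -= frag_size[i - 5];
	}
	return false;
}
#endif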
/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}
/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_vsi *vsi = tx_ring->vsi;
	struct ice_tx_buf *first;
	struct ethhdr *eth;
	unsigned int count;
	int tso, csum;

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	ice_tx_prepare_vlan_flags(tx_ring, first);

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
	eth = (struct ethhdr *)skb_mac_header(skb);
	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
		      eth->h_proto == htons(ETH_P_LLDP)) &&
		     vsi->type == ICE_VSI_PF &&
		     vsi->port_info->qos_cfg.is_sw_lldp))
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
					ICE_TXD_CTX_QW1_CMD_S);

	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
		struct ice_tx_ctx_desc *cdesc;
		u16 i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
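/* Editor's illustrative arithmetic, not driver code: the descriptor
 * reservation made by the ice_maybe_stop_tx() call above, assuming
 * ICE_DESCS_PER_CACHE_LINE == 4 and ICE_DESCS_FOR_CTX_DESC == 1 as
 * defined by this driver. Sample skb: a 254-byte head plus three 32K
 * fragments.
 */
#if 0	/* standalone sketch, not built with the driver */
static unsigned int example_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;	/* as ice_txd_use_count() */
}

static unsigned int example_reservation(void)
{
	unsigned int count = 0;

	count += example_txd_use_count(254);	/* head:     1 descriptor  */
	count += example_txd_use_count(32768);	/* per frag: 3 descriptors */
	count += example_txd_use_count(32768);
	count += example_txd_use_count(32768);

	/* count == 10; reserve 10 + 4 (cache-line gap) + 1 (context) */
	return count + 4 + 1;	/* 15 descriptors must be free */
}
#endif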
/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}

/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: tx_ring to clean
 */
void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
{
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	int budget = ICE_DFLT_IRQ_WORK;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no pending work */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		/* unmap the data header */
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(tx_ring->dev, tx_buf->raw_buf);

		/* reset the buffer and descriptor for reuse */
		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	/* re-enable interrupt if needed */
	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
}
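/* Editor's note on the index arithmetic above, illustrative and not
 * driver code: next_to_clean is biased by -count so the ring-wrap test
 * is a cheap "did i reach zero" rather than a compare against
 * tx_ring->count, and the bias is removed again before writing
 * next_to_clean back. A minimal model, with invented names:
 */
#if 0	/* standalone sketch, not built with the driver */
static unsigned short clean_walk(unsigned short next_to_clean,
				 unsigned short count, int steps)
{
	/* bias the index so it runs in [-count, -1] */
	short i = (short)next_to_clean - (short)count;

	while (steps--) {
		i++;			/* advance one descriptor */
		if (!i)			/* reached the end of the ring */
			i -= count;	/* wrap back to slot 0 */
	}
	return (unsigned short)(i + count);	/* remove the bias */
}

/* e.g. clean_walk(510, 512, 4) == 2: four steps from slot 510 wrap
 * around a 512-entry ring to slot 2.
 */
#endif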