// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/prefetch.h>

#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
}

#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
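/* Illustrative sketch (not part of the driver): how a data descriptor's
 * cmd_type_offset_bsz quadword is typically composed with build_ctob().
 * The exact shift values live in iavf_txrx.h; the layout assumed here is
 * the usual one for this hardware family (dtype in the low bits, then
 * command, header offsets, buffer size, and L2 tag).
 *
 *	size = skb_headlen(skb);	// e.g. a 1514-byte frame
 *	td_cmd = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS;
 *	tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, 0, size, 0);
 *
 * With td_offset and td_tag of zero, only the DATA dtype, the EOP/RS
 * command bits, and the buffer size end up set in the quadword.
 */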

/**
 * iavf_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
					    struct iavf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * iavf_clean_tx_ring - Free all Tx buffers on a ring
 * @tx_ring: ring to be cleaned
 **/
void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * iavf_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void iavf_free_tx_resources(struct iavf_ring *tx_ring)
{
	iavf_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * iavf_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
	u32 head, tail;

	/* underlying hardware might not allow access and/or always return
	 * 0 for the head/tail registers so just use the cached values
	 */
	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
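/* Illustrative arithmetic for the wrap case above (not part of the driver):
 * with a 512-entry ring, head (next_to_clean) = 500 and tail (next_to_use)
 * = 10, the pending count is tail + count - head = 10 + 512 - 500 = 22
 * descriptors still owned by hardware.  Without the wrap, head = 5 and
 * tail = 10 simply gives tail - head = 5.
 */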

/**
 * iavf_detect_recover_hung - Function to detect and recover hung_queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function checks each of
 * those TX queues; if a queue appears hung, it triggers recovery by
 * issuing a SW interrupt.
 **/
void iavf_detect_recover_hung(struct iavf_vsi *vsi)
{
	struct iavf_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__IAVF_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->back->num_active_queues; i++) {
		tx_ring = &vsi->back->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				iavf_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to iavf_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
				iavf_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}
}

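/* Illustrative timeline for the watchdog above (not part of the driver):
 * one pass sees packets = 100 with work still pending, so prev_pkt_ctr is
 * armed to 100.  If the next pass still sees packets == 100, the queue
 * made no progress for a whole interval and iavf_force_wb() fires a SW
 * interrupt to kick the cleanup path.  If the ring went idle instead,
 * prev_pkt_ctr is parked at -1, which can never match a (non-negative)
 * masked packet count, so idle queues are never treated as hung.
 */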
#define WB_STRIDE 4

/**
 * iavf_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 **/
static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
			      struct iavf_ring *tx_ring, int napi_budget)
{
	int i = tx_ring->next_to_clean;
	struct iavf_tx_buffer *tx_buf;
	struct iavf_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = IAVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			iavf_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = IAVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = IAVF_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = iavf_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
		    (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

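/* Illustrative sketch of the wake protocol above (not part of the driver):
 * the transmit path stops the subqueue when descriptors run out and then
 * re-checks IAVF_DESC_UNUSED(); the clean path updates next_to_clean and
 * then re-checks the stopped bit.  The smp_mb() pairs the two so that at
 * least one side always observes the other's write:
 *
 *	xmit path (assumed shape)	clean path (above)
 *	netif_stop_subqueue()		tx_ring->next_to_clean = i;
 *	smp_mb()			smp_mb();
 *	re-check unused descriptors	re-check stopped bit, wake queue
 *
 * Without the barriers both sides could pass their checks on stale data
 * and the queue could stay stopped forever over a clean ring.
 */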
/**
 * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
				  struct iavf_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
	      IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */

	wr32(&vsi->back->hw,
	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
	q_vector->arm_wb_state = true;
}

/**
 * iavf_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
{
	u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
		  IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
		  IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
		  IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
		  /* allow 00 to be written to the index */;

	wr32(&vsi->back->hw,
	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
	     val);
}

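/* The two helpers above differ only in how hard they kick the hardware
 * (a summary, not new behaviour): iavf_enable_wb_on_itr() sets WB_ON_ITR
 * so completed descriptors are written back on the next ITR expiry while
 * interrupts stay masked, which suits busy NAPI polling; iavf_force_wb()
 * additionally triggers a software interrupt (SWINT_TRIG) so a stalled
 * queue gets serviced even when no traffic would otherwise fire the
 * vector.
 */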
static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
					struct iavf_ring_container *rc)
{
	return &q_vector->rx == rc;
}

static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
{
	unsigned int divisor;

	switch (q_vector->adapter->link_speed) {
	case VIRTCHNL_LINK_SPEED_40GB:
		divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
	case VIRTCHNL_LINK_SPEED_20GB:
		divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
		break;
	default:
	case VIRTCHNL_LINK_SPEED_10GB:
		divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
	case VIRTCHNL_LINK_SPEED_100MB:
		divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
		break;
	}

	return divisor;
}

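/* Scaling intuition for iavf_itr_divisor() (illustrative, not part of the
 * driver): the adjust_by_size path below derives a value from the average
 * wire size that is roughly 256 times the desired ITR in usecs, then
 * divides by this per-link-speed divisor.  A faster link gets a larger
 * divisor, hence a smaller ITR and a higher interrupt rate for the same
 * frame size; e.g. the 40GB divisor is 4x the 10GB one, so the same
 * traffic profile yields roughly a quarter of the interrupt delay.
 */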
/**
 * iavf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void iavf_update_itr(struct iavf_q_vector *q_vector,
			    struct iavf_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
		return;

	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = iavf_container_is_rx(q_vector, rc) ?
	      IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
	      IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	if (iavf_container_is_rx(q_vector, rc)) {
		/* If Rx and there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
			itr = IAVF_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
		     IAVF_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
		if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
			itr &= IAVF_ITR_ADAPTIVE_LATENCY;
			itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

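	/* Summary of the packet-count bands used above and below
	 * (illustrative restatement of the comparisons in this function):
	 *
	 *	packets		action
	 *	-------		------
	 *	1-3 (Rx)	latency mode, size-based ITR (adjust_by_size)
	 *	4-55		delay slightly too low: add one MIN_INC step
	 *	56-112		"goldilocks" zone: keep the current ITR
	 *	113-256		slight overrun: halve the current delay
	 *	>256		bulk mode, size-based ITR (adjust_by_size)
	 */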
	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= IAVF_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is above 112 (but no more than 256) we are
		 * likely looking at a slight overrun of the delay we want.
		 * Try halving our delay to see if that will cut the number
		 * of packets in half per interrupt.
		 */
		itr /= 2;
		itr &= IAVF_ITR_MASK;
		if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
			itr = IAVF_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = IAVF_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to
	 * fine tune it just use the formula below to try and dial in an
	 * exact value given the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K and 16K ints/sec
	 */
	if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
		avg_wire_size /= 2;

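	/* Worked example for the math above and the addition below
	 * (illustrative, constants as defined in iavf_txrx.h are assumed):
	 * minimum-size frames give avg_wire_size = 4096.  On a 40GB link
	 * iavf_itr_divisor() returns IAVF_ITR_ADAPTIVE_MIN_INC * 1024, so
	 * the addition contributes DIV_ROUND_UP(4096, 1024 * MIN_INC) *
	 * MIN_INC, i.e. roughly 4096 / 1024 = 4 usecs of delay - about
	 * 250K ints/sec, matching the "Start at 250k ints/sec" bucket.
	 * The same frames on a 10GB link use a divisor 4x smaller and land
	 * near 16 usecs (~60K ints/sec).
	 */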
	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
	       IAVF_ITR_ADAPTIVE_MIN_INC;

	if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
		itr &= IAVF_ITR_ADAPTIVE_LATENCY;
		itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}

/**
 * iavf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * iavf_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     iavf_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IAVF_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * iavf_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{
	iavf_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

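/* Ring sizing note for the setup helpers around here (illustrative): each
 * Rx descriptor is a 32-byte union iavf_32byte_rx_desc, so a 512-entry
 * Rx ring needs 512 * 32 = 16384 bytes, which ALIGN(..., 4096) leaves
 * unchanged; a 1024-entry Tx ring of (presumably 16-byte) iavf_tx_desc
 * entries similarly fits exactly in 16K.  The 4K round-up is assumed to
 * be a hardware/DMA alignment convenience, not a space optimization.
 */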
/**
 * iavf_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * iavf_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new next_to_use value, written to the tail register
 **/
static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

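/* Ordering sketch for iavf_release_rx_desc() (illustrative): the CPU fills
 * rx_desc->read.pkt_addr for each newly posted buffer, then wmb() ensures
 * those stores are visible before the tail write that follows them.  If
 * the writel() could pass the descriptor stores on a weakly-ordered
 * machine, hardware might fetch a descriptor whose address field is stale:
 *
 *	rx_desc->read.pkt_addr = cpu_to_le64(dma);	// 1. fill descriptor
 *	wmb();						// 2. order the stores
 *	writel(ntu, rx_ring->tail);			// 3. publish to HW
 */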
/**
 * iavf_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
}

/**
 * iavf_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
				   struct iavf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 iavf_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IAVF_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, iavf_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = iavf_rx_offset(rx_ring);

	/* initialize pagecnt_bias to 1 representing we fully own page */
	bi->pagecnt_bias = 1;

	return true;
}

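/* How pagecnt_bias is used around here (a summary, not new behaviour):
 * the driver tracks its own references to a page separately from
 * page_count().  A freshly mapped page starts with pagecnt_bias = 1 and
 * page_count() = 1; each time a half/region of the page is handed to the
 * stack the bias is decremented instead of touching the atomic page
 * refcount, and iavf_can_reuse_rx_page() later compares page_count()
 * against the bias to decide whether the stack still holds the other
 * half.  Only when the bias is exhausted does the driver pay for a real
 * page_ref_add().
 */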
/**
 * iavf_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void iavf_receive_skb(struct iavf_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct iavf_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * iavf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union iavf_rx_desc *rx_desc;
	struct iavf_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = IAVF_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!iavf_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = IAVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		iavf_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		iavf_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

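/* Typical caller pattern for iavf_alloc_rx_buffers() (illustrative - the
 * actual call sites live in the Rx clean path later in this file, and the
 * IAVF_RX_BUFFER_WRITE batching threshold is assumed from iavf_txrx.h):
 *
 *	if (cleaned_count >= IAVF_RX_BUFFER_WRITE)
 *		failure |= iavf_alloc_rx_buffers(rx_ring, cleaned_count);
 *
 * Note the return convention is inverted relative to most allocators:
 * true means at least one buffer could not be posted, so the poll loop
 * should come back and retry rather than assume the ring is full.
 */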
/**
 * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/
static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
				    struct sk_buff *skb,
				    union iavf_rx_desc *rx_desc)
{
	struct iavf_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
		   IAVF_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
		    IAVF_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case IAVF_RX_PTYPE_INNER_PROT_TCP:
	case IAVF_RX_PTYPE_INNER_PROT_UDP:
	case IAVF_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		fallthrough;
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * iavf_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int iavf_ptype_to_htype(u8 ptype)
{
	struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * iavf_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void iavf_rx_hash(struct iavf_ring *ring,
				union iavf_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
			    IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
	}
}

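/* Decode pipeline summary for the helpers above (illustrative): a single
 * writeback qword carries the status, error, and ptype fields, extracted
 * with the masks and shifts used in iavf_rx_checksum(), e.g.
 *
 *	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
 *	ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
 *
 * decode_rx_desc_ptype() then expands that 8-bit ptype into the known/
 * outer_ip/inner_prot/payload_layer flags, which iavf_rx_checksum() uses
 * to decide CHECKSUM_UNNECESSARY and iavf_ptype_to_htype() maps onto the
 * L2/L3/L4 hash types consumed by skb_set_hash().
 */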
/**
 * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @rx_ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
static inline
void iavf_process_skb_fields(struct iavf_ring *rx_ring,
			     union iavf_rx_desc *rx_desc, struct sk_buff *skb,
			     u8 rx_ptype)
{
	iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * iavf_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
11118c2ecf20Sopenharmony_ci **/ 11128c2ecf20Sopenharmony_cistatic bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) 11138c2ecf20Sopenharmony_ci{ 11148c2ecf20Sopenharmony_ci /* if eth_skb_pad returns an error the skb was freed */ 11158c2ecf20Sopenharmony_ci if (eth_skb_pad(skb)) 11168c2ecf20Sopenharmony_ci return true; 11178c2ecf20Sopenharmony_ci 11188c2ecf20Sopenharmony_ci return false; 11198c2ecf20Sopenharmony_ci} 11208c2ecf20Sopenharmony_ci 11218c2ecf20Sopenharmony_ci/** 11228c2ecf20Sopenharmony_ci * iavf_reuse_rx_page - page flip buffer and store it back on the ring 11238c2ecf20Sopenharmony_ci * @rx_ring: rx descriptor ring to store buffers on 11248c2ecf20Sopenharmony_ci * @old_buff: donor buffer to have page reused 11258c2ecf20Sopenharmony_ci * 11268c2ecf20Sopenharmony_ci * Synchronizes page for reuse by the adapter 11278c2ecf20Sopenharmony_ci **/ 11288c2ecf20Sopenharmony_cistatic void iavf_reuse_rx_page(struct iavf_ring *rx_ring, 11298c2ecf20Sopenharmony_ci struct iavf_rx_buffer *old_buff) 11308c2ecf20Sopenharmony_ci{ 11318c2ecf20Sopenharmony_ci struct iavf_rx_buffer *new_buff; 11328c2ecf20Sopenharmony_ci u16 nta = rx_ring->next_to_alloc; 11338c2ecf20Sopenharmony_ci 11348c2ecf20Sopenharmony_ci new_buff = &rx_ring->rx_bi[nta]; 11358c2ecf20Sopenharmony_ci 11368c2ecf20Sopenharmony_ci /* update, and store next to alloc */ 11378c2ecf20Sopenharmony_ci nta++; 11388c2ecf20Sopenharmony_ci rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 11398c2ecf20Sopenharmony_ci 11408c2ecf20Sopenharmony_ci /* transfer page from old buffer to new buffer */ 11418c2ecf20Sopenharmony_ci new_buff->dma = old_buff->dma; 11428c2ecf20Sopenharmony_ci new_buff->page = old_buff->page; 11438c2ecf20Sopenharmony_ci new_buff->page_offset = old_buff->page_offset; 11448c2ecf20Sopenharmony_ci new_buff->pagecnt_bias = old_buff->pagecnt_bias; 11458c2ecf20Sopenharmony_ci} 11468c2ecf20Sopenharmony_ci 11478c2ecf20Sopenharmony_ci/** 11488c2ecf20Sopenharmony_ci * iavf_page_is_reusable - check if any reuse is possible 11498c2ecf20Sopenharmony_ci * @page: page struct to check 11508c2ecf20Sopenharmony_ci * 11518c2ecf20Sopenharmony_ci * A page is not reusable if it was allocated under low memory 11528c2ecf20Sopenharmony_ci * conditions, or it's not in the same NUMA node as this CPU. 11538c2ecf20Sopenharmony_ci */ 11548c2ecf20Sopenharmony_cistatic inline bool iavf_page_is_reusable(struct page *page) 11558c2ecf20Sopenharmony_ci{ 11568c2ecf20Sopenharmony_ci return (page_to_nid(page) == numa_mem_id()) && 11578c2ecf20Sopenharmony_ci !page_is_pfmemalloc(page); 11588c2ecf20Sopenharmony_ci} 11598c2ecf20Sopenharmony_ci 11608c2ecf20Sopenharmony_ci/** 11618c2ecf20Sopenharmony_ci * iavf_can_reuse_rx_page - Determine if this page can be reused by 11628c2ecf20Sopenharmony_ci * the adapter for another receive 11638c2ecf20Sopenharmony_ci * 11648c2ecf20Sopenharmony_ci * @rx_buffer: buffer containing the page 11658c2ecf20Sopenharmony_ci * 11668c2ecf20Sopenharmony_ci * If page is reusable, rx_buffer->page_offset is adjusted to point to 11678c2ecf20Sopenharmony_ci * an unused region in the page. 11688c2ecf20Sopenharmony_ci * 11698c2ecf20Sopenharmony_ci * For small pages, @truesize will be a constant value, half the size 11708c2ecf20Sopenharmony_ci * of the memory at page. We'll attempt to alternate between high and 11718c2ecf20Sopenharmony_ci * low halves of the page, with one half ready for use by the hardware 11728c2ecf20Sopenharmony_ci * and the other half being consumed by the stack. 
We use the page 11738c2ecf20Sopenharmony_ci * ref count to determine whether the stack has finished consuming the 11748c2ecf20Sopenharmony_ci * portion of this page that was passed up with a previous packet. If 11758c2ecf20Sopenharmony_ci * the page ref count is >1, we'll assume the "other" half page is 11768c2ecf20Sopenharmony_ci * still busy, and this page cannot be reused. 11778c2ecf20Sopenharmony_ci * 11788c2ecf20Sopenharmony_ci * For larger pages, @truesize will be the actual space used by the 11798c2ecf20Sopenharmony_ci * received packet (adjusted upward to an even multiple of the cache 11808c2ecf20Sopenharmony_ci * line size). This will advance through the page by the amount 11818c2ecf20Sopenharmony_ci * actually consumed by the received packets while there is still 11828c2ecf20Sopenharmony_ci * space for a buffer. Each region of larger pages will be used at 11838c2ecf20Sopenharmony_ci * most once, after which the page will not be reused. 11848c2ecf20Sopenharmony_ci * 11858c2ecf20Sopenharmony_ci * In either case, if the page is reusable its refcount is increased. 11868c2ecf20Sopenharmony_ci **/ 11878c2ecf20Sopenharmony_cistatic bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer) 11888c2ecf20Sopenharmony_ci{ 11898c2ecf20Sopenharmony_ci unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 11908c2ecf20Sopenharmony_ci struct page *page = rx_buffer->page; 11918c2ecf20Sopenharmony_ci 11928c2ecf20Sopenharmony_ci /* Is any reuse possible? */ 11938c2ecf20Sopenharmony_ci if (unlikely(!iavf_page_is_reusable(page))) 11948c2ecf20Sopenharmony_ci return false; 11958c2ecf20Sopenharmony_ci 11968c2ecf20Sopenharmony_ci#if (PAGE_SIZE < 8192) 11978c2ecf20Sopenharmony_ci /* if we are only owner of page we can reuse it */ 11988c2ecf20Sopenharmony_ci if (unlikely((page_count(page) - pagecnt_bias) > 1)) 11998c2ecf20Sopenharmony_ci return false; 12008c2ecf20Sopenharmony_ci#else 12018c2ecf20Sopenharmony_ci#define IAVF_LAST_OFFSET \ 12028c2ecf20Sopenharmony_ci (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048) 12038c2ecf20Sopenharmony_ci if (rx_buffer->page_offset > IAVF_LAST_OFFSET) 12048c2ecf20Sopenharmony_ci return false; 12058c2ecf20Sopenharmony_ci#endif 12068c2ecf20Sopenharmony_ci 12078c2ecf20Sopenharmony_ci /* If we have drained the page fragment pool we need to update 12088c2ecf20Sopenharmony_ci * the pagecnt_bias and page count so that we fully restock the 12098c2ecf20Sopenharmony_ci * number of references the driver holds. 12108c2ecf20Sopenharmony_ci */ 12118c2ecf20Sopenharmony_ci if (unlikely(!pagecnt_bias)) { 12128c2ecf20Sopenharmony_ci page_ref_add(page, USHRT_MAX); 12138c2ecf20Sopenharmony_ci rx_buffer->pagecnt_bias = USHRT_MAX; 12148c2ecf20Sopenharmony_ci } 12158c2ecf20Sopenharmony_ci 12168c2ecf20Sopenharmony_ci return true; 12178c2ecf20Sopenharmony_ci} 12188c2ecf20Sopenharmony_ci 12198c2ecf20Sopenharmony_ci/** 12208c2ecf20Sopenharmony_ci * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff 12218c2ecf20Sopenharmony_ci * @rx_ring: rx descriptor ring to transact packets on 12228c2ecf20Sopenharmony_ci * @rx_buffer: buffer containing page to add 12238c2ecf20Sopenharmony_ci * @skb: sk_buff to place the data into 12248c2ecf20Sopenharmony_ci * @size: packet length from rx_desc 12258c2ecf20Sopenharmony_ci * 12268c2ecf20Sopenharmony_ci * This function will add the data contained in rx_buffer->page to the skb. 12278c2ecf20Sopenharmony_ci * It will just attach the page as a frag to the skb. 
12288c2ecf20Sopenharmony_ci * 12298c2ecf20Sopenharmony_ci * The function will then update the page offset. 12308c2ecf20Sopenharmony_ci **/ 12318c2ecf20Sopenharmony_cistatic void iavf_add_rx_frag(struct iavf_ring *rx_ring, 12328c2ecf20Sopenharmony_ci struct iavf_rx_buffer *rx_buffer, 12338c2ecf20Sopenharmony_ci struct sk_buff *skb, 12348c2ecf20Sopenharmony_ci unsigned int size) 12358c2ecf20Sopenharmony_ci{ 12368c2ecf20Sopenharmony_ci#if (PAGE_SIZE < 8192) 12378c2ecf20Sopenharmony_ci unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; 12388c2ecf20Sopenharmony_ci#else 12398c2ecf20Sopenharmony_ci unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring)); 12408c2ecf20Sopenharmony_ci#endif 12418c2ecf20Sopenharmony_ci 12428c2ecf20Sopenharmony_ci if (!size) 12438c2ecf20Sopenharmony_ci return; 12448c2ecf20Sopenharmony_ci 12458c2ecf20Sopenharmony_ci skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, 12468c2ecf20Sopenharmony_ci rx_buffer->page_offset, size, truesize); 12478c2ecf20Sopenharmony_ci 12488c2ecf20Sopenharmony_ci /* page is being used so we must update the page offset */ 12498c2ecf20Sopenharmony_ci#if (PAGE_SIZE < 8192) 12508c2ecf20Sopenharmony_ci rx_buffer->page_offset ^= truesize; 12518c2ecf20Sopenharmony_ci#else 12528c2ecf20Sopenharmony_ci rx_buffer->page_offset += truesize; 12538c2ecf20Sopenharmony_ci#endif 12548c2ecf20Sopenharmony_ci} 12558c2ecf20Sopenharmony_ci 12568c2ecf20Sopenharmony_ci/** 12578c2ecf20Sopenharmony_ci * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use 12588c2ecf20Sopenharmony_ci * @rx_ring: rx descriptor ring to transact packets on 12598c2ecf20Sopenharmony_ci * @size: size of buffer to add to skb 12608c2ecf20Sopenharmony_ci * 12618c2ecf20Sopenharmony_ci * This function will pull an Rx buffer from the ring and synchronize it 12628c2ecf20Sopenharmony_ci * for use by the CPU. 12638c2ecf20Sopenharmony_ci */ 12648c2ecf20Sopenharmony_cistatic struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, 12658c2ecf20Sopenharmony_ci const unsigned int size) 12668c2ecf20Sopenharmony_ci{ 12678c2ecf20Sopenharmony_ci struct iavf_rx_buffer *rx_buffer; 12688c2ecf20Sopenharmony_ci 12698c2ecf20Sopenharmony_ci rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; 12708c2ecf20Sopenharmony_ci prefetchw(rx_buffer->page); 12718c2ecf20Sopenharmony_ci if (!size) 12728c2ecf20Sopenharmony_ci return rx_buffer; 12738c2ecf20Sopenharmony_ci 12748c2ecf20Sopenharmony_ci /* we are reusing so sync this buffer for CPU use */ 12758c2ecf20Sopenharmony_ci dma_sync_single_range_for_cpu(rx_ring->dev, 12768c2ecf20Sopenharmony_ci rx_buffer->dma, 12778c2ecf20Sopenharmony_ci rx_buffer->page_offset, 12788c2ecf20Sopenharmony_ci size, 12798c2ecf20Sopenharmony_ci DMA_FROM_DEVICE); 12808c2ecf20Sopenharmony_ci 12818c2ecf20Sopenharmony_ci /* We have pulled a buffer for use, so decrement pagecnt_bias */ 12828c2ecf20Sopenharmony_ci rx_buffer->pagecnt_bias--; 12838c2ecf20Sopenharmony_ci 12848c2ecf20Sopenharmony_ci return rx_buffer; 12858c2ecf20Sopenharmony_ci} 12868c2ecf20Sopenharmony_ci 12878c2ecf20Sopenharmony_ci/** 12888c2ecf20Sopenharmony_ci * iavf_construct_skb - Allocate skb and populate it 12898c2ecf20Sopenharmony_ci * @rx_ring: rx descriptor ring to transact packets on 12908c2ecf20Sopenharmony_ci * @rx_buffer: rx buffer to pull data from 12918c2ecf20Sopenharmony_ci * @size: size of buffer to add to skb 12928c2ecf20Sopenharmony_ci * 12938c2ecf20Sopenharmony_ci * This function allocates an skb. 
It then populates it with the page 12948c2ecf20Sopenharmony_ci * data from the current receive descriptor, taking care to set up the 12958c2ecf20Sopenharmony_ci * skb correctly. 12968c2ecf20Sopenharmony_ci */ 12978c2ecf20Sopenharmony_cistatic struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, 12988c2ecf20Sopenharmony_ci struct iavf_rx_buffer *rx_buffer, 12998c2ecf20Sopenharmony_ci unsigned int size) 13008c2ecf20Sopenharmony_ci{ 13018c2ecf20Sopenharmony_ci void *va; 13028c2ecf20Sopenharmony_ci#if (PAGE_SIZE < 8192) 13038c2ecf20Sopenharmony_ci unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; 13048c2ecf20Sopenharmony_ci#else 13058c2ecf20Sopenharmony_ci unsigned int truesize = SKB_DATA_ALIGN(size); 13068c2ecf20Sopenharmony_ci#endif 13078c2ecf20Sopenharmony_ci unsigned int headlen; 13088c2ecf20Sopenharmony_ci struct sk_buff *skb; 13098c2ecf20Sopenharmony_ci 13108c2ecf20Sopenharmony_ci if (!rx_buffer) 13118c2ecf20Sopenharmony_ci return NULL; 13128c2ecf20Sopenharmony_ci /* prefetch first cache line of first page */ 13138c2ecf20Sopenharmony_ci va = page_address(rx_buffer->page) + rx_buffer->page_offset; 13148c2ecf20Sopenharmony_ci net_prefetch(va); 13158c2ecf20Sopenharmony_ci 13168c2ecf20Sopenharmony_ci /* allocate a skb to store the frags */ 13178c2ecf20Sopenharmony_ci skb = __napi_alloc_skb(&rx_ring->q_vector->napi, 13188c2ecf20Sopenharmony_ci IAVF_RX_HDR_SIZE, 13198c2ecf20Sopenharmony_ci GFP_ATOMIC | __GFP_NOWARN); 13208c2ecf20Sopenharmony_ci if (unlikely(!skb)) 13218c2ecf20Sopenharmony_ci return NULL; 13228c2ecf20Sopenharmony_ci 13238c2ecf20Sopenharmony_ci /* Determine available headroom for copy */ 13248c2ecf20Sopenharmony_ci headlen = size; 13258c2ecf20Sopenharmony_ci if (headlen > IAVF_RX_HDR_SIZE) 13268c2ecf20Sopenharmony_ci headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE); 13278c2ecf20Sopenharmony_ci 13288c2ecf20Sopenharmony_ci /* align pull length to size of long to optimize memcpy performance */ 13298c2ecf20Sopenharmony_ci memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); 13308c2ecf20Sopenharmony_ci 13318c2ecf20Sopenharmony_ci /* update all of the pointers */ 13328c2ecf20Sopenharmony_ci size -= headlen; 13338c2ecf20Sopenharmony_ci if (size) { 13348c2ecf20Sopenharmony_ci skb_add_rx_frag(skb, 0, rx_buffer->page, 13358c2ecf20Sopenharmony_ci rx_buffer->page_offset + headlen, 13368c2ecf20Sopenharmony_ci size, truesize); 13378c2ecf20Sopenharmony_ci 13388c2ecf20Sopenharmony_ci /* buffer is used by skb, update page_offset */ 13398c2ecf20Sopenharmony_ci#if (PAGE_SIZE < 8192) 13408c2ecf20Sopenharmony_ci rx_buffer->page_offset ^= truesize; 13418c2ecf20Sopenharmony_ci#else 13428c2ecf20Sopenharmony_ci rx_buffer->page_offset += truesize; 13438c2ecf20Sopenharmony_ci#endif 13448c2ecf20Sopenharmony_ci } else { 13458c2ecf20Sopenharmony_ci /* buffer is unused, reset bias back to rx_buffer */ 13468c2ecf20Sopenharmony_ci rx_buffer->pagecnt_bias++; 13478c2ecf20Sopenharmony_ci } 13488c2ecf20Sopenharmony_ci 13498c2ecf20Sopenharmony_ci return skb; 13508c2ecf20Sopenharmony_ci} 13518c2ecf20Sopenharmony_ci 13528c2ecf20Sopenharmony_ci/** 13538c2ecf20Sopenharmony_ci * iavf_build_skb - Build skb around an existing buffer 13548c2ecf20Sopenharmony_ci * @rx_ring: Rx descriptor ring to transact packets on 13558c2ecf20Sopenharmony_ci * @rx_buffer: Rx buffer to pull data from 13568c2ecf20Sopenharmony_ci * @size: size of buffer to add to skb 13578c2ecf20Sopenharmony_ci * 13588c2ecf20Sopenharmony_ci * This function builds an skb around an existing Rx buffer, taking care 
13598c2ecf20Sopenharmony_ci * to set up the skb correctly and avoid any memcpy overhead. 13608c2ecf20Sopenharmony_ci */ 13618c2ecf20Sopenharmony_cistatic struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, 13628c2ecf20Sopenharmony_ci struct iavf_rx_buffer *rx_buffer, 13638c2ecf20Sopenharmony_ci unsigned int size) 13648c2ecf20Sopenharmony_ci{ 13658c2ecf20Sopenharmony_ci void *va; 13668c2ecf20Sopenharmony_ci#if (PAGE_SIZE < 8192) 13678c2ecf20Sopenharmony_ci unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; 13688c2ecf20Sopenharmony_ci#else 13698c2ecf20Sopenharmony_ci unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 13708c2ecf20Sopenharmony_ci SKB_DATA_ALIGN(IAVF_SKB_PAD + size); 13718c2ecf20Sopenharmony_ci#endif 13728c2ecf20Sopenharmony_ci struct sk_buff *skb; 13738c2ecf20Sopenharmony_ci 13748c2ecf20Sopenharmony_ci if (!rx_buffer || !size) 13758c2ecf20Sopenharmony_ci return NULL; 13768c2ecf20Sopenharmony_ci /* prefetch first cache line of first page */ 13778c2ecf20Sopenharmony_ci va = page_address(rx_buffer->page) + rx_buffer->page_offset; 13788c2ecf20Sopenharmony_ci net_prefetch(va); 13798c2ecf20Sopenharmony_ci 13808c2ecf20Sopenharmony_ci /* build an skb around the page buffer */ 13818c2ecf20Sopenharmony_ci skb = build_skb(va - IAVF_SKB_PAD, truesize); 13828c2ecf20Sopenharmony_ci if (unlikely(!skb)) 13838c2ecf20Sopenharmony_ci return NULL; 13848c2ecf20Sopenharmony_ci 13858c2ecf20Sopenharmony_ci /* update pointers within the skb to store the data */ 13868c2ecf20Sopenharmony_ci skb_reserve(skb, IAVF_SKB_PAD); 13878c2ecf20Sopenharmony_ci __skb_put(skb, size); 13888c2ecf20Sopenharmony_ci 13898c2ecf20Sopenharmony_ci /* buffer is used by skb, update page_offset */ 13908c2ecf20Sopenharmony_ci#if (PAGE_SIZE < 8192) 13918c2ecf20Sopenharmony_ci rx_buffer->page_offset ^= truesize; 13928c2ecf20Sopenharmony_ci#else 13938c2ecf20Sopenharmony_ci rx_buffer->page_offset += truesize; 13948c2ecf20Sopenharmony_ci#endif 13958c2ecf20Sopenharmony_ci 13968c2ecf20Sopenharmony_ci return skb; 13978c2ecf20Sopenharmony_ci} 13988c2ecf20Sopenharmony_ci 13998c2ecf20Sopenharmony_ci/** 14008c2ecf20Sopenharmony_ci * iavf_put_rx_buffer - Clean up used buffer and either recycle or free 14018c2ecf20Sopenharmony_ci * @rx_ring: rx descriptor ring to transact packets on 14028c2ecf20Sopenharmony_ci * @rx_buffer: rx buffer to pull data from 14038c2ecf20Sopenharmony_ci * 14048c2ecf20Sopenharmony_ci * This function will clean up the contents of the rx_buffer. It will 14058c2ecf20Sopenharmony_ci * either recycle the buffer or unmap it and free the associated resources. 
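 *
 * Sketch of the policy (see iavf_can_reuse_rx_page() above): a local,
 * non-pfmemalloc page whose other half-buffer is no longer held by the
 * stack is recycled in place; anything else is unmapped and its
 * remaining references dropped.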
14068c2ecf20Sopenharmony_ci */ 14078c2ecf20Sopenharmony_cistatic void iavf_put_rx_buffer(struct iavf_ring *rx_ring, 14088c2ecf20Sopenharmony_ci struct iavf_rx_buffer *rx_buffer) 14098c2ecf20Sopenharmony_ci{ 14108c2ecf20Sopenharmony_ci if (!rx_buffer) 14118c2ecf20Sopenharmony_ci return; 14128c2ecf20Sopenharmony_ci 14138c2ecf20Sopenharmony_ci if (iavf_can_reuse_rx_page(rx_buffer)) { 14148c2ecf20Sopenharmony_ci /* hand second half of page back to the ring */ 14158c2ecf20Sopenharmony_ci iavf_reuse_rx_page(rx_ring, rx_buffer); 14168c2ecf20Sopenharmony_ci rx_ring->rx_stats.page_reuse_count++; 14178c2ecf20Sopenharmony_ci } else { 14188c2ecf20Sopenharmony_ci /* we are not reusing the buffer so unmap it */ 14198c2ecf20Sopenharmony_ci dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, 14208c2ecf20Sopenharmony_ci iavf_rx_pg_size(rx_ring), 14218c2ecf20Sopenharmony_ci DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR); 14228c2ecf20Sopenharmony_ci __page_frag_cache_drain(rx_buffer->page, 14238c2ecf20Sopenharmony_ci rx_buffer->pagecnt_bias); 14248c2ecf20Sopenharmony_ci } 14258c2ecf20Sopenharmony_ci 14268c2ecf20Sopenharmony_ci /* clear contents of buffer_info */ 14278c2ecf20Sopenharmony_ci rx_buffer->page = NULL; 14288c2ecf20Sopenharmony_ci} 14298c2ecf20Sopenharmony_ci 14308c2ecf20Sopenharmony_ci/** 14318c2ecf20Sopenharmony_ci * iavf_is_non_eop - process handling of non-EOP buffers 14328c2ecf20Sopenharmony_ci * @rx_ring: Rx ring being processed 14338c2ecf20Sopenharmony_ci * @rx_desc: Rx descriptor for current buffer 14348c2ecf20Sopenharmony_ci * @skb: Current socket buffer containing buffer in progress 14358c2ecf20Sopenharmony_ci * 14368c2ecf20Sopenharmony_ci * This function updates next to clean. If the buffer is an EOP buffer 14378c2ecf20Sopenharmony_ci * this function exits returning false, otherwise it will place the 14388c2ecf20Sopenharmony_ci * sk_buff in the next buffer to be chained and return true indicating 14398c2ecf20Sopenharmony_ci * that this is in fact a non-EOP buffer. 14408c2ecf20Sopenharmony_ci **/ 14418c2ecf20Sopenharmony_cistatic bool iavf_is_non_eop(struct iavf_ring *rx_ring, 14428c2ecf20Sopenharmony_ci union iavf_rx_desc *rx_desc, 14438c2ecf20Sopenharmony_ci struct sk_buff *skb) 14448c2ecf20Sopenharmony_ci{ 14458c2ecf20Sopenharmony_ci u32 ntc = rx_ring->next_to_clean + 1; 14468c2ecf20Sopenharmony_ci 14478c2ecf20Sopenharmony_ci /* fetch, update, and store next to clean */ 14488c2ecf20Sopenharmony_ci ntc = (ntc < rx_ring->count) ? 
ntc : 0;
14498c2ecf20Sopenharmony_ci	rx_ring->next_to_clean = ntc;
14508c2ecf20Sopenharmony_ci
14518c2ecf20Sopenharmony_ci	prefetch(IAVF_RX_DESC(rx_ring, ntc));
14528c2ecf20Sopenharmony_ci
14538c2ecf20Sopenharmony_ci	/* if we are the last buffer then there is nothing else to do */
14548c2ecf20Sopenharmony_ci#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
14558c2ecf20Sopenharmony_ci	if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
14568c2ecf20Sopenharmony_ci		return false;
14578c2ecf20Sopenharmony_ci
14588c2ecf20Sopenharmony_ci	rx_ring->rx_stats.non_eop_descs++;
14598c2ecf20Sopenharmony_ci
14608c2ecf20Sopenharmony_ci	return true;
14618c2ecf20Sopenharmony_ci}
14628c2ecf20Sopenharmony_ci
14638c2ecf20Sopenharmony_ci/**
14648c2ecf20Sopenharmony_ci * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
14658c2ecf20Sopenharmony_ci * @rx_ring: rx descriptor ring to transact packets on
14668c2ecf20Sopenharmony_ci * @budget: Total limit on number of packets to process
14678c2ecf20Sopenharmony_ci *
14688c2ecf20Sopenharmony_ci * This function provides a "bounce buffer" approach to Rx interrupt
14698c2ecf20Sopenharmony_ci * processing. The advantage of this is that on systems that have
14708c2ecf20Sopenharmony_ci * expensive overhead for IOMMU access this provides a means of avoiding
14718c2ecf20Sopenharmony_ci * it by maintaining the mapping of the page to the system.
14728c2ecf20Sopenharmony_ci *
14738c2ecf20Sopenharmony_ci * Returns amount of work completed
14748c2ecf20Sopenharmony_ci **/
14758c2ecf20Sopenharmony_cistatic int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
14768c2ecf20Sopenharmony_ci{
14778c2ecf20Sopenharmony_ci	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
14788c2ecf20Sopenharmony_ci	struct sk_buff *skb = rx_ring->skb;
14798c2ecf20Sopenharmony_ci	u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
14808c2ecf20Sopenharmony_ci	bool failure = false;
14818c2ecf20Sopenharmony_ci
14828c2ecf20Sopenharmony_ci	while (likely(total_rx_packets < (unsigned int)budget)) {
14838c2ecf20Sopenharmony_ci		struct iavf_rx_buffer *rx_buffer;
14848c2ecf20Sopenharmony_ci		union iavf_rx_desc *rx_desc;
14858c2ecf20Sopenharmony_ci		unsigned int size;
14868c2ecf20Sopenharmony_ci		u16 vlan_tag;
14878c2ecf20Sopenharmony_ci		u8 rx_ptype;
14888c2ecf20Sopenharmony_ci		u64 qword;
14898c2ecf20Sopenharmony_ci
14908c2ecf20Sopenharmony_ci		/* return some buffers to hardware, one at a time is too slow */
14918c2ecf20Sopenharmony_ci		if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
14928c2ecf20Sopenharmony_ci			failure = failure ||
14938c2ecf20Sopenharmony_ci				  iavf_alloc_rx_buffers(rx_ring, cleaned_count);
14948c2ecf20Sopenharmony_ci			cleaned_count = 0;
14958c2ecf20Sopenharmony_ci		}
14968c2ecf20Sopenharmony_ci
14978c2ecf20Sopenharmony_ci		rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
14988c2ecf20Sopenharmony_ci
14998c2ecf20Sopenharmony_ci		/* status_error_len will always be zero for unused descriptors
15008c2ecf20Sopenharmony_ci		 * because it's cleared in cleanup, and overlaps with hdr_addr,
15018c2ecf20Sopenharmony_ci		 * which is always zero because packet split isn't used. If the
15028c2ecf20Sopenharmony_ci		 * hardware wrote DD then the length will be non-zero.
15038c2ecf20Sopenharmony_ci		 */
15048c2ecf20Sopenharmony_ci		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
15058c2ecf20Sopenharmony_ci
15068c2ecf20Sopenharmony_ci		/* This memory barrier is needed to keep us from reading
15078c2ecf20Sopenharmony_ci		 * any other fields out of the rx_desc until we have
15088c2ecf20Sopenharmony_ci * verified the descriptor has been written back. 15098c2ecf20Sopenharmony_ci */ 15108c2ecf20Sopenharmony_ci dma_rmb(); 15118c2ecf20Sopenharmony_ci#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT) 15128c2ecf20Sopenharmony_ci if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD)) 15138c2ecf20Sopenharmony_ci break; 15148c2ecf20Sopenharmony_ci 15158c2ecf20Sopenharmony_ci size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >> 15168c2ecf20Sopenharmony_ci IAVF_RXD_QW1_LENGTH_PBUF_SHIFT; 15178c2ecf20Sopenharmony_ci 15188c2ecf20Sopenharmony_ci iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); 15198c2ecf20Sopenharmony_ci rx_buffer = iavf_get_rx_buffer(rx_ring, size); 15208c2ecf20Sopenharmony_ci 15218c2ecf20Sopenharmony_ci /* retrieve a buffer from the ring */ 15228c2ecf20Sopenharmony_ci if (skb) 15238c2ecf20Sopenharmony_ci iavf_add_rx_frag(rx_ring, rx_buffer, skb, size); 15248c2ecf20Sopenharmony_ci else if (ring_uses_build_skb(rx_ring)) 15258c2ecf20Sopenharmony_ci skb = iavf_build_skb(rx_ring, rx_buffer, size); 15268c2ecf20Sopenharmony_ci else 15278c2ecf20Sopenharmony_ci skb = iavf_construct_skb(rx_ring, rx_buffer, size); 15288c2ecf20Sopenharmony_ci 15298c2ecf20Sopenharmony_ci /* exit if we failed to retrieve a buffer */ 15308c2ecf20Sopenharmony_ci if (!skb) { 15318c2ecf20Sopenharmony_ci rx_ring->rx_stats.alloc_buff_failed++; 15328c2ecf20Sopenharmony_ci if (rx_buffer && size) 15338c2ecf20Sopenharmony_ci rx_buffer->pagecnt_bias++; 15348c2ecf20Sopenharmony_ci break; 15358c2ecf20Sopenharmony_ci } 15368c2ecf20Sopenharmony_ci 15378c2ecf20Sopenharmony_ci iavf_put_rx_buffer(rx_ring, rx_buffer); 15388c2ecf20Sopenharmony_ci cleaned_count++; 15398c2ecf20Sopenharmony_ci 15408c2ecf20Sopenharmony_ci if (iavf_is_non_eop(rx_ring, rx_desc, skb)) 15418c2ecf20Sopenharmony_ci continue; 15428c2ecf20Sopenharmony_ci 15438c2ecf20Sopenharmony_ci /* ERR_MASK will only have valid bits if EOP set, and 15448c2ecf20Sopenharmony_ci * what we are doing here is actually checking 15458c2ecf20Sopenharmony_ci * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in 15468c2ecf20Sopenharmony_ci * the error field 15478c2ecf20Sopenharmony_ci */ 15488c2ecf20Sopenharmony_ci if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) { 15498c2ecf20Sopenharmony_ci dev_kfree_skb_any(skb); 15508c2ecf20Sopenharmony_ci skb = NULL; 15518c2ecf20Sopenharmony_ci continue; 15528c2ecf20Sopenharmony_ci } 15538c2ecf20Sopenharmony_ci 15548c2ecf20Sopenharmony_ci if (iavf_cleanup_headers(rx_ring, skb)) { 15558c2ecf20Sopenharmony_ci skb = NULL; 15568c2ecf20Sopenharmony_ci continue; 15578c2ecf20Sopenharmony_ci } 15588c2ecf20Sopenharmony_ci 15598c2ecf20Sopenharmony_ci /* probably a little skewed due to removing CRC */ 15608c2ecf20Sopenharmony_ci total_rx_bytes += skb->len; 15618c2ecf20Sopenharmony_ci 15628c2ecf20Sopenharmony_ci qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 15638c2ecf20Sopenharmony_ci rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> 15648c2ecf20Sopenharmony_ci IAVF_RXD_QW1_PTYPE_SHIFT; 15658c2ecf20Sopenharmony_ci 15668c2ecf20Sopenharmony_ci /* populate checksum, VLAN, and protocol */ 15678c2ecf20Sopenharmony_ci iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 15688c2ecf20Sopenharmony_ci 15698c2ecf20Sopenharmony_ci 15708c2ecf20Sopenharmony_ci vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? 
15718c2ecf20Sopenharmony_ci			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
15728c2ecf20Sopenharmony_ci
15738c2ecf20Sopenharmony_ci		iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
15748c2ecf20Sopenharmony_ci		iavf_receive_skb(rx_ring, skb, vlan_tag);
15758c2ecf20Sopenharmony_ci		skb = NULL;
15768c2ecf20Sopenharmony_ci
15778c2ecf20Sopenharmony_ci		/* update budget accounting */
15788c2ecf20Sopenharmony_ci		total_rx_packets++;
15798c2ecf20Sopenharmony_ci	}
15808c2ecf20Sopenharmony_ci
15818c2ecf20Sopenharmony_ci	rx_ring->skb = skb;
15828c2ecf20Sopenharmony_ci
15838c2ecf20Sopenharmony_ci	u64_stats_update_begin(&rx_ring->syncp);
15848c2ecf20Sopenharmony_ci	rx_ring->stats.packets += total_rx_packets;
15858c2ecf20Sopenharmony_ci	rx_ring->stats.bytes += total_rx_bytes;
15868c2ecf20Sopenharmony_ci	u64_stats_update_end(&rx_ring->syncp);
15878c2ecf20Sopenharmony_ci	rx_ring->q_vector->rx.total_packets += total_rx_packets;
15888c2ecf20Sopenharmony_ci	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
15898c2ecf20Sopenharmony_ci
15908c2ecf20Sopenharmony_ci	/* guarantee a trip back through this routine if there was a failure */
15918c2ecf20Sopenharmony_ci	return failure ? budget : (int)total_rx_packets;
15928c2ecf20Sopenharmony_ci}
15938c2ecf20Sopenharmony_ci
15948c2ecf20Sopenharmony_cistatic inline u32 iavf_buildreg_itr(const int type, u16 itr)
15958c2ecf20Sopenharmony_ci{
15968c2ecf20Sopenharmony_ci	u32 val;
15978c2ecf20Sopenharmony_ci
15988c2ecf20Sopenharmony_ci	/* We don't bother with setting the CLEARPBA bit as the data sheet
15998c2ecf20Sopenharmony_ci	 * points out doing so is "meaningless since it was already
16008c2ecf20Sopenharmony_ci	 * auto-cleared". The auto-clearing happens when the interrupt is
16018c2ecf20Sopenharmony_ci	 * asserted.
16028c2ecf20Sopenharmony_ci	 *
16038c2ecf20Sopenharmony_ci	 * Hardware errata 28 also indicates that writing to a
16048c2ecf20Sopenharmony_ci	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
16058c2ecf20Sopenharmony_ci	 * an event in the PBA anyway, so we need to rely on the automask
16068c2ecf20Sopenharmony_ci	 * to hold pending events for us until the interrupt is re-enabled.
16078c2ecf20Sopenharmony_ci	 *
16088c2ecf20Sopenharmony_ci	 * The itr value is reported in microseconds, and the register
16098c2ecf20Sopenharmony_ci	 * value is recorded in 2 microsecond units. For this reason we
16108c2ecf20Sopenharmony_ci	 * only need to shift by the interval shift - 1 instead of the
16118c2ecf20Sopenharmony_ci	 * full value.
16128c2ecf20Sopenharmony_ci	 */
16138c2ecf20Sopenharmony_ci	itr &= IAVF_ITR_MASK;
16148c2ecf20Sopenharmony_ci
16158c2ecf20Sopenharmony_ci	val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
16168c2ecf20Sopenharmony_ci	      (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
16178c2ecf20Sopenharmony_ci	      (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
16188c2ecf20Sopenharmony_ci
16198c2ecf20Sopenharmony_ci	return val;
16208c2ecf20Sopenharmony_ci}
16218c2ecf20Sopenharmony_ci
16228c2ecf20Sopenharmony_ci/* a small macro to shorten up some long lines */
16238c2ecf20Sopenharmony_ci#define INTREG IAVF_VFINT_DYN_CTLN1
16248c2ecf20Sopenharmony_ci
16258c2ecf20Sopenharmony_ci/* The act of updating the ITR will cause it to immediately trigger. In order
16268c2ecf20Sopenharmony_ci * to prevent this from throwing off adaptive update statistics we defer the
16278c2ecf20Sopenharmony_ci * update so that it can only happen so often. So after either Tx or Rx are
16288c2ecf20Sopenharmony_ci * updated, we make the adaptive scheme wait until either the ITR completely
16298c2ecf20Sopenharmony_ci * expires via the next_update expiration or we have been through at least
16308c2ecf20Sopenharmony_ci * 3 interrupts.
16318c2ecf20Sopenharmony_ci */
16328c2ecf20Sopenharmony_ci#define ITR_COUNTDOWN_START 3
16338c2ecf20Sopenharmony_ci
16348c2ecf20Sopenharmony_ci/**
16358c2ecf20Sopenharmony_ci * iavf_update_enable_itr - Update ITR and re-enable MSIX interrupt
16368c2ecf20Sopenharmony_ci * @vsi: the VSI we care about
16378c2ecf20Sopenharmony_ci * @q_vector: q_vector for which itr is being updated and interrupt enabled
16388c2ecf20Sopenharmony_ci *
16398c2ecf20Sopenharmony_ci **/
16408c2ecf20Sopenharmony_cistatic inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
16418c2ecf20Sopenharmony_ci					  struct iavf_q_vector *q_vector)
16428c2ecf20Sopenharmony_ci{
16438c2ecf20Sopenharmony_ci	struct iavf_hw *hw = &vsi->back->hw;
16448c2ecf20Sopenharmony_ci	u32 intval;
16458c2ecf20Sopenharmony_ci
16468c2ecf20Sopenharmony_ci	/* These will do nothing if dynamic updates are not enabled */
16478c2ecf20Sopenharmony_ci	iavf_update_itr(q_vector, &q_vector->tx);
16488c2ecf20Sopenharmony_ci	iavf_update_itr(q_vector, &q_vector->rx);
16498c2ecf20Sopenharmony_ci
16508c2ecf20Sopenharmony_ci	/* This block of logic allows us to get away with only updating
16518c2ecf20Sopenharmony_ci	 * one ITR value with each interrupt. The idea is to perform a
16528c2ecf20Sopenharmony_ci	 * pseudo-lazy update with the following criteria.
16538c2ecf20Sopenharmony_ci	 *
16548c2ecf20Sopenharmony_ci	 * 1. Rx is given higher priority than Tx if both are in the same state
16558c2ecf20Sopenharmony_ci	 * 2. If we must reduce an ITR, that reduction is given highest priority.
16568c2ecf20Sopenharmony_ci	 * 3. We then give priority to increasing ITR based on amount.
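	 *
	 * For example (illustrative values only): if Rx wants to drop from
	 * 50 usecs to 20 usecs while Tx wants to rise, only the Rx
	 * reduction is written on this interrupt; the Tx increase waits
	 * for a later pass.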
16578c2ecf20Sopenharmony_ci */ 16588c2ecf20Sopenharmony_ci if (q_vector->rx.target_itr < q_vector->rx.current_itr) { 16598c2ecf20Sopenharmony_ci /* Rx ITR needs to be reduced, this is highest priority */ 16608c2ecf20Sopenharmony_ci intval = iavf_buildreg_itr(IAVF_RX_ITR, 16618c2ecf20Sopenharmony_ci q_vector->rx.target_itr); 16628c2ecf20Sopenharmony_ci q_vector->rx.current_itr = q_vector->rx.target_itr; 16638c2ecf20Sopenharmony_ci q_vector->itr_countdown = ITR_COUNTDOWN_START; 16648c2ecf20Sopenharmony_ci } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || 16658c2ecf20Sopenharmony_ci ((q_vector->rx.target_itr - q_vector->rx.current_itr) < 16668c2ecf20Sopenharmony_ci (q_vector->tx.target_itr - q_vector->tx.current_itr))) { 16678c2ecf20Sopenharmony_ci /* Tx ITR needs to be reduced, this is second priority 16688c2ecf20Sopenharmony_ci * Tx ITR needs to be increased more than Rx, fourth priority 16698c2ecf20Sopenharmony_ci */ 16708c2ecf20Sopenharmony_ci intval = iavf_buildreg_itr(IAVF_TX_ITR, 16718c2ecf20Sopenharmony_ci q_vector->tx.target_itr); 16728c2ecf20Sopenharmony_ci q_vector->tx.current_itr = q_vector->tx.target_itr; 16738c2ecf20Sopenharmony_ci q_vector->itr_countdown = ITR_COUNTDOWN_START; 16748c2ecf20Sopenharmony_ci } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { 16758c2ecf20Sopenharmony_ci /* Rx ITR needs to be increased, third priority */ 16768c2ecf20Sopenharmony_ci intval = iavf_buildreg_itr(IAVF_RX_ITR, 16778c2ecf20Sopenharmony_ci q_vector->rx.target_itr); 16788c2ecf20Sopenharmony_ci q_vector->rx.current_itr = q_vector->rx.target_itr; 16798c2ecf20Sopenharmony_ci q_vector->itr_countdown = ITR_COUNTDOWN_START; 16808c2ecf20Sopenharmony_ci } else { 16818c2ecf20Sopenharmony_ci /* No ITR update, lowest priority */ 16828c2ecf20Sopenharmony_ci intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0); 16838c2ecf20Sopenharmony_ci if (q_vector->itr_countdown) 16848c2ecf20Sopenharmony_ci q_vector->itr_countdown--; 16858c2ecf20Sopenharmony_ci } 16868c2ecf20Sopenharmony_ci 16878c2ecf20Sopenharmony_ci if (!test_bit(__IAVF_VSI_DOWN, vsi->state)) 16888c2ecf20Sopenharmony_ci wr32(hw, INTREG(q_vector->reg_idx), intval); 16898c2ecf20Sopenharmony_ci} 16908c2ecf20Sopenharmony_ci 16918c2ecf20Sopenharmony_ci/** 16928c2ecf20Sopenharmony_ci * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine 16938c2ecf20Sopenharmony_ci * @napi: napi struct with our devices info in it 16948c2ecf20Sopenharmony_ci * @budget: amount of work driver is allowed to do this pass, in packets 16958c2ecf20Sopenharmony_ci * 16968c2ecf20Sopenharmony_ci * This function will clean all queues associated with a q_vector. 
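 *
 * The Rx budget is split evenly across this vector's Rx rings; e.g.
 * with the default NAPI budget of 64 and four ring pairs, each Rx ring
 * may clean at most 16 packets per poll (illustrative numbers only).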
16978c2ecf20Sopenharmony_ci * 16988c2ecf20Sopenharmony_ci * Returns the amount of work done 16998c2ecf20Sopenharmony_ci **/ 17008c2ecf20Sopenharmony_ciint iavf_napi_poll(struct napi_struct *napi, int budget) 17018c2ecf20Sopenharmony_ci{ 17028c2ecf20Sopenharmony_ci struct iavf_q_vector *q_vector = 17038c2ecf20Sopenharmony_ci container_of(napi, struct iavf_q_vector, napi); 17048c2ecf20Sopenharmony_ci struct iavf_vsi *vsi = q_vector->vsi; 17058c2ecf20Sopenharmony_ci struct iavf_ring *ring; 17068c2ecf20Sopenharmony_ci bool clean_complete = true; 17078c2ecf20Sopenharmony_ci bool arm_wb = false; 17088c2ecf20Sopenharmony_ci int budget_per_ring; 17098c2ecf20Sopenharmony_ci int work_done = 0; 17108c2ecf20Sopenharmony_ci 17118c2ecf20Sopenharmony_ci if (test_bit(__IAVF_VSI_DOWN, vsi->state)) { 17128c2ecf20Sopenharmony_ci napi_complete(napi); 17138c2ecf20Sopenharmony_ci return 0; 17148c2ecf20Sopenharmony_ci } 17158c2ecf20Sopenharmony_ci 17168c2ecf20Sopenharmony_ci /* Since the actual Tx work is minimal, we can give the Tx a larger 17178c2ecf20Sopenharmony_ci * budget and be more aggressive about cleaning up the Tx descriptors. 17188c2ecf20Sopenharmony_ci */ 17198c2ecf20Sopenharmony_ci iavf_for_each_ring(ring, q_vector->tx) { 17208c2ecf20Sopenharmony_ci if (!iavf_clean_tx_irq(vsi, ring, budget)) { 17218c2ecf20Sopenharmony_ci clean_complete = false; 17228c2ecf20Sopenharmony_ci continue; 17238c2ecf20Sopenharmony_ci } 17248c2ecf20Sopenharmony_ci arm_wb |= ring->arm_wb; 17258c2ecf20Sopenharmony_ci ring->arm_wb = false; 17268c2ecf20Sopenharmony_ci } 17278c2ecf20Sopenharmony_ci 17288c2ecf20Sopenharmony_ci /* Handle case where we are called by netpoll with a budget of 0 */ 17298c2ecf20Sopenharmony_ci if (budget <= 0) 17308c2ecf20Sopenharmony_ci goto tx_only; 17318c2ecf20Sopenharmony_ci 17328c2ecf20Sopenharmony_ci /* We attempt to distribute budget to each Rx queue fairly, but don't 17338c2ecf20Sopenharmony_ci * allow the budget to go below 1 because that would exit polling early. 17348c2ecf20Sopenharmony_ci */ 17358c2ecf20Sopenharmony_ci budget_per_ring = max(budget/q_vector->num_ringpairs, 1); 17368c2ecf20Sopenharmony_ci 17378c2ecf20Sopenharmony_ci iavf_for_each_ring(ring, q_vector->rx) { 17388c2ecf20Sopenharmony_ci int cleaned = iavf_clean_rx_irq(ring, budget_per_ring); 17398c2ecf20Sopenharmony_ci 17408c2ecf20Sopenharmony_ci work_done += cleaned; 17418c2ecf20Sopenharmony_ci /* if we clean as many as budgeted, we must not be done */ 17428c2ecf20Sopenharmony_ci if (cleaned >= budget_per_ring) 17438c2ecf20Sopenharmony_ci clean_complete = false; 17448c2ecf20Sopenharmony_ci } 17458c2ecf20Sopenharmony_ci 17468c2ecf20Sopenharmony_ci /* If work not completed, return budget and polling will return */ 17478c2ecf20Sopenharmony_ci if (!clean_complete) { 17488c2ecf20Sopenharmony_ci int cpu_id = smp_processor_id(); 17498c2ecf20Sopenharmony_ci 17508c2ecf20Sopenharmony_ci /* It is possible that the interrupt affinity has changed but, 17518c2ecf20Sopenharmony_ci * if the cpu is pegged at 100%, polling will never exit while 17528c2ecf20Sopenharmony_ci * traffic continues and the interrupt will be stuck on this 17538c2ecf20Sopenharmony_ci * cpu. We check to make sure affinity is correct before we 17548c2ecf20Sopenharmony_ci * continue to poll, otherwise we must stop polling so the 17558c2ecf20Sopenharmony_ci * interrupt can move to the correct cpu. 
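		 * (Roughly: napi_complete_done() plus a return value below
		 * budget tells the NAPI core we are finished, so the next
		 * poll can be rescheduled from the IRQ on the right CPU.)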
17568c2ecf20Sopenharmony_ci		 */
17578c2ecf20Sopenharmony_ci		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
17588c2ecf20Sopenharmony_ci			/* Tell napi that we are done polling */
17598c2ecf20Sopenharmony_ci			napi_complete_done(napi, work_done);
17608c2ecf20Sopenharmony_ci
17618c2ecf20Sopenharmony_ci			/* Force an interrupt */
17628c2ecf20Sopenharmony_ci			iavf_force_wb(vsi, q_vector);
17638c2ecf20Sopenharmony_ci
17648c2ecf20Sopenharmony_ci			/* Return budget-1 so that polling stops */
17658c2ecf20Sopenharmony_ci			return budget - 1;
17668c2ecf20Sopenharmony_ci		}
17678c2ecf20Sopenharmony_citx_only:
17688c2ecf20Sopenharmony_ci		if (arm_wb) {
17698c2ecf20Sopenharmony_ci			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
17708c2ecf20Sopenharmony_ci			iavf_enable_wb_on_itr(vsi, q_vector);
17718c2ecf20Sopenharmony_ci		}
17728c2ecf20Sopenharmony_ci		return budget;
17738c2ecf20Sopenharmony_ci	}
17748c2ecf20Sopenharmony_ci
17758c2ecf20Sopenharmony_ci	if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
17768c2ecf20Sopenharmony_ci		q_vector->arm_wb_state = false;
17778c2ecf20Sopenharmony_ci
17788c2ecf20Sopenharmony_ci	/* Exit the polling mode, but don't re-enable interrupts if stack might
17798c2ecf20Sopenharmony_ci	 * poll us due to busy-polling
17808c2ecf20Sopenharmony_ci	 */
17818c2ecf20Sopenharmony_ci	if (likely(napi_complete_done(napi, work_done)))
17828c2ecf20Sopenharmony_ci		iavf_update_enable_itr(vsi, q_vector);
17838c2ecf20Sopenharmony_ci
17848c2ecf20Sopenharmony_ci	return min(work_done, budget - 1);
17858c2ecf20Sopenharmony_ci}
17868c2ecf20Sopenharmony_ci
17878c2ecf20Sopenharmony_ci/**
17888c2ecf20Sopenharmony_ci * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
17898c2ecf20Sopenharmony_ci * @skb: send buffer
17908c2ecf20Sopenharmony_ci * @tx_ring: ring to send buffer on
17918c2ecf20Sopenharmony_ci * @flags: the tx flags to be set
17928c2ecf20Sopenharmony_ci *
17938c2ecf20Sopenharmony_ci * Checks the skb and sets up the corresponding generic transmit flags
17948c2ecf20Sopenharmony_ci * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
17958c2ecf20Sopenharmony_ci *
17968c2ecf20Sopenharmony_ci * Returns a negative error code if the frame should be dropped, and
17978c2ecf20Sopenharmony_ci * otherwise returns 0 to indicate the flags have been set properly.
17988c2ecf20Sopenharmony_ci **/
17998c2ecf20Sopenharmony_cistatic inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
18008c2ecf20Sopenharmony_ci					     struct iavf_ring *tx_ring,
18018c2ecf20Sopenharmony_ci					     u32 *flags)
18028c2ecf20Sopenharmony_ci{
18038c2ecf20Sopenharmony_ci	__be16 protocol = skb->protocol;
18048c2ecf20Sopenharmony_ci	u32 tx_flags = 0;
18058c2ecf20Sopenharmony_ci
18068c2ecf20Sopenharmony_ci	if (protocol == htons(ETH_P_8021Q) &&
18078c2ecf20Sopenharmony_ci	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
18088c2ecf20Sopenharmony_ci		/* When HW VLAN acceleration is turned off by the user the
18098c2ecf20Sopenharmony_ci		 * stack sets the protocol to 8021q so that the driver
18108c2ecf20Sopenharmony_ci		 * can take any steps required to support the SW only
18118c2ecf20Sopenharmony_ci		 * VLAN handling. In our case the driver doesn't need
18128c2ecf20Sopenharmony_ci		 * to take any further steps so just set the protocol
18138c2ecf20Sopenharmony_ci		 * to the encapsulated ethertype.
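		 * For example, with CTAG Tx offload disabled an skb can
		 * arrive here with skb->protocol == htons(ETH_P_8021Q);
		 * vlan_get_protocol() below then returns the encapsulated
		 * ethertype (say, IPv4) so later checksum/TSO setup keys
		 * off the real payload protocol. (Illustrative example.)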
18148c2ecf20Sopenharmony_ci		 */
18158c2ecf20Sopenharmony_ci		skb->protocol = vlan_get_protocol(skb);
18168c2ecf20Sopenharmony_ci		goto out;
18178c2ecf20Sopenharmony_ci	}
18188c2ecf20Sopenharmony_ci
18198c2ecf20Sopenharmony_ci	/* if we have a HW VLAN tag being added, default to the HW one */
18208c2ecf20Sopenharmony_ci	if (skb_vlan_tag_present(skb)) {
18218c2ecf20Sopenharmony_ci		tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
18228c2ecf20Sopenharmony_ci		tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
18238c2ecf20Sopenharmony_ci	/* else if it is a SW VLAN, check the next protocol and store the tag */
18248c2ecf20Sopenharmony_ci	} else if (protocol == htons(ETH_P_8021Q)) {
18258c2ecf20Sopenharmony_ci		struct vlan_hdr *vhdr, _vhdr;
18268c2ecf20Sopenharmony_ci
18278c2ecf20Sopenharmony_ci		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
18288c2ecf20Sopenharmony_ci		if (!vhdr)
18298c2ecf20Sopenharmony_ci			return -EINVAL;
18308c2ecf20Sopenharmony_ci
18318c2ecf20Sopenharmony_ci		protocol = vhdr->h_vlan_encapsulated_proto;
18328c2ecf20Sopenharmony_ci		tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
18338c2ecf20Sopenharmony_ci		tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
18348c2ecf20Sopenharmony_ci	}
18358c2ecf20Sopenharmony_ci
18368c2ecf20Sopenharmony_ciout:
18378c2ecf20Sopenharmony_ci	*flags = tx_flags;
18388c2ecf20Sopenharmony_ci	return 0;
18398c2ecf20Sopenharmony_ci}
18408c2ecf20Sopenharmony_ci
18418c2ecf20Sopenharmony_ci/**
18428c2ecf20Sopenharmony_ci * iavf_tso - set up the TSO context descriptor
18438c2ecf20Sopenharmony_ci * @first: pointer to first Tx buffer for xmit
18448c2ecf20Sopenharmony_ci * @hdr_len: ptr to the size of the packet header
18458c2ecf20Sopenharmony_ci * @cd_type_cmd_tso_mss: Quad Word 1
18468c2ecf20Sopenharmony_ci *
18478c2ecf20Sopenharmony_ci * Returns 0 if no TSO is needed, 1 if TSO was set up, or a negative error code
18488c2ecf20Sopenharmony_ci **/
18498c2ecf20Sopenharmony_cistatic int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
18508c2ecf20Sopenharmony_ci		    u64 *cd_type_cmd_tso_mss)
18518c2ecf20Sopenharmony_ci{
18528c2ecf20Sopenharmony_ci	struct sk_buff *skb = first->skb;
18538c2ecf20Sopenharmony_ci	u64 cd_cmd, cd_tso_len, cd_mss;
18548c2ecf20Sopenharmony_ci	union {
18558c2ecf20Sopenharmony_ci		struct iphdr *v4;
18568c2ecf20Sopenharmony_ci		struct ipv6hdr *v6;
18578c2ecf20Sopenharmony_ci		unsigned char *hdr;
18588c2ecf20Sopenharmony_ci	} ip;
18598c2ecf20Sopenharmony_ci	union {
18608c2ecf20Sopenharmony_ci		struct tcphdr *tcp;
18618c2ecf20Sopenharmony_ci		struct udphdr *udp;
18628c2ecf20Sopenharmony_ci		unsigned char *hdr;
18638c2ecf20Sopenharmony_ci	} l4;
18648c2ecf20Sopenharmony_ci	u32 paylen, l4_offset;
18658c2ecf20Sopenharmony_ci	u16 gso_segs, gso_size;
18668c2ecf20Sopenharmony_ci	int err;
18678c2ecf20Sopenharmony_ci
18688c2ecf20Sopenharmony_ci	if (skb->ip_summed != CHECKSUM_PARTIAL)
18698c2ecf20Sopenharmony_ci		return 0;
18708c2ecf20Sopenharmony_ci
18718c2ecf20Sopenharmony_ci	if (!skb_is_gso(skb))
18728c2ecf20Sopenharmony_ci		return 0;
18738c2ecf20Sopenharmony_ci
18748c2ecf20Sopenharmony_ci	err = skb_cow_head(skb, 0);
18758c2ecf20Sopenharmony_ci	if (err < 0)
18768c2ecf20Sopenharmony_ci		return err;
18778c2ecf20Sopenharmony_ci
18788c2ecf20Sopenharmony_ci	ip.hdr = skb_network_header(skb);
18798c2ecf20Sopenharmony_ci	l4.hdr = skb_transport_header(skb);
18808c2ecf20Sopenharmony_ci
18818c2ecf20Sopenharmony_ci	/* initialize outer IP header fields */
18828c2ecf20Sopenharmony_ci	if (ip.v4->version == 4) {
18838c2ecf20Sopenharmony_ci		ip.v4->tot_len = 0;
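		/* note: the header checksum is cleared just below as well;
		 * the hardware rewrites both tot_len and check for every
		 * segment it carves out of the TSO super-frame
		 */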
18848c2ecf20Sopenharmony_ci ip.v4->check = 0; 18858c2ecf20Sopenharmony_ci } else { 18868c2ecf20Sopenharmony_ci ip.v6->payload_len = 0; 18878c2ecf20Sopenharmony_ci } 18888c2ecf20Sopenharmony_ci 18898c2ecf20Sopenharmony_ci if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | 18908c2ecf20Sopenharmony_ci SKB_GSO_GRE_CSUM | 18918c2ecf20Sopenharmony_ci SKB_GSO_IPXIP4 | 18928c2ecf20Sopenharmony_ci SKB_GSO_IPXIP6 | 18938c2ecf20Sopenharmony_ci SKB_GSO_UDP_TUNNEL | 18948c2ecf20Sopenharmony_ci SKB_GSO_UDP_TUNNEL_CSUM)) { 18958c2ecf20Sopenharmony_ci if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 18968c2ecf20Sopenharmony_ci (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { 18978c2ecf20Sopenharmony_ci l4.udp->len = 0; 18988c2ecf20Sopenharmony_ci 18998c2ecf20Sopenharmony_ci /* determine offset of outer transport header */ 19008c2ecf20Sopenharmony_ci l4_offset = l4.hdr - skb->data; 19018c2ecf20Sopenharmony_ci 19028c2ecf20Sopenharmony_ci /* remove payload length from outer checksum */ 19038c2ecf20Sopenharmony_ci paylen = skb->len - l4_offset; 19048c2ecf20Sopenharmony_ci csum_replace_by_diff(&l4.udp->check, 19058c2ecf20Sopenharmony_ci (__force __wsum)htonl(paylen)); 19068c2ecf20Sopenharmony_ci } 19078c2ecf20Sopenharmony_ci 19088c2ecf20Sopenharmony_ci /* reset pointers to inner headers */ 19098c2ecf20Sopenharmony_ci ip.hdr = skb_inner_network_header(skb); 19108c2ecf20Sopenharmony_ci l4.hdr = skb_inner_transport_header(skb); 19118c2ecf20Sopenharmony_ci 19128c2ecf20Sopenharmony_ci /* initialize inner IP header fields */ 19138c2ecf20Sopenharmony_ci if (ip.v4->version == 4) { 19148c2ecf20Sopenharmony_ci ip.v4->tot_len = 0; 19158c2ecf20Sopenharmony_ci ip.v4->check = 0; 19168c2ecf20Sopenharmony_ci } else { 19178c2ecf20Sopenharmony_ci ip.v6->payload_len = 0; 19188c2ecf20Sopenharmony_ci } 19198c2ecf20Sopenharmony_ci } 19208c2ecf20Sopenharmony_ci 19218c2ecf20Sopenharmony_ci /* determine offset of inner transport header */ 19228c2ecf20Sopenharmony_ci l4_offset = l4.hdr - skb->data; 19238c2ecf20Sopenharmony_ci 19248c2ecf20Sopenharmony_ci /* remove payload length from inner checksum */ 19258c2ecf20Sopenharmony_ci paylen = skb->len - l4_offset; 19268c2ecf20Sopenharmony_ci csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); 19278c2ecf20Sopenharmony_ci 19288c2ecf20Sopenharmony_ci /* compute length of segmentation header */ 19298c2ecf20Sopenharmony_ci *hdr_len = (l4.tcp->doff * 4) + l4_offset; 19308c2ecf20Sopenharmony_ci 19318c2ecf20Sopenharmony_ci /* pull values out of skb_shinfo */ 19328c2ecf20Sopenharmony_ci gso_size = skb_shinfo(skb)->gso_size; 19338c2ecf20Sopenharmony_ci gso_segs = skb_shinfo(skb)->gso_segs; 19348c2ecf20Sopenharmony_ci 19358c2ecf20Sopenharmony_ci /* update GSO size and bytecount with header size */ 19368c2ecf20Sopenharmony_ci first->gso_segs = gso_segs; 19378c2ecf20Sopenharmony_ci first->bytecount += (first->gso_segs - 1) * *hdr_len; 19388c2ecf20Sopenharmony_ci 19398c2ecf20Sopenharmony_ci /* find the field values */ 19408c2ecf20Sopenharmony_ci cd_cmd = IAVF_TX_CTX_DESC_TSO; 19418c2ecf20Sopenharmony_ci cd_tso_len = skb->len - *hdr_len; 19428c2ecf20Sopenharmony_ci cd_mss = gso_size; 19438c2ecf20Sopenharmony_ci *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) | 19448c2ecf20Sopenharmony_ci (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) | 19458c2ecf20Sopenharmony_ci (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT); 19468c2ecf20Sopenharmony_ci return 1; 19478c2ecf20Sopenharmony_ci} 19488c2ecf20Sopenharmony_ci 19498c2ecf20Sopenharmony_ci/** 19508c2ecf20Sopenharmony_ci * 
iavf_tx_enable_csum - Enable Tx checksum offloads 19518c2ecf20Sopenharmony_ci * @skb: send buffer 19528c2ecf20Sopenharmony_ci * @tx_flags: pointer to Tx flags currently set 19538c2ecf20Sopenharmony_ci * @td_cmd: Tx descriptor command bits to set 19548c2ecf20Sopenharmony_ci * @td_offset: Tx descriptor header offsets to set 19558c2ecf20Sopenharmony_ci * @tx_ring: Tx descriptor ring 19568c2ecf20Sopenharmony_ci * @cd_tunneling: ptr to context desc bits 19578c2ecf20Sopenharmony_ci **/ 19588c2ecf20Sopenharmony_cistatic int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, 19598c2ecf20Sopenharmony_ci u32 *td_cmd, u32 *td_offset, 19608c2ecf20Sopenharmony_ci struct iavf_ring *tx_ring, 19618c2ecf20Sopenharmony_ci u32 *cd_tunneling) 19628c2ecf20Sopenharmony_ci{ 19638c2ecf20Sopenharmony_ci union { 19648c2ecf20Sopenharmony_ci struct iphdr *v4; 19658c2ecf20Sopenharmony_ci struct ipv6hdr *v6; 19668c2ecf20Sopenharmony_ci unsigned char *hdr; 19678c2ecf20Sopenharmony_ci } ip; 19688c2ecf20Sopenharmony_ci union { 19698c2ecf20Sopenharmony_ci struct tcphdr *tcp; 19708c2ecf20Sopenharmony_ci struct udphdr *udp; 19718c2ecf20Sopenharmony_ci unsigned char *hdr; 19728c2ecf20Sopenharmony_ci } l4; 19738c2ecf20Sopenharmony_ci unsigned char *exthdr; 19748c2ecf20Sopenharmony_ci u32 offset, cmd = 0; 19758c2ecf20Sopenharmony_ci __be16 frag_off; 19768c2ecf20Sopenharmony_ci u8 l4_proto = 0; 19778c2ecf20Sopenharmony_ci 19788c2ecf20Sopenharmony_ci if (skb->ip_summed != CHECKSUM_PARTIAL) 19798c2ecf20Sopenharmony_ci return 0; 19808c2ecf20Sopenharmony_ci 19818c2ecf20Sopenharmony_ci ip.hdr = skb_network_header(skb); 19828c2ecf20Sopenharmony_ci l4.hdr = skb_transport_header(skb); 19838c2ecf20Sopenharmony_ci 19848c2ecf20Sopenharmony_ci /* compute outer L2 header size */ 19858c2ecf20Sopenharmony_ci offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT; 19868c2ecf20Sopenharmony_ci 19878c2ecf20Sopenharmony_ci if (skb->encapsulation) { 19888c2ecf20Sopenharmony_ci u32 tunnel = 0; 19898c2ecf20Sopenharmony_ci /* define outer network header type */ 19908c2ecf20Sopenharmony_ci if (*tx_flags & IAVF_TX_FLAGS_IPV4) { 19918c2ecf20Sopenharmony_ci tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ? 
19928c2ecf20Sopenharmony_ci IAVF_TX_CTX_EXT_IP_IPV4 : 19938c2ecf20Sopenharmony_ci IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM; 19948c2ecf20Sopenharmony_ci 19958c2ecf20Sopenharmony_ci l4_proto = ip.v4->protocol; 19968c2ecf20Sopenharmony_ci } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) { 19978c2ecf20Sopenharmony_ci tunnel |= IAVF_TX_CTX_EXT_IP_IPV6; 19988c2ecf20Sopenharmony_ci 19998c2ecf20Sopenharmony_ci exthdr = ip.hdr + sizeof(*ip.v6); 20008c2ecf20Sopenharmony_ci l4_proto = ip.v6->nexthdr; 20018c2ecf20Sopenharmony_ci if (l4.hdr != exthdr) 20028c2ecf20Sopenharmony_ci ipv6_skip_exthdr(skb, exthdr - skb->data, 20038c2ecf20Sopenharmony_ci &l4_proto, &frag_off); 20048c2ecf20Sopenharmony_ci } 20058c2ecf20Sopenharmony_ci 20068c2ecf20Sopenharmony_ci /* define outer transport */ 20078c2ecf20Sopenharmony_ci switch (l4_proto) { 20088c2ecf20Sopenharmony_ci case IPPROTO_UDP: 20098c2ecf20Sopenharmony_ci tunnel |= IAVF_TXD_CTX_UDP_TUNNELING; 20108c2ecf20Sopenharmony_ci *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; 20118c2ecf20Sopenharmony_ci break; 20128c2ecf20Sopenharmony_ci case IPPROTO_GRE: 20138c2ecf20Sopenharmony_ci tunnel |= IAVF_TXD_CTX_GRE_TUNNELING; 20148c2ecf20Sopenharmony_ci *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; 20158c2ecf20Sopenharmony_ci break; 20168c2ecf20Sopenharmony_ci case IPPROTO_IPIP: 20178c2ecf20Sopenharmony_ci case IPPROTO_IPV6: 20188c2ecf20Sopenharmony_ci *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL; 20198c2ecf20Sopenharmony_ci l4.hdr = skb_inner_network_header(skb); 20208c2ecf20Sopenharmony_ci break; 20218c2ecf20Sopenharmony_ci default: 20228c2ecf20Sopenharmony_ci if (*tx_flags & IAVF_TX_FLAGS_TSO) 20238c2ecf20Sopenharmony_ci return -1; 20248c2ecf20Sopenharmony_ci 20258c2ecf20Sopenharmony_ci skb_checksum_help(skb); 20268c2ecf20Sopenharmony_ci return 0; 20278c2ecf20Sopenharmony_ci } 20288c2ecf20Sopenharmony_ci 20298c2ecf20Sopenharmony_ci /* compute outer L3 header size */ 20308c2ecf20Sopenharmony_ci tunnel |= ((l4.hdr - ip.hdr) / 4) << 20318c2ecf20Sopenharmony_ci IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT; 20328c2ecf20Sopenharmony_ci 20338c2ecf20Sopenharmony_ci /* switch IP header pointer from outer to inner header */ 20348c2ecf20Sopenharmony_ci ip.hdr = skb_inner_network_header(skb); 20358c2ecf20Sopenharmony_ci 20368c2ecf20Sopenharmony_ci /* compute tunnel header size */ 20378c2ecf20Sopenharmony_ci tunnel |= ((ip.hdr - l4.hdr) / 2) << 20388c2ecf20Sopenharmony_ci IAVF_TXD_CTX_QW0_NATLEN_SHIFT; 20398c2ecf20Sopenharmony_ci 20408c2ecf20Sopenharmony_ci /* indicate if we need to offload outer UDP header */ 20418c2ecf20Sopenharmony_ci if ((*tx_flags & IAVF_TX_FLAGS_TSO) && 20428c2ecf20Sopenharmony_ci !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && 20438c2ecf20Sopenharmony_ci (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) 20448c2ecf20Sopenharmony_ci tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK; 20458c2ecf20Sopenharmony_ci 20468c2ecf20Sopenharmony_ci /* record tunnel offload values */ 20478c2ecf20Sopenharmony_ci *cd_tunneling |= tunnel; 20488c2ecf20Sopenharmony_ci 20498c2ecf20Sopenharmony_ci /* switch L4 header pointer from outer to inner */ 20508c2ecf20Sopenharmony_ci l4.hdr = skb_inner_transport_header(skb); 20518c2ecf20Sopenharmony_ci l4_proto = 0; 20528c2ecf20Sopenharmony_ci 20538c2ecf20Sopenharmony_ci /* reset type as we transition from outer to inner headers */ 20548c2ecf20Sopenharmony_ci *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6); 20558c2ecf20Sopenharmony_ci if (ip.v4->version == 4) 20568c2ecf20Sopenharmony_ci *tx_flags |= IAVF_TX_FLAGS_IPV4; 20578c2ecf20Sopenharmony_ci if (ip.v6->version == 6) 
20588c2ecf20Sopenharmony_ci			*tx_flags |= IAVF_TX_FLAGS_IPV6;
20598c2ecf20Sopenharmony_ci	}
20608c2ecf20Sopenharmony_ci
20618c2ecf20Sopenharmony_ci	/* Enable IP checksum offloads */
20628c2ecf20Sopenharmony_ci	if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
20638c2ecf20Sopenharmony_ci		l4_proto = ip.v4->protocol;
20648c2ecf20Sopenharmony_ci		/* the stack computes the IP header already; the only time we
20658c2ecf20Sopenharmony_ci		 * need the hardware to recompute it is in the case of TSO.
20668c2ecf20Sopenharmony_ci		 */
20678c2ecf20Sopenharmony_ci		cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
20688c2ecf20Sopenharmony_ci		       IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
20698c2ecf20Sopenharmony_ci		       IAVF_TX_DESC_CMD_IIPT_IPV4;
20708c2ecf20Sopenharmony_ci	} else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
20718c2ecf20Sopenharmony_ci		cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
20728c2ecf20Sopenharmony_ci
20738c2ecf20Sopenharmony_ci		exthdr = ip.hdr + sizeof(*ip.v6);
20748c2ecf20Sopenharmony_ci		l4_proto = ip.v6->nexthdr;
20758c2ecf20Sopenharmony_ci		if (l4.hdr != exthdr)
20768c2ecf20Sopenharmony_ci			ipv6_skip_exthdr(skb, exthdr - skb->data,
20778c2ecf20Sopenharmony_ci					 &l4_proto, &frag_off);
20788c2ecf20Sopenharmony_ci	}
20798c2ecf20Sopenharmony_ci
20808c2ecf20Sopenharmony_ci	/* compute inner L3 header size */
20818c2ecf20Sopenharmony_ci	offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
20828c2ecf20Sopenharmony_ci
20838c2ecf20Sopenharmony_ci	/* Enable L4 checksum offloads */
20848c2ecf20Sopenharmony_ci	switch (l4_proto) {
20858c2ecf20Sopenharmony_ci	case IPPROTO_TCP:
20868c2ecf20Sopenharmony_ci		/* enable checksum offloads */
20878c2ecf20Sopenharmony_ci		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
20888c2ecf20Sopenharmony_ci		offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
20898c2ecf20Sopenharmony_ci		break;
20908c2ecf20Sopenharmony_ci	case IPPROTO_SCTP:
20918c2ecf20Sopenharmony_ci		/* enable SCTP checksum offload */
20928c2ecf20Sopenharmony_ci		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
20938c2ecf20Sopenharmony_ci		offset |= (sizeof(struct sctphdr) >> 2) <<
20948c2ecf20Sopenharmony_ci			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
20958c2ecf20Sopenharmony_ci		break;
20968c2ecf20Sopenharmony_ci	case IPPROTO_UDP:
20978c2ecf20Sopenharmony_ci		/* enable UDP checksum offload */
20988c2ecf20Sopenharmony_ci		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
20998c2ecf20Sopenharmony_ci		offset |= (sizeof(struct udphdr) >> 2) <<
21008c2ecf20Sopenharmony_ci			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
21018c2ecf20Sopenharmony_ci		break;
21028c2ecf20Sopenharmony_ci	default:
21038c2ecf20Sopenharmony_ci		if (*tx_flags & IAVF_TX_FLAGS_TSO)
21048c2ecf20Sopenharmony_ci			return -1;
21058c2ecf20Sopenharmony_ci		skb_checksum_help(skb);
21068c2ecf20Sopenharmony_ci		return 0;
21078c2ecf20Sopenharmony_ci	}
21088c2ecf20Sopenharmony_ci
21098c2ecf20Sopenharmony_ci	*td_cmd |= cmd;
21108c2ecf20Sopenharmony_ci	*td_offset |= offset;
21118c2ecf20Sopenharmony_ci
21128c2ecf20Sopenharmony_ci	return 1;
21138c2ecf20Sopenharmony_ci}
21148c2ecf20Sopenharmony_ci
21158c2ecf20Sopenharmony_ci/**
21168c2ecf20Sopenharmony_ci * iavf_create_tx_ctx - Build the Tx context descriptor
21178c2ecf20Sopenharmony_ci * @tx_ring: ring to create the descriptor on
21188c2ecf20Sopenharmony_ci * @cd_type_cmd_tso_mss: Quad Word 1
21198c2ecf20Sopenharmony_ci * @cd_tunneling: Quad Word 0 - bits 0-31
21208c2ecf20Sopenharmony_ci * @cd_l2tag2: Quad Word 0 - bits 32-63
21218c2ecf20Sopenharmony_ci **/
21228c2ecf20Sopenharmony_cistatic void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
21238c2ecf20Sopenharmony_ci			       const u64 cd_type_cmd_tso_mss,
/**
 * iavf_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring: ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct iavf_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = IAVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
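/* Illustrative sketch, not called by the driver: how the Quad Word 1 value
 * consumed by iavf_create_tx_ctx() is typically assembled for a TSO packet.
 * This mirrors what iavf_tso() does; the IAVF_TXD_CTX_QW1_* shifts and
 * IAVF_TX_CTX_DESC_TSO are assumed to come from iavf_type.h.
 */
static inline u64 iavf_example_tso_ctx_qw1(u32 tso_len, u16 mss)
{
	return IAVF_TX_DESC_DTYPE_CONTEXT |
	       ((u64)IAVF_TX_CTX_DESC_TSO << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
	       ((u64)tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
	       ((u64)mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
}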
/**
 * __iavf_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 **/
bool __iavf_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1.  We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > IAVF_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(IAVF_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > IAVF_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
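/* Illustrative sketch, not called by the driver: the test above in
 * miniature.  Given an array of fragment sizes and the gso_size, return
 * true if any IAVF_MAX_BUFFER_TXD - 2 consecutive fragments sum to less
 * than roughly gso_size (the real code folds an off-by-one into its
 * running sum), meaning a single TSO segment could need more descriptors
 * than the hardware allows and the skb must be linearized.  The real
 * function also accounts for read-request alignment and stale fragment
 * remainders, which this sketch ignores.
 */
static inline bool iavf_example_needs_linearize(const unsigned int *frag_sz,
						int nr_frags,
						unsigned int gso_size)
{
	int win = IAVF_MAX_BUFFER_TXD - 2;
	int i, j;

	for (i = 0; i + win <= nr_frags; i++) {
		unsigned int sum = 0;

		for (j = 0; j < win; j++)
			sum += frag_sz[i + j];
		if (sum < gso_size)
			return true;
	}
	return false;
}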
/**
 * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

/**
 * iavf_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len: size of the packet header
 * @td_cmd: the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
			       struct iavf_tx_buffer *first, u32 tx_flags,
			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	skb_frag_t *frag;
	struct iavf_tx_buffer *tx_bi;
	struct iavf_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;

	if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
		td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
			 IAVF_TX_FLAGS_VLAN_SHIFT;
	}

	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = IAVF_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		/* align size to end of page */
		max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
		tx_desc->buffer_addr = cpu_to_le64(dma);
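		/* Worked example (illustrative, assuming the usual
		 * IAVF_MAX_DATA_PER_TXD of 16KB - 1 and
		 * IAVF_MAX_READ_REQ_SIZE of 4KB): a 32KB buffer mapped at a
		 * 4KB-aligned address is emitted below as descriptors of
		 * 12KB, 12KB and 8KB; each chunk is sized so the next
		 * descriptor starts on a read-request boundary.
		 */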
		while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   max_data, td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = IAVF_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = IAVF_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* write last descriptor with RS and EOP bits */
	td_cmd |= IAVF_TXD_CMD;
	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag);

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;
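	/* Note (illustrative): the doorbell below is deliberately skipped
	 * when the stack signals that more frames are queued
	 * (netdev_xmit_more()) and the queue is still running, batching
	 * several descriptors per MMIO tail write.  A stopped queue always
	 * gets the doorbell so hardware fetches the work already posted.
	 */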
23688c2ecf20Sopenharmony_ci */ 23698c2ecf20Sopenharmony_ci wmb(); 23708c2ecf20Sopenharmony_ci 23718c2ecf20Sopenharmony_ci /* set next_to_watch value indicating a packet is present */ 23728c2ecf20Sopenharmony_ci first->next_to_watch = tx_desc; 23738c2ecf20Sopenharmony_ci 23748c2ecf20Sopenharmony_ci /* notify HW of packet */ 23758c2ecf20Sopenharmony_ci if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { 23768c2ecf20Sopenharmony_ci writel(i, tx_ring->tail); 23778c2ecf20Sopenharmony_ci } 23788c2ecf20Sopenharmony_ci 23798c2ecf20Sopenharmony_ci return; 23808c2ecf20Sopenharmony_ci 23818c2ecf20Sopenharmony_cidma_error: 23828c2ecf20Sopenharmony_ci dev_info(tx_ring->dev, "TX DMA map failed\n"); 23838c2ecf20Sopenharmony_ci 23848c2ecf20Sopenharmony_ci /* clear dma mappings for failed tx_bi map */ 23858c2ecf20Sopenharmony_ci for (;;) { 23868c2ecf20Sopenharmony_ci tx_bi = &tx_ring->tx_bi[i]; 23878c2ecf20Sopenharmony_ci iavf_unmap_and_free_tx_resource(tx_ring, tx_bi); 23888c2ecf20Sopenharmony_ci if (tx_bi == first) 23898c2ecf20Sopenharmony_ci break; 23908c2ecf20Sopenharmony_ci if (i == 0) 23918c2ecf20Sopenharmony_ci i = tx_ring->count; 23928c2ecf20Sopenharmony_ci i--; 23938c2ecf20Sopenharmony_ci } 23948c2ecf20Sopenharmony_ci 23958c2ecf20Sopenharmony_ci tx_ring->next_to_use = i; 23968c2ecf20Sopenharmony_ci} 23978c2ecf20Sopenharmony_ci 23988c2ecf20Sopenharmony_ci/** 23998c2ecf20Sopenharmony_ci * iavf_xmit_frame_ring - Sends buffer on Tx ring 24008c2ecf20Sopenharmony_ci * @skb: send buffer 24018c2ecf20Sopenharmony_ci * @tx_ring: ring to send buffer on 24028c2ecf20Sopenharmony_ci * 24038c2ecf20Sopenharmony_ci * Returns NETDEV_TX_OK if sent, else an error code 24048c2ecf20Sopenharmony_ci **/ 24058c2ecf20Sopenharmony_cistatic netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb, 24068c2ecf20Sopenharmony_ci struct iavf_ring *tx_ring) 24078c2ecf20Sopenharmony_ci{ 24088c2ecf20Sopenharmony_ci u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT; 24098c2ecf20Sopenharmony_ci u32 cd_tunneling = 0, cd_l2tag2 = 0; 24108c2ecf20Sopenharmony_ci struct iavf_tx_buffer *first; 24118c2ecf20Sopenharmony_ci u32 td_offset = 0; 24128c2ecf20Sopenharmony_ci u32 tx_flags = 0; 24138c2ecf20Sopenharmony_ci __be16 protocol; 24148c2ecf20Sopenharmony_ci u32 td_cmd = 0; 24158c2ecf20Sopenharmony_ci u8 hdr_len = 0; 24168c2ecf20Sopenharmony_ci int tso, count; 24178c2ecf20Sopenharmony_ci 24188c2ecf20Sopenharmony_ci /* prefetch the data, we'll need it later */ 24198c2ecf20Sopenharmony_ci prefetch(skb->data); 24208c2ecf20Sopenharmony_ci 24218c2ecf20Sopenharmony_ci iavf_trace(xmit_frame_ring, skb, tx_ring); 24228c2ecf20Sopenharmony_ci 24238c2ecf20Sopenharmony_ci count = iavf_xmit_descriptor_count(skb); 24248c2ecf20Sopenharmony_ci if (iavf_chk_linearize(skb, count)) { 24258c2ecf20Sopenharmony_ci if (__skb_linearize(skb)) { 24268c2ecf20Sopenharmony_ci dev_kfree_skb_any(skb); 24278c2ecf20Sopenharmony_ci return NETDEV_TX_OK; 24288c2ecf20Sopenharmony_ci } 24298c2ecf20Sopenharmony_ci count = iavf_txd_use_count(skb->len); 24308c2ecf20Sopenharmony_ci tx_ring->tx_stats.tx_linearize++; 24318c2ecf20Sopenharmony_ci } 24328c2ecf20Sopenharmony_ci 24338c2ecf20Sopenharmony_ci /* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD, 24348c2ecf20Sopenharmony_ci * + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD, 24358c2ecf20Sopenharmony_ci * + 4 desc gap to avoid the cache line where head is, 24368c2ecf20Sopenharmony_ci * + 1 desc for context descriptor, 24378c2ecf20Sopenharmony_ci * otherwise try next time 24388c2ecf20Sopenharmony_ci */ 
	if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* prepare the xmit flags */
	if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= IAVF_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= IAVF_TX_FLAGS_IPV6;

	tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= IAVF_TX_FLAGS_TSO;

	/* Always offload the checksum, since it's in the data descriptor */
	tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				  tx_ring, &cd_tunneling);
	if (tso < 0)
		goto out_drop;

	/* always enable CRC insertion offload */
	td_cmd |= IAVF_TX_DESC_CMD_ICRC;

	iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
	return NETDEV_TX_OK;
}
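/* Illustrative sketch, not called by the driver: the per-buffer data
 * descriptor count that the reservation above is built from.  Each data
 * descriptor carries at most IAVF_MAX_DATA_PER_TXD_ALIGNED bytes once
 * mappings are aligned, so a buffer needs roughly the rounded-up quotient
 * below; the real iavf_txd_use_count() computes a similar bound with
 * cheaper multiply/shift arithmetic.
 */
static inline unsigned int iavf_example_txd_count(unsigned int size)
{
	return DIV_ROUND_UP(size, IAVF_MAX_DATA_PER_TXD_ALIGNED);
}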
/**
 * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
		if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
			return NETDEV_TX_OK;
		skb->len = IAVF_MIN_TX_LEN;
		skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
	}

	return iavf_xmit_frame_ring(skb, tx_ring);
}
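/* Note (illustrative, assuming IAVF_MIN_TX_LEN is 17 as in the related
 * i40e driver): the hardware can pad a frame out to the 60-byte Ethernet
 * minimum on its own, but only once it is at least IAVF_MIN_TX_LEN bytes
 * long; anything shorter is zero-padded in software by skb_pad() above.
 */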