// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/prefetch.h>

#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
}

#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)

/**
 * iavf_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
					    struct iavf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * iavf_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
static void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

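	/* All buffers are freed at this point; the memsets below return the
	 * buffer-info array and the descriptor memory to a pristine state so
	 * the ring can be reused without reallocation.
	 */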
	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * iavf_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void iavf_free_tx_resources(struct iavf_ring *tx_ring)
{
	iavf_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

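/* Note on the arithmetic in iavf_get_tx_pending() below: next_to_clean and
 * next_to_use are indices into a ring of ring->count descriptors, so the
 * number of in-flight descriptors is (tail - head) while the ring has not
 * wrapped and (tail + ring->count - head) once it has.
 */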
/**
 * iavf_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
static u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
	u32 head, tail;

	/* underlying hardware might not allow access and/or always return
	 * 0 for the head/tail registers so just use the cached values
	 */
	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

/**
 * iavf_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 **/
static void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
{
	u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
		  IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
		  IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
		  IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
		  /* allow 00 to be written to the index */;

	wr32(&vsi->back->hw,
	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
	     val);
}

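/* Recovery strategy for the function below: rather than resetting a stalled
 * queue outright, it is nudged with a software interrupt (iavf_force_wb())
 * so the hardware writes back any completed descriptors and NAPI gets
 * another chance to clean the ring.
 */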
/**
 * iavf_detect_recover_hung - Function to detect and recover hung_queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function is to check each of
 * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
 **/
void iavf_detect_recover_hung(struct iavf_vsi *vsi)
{
	struct iavf_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__IAVF_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->back->num_active_queues; i++) {
		tx_ring = &vsi->back->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				iavf_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to iavf_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
				iavf_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}
}

#define WB_STRIDE 4

/**
 * iavf_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
			      struct iavf_ring *tx_ring, int napi_budget)
{
	int i = tx_ring->next_to_clean;
	struct iavf_tx_buffer *tx_buf;
	struct iavf_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = IAVF_DEFAULT_IRQ_WORK;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = IAVF_TX_DESC(tx_ring, i);
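	/* Bias the index negative (i runs over [-count, 0)) so the ring-wrap
	 * checks below reduce to the cheap test "if (unlikely(!i))" instead
	 * of a compare against tx_ring->count on every iteration.
	 */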
	i -= tx_ring->count;

	do {
		struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			iavf_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = IAVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = IAVF_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

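	/* With WB_ON_ITR, descriptor write-backs are deferred to ITR expiry
	 * rather than signalled per packet; if only a handful of descriptors
	 * (fewer than WB_STRIDE) are outstanding they could linger, so
	 * arm_wb requests an explicit write-back kick.
	 */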
	if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = iavf_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
		    (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
				  struct iavf_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
	      IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */

	wr32(&vsi->back->hw,
	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
	q_vector->arm_wb_state = true;
}

static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
					struct iavf_ring_container *rc)
{
	return &q_vector->rx == rc;
}

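/* The multipliers below scale the adaptive ITR step with link speed: in
 * iavf_itr_divisor() they become the divisor applied to the measured
 * average wire size, so a faster link maps a given byte rate to a smaller
 * interrupt delay.
 */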
#define IAVF_AIM_MULTIPLIER_100G	2560
#define IAVF_AIM_MULTIPLIER_50G		1280
#define IAVF_AIM_MULTIPLIER_40G		1024
#define IAVF_AIM_MULTIPLIER_20G		512
#define IAVF_AIM_MULTIPLIER_10G		256
#define IAVF_AIM_MULTIPLIER_1G		32

static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps)
{
	switch (speed_mbps) {
	case SPEED_100000:
		return IAVF_AIM_MULTIPLIER_100G;
	case SPEED_50000:
		return IAVF_AIM_MULTIPLIER_50G;
	case SPEED_40000:
		return IAVF_AIM_MULTIPLIER_40G;
	case SPEED_25000:
	case SPEED_20000:
		return IAVF_AIM_MULTIPLIER_20G;
	case SPEED_10000:
	default:
		return IAVF_AIM_MULTIPLIER_10G;
	case SPEED_1000:
	case SPEED_100:
		return IAVF_AIM_MULTIPLIER_1G;
	}
}

static unsigned int
iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl)
{
	switch (speed_virtchnl) {
	case VIRTCHNL_LINK_SPEED_40GB:
		return IAVF_AIM_MULTIPLIER_40G;
	case VIRTCHNL_LINK_SPEED_25GB:
	case VIRTCHNL_LINK_SPEED_20GB:
		return IAVF_AIM_MULTIPLIER_20G;
	case VIRTCHNL_LINK_SPEED_10GB:
	default:
		return IAVF_AIM_MULTIPLIER_10G;
	case VIRTCHNL_LINK_SPEED_1GB:
	case VIRTCHNL_LINK_SPEED_100MB:
		return IAVF_AIM_MULTIPLIER_1G;
	}
}

static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter)
{
	if (ADV_LINK_SUPPORT(adapter))
		return IAVF_ITR_ADAPTIVE_MIN_INC *
			iavf_mbps_itr_multiplier(adapter->link_speed_mbps);
	else
		return IAVF_ITR_ADAPTIVE_MIN_INC *
			iavf_virtchnl_itr_multiplier(adapter->link_speed);
}

/**
 * iavf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void iavf_update_itr(struct iavf_q_vector *q_vector,
			    struct iavf_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
		return;

	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = iavf_container_is_rx(q_vector, rc) ?
	      IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
	      IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	if (iavf_container_is_rx(q_vector, rc)) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
			itr = IAVF_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
		     IAVF_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
		if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
			itr &= IAVF_ITR_ADAPTIVE_LATENCY;
			itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

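	/* Summary of the packet-count bands: fewer than 4 packets leans
	 * latency, 4-56 nudges the delay up by a small fixed increment,
	 * 56-112 is treated as "just right", 112-256 halves the delay, and
	 * anything above 256 falls through to the byte-based estimate at
	 * adjust_by_size.
	 */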
	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= IAVF_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr /= 2;
		itr &= IAVF_ITR_MASK;
		if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
			itr = IAVF_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = IAVF_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to
	 * fine tune it just use the formula below to try and dial in an
	 * exact value given the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
	if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
		avg_wire_size /= 2;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
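	/* Worked example (assuming IAVF_ITR_ADAPTIVE_MIN_INC is 2 and a 40G
	 * link, i.e. a divisor of 2 * 1024): a bulk flow that plateaus at
	 * avg_wire_size == 32256 yields DIV_ROUND_UP(32256, 2048) * 2 == 32
	 * usecs, in line with the ~30K ints/sec ceiling noted above.
	 */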
	itr += DIV_ROUND_UP(avg_wire_size,
			    iavf_itr_divisor(q_vector->adapter)) *
	       IAVF_ITR_ADAPTIVE_MIN_INC;

	if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
		itr &= IAVF_ITR_ADAPTIVE_LATENCY;
		itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}

/**
 * iavf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
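	/* -1 is the "no pending work" sentinel checked by
	 * iavf_detect_recover_hung(); a real packet count is always >= 0
	 * because it is masked with INT_MAX there.
	 */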
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * iavf_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     iavf_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IAVF_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * iavf_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{
	iavf_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * iavf_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * iavf_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

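/* For iavf_rx_offset() below: rings that use build_skb() reserve
 * IAVF_SKB_PAD bytes of headroom in front of the packet data so the stack
 * can prepend headers without reallocating; legacy rings DMA to offset 0.
 */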
/**
 * iavf_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
}

/**
 * iavf_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
				   struct iavf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 iavf_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IAVF_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, iavf_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = iavf_rx_offset(rx_ring);

	/* initialize pagecnt_bias to 1 representing we fully own page */
	bi->pagecnt_bias = 1;

	return true;
}

/**
 * iavf_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void iavf_receive_skb(struct iavf_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct iavf_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) &&
		 vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * iavf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union iavf_rx_desc *rx_desc;
	struct iavf_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = IAVF_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!iavf_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = IAVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		iavf_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		iavf_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

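/* A note for iavf_rx_checksum() below: the hardware only reports whether
 * the checksum verified or failed, never the raw checksum value, so the
 * driver uses CHECKSUM_UNNECESSARY (or leaves CHECKSUM_NONE), not
 * CHECKSUM_COMPLETE.
 */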
/**
 * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/
static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
				    struct sk_buff *skb,
				    union iavf_rx_desc *rx_desc)
{
	struct iavf_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
		   IAVF_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
		    IAVF_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case IAVF_RX_PTYPE_INNER_PROT_TCP:
	case IAVF_RX_PTYPE_INNER_PROT_UDP:
	case IAVF_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		fallthrough;
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * iavf_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int iavf_ptype_to_htype(u8 ptype)
{
	struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

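/* For the hash extraction below: the two-bit FLTSTAT field in qword1
 * selects what the hardware placed in qword0 (a filter ID vs. the RSS
 * hash), so the code compares against the full
 * IAVF_RX_DESC_FLTSTAT_RSS_HASH value rather than testing a single bit.
 */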
	}
}

/**
 * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @rx_ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
static inline
void iavf_process_skb_fields(struct iavf_ring *rx_ring,
			     union iavf_rx_desc *rx_desc, struct sk_buff *skb,
			     u8 rx_ptype)
{
	iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * iavf_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
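 *
 * For example, a 42-byte ARP frame would be zero-padded to the 60-byte
 * ETH_ZLEN minimum by eth_skb_pad() below; when that padding fails the
 * skb has already been freed, which is why a true return tells the
 * caller to forget the pointer.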
 **/
static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
{
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * iavf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
			       struct iavf_rx_buffer *old_buff)
{
	struct iavf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_bi[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

/**
 * iavf_can_reuse_rx_page - Determine if this page can be reused by
 * the adapter for another receive
 *
 * @rx_buffer: buffer containing the page
 *
 * If page is reusable, rx_buffer->page_offset is adjusted to point to
 * an unused region in the page.
 *
 * For small pages, @truesize will be a constant value, half the size
 * of the memory at page. We'll attempt to alternate between high and
 * low halves of the page, with one half ready for use by the hardware
 * and the other half being consumed by the stack. We use the page
 * ref count to determine whether the stack has finished consuming the
 * portion of this page that was passed up with a previous packet. If
 * the page ref count is >1, we'll assume the "other" half page is
 * still busy, and this page cannot be reused.
 *
 * For larger pages, @truesize will be the actual space used by the
 * received packet (adjusted upward to an even multiple of the cache
 * line size). This will advance through the page by the amount
 * actually consumed by the received packets while there is still
 * space for a buffer. Each region of larger pages will be used at
 * most once, after which the page will not be reused.
 *
 * In either case, if the page is reusable its refcount is increased.
 **/
static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* Is any reuse possible? */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IAVF_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
	if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * It will just attach the page as a frag to the skb.
 *
 * The function will then update the page offset.
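 *
 * On small-page systems that update is the half-page flip described above
 * (page_offset ^= truesize, e.g. 0 <-> 2048 for 4K pages with 2K buffers);
 * on larger pages the offset simply advances by truesize.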
 **/
static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
			     struct iavf_rx_buffer *rx_buffer,
			     struct sk_buff *skb,
			     unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
#endif

	if (!size)
		return;

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

/**
 * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
 * @rx_ring: rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
						 const unsigned int size)
{
	struct iavf_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);
	if (!size)
		return rx_buffer;

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

/**
 * iavf_construct_skb - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
					  struct iavf_rx_buffer *rx_buffer,
					  unsigned int size)
{
	void *va;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	if (!rx_buffer)
		return NULL;

	/* prefetch first cache line of first page */
	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
	net_prefetch(va);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       IAVF_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IAVF_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				rx_buffer->page_offset + headlen,
				size, truesize);

		/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		/* buffer is unused, reset bias back to rx_buffer */
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * iavf_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buffer: Rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
				      struct iavf_rx_buffer *rx_buffer,
				      unsigned int size)
{
	void *va;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
#endif
	struct sk_buff *skb;

	if (!rx_buffer || !size)
		return NULL;

	/* prefetch first cache line of first page */
	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
	net_prefetch(va);

	/* build an skb around the page buffer */
	skb = napi_build_skb(va - IAVF_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, IAVF_SKB_PAD);
	__skb_put(skb, size);

	/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

/**
 * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buffer. It will
 * either recycle the buffer or unmap it and free the associated resources.
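 *
 * Note that the unmap path below passes IAVF_RX_DMA_ATTR (which in this
 * driver skips the CPU sync and allows weak ordering), since the portions
 * handed to the stack were already synced piecemeal in iavf_get_rx_buffer().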
 */
static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
			       struct iavf_rx_buffer *rx_buffer)
{
	if (!rx_buffer)
		return;

	if (iavf_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		iavf_reuse_rx_page(rx_ring, rx_buffer);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     iavf_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;
}

/**
 * iavf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
			    union iavf_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IAVF_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
	if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
		return false;

	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
	bool failure = false;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		struct iavf_rx_buffer *rx_buffer;
		union iavf_rx_desc *rx_desc;
		unsigned int size;
		u16 vlan_tag = 0;
		u8 rx_ptype;
		u64 qword;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
			failure = failure ||
				  iavf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then the length will be non-zero
		 */
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();
#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
		if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
			break;

		size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
		       IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;

		iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
		rx_buffer = iavf_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (skb)
			iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = iavf_build_skb(rx_ring, rx_buffer, size);
		else
			skb = iavf_construct_skb(rx_ring, rx_buffer, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			if (rx_buffer && size)
				rx_buffer->pagecnt_bias++;
			break;
		}

		iavf_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;

		if (iavf_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* ERR_MASK will only have valid bits if EOP set, and
		 * what we are doing here is actually checking
		 * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
		 * the error field
		 */
		if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
			dev_kfree_skb_any(skb);
			skb = NULL;
			continue;
		}

		if (iavf_cleanup_headers(rx_ring, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
			   IAVF_RXD_QW1_PTYPE_SHIFT;

		/* populate checksum, VLAN, and protocol */
		iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
		    rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
			vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
		if (rx_desc->wb.qword2.ext_status &
		    cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
		    rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
			vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);

		iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
		iavf_receive_skb(rx_ring, skb, vlan_tag);
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_packets;
}

static inline u32 iavf_buildreg_itr(const int type, u16 itr)
{
	u32 val;

	/* We don't bother with setting the CLEARPBA bit as the data sheet
	 * points out doing so is "meaningless since it was already
	 * auto-cleared". The auto-clearing happens when the interrupt is
	 * asserted.
	 *
	 * Hardware errata 28 also indicates that writing to a
	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
	 * an event in the PBA anyway, so we need to rely on the automask
	 * to hold pending events for us until the interrupt is re-enabled.
	 *
	 * The itr value is reported in microseconds, and the register
	 * value is recorded in 2 microsecond units. For this reason we
	 * only need to shift by the interval shift - 1 instead of the
	 * full value.
	 */
	itr &= IAVF_ITR_MASK;

	val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
	      (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
	      (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));

	return val;
}

/* a small macro to shorten up some long lines */
#define INTREG IAVF_VFINT_DYN_CTLN1

/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3

/**
 * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 **/
static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
					  struct iavf_q_vector *q_vector)
{
	struct iavf_hw *hw = &vsi->back->hw;
	u32 intval;

	/* These will do nothing if dynamic updates are not enabled */
	iavf_update_itr(q_vector, &q_vector->tx);
	iavf_update_itr(q_vector, &q_vector->rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. If we must reduce an ITR, that reduction is given highest priority
	 * 3. We then give priority to increasing ITR based on amount
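	 *
	 * For example, if Rx wants to drop from 50 usecs to 20 usecs while
	 * Tx wants to rise, only the Rx reduction is written on this
	 * interrupt; since a single ITR is updated per interrupt, the Tx
	 * increase is picked up on a subsequent pass through here.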
	 */
	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		intval = iavf_buildreg_itr(IAVF_RX_ITR,
					   q_vector->rx.target_itr);
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		intval = iavf_buildreg_itr(IAVF_TX_ITR,
					   q_vector->tx.target_itr);
		q_vector->tx.current_itr = q_vector->tx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
		/* Rx ITR needs to be increased, third priority */
		intval = iavf_buildreg_itr(IAVF_RX_ITR,
					   q_vector->rx.target_itr);
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* No ITR update, lowest priority */
		intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
		wr32(hw, INTREG(q_vector->reg_idx), intval);
}

/**
 * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int iavf_napi_poll(struct napi_struct *napi, int budget)
{
	struct iavf_q_vector *q_vector =
			container_of(napi, struct iavf_q_vector, napi);
	struct iavf_vsi *vsi = q_vector->vsi;
	struct iavf_ring *ring;
	bool clean_complete = true;
	bool arm_wb = false;
	int budget_per_ring;
	int work_done = 0;

	if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	iavf_for_each_ring(ring, q_vector->tx) {
		if (!iavf_clean_tx_irq(vsi, ring, budget)) {
			clean_complete = false;
			continue;
		}
		arm_wb |= ring->arm_wb;
		ring->arm_wb = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		goto tx_only;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget / q_vector->num_ringpairs, 1);

	iavf_for_each_ring(ring, q_vector->rx) {
		int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);

		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		int cpu_id = smp_processor_id();

		/* It is possible that the interrupt affinity has changed but,
		 * if the cpu is pegged at 100%, polling will never exit while
		 * traffic continues and the interrupt will be stuck on this
		 * cpu. We check to make sure affinity is correct before we
		 * continue to poll, otherwise we must stop polling so the
		 * interrupt can move to the correct cpu.
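		 *
		 * E.g. if irqbalance has moved this vector's IRQ to CPU 3
		 * while we are still polling on CPU 0, we bail out below,
		 * complete NAPI, and force an interrupt so NAPI can be
		 * rescheduled on the right CPU.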
		 */
		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
			/* Tell napi that we are done polling */
			napi_complete_done(napi, work_done);

			/* Force an interrupt */
			iavf_force_wb(vsi, q_vector);

			/* Return budget-1 so that polling stops */
			return budget - 1;
		}
tx_only:
		if (arm_wb) {
			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
			iavf_enable_wb_on_itr(vsi, q_vector);
		}
		return budget;
	}

	if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		iavf_update_enable_itr(vsi, q_vector);

	return min_t(int, work_done, budget - 1);
}

/**
 * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 * @flags: the tx flags to be set
 *
 * Checks the skb and sets up several generic transmit flags related to VLAN
 * tagging for the HW, such as VLAN and DCB, and stores them in *flags.
 *
 * Nothing is returned: if the requested tag location is not supported by the
 * ring, *flags is left unchanged and the frame goes out without a
 * hardware-inserted tag.
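 *
 * For example, a tag with VLAN ID 100 and priority 3 arrives as a 16-bit
 * TCI from skb_vlan_tag_get() and is parked in the upper bits of tx_flags
 * (IAVF_TX_FLAGS_VLAN_SHIFT) until it is written into either L2TAG1 of the
 * data descriptor or L2TAG2 of the context descriptor, whichever location
 * was negotiated with the PF.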
 **/
static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
				       struct iavf_ring *tx_ring, u32 *flags)
{
	u32 tx_flags = 0;

	/* stack will only request hardware VLAN insertion offload for protocols
	 * that the driver supports and has enabled
	 */
	if (!skb_vlan_tag_present(skb))
		return;

	tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
	if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) {
		tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
	} else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
		tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
	} else {
		dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n");
		return;
	}

	*flags = tx_flags;
}

/**
 * iavf_tso - set up the tso context descriptor
 * @first: pointer to first Tx buffer for xmit
 * @hdr_len: ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if TSO is not needed, 1 if a TSO context descriptor was set up,
 * or a negative error code if the skb headers could not be prepared.
 **/
static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
		    u64 *cd_type_cmd_tso_mss)
{
	struct sk_buff *skb = first->skb;
	u64 cd_cmd, cd_tso_len, cd_mss;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	u16 gso_segs, gso_size;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_offset = l4.hdr - skb->data;

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_offset;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;
	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of UDP segmentation header */
		*hdr_len = (u8)(sizeof(*l4.udp) + l4_offset);
	} else {
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of TCP segmentation header */
		*hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset);
	}

	/* pull values out of skb_shinfo */
	gso_size = skb_shinfo(skb)->gso_size;
	gso_segs = skb_shinfo(skb)->gso_segs;

	/* update GSO size and bytecount with header size */
	first->gso_segs = gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* find the field values */
	cd_cmd = IAVF_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = gso_size;
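
	/* Pack QW1 of the context descriptor. As a rough worked example:
	 * a 9014-byte frame with a 54-byte header and MSS 1460 yields
	 * cd_tso_len = 8960 and cd_mss = 1460; both are OR'd in below at
	 * their IAVF_TXD_CTX_QW1_* shift positions alongside the TSO cmd.
	 */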
	*cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
				(cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				(cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}

/**
 * iavf_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
			       u32 *td_cmd, u32 *td_offset,
			       struct iavf_ring *tx_ring,
			       u32 *cd_tunneling)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	__be16 frag_off;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;

	if (skb->encapsulation) {
		u32 tunnel = 0;

		/* define outer network header type */
		if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
			tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
				  IAVF_TX_CTX_EXT_IP_IPV4 :
				  IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;

			l4_proto = ip.v4->protocol;
		} else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
			tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;

			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			if (l4.hdr != exthdr)
				ipv6_skip_exthdr(skb, exthdr - skb->data,
						 &l4_proto, &frag_off);
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
			*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (*tx_flags & IAVF_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  IAVF_TXD_CTX_QW0_NATLEN_SHIFT;

		/* indicate if we need to offload outer UDP header */
		if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;

		/* record tunnel offload values */
		*cd_tunneling |= tunnel;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		*tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			*tx_flags |= IAVF_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			*tx_flags |= IAVF_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
		       IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
		       IAVF_TX_DESC_CMD_IIPT_IPV4;
	} else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
		cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;

		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto, &frag_off);
	}

	/* compute inner L3 header size */
	offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
		offset |= (sizeof(struct sctphdr) >> 2) <<
			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
		offset |= (sizeof(struct udphdr) >> 2) <<
			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		if (*tx_flags & IAVF_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	*td_cmd |= cmd;
	*td_offset |= offset;

	return 1;
}

/**
 * iavf_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring: ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
/**
 * iavf_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring: ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct iavf_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = IAVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
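/* Illustrative note: a frame with no TSO, no tunneling and no second VLAN
 * tag leaves cd_type_cmd_tso_mss at the bare IAVF_TX_DESC_DTYPE_CONTEXT
 * value with cd_tunneling and cd_l2tag2 both zero, so the early return in
 * iavf_create_tx_ctx() above skips the context descriptor entirely and the
 * packet consumes data descriptors only.
 */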
/**
 * __iavf_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 **/
bool __iavf_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > IAVF_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(IAVF_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > IAVF_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
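/* Worked example (illustrative sizes): with gso_size = 2000 and a long run
 * of 100-byte fragments, sum starts at 1 - 2000 = -1999 and the first six
 * fragments only raise it to -1399, so the check fails on the first pass
 * and the skb is linearized: any window of six fragments carrying fewer
 * than gso_size bytes could make one segment span more than the hardware
 * limit of 8 buffers.
 */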
/**
 * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to assure are available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in case another CPU has just made room available. */
	if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

/**
 * iavf_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info structure to use
 * @tx_flags: collected send information
 * @hdr_len: size of the packet header
 * @td_cmd: the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
			       struct iavf_tx_buffer *first, u32 tx_flags,
			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	skb_frag_t *frag;
	struct iavf_tx_buffer *tx_bi;
	struct iavf_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;

	if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
		td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
			 IAVF_TX_FLAGS_VLAN_SHIFT;
	}

	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = IAVF_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		/* align size to end of page */
		max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
		tx_desc->buffer_addr = cpu_to_le64(dma);
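		/* Illustrative arithmetic, assuming the usual 4 KB
		 * IAVF_MAX_READ_REQ_SIZE and a 16 KB - 1
		 * IAVF_MAX_DATA_PER_TXD: the adjustment above grows the
		 * first chunk so every following chunk starts on a
		 * read-request boundary.  A buffer mapped at a dma address
		 * ending in 0x234 gets 0x1000 - 0x234 = 0xdcc (3532) extra
		 * bytes in its first descriptor; the loop below then carves
		 * the remainder into IAVF_MAX_DATA_PER_TXD_ALIGNED chunks.
		 */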
		while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   max_data, td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = IAVF_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = IAVF_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* write last descriptor with RS and EOP bits */
	td_cmd |= IAVF_TXD_CMD;
	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag);

	skb_tx_timestamp(skb);
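	/* Illustrative note: the doorbell write below is elided when the
	 * stack indicates more frames are on the way (netdev_xmit_more())
	 * and the queue has not been stopped, so a burst of packets can be
	 * posted to hardware with a single tail update.
	 */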
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * iavf_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
					struct iavf_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct iavf_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tso, count;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	iavf_trace(xmit_frame_ring, skb, tx_ring);

	count = iavf_xmit_descriptor_count(skb);
	if (iavf_chk_linearize(skb, count)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		count = iavf_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}
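	/* Illustrative count: a linear 1500-byte frame fits in a single
	 * data descriptor, so the check below asks for 1 + 4 + 1 = 6 free
	 * ring entries before committing to the transmit.
	 */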
	/* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* prepare the xmit flags */
	iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags);
	if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
		cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 <<
			IAVF_TXD_CTX_QW1_CMD_SHIFT;
		cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
			IAVF_TX_FLAGS_VLAN_SHIFT;
	}

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= IAVF_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= IAVF_TX_FLAGS_IPV6;

	tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= IAVF_TX_FLAGS_TSO;

	/* Always offload the checksum, since it's in the data descriptor */
	tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				  tx_ring, &cd_tunneling);
	if (tso < 0)
		goto out_drop;

	/* always enable CRC insertion offload */
	td_cmd |= IAVF_TX_DESC_CMD_ICRC;

	iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
	return NETDEV_TX_OK;
}
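/* Illustrative note on the minimum-length padding in iavf_xmit_frame()
 * below: skb_pad() zero-fills the extension but does not update skb->len,
 * which is why the length and tail pointer are adjusted by hand after a
 * successful pad.
 */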
/**
 * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
		if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
			return NETDEV_TX_OK;
		skb->len = IAVF_MIN_TX_LEN;
		skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
	}

	return iavf_xmit_frame_ring(skb, tx_ring);
}