// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
#include "ice_eswitch.h"

#define ICE_RX_HDR_SIZE		256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send the dummy packet on
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer holding the dummy packet
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->type = ICE_TX_BUF_DUMMY;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (dma_unmap_len(tx_buf, len))
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);

	switch (tx_buf->type) {
	case ICE_TX_BUF_DUMMY:
		devm_kfree(ring->dev, tx_buf->raw_buf);
		break;
	case ICE_TX_BUF_SKB:
		dev_kfree_skb_any(tx_buf->skb);
		break;
	case ICE_TX_BUF_XDP_TX:
		page_frag_free(tx_buf->raw_buf);
		break;
	case ICE_TX_BUF_XDP_XMIT:
		xdp_return_frame(tx_buf->xdpf);
		break;
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->type = ICE_TX_BUF_EMPTY;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

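/**
 * txring_txq - Find the netdev Tx queue backing a Tx ring
 * @ring: Tx ring to map to its stack queue
 */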
static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free all Tx ring buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring buffers */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;

	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(tx_ring->dev, size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	/* get the bql data ready */
	netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
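	/* the index is tracked offset by -count so that ring wraparound
	 * reduces to a simple zero check in the hot loop below
	 */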
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* follow the guidelines of other drivers */
		prefetchw(&tx_buf->skb->users);

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->type = ICE_TX_BUF_EMPTY;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}
		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);

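/* wake the queue only once enough descriptors for roughly two worst-case
 * fragmented packets are free again
 */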
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_tx_wake_queue(txring_txq(tx_ring));
			++tx_ring->ring_stats->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kcalloc(dev, tx_ring->count, sizeof(*tx_ring->tx_buf),
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->ring_stats->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct xdp_buff *xdp = &rx_ring->xdp;
	struct device *dev = rx_ring->dev;
	u32 size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	if (xdp->data) {
		xdp_return_buff(xdp);
		xdp->data = NULL;
	}

	/* Free all the Rx ring buffers */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	if (rx_ring->xsk_pool)
		memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
	else
		memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));

	/* Zero out the descriptor ring */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	memset(rx_ring->desc, 0, size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->first_desc = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
	u32 size;

	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	if (rx_ring->xsk_pool) {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
	} else {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
	}

	if (rx_ring->desc) {
		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(rx_ring->dev, size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->first_desc = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	return 0;

err:
	kfree(rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_rx_frame_truesize - calculate the truesize of an Rx frame
 * @rx_ring: ptr to Rx ring
 * @size: size of the packet data
 *
 * Calculate the truesize, taking into account the PAGE_SIZE of the
 * underlying arch
 */
static unsigned int
ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = rx_ring->rx_offset ?
		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 * @rx_buf: Rx buffer to store the XDP action
 *
 * Stores the resolved action (any of ICE_XDP_{PASS, CONSUMED, TX, REDIR})
 * on the Rx buffers via ice_set_rx_bufs_act().
 */
static void
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
	    struct ice_rx_buf *rx_buf)
{
	unsigned int ret = ICE_XDP_PASS;
	u32 act;

	if (!xdp_prog)
		goto exit;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
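		/* XDP Tx rings may be shared between CPUs when there are
		 * fewer rings than CPUs; the static key is then enabled and
		 * the per-ring lock serializes access
		 */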
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
		if (ret == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
			goto out_failure;
		ret = ICE_XDP_REDIR;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		ret = ICE_XDP_CONSUMED;
	}
exit:
	ice_set_rx_bufs_act(xdp, rx_ring, ret);
}

/**
 * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
 * @xdpf: XDP frame that will be converted to XDP buff
 * @xdp_ring: XDP ring for transmission
 */
static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf,
			     struct ice_tx_ring *xdp_ring)
{
	struct xdp_buff xdp;

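	/* an xdp_frame is laid out at the start of its buffer's headroom,
	 * so the frame pointer doubles as data_hard_start
	 */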
	xdp.data_hard_start = (void *)xdpf;
	xdp.data = xdpf->data;
	xdp.data_end = xdp.data + xdpf->len;
	xdp.frame_sz = xdpf->frame_sz;
	xdp.flags = xdpf->flags;

	return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns the number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (the caller must handle freeing the frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *xdp_ring;
	struct ice_tx_buf *tx_buf;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (static_branch_unlikely(&ice_xdp_locking_key)) {
		queue_index %= vsi->num_xdp_txq;
		xdp_ring = vsi->xdp_rings[queue_index];
		spin_lock(&xdp_ring->tx_lock);
	} else {
		/* Generally, should not happen */
		if (unlikely(queue_index >= vsi->num_xdp_txq))
			return -ENXIO;
		xdp_ring = vsi->xdp_rings[queue_index];
	}

	tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
	for (i = 0; i < n; i++) {
		const struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf, xdp_ring);
		if (err != ICE_XDP_TX)
			break;
		nxmit++;
	}

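	/* set the RS bit on the last descriptor used so that hardware
	 * writes back a completion for the whole batch
	 */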
	tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_unlock(&xdp_ring->tx_lock);

	return nxmit;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
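	/* take a large one-time page refcount up front so that buffer reuse
	 * only has to touch the driver-private pagecnt_bias afterwards
	 */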
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are the only owner of the page, we can reuse it */
	if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192 */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @xdp: xdp buff to place the data into
 * @rx_buf: buffer containing page to add
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the xdp buf.
 * It will just attach the page as a frag.
 */
static int
ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
		 struct ice_rx_buf *rx_buf, const unsigned int size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

	if (!size)
		return 0;

	if (!xdp_buff_has_frags(xdp)) {
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(xdp);
	}

	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
		ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
		return -ENOMEM;
	}

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
				   rx_buf->page_offset, size);
	sinfo->xdp_frags_size += size;
	/* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
	 * can pop off frags but driver has to handle it on its own
	 */
	rx_ring->nr_frags = sinfo->nr_frags;

	if (page_is_pfmemalloc(rx_buf->page))
		xdp_buff_set_frag_pfmemalloc(xdp);

	return 0;
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @ntc: index of next to clean element
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
	       const unsigned int ntc)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[ntc];
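	/* snapshot page_count() on small-page systems to detect other page
	 * owners; with larger pages, reuse is bounded by the offset check
	 * in ice_can_reuse_rx_page() instead
	 */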
	rx_buf->pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing XDP buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead. Driver has
 * already combined frags (if any) to skb_shared_info.
 */
static struct sk_buff *
ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
	struct skb_shared_info *sinfo = NULL;
	unsigned int nr_frags;
	struct sk_buff *skb;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly to xdp->data, otherwise we
	 * likely have a consumer accessing the first few bytes of meta
	 * data, and then the actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
	if (unlikely(!skb))
		return NULL;

	/* we must record the Rx queue, otherwise OS features such as
	 * symmetric queues won't work
	 */
100662306a36Sopenharmony_ci	skb_record_rx_queue(skb, rx_ring->q_index);
100762306a36Sopenharmony_ci
100862306a36Sopenharmony_ci	/* update pointers within the skb to store the data */
100962306a36Sopenharmony_ci	skb_reserve(skb, xdp->data - xdp->data_hard_start);
101062306a36Sopenharmony_ci	__skb_put(skb, xdp->data_end - xdp->data);
101162306a36Sopenharmony_ci	if (metasize)
101262306a36Sopenharmony_ci		skb_metadata_set(skb, metasize);
101362306a36Sopenharmony_ci
101462306a36Sopenharmony_ci	if (unlikely(xdp_buff_has_frags(xdp)))
101562306a36Sopenharmony_ci		xdp_update_skb_shared_info(skb, nr_frags,
101662306a36Sopenharmony_ci					   sinfo->xdp_frags_size,
101762306a36Sopenharmony_ci					   nr_frags * xdp->frame_sz,
101862306a36Sopenharmony_ci					   xdp_buff_is_frag_pfmemalloc(xdp));
101962306a36Sopenharmony_ci
102062306a36Sopenharmony_ci	return skb;
102162306a36Sopenharmony_ci}
102262306a36Sopenharmony_ci
102362306a36Sopenharmony_ci/**
102462306a36Sopenharmony_ci * ice_construct_skb - Allocate skb and populate it
102562306a36Sopenharmony_ci * @rx_ring: Rx descriptor ring to transact packets on
102662306a36Sopenharmony_ci * @xdp: xdp_buff pointing to the data
102762306a36Sopenharmony_ci *
102862306a36Sopenharmony_ci * This function allocates an skb. It then populates it with the page
102962306a36Sopenharmony_ci * data from the current receive descriptor, taking care to set up the
103062306a36Sopenharmony_ci * skb correctly.
103162306a36Sopenharmony_ci */
103262306a36Sopenharmony_cistatic struct sk_buff *
103362306a36Sopenharmony_ciice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
103462306a36Sopenharmony_ci{
103562306a36Sopenharmony_ci	unsigned int size = xdp->data_end - xdp->data;
103662306a36Sopenharmony_ci	struct skb_shared_info *sinfo = NULL;
103762306a36Sopenharmony_ci	struct ice_rx_buf *rx_buf;
103862306a36Sopenharmony_ci	unsigned int nr_frags = 0;
103962306a36Sopenharmony_ci	unsigned int headlen;
104062306a36Sopenharmony_ci	struct sk_buff *skb;
104162306a36Sopenharmony_ci
104262306a36Sopenharmony_ci	/* prefetch first cache line of first page */
104362306a36Sopenharmony_ci	net_prefetch(xdp->data);
104462306a36Sopenharmony_ci
104562306a36Sopenharmony_ci	if (unlikely(xdp_buff_has_frags(xdp))) {
104662306a36Sopenharmony_ci		sinfo = xdp_get_shared_info_from_buff(xdp);
104762306a36Sopenharmony_ci		nr_frags = sinfo->nr_frags;
104862306a36Sopenharmony_ci	}
104962306a36Sopenharmony_ci
105062306a36Sopenharmony_ci	/* allocate a skb to store the frags */
105162306a36Sopenharmony_ci	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
105262306a36Sopenharmony_ci			       GFP_ATOMIC | __GFP_NOWARN);
105362306a36Sopenharmony_ci	if (unlikely(!skb))
105462306a36Sopenharmony_ci		return NULL;
105562306a36Sopenharmony_ci
105662306a36Sopenharmony_ci	rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
105762306a36Sopenharmony_ci	skb_record_rx_queue(skb, rx_ring->q_index);
105862306a36Sopenharmony_ci	/* Determine available headroom for copy */
105962306a36Sopenharmony_ci	headlen = size;
106062306a36Sopenharmony_ci	if (headlen > ICE_RX_HDR_SIZE)
106162306a36Sopenharmony_ci		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
106262306a36Sopenharmony_ci
106362306a36Sopenharmony_ci	/* align pull length to size of long to optimize memcpy performance */
106462306a36Sopenharmony_ci	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
106562306a36Sopenharmony_ci							 sizeof(long)));
106662306a36Sopenharmony_ci
106762306a36Sopenharmony_ci	/* if we exhaust the linear part then add what is left as a frag */
106862306a36Sopenharmony_ci	size -= headlen;
106962306a36Sopenharmony_ci	if (size) {
107062306a36Sopenharmony_ci		/* besides adding here a partial frag, we are going to add
107162306a36Sopenharmony_ci		 * frags from xdp_buff, make sure there is enough space for
107262306a36Sopenharmony_ci		 * them
107362306a36Sopenharmony_ci		 */
		if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size,
				xdp->frame_sz);
	} else {
		/* buffer is unused, change the act that should be taken later
		 * on; data was copied onto skb's linear part so there's no
		 * need for adjusting page offset and we can reuse this buffer
		 * as-is
		 */
		rx_buf->act = ICE_SKB_CONSUMED;
	}

	if (unlikely(xdp_buff_has_frags(xdp))) {
		struct skb_shared_info *skinfo = skb_shinfo(skb);

		memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
		       sizeof(skb_frag_t) * nr_frags);

		xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdp->frame_sz,
					   xdp_buff_is_frag_pfmemalloc(xdp));
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buf. It will either
 * recycle the buffer or unmap it and free the associated resources.
 */
static void
ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
}
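
/* Minimal illustrative sketch (hypothetical helper, not called by the
 * driver): the reuse decision above ultimately hinges on reference
 * ownership. ice_can_reuse_rx_page() also checks page residence and
 * offset exhaustion, but its core test amounts to the driver holding
 * the only reference once the accumulated pagecnt_bias is subtracted.
 */
static inline bool ice_demo_page_sole_owner(struct page *page,
					    u16 pagecnt_bias)
{
	return page_ref_count(page) - pagecnt_bias == 1;
}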

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	unsigned int offset = rx_ring->rx_offset;
	struct xdp_buff *xdp = &rx_ring->xdp;
	u32 cached_ntc = rx_ring->first_desc;
	struct ice_tx_ring *xdp_ring = NULL;
	struct bpf_prog *xdp_prog = NULL;
	u32 ntc = rx_ring->next_to_clean;
	u32 cnt = rx_ring->count;
	u32 xdp_xmit = 0;
	u32 cached_ntu;
	bool failure;
	u32 first;

	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	if (xdp_prog) {
		xdp_ring = rx_ring->xdp_ring;
		cached_ntu = xdp_ring->next_to_use;
	}

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u16 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, ntc);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup and overlaps with hdr_addr,
		 * which is always zero because packet split isn't used; if
		 * the hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		ice_trace(clean_rx_irq, rx_ring, rx_desc);
		if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
			struct ice_vsi *ctrl_vsi = rx_ring->vsi;

			if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
			    ctrl_vsi->vf)
				ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
			if (++ntc == cnt)
				ntc = 0;
			rx_ring->first_desc = ntc;
			continue;
		}

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, size, ntc);

		if (!xdp->data) {
			void *hard_start;

			hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
				     offset;
			xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
#if (PAGE_SIZE >= 8192)
			/* At larger PAGE_SIZE, frame_sz depends on frame length */
			xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif
			xdp_buff_clear_frags_flag(xdp);
		} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
			break;
		}
		if (++ntc == cnt)
			ntc = 0;

		/* skip if it is a NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc))
			continue;

		ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
		if (rx_buf->act == ICE_XDP_PASS)
			goto construct_skb;
		total_rx_bytes += xdp_get_buff_len(xdp);
		total_rx_pkts++;

		xdp->data = NULL;
		rx_ring->first_desc = ntc;
		rx_ring->nr_frags = 0;
		continue;
construct_skb:
		if (likely(ice_ring_uses_build_skb(rx_ring)))
			skb = ice_build_skb(rx_ring, xdp);
		else
			skb = ice_construct_skb(rx_ring, xdp);
		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->ring_stats->rx_stats.alloc_page_failed++;
			rx_buf->act = ICE_XDP_CONSUMED;
			if (unlikely(xdp_buff_has_frags(xdp)))
				ice_set_rx_bufs_act(xdp, rx_ring,
						    ICE_XDP_CONSUMED);
			xdp->data = NULL;
			rx_ring->first_desc = ntc;
			rx_ring->nr_frags = 0;
			break;
		}
		xdp->data = NULL;
		rx_ring->first_desc = ntc;
		rx_ring->nr_frags = 0;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
					      stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);

		/* pad the skb if needed, to make a valid ethernet frame */
		if (eth_skb_pad(skb))
			continue;

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_pkts++;
	}

	first = rx_ring->first_desc;
	while (cached_ntc != first) {
		struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];

		if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
			xdp_xmit |= buf->act;
		} else if (buf->act & ICE_XDP_CONSUMED) {
			buf->pagecnt_bias++;
		} else if (buf->act == ICE_XDP_PASS) {
			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
		}

		ice_put_rx_buf(rx_ring, buf);
		if (++cached_ntc >= cnt)
			cached_ntc = 0;
	}
	rx_ring->next_to_clean = ntc;
	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));

	if (xdp_xmit)
		ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu);

	if (rx_ring->ring_stats)
		ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
					 total_rx_bytes);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}
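
/* Toy sketch (hypothetical helper, not used above) of the index
 * handling in ice_clean_rx_irq(): ring positions advance with a
 * compare-and-wrap instead of a modulo, which is cheaper and mirrors
 * how ntc, cached_ntc and first_desc move through the ring.
 */
static inline u32 ice_demo_ring_next(u32 idx, u32 cnt)
{
	return (++idx >= cnt) ? 0 : idx;
}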

static void __ice_update_sample(struct ice_q_vector *q_vector,
				struct ice_ring_container *rc,
				struct dim_sample *sample,
				bool is_tx)
{
	u64 packets = 0, bytes = 0;

	if (is_tx) {
		struct ice_tx_ring *tx_ring;

		ice_for_each_tx_ring(tx_ring, *rc) {
			struct ice_ring_stats *ring_stats;

			ring_stats = tx_ring->ring_stats;
			if (!ring_stats)
				continue;
			packets += ring_stats->stats.pkts;
			bytes += ring_stats->stats.bytes;
		}
	} else {
		struct ice_rx_ring *rx_ring;

		ice_for_each_rx_ring(rx_ring, *rc) {
			struct ice_ring_stats *ring_stats;

			ring_stats = rx_ring->ring_stats;
			if (!ring_stats)
				continue;
			packets += ring_stats->stats.pkts;
			bytes += ring_stats->stats.bytes;
		}
	}

	dim_update_sample(q_vector->total_events, packets, bytes, sample);
	sample->comp_ctr = 0;

	/* if dim settings get stale, like when not updated for 1
	 * second or longer, force it to start again. This addresses the
	 * frequent case of an idle queue being switched to by the
	 * scheduler. The 1,000 here means 1,000 milliseconds.
	 */
	if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
		rc->dim.state = DIM_START_MEASURE;
}

/**
 * ice_net_dim - Update net DIM algorithm
 * @q_vector: the vector associated with the interrupt
 *
 * Create a DIM sample and notify net_dim() so that it can possibly decide
 * a new ITR value based on incoming packets, bytes, and interrupts.
 *
 * This function is a no-op if the ring is not configured to dynamic ITR.
 */
static void ice_net_dim(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;

	if (ITR_IS_DYNAMIC(tx)) {
		struct dim_sample dim_sample;

		__ice_update_sample(q_vector, tx, &dim_sample, true);
		net_dim(&tx->dim, dim_sample);
	}

	if (ITR_IS_DYNAMIC(rx)) {
		struct dim_sample dim_sample;

		__ice_update_sample(q_vector, rx, &dim_sample, false);
		net_dim(&rx->dim, dim_sample);
	}
}
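
/* Sketch (hypothetical helper, not used above) of the staleness test
 * in __ice_update_sample(): if more than a second has passed since the
 * last sample, the DIM state machine is restarted rather than being
 * fed a wildly out-of-date delta.
 */
static inline bool ice_demo_dim_sample_stale(ktime_t now, ktime_t last)
{
	return ktime_ms_delta(now, last) >= 1000;
}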

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
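
/* Illustrative sketch (hypothetical helper, not used by the driver) of
 * the comment above: for an even itr value, dividing down to 2 usec
 * register units and then shifting into the INTERVAL field equals one
 * fused shift by GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S.
 */
static inline u32 ice_demo_itr_interval(u16 itr)
{
	u32 by_divide = (u32)(itr / 2) << GLINT_DYN_CTL_INTERVAL_S;
	u32 by_shift = (u32)itr <<
		       (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S);

	/* both forms agree once ICE_ITR_MASK has forced itr even */
	return by_divide == by_shift ? by_shift : by_divide;
}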

/**
 * ice_enable_interrupt - re-enable MSI-X interrupt
 * @q_vector: the vector associated with the interrupt to enable
 *
 * If the VSI is down, the interrupt will not be re-enabled. Also,
 * when enabling the interrupt always reset the wb_on_itr to false
 * and trigger a software interrupt to clean out internal state.
 */
static void ice_enable_interrupt(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;
	bool wb_en = q_vector->wb_on_itr;
	u32 itr_val;

	if (test_bit(ICE_DOWN, vsi->state))
		return;

	/* trigger an ITR delayed software interrupt when exiting busy poll, to
	 * make sure to catch any pending cleanups that might have been missed
	 * due to interrupt state transition. If busy poll or poll isn't
	 * enabled, then don't update ITR, and just enable the interrupt.
	 */
	if (!wb_en) {
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
	} else {
		q_vector->wb_on_itr = false;

		/* do two things here with a single write. Set up the third ITR
		 * index to be used for software interrupt moderation, and then
		 * trigger a software interrupt with a rate limit of 20K on
		 * software interrupts, this will help avoid high interrupt
		 * loads due to frequently polling and exiting polling.
		 */
		itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
		itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
			   ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
			   GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
	}
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
}

/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until
 * the next interrupt.
 *
 * This sets the write-back frequency to whatever was set previously for the
 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
 * aren't meddling with the INTENA_M bit.
 */
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;

	/* already in wb_on_itr mode, no need to change it */
	if (q_vector->wb_on_itr)
		return;

	/* use previously set ITR values for all of the ITR indices by
	 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
	 * be static in non-adaptive mode (user configured)
	 */
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
	      GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
	     GLINT_DYN_CTL_WB_ON_ITR_M);

	q_vector->wb_on_itr = true;
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
				container_of(napi, struct ice_q_vector, napi);
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	bool clean_complete = true;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
		bool wd;

		if (tx_ring->xsk_pool)
			wd = ice_xmit_zc(tx_ring);
		else if (ice_ring_is_xdp(tx_ring))
			wd = true;
		else
			wd = ice_clean_tx_irq(tx_ring, budget);

		if (!wd)
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
		int cleaned;

		/* A dedicated path for zero-copy allows making a single
		 * comparison in the irq context instead of many inside the
		 * ice_clean_rx_irq function and makes the codebase cleaner.
		 */
		cleaned = rx_ring->xsk_pool ?
			  ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
			  ice_clean_rx_irq(rx_ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		/* Set the writeback on ITR so partial completions of
		 * cache-lines will still continue even if we're polling.
		 */
		ice_set_wb_on_itr(q_vector);
		return budget;
	}

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (napi_complete_done(napi, work_done)) {
		ice_net_dim(q_vector);
		ice_enable_interrupt(q_vector);
	} else {
		ice_set_wb_on_itr(q_vector);
	}

	return min_t(int, work_done, budget - 1);
}
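
/* Minimal sketch (hypothetical helper, not used above) isolating the
 * budget split from ice_napi_poll(): each Rx ring gets an equal share
 * of the NAPI budget, clamped to at least 1 so a vector with many
 * rings can never hand a ring a zero budget and end polling early.
 */
static inline int ice_demo_budget_per_ring(int budget, u16 num_ring_rx)
{
	if (unlikely(num_ring_rx > 1))
		return max_t(int, budget / num_ring_rx, 1);

	return budget;
}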

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to assure are available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	netif_tx_stop_queue(txring_txq(tx_ring));
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_tx_start_queue(txring_txq(tx_ring));
	++tx_ring->ring_stats->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to assure are available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}
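
/* The two-level check above implements the classic lockless stop/wake
 * pattern: the fast path (ice_maybe_stop_tx) costs one comparison, and
 * only when the ring looks full does the slow path stop the queue,
 * issue a full barrier, and re-check. A concurrent completion on
 * another CPU therefore either sees the stopped queue and restarts it,
 * or this CPU sees the freed descriptors and restarts it itself.
 */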

/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	skb_frag_t *frag;
	dma_addr_t dma;
	bool kick;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = first->vid;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				ice_build_ctob(td_cmd, td_offset, max_data,
					       td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
							      size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
		tx_buf->type = ICE_TX_BUF_FRAG;
	}

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
	tx_desc->cmd_type_offset_bsz =
			ice_build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
				      netdev_xmit_more());
	if (kick)
		writel(i, tx_ring->tail);

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
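
/* Sketch (hypothetical helper, not used above) of the "align size to
 * end of page" step in ice_tx_map(): -dma & (ICE_MAX_READ_REQ_SIZE - 1)
 * is the distance from dma up to the next ICE_MAX_READ_REQ_SIZE
 * boundary, so the first oversized chunk is stretched to end exactly
 * on that boundary and the following chunks stay naturally aligned.
 */
static inline unsigned int ice_demo_first_chunk_len(dma_addr_t dma)
{
	return ICE_MAX_DATA_PER_TXD_ALIGNED +
	       (unsigned int)(-dma & (ICE_MAX_READ_REQ_SIZE - 1));
}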

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	protocol = vlan_get_protocol(skb);

	if (eth_p_mpls(protocol)) {
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_checksum_start(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
	}

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	/* set the tx_flags to indicate the IP protocol type. this is
	 * required so that checksum header computation below is accurate.
	 */
	if (ip.v4->version == 4)
		first->tx_flags |= ICE_TX_FLAGS_IPV4;
	else if (ip.v6->version == 6)
		first->tx_flags |= ICE_TX_FLAGS_IPV6;

	if (skb->encapsulation) {
		bool gso_ena = false;
		u32 tunnel = 0;

		/* define outer network header type */
		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
				  ICE_TX_CTX_EIPT_IPV4 :
				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
			l4_proto = ip.v4->protocol;
		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
			int ret;

			tunnel |= ICE_TX_CTX_EIPT_IPV6;
			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
					       &l4_proto, &frag_off);
			if (ret < 0)
				return -1;
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (first->tx_flags & ICE_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  ICE_TXD_CTX_QW0_EIPLEN_S;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			   ICE_TXD_CTX_QW0_NATLEN_S;

		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
		/* indicate if we need to offload outer UDP header */
		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;

		/* record tunnel offload values */
		off->cd_tunnel_params |= tunnel;

		/* set DTYP=1 to indicate that it's a Tx context descriptor
		 * in IPsec tunnel mode with Tx offloads in Quad word 1
		 */
		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			first->tx_flags |= ICE_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			first->tx_flags |= ICE_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}
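
/* Sketch (hypothetical helper, not used above) of the length encoding
 * assembled in ice_tx_csum(): the descriptor carries the MAC header
 * length in 2-byte words and the IP header length in 4-byte words,
 * hence the divides before shifting into the offset field; l4_len is
 * already expressed in 4-byte words by the caller, as above.
 */
static inline u32 ice_demo_csum_offset(u32 l2_len, u32 l3_len, u32 l4_len)
{
	return ((l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S) |
	       ((l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S) |
	       (l4_len << ICE_TX_DESC_LEN_L4_LEN_S);
}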

/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up the corresponding generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN and DCB.
 */
static void
ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;

	/* nothing left to do, software offloaded VLAN */
	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
		return;

	/* the VLAN ethertype/TPID is determined by VSI configuration and
	 * netdev feature flags; the driver allows only 802.1Q or 802.1ad
	 * VLAN offloads, never both, so we only care about the VLAN ID here
	 */
	if (skb_vlan_tag_present(skb)) {
		first->vid = skb_vlan_tag_get(skb);
		if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
			first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
		else
			first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	}

	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}

/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	__be16 protocol;
	u32 paylen;
	u8 l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	protocol = vlan_get_protocol(skb);

	if (eth_p_mpls(protocol))
		ip.hdr = skb_inner_network_header(skb);
	else
		ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_start = (u8)(l4.hdr - skb->data);

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_start;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of transport header */
	l4_start = (u8)(l4.hdr - skb->data);

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of UDP segmentation header */
		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
	} else {
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of TCP segmentation header */
		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
	}

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}

/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
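
/* Hand-worked check of the arithmetic above (illustrative, not driver
 * code): for size = 60000, (60000 * 85) >> 20 = 4, and adding
 * ICE_DESCS_FOR_SKB_DATA_PTR (defined as 1 in ice_txrx.h) gives 5
 * descriptors, matching DIV_ROUND_UP(60000, 12288) = 5. At size = 12288
 * exactly, the shift yields 0 and the result is a single descriptor,
 * relying on the 4K - 1 bytes of wiggle room the comment describes.
 */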

/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
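
/* Illustrative only: an skb with a 1500-byte linear area and two 4K page
 * fragments yields count = 1 + 1 + 1 = 3, one descriptor per buffer, since
 * each piece is well under the 12K-per-descriptor budget.
 */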

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > ICE_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(ICE_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > ICE_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
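
/* Worked example for the check above (hand-computed, illustrative values):
 * with gso_size = 2000 and eight 256-byte fragments, sum starts at
 * 1 - 2000 + 5 * 256 = -719 and reaches -463 after adding fragment 5, so
 * the function returns true and the skb is linearized: six fragments cannot
 * cover even one 2000-byte segment. With eight 4K fragments and the same
 * gso_size, the running sum never drops below zero and no linearization is
 * needed.
 */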

/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}

/**
 * ice_tstamp - set up context descriptor for hardware timestamp
 * @tx_ring: pointer to the Tx ring to send buffer on
 * @skb: pointer to the SKB we're sending
 * @first: Tx buffer
 * @off: Tx offload parameters
 */
static void
ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
	   struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	s8 idx;

	/* only timestamp the outbound packet if the user has requested it */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return;

	if (!tx_ring->ptp_tx)
		return;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (first->tx_flags & ICE_TX_FLAGS_TSO)
		return;

	/* Grab an open timestamp slot */
	idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
	if (idx < 0) {
		tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
		return;
	}

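	/* The slot index rides in the QW1 field otherwise used for the TSO
	 * length; timestamp and TSO are mutually exclusive here (see the
	 * ICE_TX_FLAGS_TSO check above), so the field is never claimed twice.
	 */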
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
			     ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
	first->tx_flags |= ICE_TX_FLAGS_TSYN;
}

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_vsi *vsi = tx_ring->vsi;
	struct ice_tx_buf *first;
	struct ethhdr *eth;
	unsigned int count;
	int tso, csum;

	ice_trace(xmit_frame_ring, tx_ring, skb);

	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
		goto out_drop;

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->ring_stats->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
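	/* Illustrative arithmetic, assuming the ice_txrx.h values of four
	 * 16-byte descriptors per 64-byte cache line and one context
	 * descriptor: a fully linear 9000-byte skb has count = 1, so we ask
	 * for 1 + 4 + 1 = 6 free descriptors before committing the frame.
	 */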
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->ring_stats->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* prefetch for bql data which is infrequently used */
	netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->type = ICE_TX_BUF_SKB;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	ice_tx_prepare_vlan_flags(tx_ring, first);
	if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					(ICE_TX_CTX_DESC_IL2TAG2 <<
					ICE_TXD_CTX_QW1_CMD_S));
		offload.cd_l2tag2 = first->vid;
	}

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
	eth = (struct ethhdr *)skb_mac_header(skb);
	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
		      eth->h_proto == htons(ETH_P_LLDP)) &&
		     vsi->type == ICE_VSI_PF &&
		     vsi->port_info->qos_cfg.is_sw_lldp))
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
					ICE_TXD_CTX_QW1_CMD_S);

	ice_tstamp(tx_ring, skb, first, &offload);
	if (ice_is_switchdev_running(vsi->back))
		ice_eswitch_set_target_vsi(skb, &offload);

	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
		struct ice_tx_ctx_desc *cdesc;
		u16 i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	ice_trace(xmit_frame_ring_drop, tx_ring, skb);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}

/**
 * ice_get_dscp_up - return the UP/TC value for a SKB
 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
 * @skb: SKB to query for info to determine UP/TC
 *
 * This function should only be called when the PF is in L3 DSCP PFC mode.
 */
static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
{
	u8 dscp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return dcbcfg->dscp_map[dscp];
}

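/**
 * ice_select_queue - select a Tx queue, remapping skb priority in DSCP mode
 * @netdev: network interface device structure
 * @skb: send buffer
 * @sb_dev: subordinate device, if any
 *
 * Illustrative mapping: an IPv4 packet whose DS field is 0xb8 yields DSCP 46
 * (0xb8 >> 2, the common EF marking), so the priority assigned below becomes
 * dcbcfg->dscp_map[46].
 */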
u16
ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
		 struct net_device *sb_dev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *dcbcfg;

	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
		skb->priority = ice_get_dscp_up(dcbcfg, skb);

	return netdev_pick_tx(netdev, skb, sb_dev);
}

/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: tx_ring to clean
 */
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
{
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	int budget = ICE_DFLT_IRQ_WORK;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
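
	/* Track i as a negative offset from the end of the ring so the wrap
	 * check below is a cheap test against zero: with a 64-entry ring and
	 * next_to_clean at 63, i starts at -1 and the unlikely(!i) branches
	 * reload the ring pointers after a single advance (illustrative
	 * values, worked by hand).
	 */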

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no pending work */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		/* unmap the data header */
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
		if (tx_buf->type == ICE_TX_BUF_DUMMY)
			devm_kfree(tx_ring->dev, tx_buf->raw_buf);

		/* reset the Tx buffer for reuse and clear next_to_watch to
		 * prevent false hangs
		 */
		tx_buf->type = ICE_TX_BUF_EMPTY;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	/* re-enable interrupt if needed */
	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
}