// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bpf_trace.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/gro.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include "qede_ptp.h"

#include <linux/qed/qed_if.h>
#include "qede.h"
/*********************************
 * Content also used by slowpath *
 *********************************/

int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
{
	struct sw_rx_data *sw_rx_data;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	struct page *data;

	/* In case lazy-allocation is allowed, postpone allocation until the
	 * end of the NAPI run. We'd still need to make sure the Rx ring has
	 * sufficient buffers to guarantee an additional Rx interrupt.
	 */
	if (allow_lazy && likely(rxq->filled_buffers > 12)) {
		rxq->filled_buffers--;
		return 0;
	}

	data = alloc_pages(GFP_ATOMIC, 0);
	if (unlikely(!data))
		return -ENOMEM;

	/* Map the entire page, as it is going to be split into multiple
	 * Rx buffer segments.
	 */
	mapping = dma_map_page(rxq->dev, data, 0,
			       PAGE_SIZE, rxq->data_direction);
	if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
		__free_page(data);
		return -ENOMEM;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	sw_rx_data->page_offset = 0;
	sw_rx_data->data = data;
	sw_rx_data->mapping = mapping;

	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
	WARN_ON(!rx_bd);
	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
				     rxq->rx_headroom);

	rxq->sw_rx_prod++;
	rxq->filled_buffers++;

	return 0;
}
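	/* sw_rx_prod is a free-running counter; masking with NUM_RX_BDS_MAX
	 * (the ring size minus one, assuming a power-of-two ring) folds it
	 * into a valid software-ring index.
	 */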

/* Unmap the data and free skb */
int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
{
	u16 idx = txq->sw_tx_cons;
	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_bd *tx_data_bd;
	int bds_consumed = 0;
	int nbds;
	bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
	int i, split_bd_len = 0;

	if (unlikely(!skb)) {
		DP_ERR(edev,
		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
		return -1;
	}

	*len = skb->len;

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

	bds_consumed++;

	nbds = first_bd->data.nbds;

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		bds_consumed++;
	}
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	while (bds_consumed++ < nbds)
		qed_chain_consume(&txq->tx_pbl);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring.skbs[idx].skb = NULL;
	txq->sw_tx_ring.skbs[idx].flags = 0;

	return 0;
}
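	/* nbds counts every BD of this packet, including BDs that carry only
	 * offload parameters and no mapped data.
	 */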
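	/* Consume whatever BDs remain (e.g. 2nd/3rd BDs used purely for
	 * offload metadata); they have nothing to unmap.
	 */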

/* Unmap the data and free skb when mapping failed during start_xmit */
static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd, bool data_split)
{
	u16 idx = txq->sw_tx_prod;
	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
	struct eth_tx_bd *tx_data_bd;
	int i, split_bd_len = 0;

	/* Return prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
					  qed_chain_produce(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		nbd--;
	}

	dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < nbd; i++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		if (tx_data_bd->nbytes)
			dma_unmap_page(txq->dev,
				       BD_UNMAP_ADDR(tx_data_bd),
				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	/* Return again prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring.skbs[idx].skb = NULL;
	txq->sw_tx_ring.skbs[idx].flags = 0;
}

static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
{
	u32 rc = XMIT_L4_CSUM;
	__be16 l3_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	l3_proto = vlan_get_protocol(skb);
	if (l3_proto == htons(ETH_P_IPV6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*ipv6_ext = 1;

	if (skb->encapsulation) {
		rc |= XMIT_ENC;
		if (skb_is_gso(skb)) {
			unsigned short gso_type = skb_shinfo(skb)->gso_type;

			if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
			    (gso_type & SKB_GSO_GRE_CSUM))
				rc |= XMIT_ENC_GSO_L4_CSUM;

			rc |= XMIT_LSO;
			return rc;
		}
	}

	if (skb_is_gso(skb))
		rc |= XMIT_LSO;

	return rc;
}

static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
{
	u8 l4_proto;
	u16 bd2_bits1 = 0, bd2_bits2 = 0;

	bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
		     ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		    << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

	bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
		l4_proto = ipv6_hdr(skb)->nexthdr;
	else
		l4_proto = ip_hdr(skb)->protocol;

	if (l4_proto == IPPROTO_UDP)
		bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

	if (third_bd)
		third_bd->data.bitfields |=
			cpu_to_le16(((tcp_hdrlen(skb) / 4) &
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);

	second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}

static int map_frag_to_bd(struct qede_tx_queue *txq,
			  skb_frag_t *frag, struct eth_tx_bd *bd)
{
	dma_addr_t mapping;

	/* Map skb non-linear frag data for DMA */
	mapping = skb_frag_dma_map(txq->dev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(txq->dev, mapping)))
		return -ENOMEM;

	/* Set up the data pointer of the frag data */
	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

	return 0;
}

static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
	if (is_encap_pkt)
		return skb_inner_tcp_all_headers(skb);

	return skb_tcp_all_headers(skb);
}

/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
{
	int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;

	if (xmit_type & XMIT_LSO) {
		int hlen;

		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);

		/* linear payload would require its own BD */
		if (skb_headlen(skb) > hlen)
			allowed_frags--;
	}

	return (skb_shinfo(skb)->nr_frags > allowed_frags);
}
#endif

static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
{
	/* The wmb() makes sure the BD data is written before the producer is
	 * updated; otherwise the FW may read stale data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* A fence is required to flush the write-combined buffer, since
	 * another CPU may write to the same doorbell address and data may be
	 * lost due to the relaxed ordering of the write-combined BAR.
	 */
	wmb();
}

static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
			 u16 len, struct page *page, struct xdp_frame *xdpf)
{
	struct eth_tx_1st_bd *bd;
	struct sw_tx_xdp *xdp;
	u16 val;

	if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
		     txq->num_tx_buffers)) {
		txq->stopped_cnt++;
		return -ENOMEM;
	}

	bd = qed_chain_produce(&txq->tx_pbl);
	bd->data.nbds = 1;
	bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);

	val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
	       ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;

	bd->data.bitfields = cpu_to_le16(val);

	/* We can safely ignore the offset, as it's 0 for XDP */
	BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);

	xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
	xdp->mapping = dma;
	xdp->page = page;
	xdp->xdpf = xdpf;

	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;

	return 0;
}
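	/* Only the software producer advances here; the doorbell is rung
	 * later by the caller once a batch has been queued.
	 */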

int qede_xdp_transmit(struct net_device *dev, int n_frames,
		      struct xdp_frame **frames, u32 flags)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct device *dmadev = &edev->pdev->dev;
	struct qede_tx_queue *xdp_tx;
	struct xdp_frame *xdpf;
	dma_addr_t mapping;
	int i, nxmit = 0;
	u16 xdp_prod;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (unlikely(!netif_running(dev)))
		return -ENETDOWN;

	i = smp_processor_id() % edev->total_xdp_queues;
	xdp_tx = edev->fp_array[i].xdp_tx;

	spin_lock(&xdp_tx->xdp_tx_lock);

	for (i = 0; i < n_frames; i++) {
		xdpf = frames[i];

		mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
					 DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dmadev, mapping)))
			break;

		if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
					   NULL, xdpf)))
			break;
		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);

		xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
		qede_update_tx_producer(xdp_tx);
	}

	spin_unlock(&xdp_tx->xdp_tx_lock);

	return nxmit;
}
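	/* Spread redirected frames across the XDP Tx queues by CPU; the
	 * per-queue lock below serializes CPUs that map to the same queue.
	 */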

int qede_txq_has_work(struct qede_tx_queue *txq)
{
	u16 hw_bd_cons;

	/* Tell compiler that consumer and producer can change */
	barrier();
	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
		return 0;

	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}
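	/* Treat the transient case where the driver's consumer already sits
	 * one past the HW consumer as "no work".
	 */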

static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
	struct device *dev = &edev->pdev->dev;
	struct xdp_frame *xdpf;
	u16 hw_bd_cons;

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		xdp_info = xdp_arr + txq->sw_tx_cons;
		xdpf = xdp_info->xdpf;

		if (xdpf) {
			dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
					 DMA_TO_DEVICE);
			xdp_return_frame(xdpf);

			xdp_info->xdpf = NULL;
		} else {
			dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
			__free_page(xdp_info->page);
		}

		qed_chain_consume(&txq->tx_pbl);
		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
		txq->xmit_pkts++;
	}
}

static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct netdev_queue *netdev_txq;
	u16 hw_bd_cons;
	int rc;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		int len = 0;

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
				  hw_bd_cons,
				  qed_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
		txq->xmit_pkts++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
		/* Taking tx_lock is needed to prevent re-enabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in qede_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		      >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");
		}

		__netif_tx_unlock(netdev_txq);
	}

	return 0;
}

bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
	u16 hw_comp_cons, sw_comp_cons;

	/* Tell compiler that status block fields can change */
	barrier();

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	return hw_comp_cons != sw_comp_cons;
}

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;
}

/* This function reuses a buffer (from an offset) from the consumer index
 * to the producer index in the BD ring.
 */
static inline void qede_reuse_page(struct qede_rx_queue *rxq,
				   struct sw_rx_data *curr_cons)
{
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	*curr_prod = *curr_cons;

	new_mapping = curr_prod->mapping + curr_prod->page_offset;

	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
					  rxq->rx_headroom);

	rxq->sw_rx_prod++;
	curr_cons->data = NULL;
}

/* In case of allocation failures, reuse buffers from the consumer index
 * to produce buffers for the firmware.
 */
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
{
	struct sw_rx_data *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
		qede_reuse_page(rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);
	}
}

static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
					 struct sw_rx_data *curr_cons)
{
	/* Move to the next segment in the page */
	curr_cons->page_offset += rxq->rx_buf_seg_size;

	if (curr_cons->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			/* Since we failed to allocate a new buffer, the
			 * current one can be used again.
			 */
			curr_cons->page_offset -= rxq->rx_buf_seg_size;

			return -ENOMEM;
		}

		dma_unmap_page(rxq->dev, curr_cons->mapping,
			       PAGE_SIZE, rxq->data_direction);
	} else {
		/* Increment the page refcount so the network stack doesn't
		 * take ownership of a page the driver may recycle multiple
		 * times.
		 */
		page_ref_inc(curr_cons->data);
		qede_reuse_page(rxq, curr_cons);
	}

	return 0;
}
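	/* Last segment of the page consumed - replace the page entirely;
	 * otherwise recycle the remaining segments under an extra reference.
	 */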

void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);
}

static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	enum rss_hash_type htype;
	u32 hash = 0;

	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
	if (htype) {
		hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
			     (htype == RSS_HASH_TYPE_IPV6)) ?
			    PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
		hash = le32_to_cpu(rss_hash);
	}
	skb_set_hash(skb, hash, hash_type);
}

static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{
	skb_checksum_none_assert(skb);

	if (csum_flag & QEDE_CSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
		skb->csum_level = 1;
		skb->encapsulation = 1;
	}
}

static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct qede_rx_queue *rxq,
				    struct sk_buff *skb, u16 vlan_tag)
{
	if (vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&fp->napi, skb);
}

static void qede_set_gro_params(struct qede_dev *edev,
				struct sk_buff *skb,
				struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);

	if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
	    PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
				    cqe->header_len;
}
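	/* gso_size is the aggregation's MSS: the bytes carried on the first
	 * BD minus the packet headers.
	 */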

static int qede_fill_frag_skb(struct qede_dev *edev,
			      struct qede_rx_queue *rxq,
			      u8 tpa_agg_index, u16 len_on_bd)
{
	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
							 NUM_RX_BDS_MAX];
	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
	struct sk_buff *skb = tpa_info->skb;

	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
		goto out;

	/* Add one frag and update the appropriate fields in the skb */
	skb_fill_page_desc(skb, tpa_info->frag_id++,
			   current_bd->data,
			   current_bd->page_offset + rxq->rx_headroom,
			   len_on_bd);

	if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
		/* Increment the page refcount to reuse it on allocation
		 * failure, so that it doesn't get freed while freeing the SKB.
		 */
		page_ref_inc(current_bd->data);
		goto out;
	}

	qede_rx_bd_ring_consume(rxq);

	skb->data_len += len_on_bd;
	skb->truesize += rxq->rx_buf_seg_size;
	skb->len += len_on_bd;

	return 0;

out:
	tpa_info->state = QEDE_AGG_STATE_ERROR;
	qede_recycle_rx_bd_ring(rxq, 1);

	return -ENOMEM;
}

static bool qede_tunn_exist(u16 flag)
{
	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
}

static u8 qede_check_tunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 tcsum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return QEDE_CSUM_UNNECESSARY | tcsum;
}

static inline struct sk_buff *
qede_build_skb(struct qede_rx_queue *rxq,
	       struct sw_rx_data *bd, u16 len, u16 pad)
{
	struct sk_buff *skb;
	void *buf;

	buf = page_address(bd->data) + bd->page_offset;
	skb = build_skb(buf, rxq->rx_buf_seg_size);

	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, pad);
	skb_put(skb, len);

	return skb;
}

static struct sk_buff *
qede_tpa_rx_build_skb(struct qede_dev *edev,
		      struct qede_rx_queue *rxq,
		      struct sw_rx_data *bd, u16 len, u16 pad,
		      bool alloc_skb)
{
	struct sk_buff *skb;

	skb = qede_build_skb(rxq, bd, len, pad);
	bd->page_offset += rxq->rx_buf_seg_size;

	if (bd->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			DP_NOTICE(edev,
				  "Failed to allocate RX buffer for tpa start\n");
			bd->page_offset -= rxq->rx_buf_seg_size;
			page_ref_inc(bd->data);
			dev_kfree_skb_any(skb);
			return NULL;
		}
	} else {
		page_ref_inc(bd->data);
		qede_reuse_page(rxq, bd);
	}

	/* We've consumed the first BD and prepared an SKB */
	qede_rx_bd_ring_consume(rxq);

	return skb;
}

static struct sk_buff *
qede_rx_build_skb(struct qede_dev *edev,
		  struct qede_rx_queue *rxq,
		  struct sw_rx_data *bd, u16 len, u16 pad)
{
	struct sk_buff *skb = NULL;

	/* For smaller frames we still allocate an SKB and memcpy the data,
	 * so that the page segment can be reused instead of unmapped.
	 */
	if ((len + pad <= edev->rx_copybreak)) {
		unsigned int offset = bd->page_offset + pad;

		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
		if (unlikely(!skb))
			return NULL;

		skb_reserve(skb, pad);
		skb_put_data(skb, page_address(bd->data) + offset, len);
		qede_reuse_page(rxq, bd);
		goto out;
	}

	skb = qede_build_skb(rxq, bd, len, pad);

	if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
		/* Increment the page refcount to reuse it on allocation
		 * failure, so that it doesn't get freed while freeing the
		 * SKB [as it's already mapped there].
		 */
		page_ref_inc(bd->data);
		dev_kfree_skb_any(skb);
		return NULL;
	}
out:
	/* We've consumed the first BD and prepared an SKB */
	qede_rx_bd_ring_consume(rxq);

	return skb;
}

static void qede_tpa_start(struct qede_dev *edev,
			   struct qede_rx_queue *rxq,
			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	struct sw_rx_data *sw_rx_data_cons;
	u16 pad;

	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
	pad = cqe->placement_offset + rxq->rx_headroom;

	tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
					      le16_to_cpu(cqe->len_on_first_bd),
					      pad, false);
	tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset;
	tpa_info->buffer.mapping = sw_rx_data_cons->mapping;

	if (unlikely(!tpa_info->skb)) {
		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");

		/* Consume from the ring but do not produce, since the buffer
		 * might still be used by the FW; it will be reused at TPA end.
		 */
		tpa_info->tpa_start_fail = true;
		qede_rx_bd_ring_consume(rxq);
		tpa_info->state = QEDE_AGG_STATE_ERROR;
		goto cons_buf;
	}

	tpa_info->frag_id = 0;
	tpa_info->state = QEDE_AGG_STATE_START;

	if ((le16_to_cpu(cqe->pars_flags.flags) >>
	     PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
	    PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
		tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	else
		tpa_info->vlan_tag = 0;

	qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);

	/* This is needed in order to enable forwarding support */
	qede_set_gro_params(edev, tpa_info->skb, cqe);

cons_buf: /* We still need to handle bd_len_list to consume buffers */
	if (likely(cqe->bw_ext_bd_len_list[0]))
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->bw_ext_bd_len_list[0]));

	if (unlikely(cqe->bw_ext_bd_len_list[1])) {
		DP_ERR(edev,
		       "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
		tpa_info->state = QEDE_AGG_STATE_ERROR;
	}
}

#ifdef CONFIG_INET
static void qede_gro_ip_csum(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);

	tcp_gro_complete(skb);
}

static void qede_gro_ipv6_csum(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	tcp_gro_complete(skb);
}
#endif

static void qede_gro_receive(struct qede_dev *edev,
			     struct qede_fastpath *fp,
			     struct sk_buff *skb,
			     u16 vlan_tag)
{
	/* The FW can send a single MTU-sized packet from the GRO flow due to
	 * aggregation timeout, last segment, etc., which is not expected to
	 * be a GRO packet. If the skb has zero frags, simply push it up the
	 * stack as a non-GSO skb.
	 */
	if (unlikely(!skb->data_len)) {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
		goto send_skb;
	}

#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		skb_reset_network_header(skb);

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			qede_gro_ip_csum(skb);
			break;
		case htons(ETH_P_IPV6):
			qede_gro_ipv6_csum(skb);
			break;
		default:
			DP_ERR(edev,
			       "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
			       ntohs(skb->protocol));
		}
	}
#endif

send_skb:
	skb_record_rx_queue(skb, fp->rxq->rxq_id);
	qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
}

static inline void qede_tpa_cont(struct qede_dev *edev,
				 struct qede_rx_queue *rxq,
				 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
	int i;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));

	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA cont with more than a single len_list entry\n");
}

static int qede_tpa_end(struct qede_dev *edev,
			struct qede_fastpath *fp,
			struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_agg_info *tpa_info;
	struct sk_buff *skb;
	int i;

	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	skb = tpa_info->skb;

	if (tpa_info->buffer.page_offset == PAGE_SIZE)
		dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
			       PAGE_SIZE, rxq->data_direction);

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));
	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA end with more than a single len_list entry\n");

	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
		goto err;

	/* Sanity */
	if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
		DP_ERR(edev,
		       "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
		       cqe->num_of_bds, tpa_info->frag_id);
	if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
		DP_ERR(edev,
		       "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
		       le16_to_cpu(cqe->total_packet_len), skb->len);

	/* Finalize the SKB */
	skb->protocol = eth_type_trans(skb, edev->ndev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);

	qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);

	tpa_info->state = QEDE_AGG_STATE_NONE;

	return 1;
err:
	tpa_info->state = QEDE_AGG_STATE_NONE;

	if (tpa_info->tpa_start_fail) {
		qede_reuse_page(rxq, &tpa_info->buffer);
		tpa_info->tpa_start_fail = false;
	}

	dev_kfree_skb_any(tpa_info->skb);
	tpa_info->skb = NULL;
	return 0;
}
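	/* The page backing the aggregation start is unmapped here only if it
	 * was fully consumed when the SKB was built in qede_tpa_start().
	 */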

static u8 qede_check_notunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 csum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return csum;
}

static u8 qede_check_csum(u16 flag)
{
	if (!qede_tunn_exist(flag))
		return qede_check_notunn_csum(flag);
	else
		return qede_check_tunn_csum(flag);
}

static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
				      u16 flag)
{
	u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;

	if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
			     ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
	    (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
		     PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
		return true;

	return false;
}
107762306a36Sopenharmony_ci
/* Return true iff packet is to be passed to stack */
static bool qede_rx_xdp(struct qede_dev *edev,
			struct qede_fastpath *fp,
			struct qede_rx_queue *rxq,
			struct bpf_prog *prog,
			struct sw_rx_data *bd,
			struct eth_fast_path_rx_reg_cqe *cqe,
			u16 *data_offset, u16 *len)
{
	struct xdp_buff xdp;
	enum xdp_action act;

	xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
	xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
			 *len, false);

	act = bpf_prog_run_xdp(prog, &xdp);

	/* Recalculate, as XDP might have changed the headers */
	*data_offset = xdp.data - xdp.data_hard_start;
	*len = xdp.data_end - xdp.data;

	if (act == XDP_PASS)
		return true;

	/* Count number of packets not to be passed to stack */
	rxq->xdp_no_pass++;

	switch (act) {
	case XDP_TX:
		/* We need the replacement buffer before transmit. */
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			qede_recycle_rx_bd_ring(rxq, 1);

			trace_xdp_exception(edev->ndev, prog, act);
			break;
		}

		/* Even if transmission fails from here on, we'd still have
		 * to free the current buffer, as its replacement was already
		 * allocated.
		 */
		if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
					   *data_offset, *len, bd->data,
					   NULL))) {
			dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
				       rxq->data_direction);
			__free_page(bd->data);

			trace_xdp_exception(edev->ndev, prog, act);
		} else {
			dma_sync_single_for_device(rxq->dev,
						   bd->mapping + *data_offset,
						   *len, rxq->data_direction);
			fp->xdp_xmit |= QEDE_XDP_TX;
		}

		/* Regardless, we've consumed an Rx BD */
		qede_rx_bd_ring_consume(rxq);
		break;
	case XDP_REDIRECT:
		/* We need the replacement buffer before redirecting. */
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			qede_recycle_rx_bd_ring(rxq, 1);

			trace_xdp_exception(edev->ndev, prog, act);
			break;
		}

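		/* On redirect the page leaves our Rx ring for good, so drop
		 * its streaming DMA mapping before handing it over.
		 */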
		dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
			       rxq->data_direction);

		if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog)))
			DP_NOTICE(edev, "Failed to redirect the packet\n");
		else
			fp->xdp_xmit |= QEDE_XDP_REDIRECT;

		qede_rx_bd_ring_consume(rxq);
		break;
	default:
		bpf_warn_invalid_xdp_action(edev->ndev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(edev->ndev, prog, act);
		fallthrough;
	case XDP_DROP:
		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
	}

	return false;
}

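/* Attach the remaining BDs of a multi-BD (jumbo) packet to the skb as page
 * fragments. Returns 0 on success, or the number of BDs left unconsumed if
 * a replacement buffer couldn't be allocated mid-way.
 */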
static int qede_rx_build_jumbo(struct qede_dev *edev,
			       struct qede_rx_queue *rxq,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_reg_cqe *cqe,
			       u16 first_bd_len)
{
	u16 pkt_len = le16_to_cpu(cqe->pkt_len);
	struct sw_rx_data *bd;
	u16 bd_cons_idx;
	u8 num_frags;

	pkt_len -= first_bd_len;

	/* We've already used one BD for the SKB. Now take care of the rest */
	for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
		u16 cur_size = min_t(u16, pkt_len, rxq->rx_buf_size);

		if (unlikely(!cur_size)) {
			DP_ERR(edev,
			       "Still got %d BDs for mapping jumbo, but length became 0\n",
			       num_frags);
			goto out;
		}

		/* We need a replacement buffer for each BD */
		if (unlikely(qede_alloc_rx_buffer(rxq, true)))
			goto out;

		/* Now that we've allocated the replacement buffer,
		 * we can safely consume the next BD and map it to the SKB.
		 */
		bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
		bd = &rxq->sw_rx_ring[bd_cons_idx];
		qede_rx_bd_ring_consume(rxq);

		dma_unmap_page(rxq->dev, bd->mapping,
			       PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, bd->data,
				rxq->rx_headroom, cur_size, PAGE_SIZE);

		pkt_len -= cur_size;
	}

	if (unlikely(pkt_len))
		DP_ERR(edev,
		       "Mapped all BDs of jumbo, but still have %d bytes\n",
		       pkt_len);

out:
	return num_frags;
}

static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
				   struct qede_fastpath *fp,
				   struct qede_rx_queue *rxq,
				   union eth_rx_cqe *cqe,
				   enum eth_rx_cqe_type type)
{
	switch (type) {
	case ETH_RX_CQE_TYPE_TPA_START:
		qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
		return 0;
	case ETH_RX_CQE_TYPE_TPA_CONT:
		qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
		return 0;
	case ETH_RX_CQE_TYPE_TPA_END:
		return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
	default:
		return 0;
	}
}

static int qede_rx_process_cqe(struct qede_dev *edev,
			       struct qede_fastpath *fp,
			       struct qede_rx_queue *rxq)
{
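	/* The attached XDP program may be swapped at runtime, so sample the
	 * pointer once with READ_ONCE() and use that copy for this CQE.
	 */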
	struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	u16 len, pad, bd_cons_idx, parse_flag;
	enum eth_rx_cqe_type cqe_type;
	union eth_rx_cqe *cqe;
	struct sw_rx_data *bd;
	struct sk_buff *skb;
	__le16 flags;
	u8 csum_flag;

	/* Get the CQE from the completion ring */
	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
	cqe_type = cqe->fast_path_regular.type;

	/* Process an unlikely slowpath event */
	if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
		struct eth_slow_path_rx_cqe *sp_cqe;

		sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
		edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
		return 0;
	}

	/* Handle TPA CQEs */
	if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
		return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);

	/* Get the data from the SW ring; consume it only once it's evident
	 * we won't recycle it.
	 */
	bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
	bd = &rxq->sw_rx_ring[bd_cons_idx];

	fp_cqe = &cqe->fast_path_regular;
	len = le16_to_cpu(fp_cqe->len_on_first_bd);
	pad = fp_cqe->placement_offset + rxq->rx_headroom;

	/* Run eBPF program if one is attached */
	if (xdp_prog)
		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
				 &pad, &len))
			return 0;

	/* If this is an error packet then drop it */
	flags = cqe->fast_path_regular.pars_flags.flags;
	parse_flag = le16_to_cpu(flags);

	csum_flag = qede_check_csum(parse_flag);
	if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
		if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag))
			rxq->rx_ip_frags++;
		else
			rxq->rx_hw_errors++;
	}

	/* Basic validation passed; we need to prepare an SKB. This also
	 * guarantees the first BD is finally consumed upon success.
	 */
	skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
	if (!skb) {
		rxq->rx_alloc_errors++;
		qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
		return 0;
	}

	/* In case of a jumbo packet, several PAGE_SIZEd buffers will be
	 * pointed to by a single CQE.
	 */
	if (fp_cqe->bd_num > 1) {
		u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
							 fp_cqe, len);

		if (unlikely(unmapped_frags > 0)) {
			qede_recycle_rx_bd_ring(rxq, unmapped_frags);
			dev_kfree_skb_any(skb);
			return 0;
		}
	}

	/* The SKB contains all the data. Now fill in the remaining metadata */
	skb->protocol = eth_type_trans(skb, edev->ndev);
	qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
	qede_set_skb_csum(skb, csum_flag);
	skb_record_rx_queue(skb, rxq->rxq_id);
	qede_ptp_record_rx_ts(edev, cqe, skb);

	/* SKB is prepared - pass it to stack */
	qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));

	return 1;
}

static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_dev *edev = fp->edev;
	int work_done = 0, rcv_pkts = 0;
	u16 hw_comp_cons, sw_comp_cons;

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from speculatively reading a CQE
	 * / BD in the while-loop before reading hw_comp_cons. Without it, if
	 * the CQE were read before FW writes it, and FW then writes both the
	 * CQE and the SB before the CPU reads hw_comp_cons, we would process
	 * a stale CQE.
	 */
	rmb();

	/* Loop to complete all indicated BDs */
	while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
		rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
		work_done++;
	}

	rxq->rcv_pkts += rcv_pkts;

	/* Allocate replacement buffers */
	while (rxq->num_rx_buffers - rxq->filled_buffers)
		if (qede_alloc_rx_buffer(rxq, false))
			break;

	/* Update producers */
	qede_update_rx_prod(edev, rxq);

	return work_done;
}

static bool qede_poll_is_more_work(struct qede_fastpath *fp)
{
	qed_sb_update_sb_idx(fp->sb_info);

	/* *_has_*_work() reads the status block, so we need to ensure that
	 * the status block indices have actually been read
	 * (qed_sb_update_sb_idx) before this check (*_has_*_work), so that we
	 * don't write a "newer" value of the status block to HW. If there was
	 * a DMA right after qede_has_rx_work and no rmb, the read in
	 * qed_sb_update_sb_idx could be postponed to just before *_ack_sb, in
	 * which case there would never be another interrupt until the status
	 * block is updated again, even though work is still pending.
	 */
	rmb();

	if (likely(fp->type & QEDE_FASTPATH_RX))
		if (qede_has_rx_work(fp->rxq))
			return true;

	if (fp->type & QEDE_FASTPATH_XDP)
		if (qede_txq_has_work(fp->xdp_tx))
			return true;

	if (likely(fp->type & QEDE_FASTPATH_TX)) {
		int cos;

		for_each_cos_in_txq(fp->edev, cos) {
			if (qede_txq_has_work(&fp->txq[cos]))
				return true;
		}
	}

	return false;
}

/*********************
 * NDO & API related *
 *********************/
int qede_poll(struct napi_struct *napi, int budget)
{
	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
						napi);
	struct qede_dev *edev = fp->edev;
	int rx_work_done = 0;
	u16 xdp_prod;

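	/* xdp_xmit accumulates the XDP verdicts seen during this poll, so the
	 * redirect flush and the XDP Tx doorbell each happen at most once.
	 */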
	fp->xdp_xmit = 0;

	if (likely(fp->type & QEDE_FASTPATH_TX)) {
		int cos;

		for_each_cos_in_txq(fp->edev, cos) {
			if (qede_txq_has_work(&fp->txq[cos]))
				qede_tx_int(edev, &fp->txq[cos]);
		}
	}

	if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
		qede_xdp_tx_int(edev, fp->xdp_tx);

	rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
			qede_has_rx_work(fp->rxq)) ?
			qede_rx_int(fp, budget) : 0;

	if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
		xdp_do_flush();

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (rx_work_done < budget || !budget) {
		if (!qede_poll_is_more_work(fp)) {
			napi_complete_done(napi, rx_work_done);

			/* Update and reenable interrupts */
			qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
		} else {
			rx_work_done = budget;
		}
	}

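	/* Ring the XDP Tx doorbell once per poll, after all frames are queued */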
	if (fp->xdp_xmit & QEDE_XDP_TX) {
		xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);

		fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
		qede_update_tx_producer(fp->xdp_tx);
	}

	return rx_work_done;
}

irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
{
	struct qede_fastpath *fp = fp_cookie;

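	/* Mask the status block; qede_poll() re-enables it via qed_sb_ack()
	 * once no more work is pending.
	 */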
	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);

	napi_schedule_irqoff(&fp->napi);
	return IRQ_HANDLED;
}

/* Main transmit function */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct netdev_queue *netdev_txq;
	struct qede_tx_queue *txq;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_2nd_bd *second_bd = NULL;
	struct eth_tx_3rd_bd *third_bd = NULL;
	struct eth_tx_bd *tx_data_bd = NULL;
	u16 txq_index, val = 0;
	u8 nbd = 0;
	dma_addr_t mapping;
	int rc, frag_idx = 0, ipv6_ext = 0;
	u8 xmit_type;
	u16 idx;
	u16 hlen;
	bool data_split = false;

	/* Get tx-queue context and netdev index */
	txq_index = skb_get_queue_mapping(skb);
	WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc);
	txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index);
	netdev_txq = netdev_get_tx_queue(ndev, txq_index);

	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));

	xmit_type = qede_xmit_type(skb, &ipv6_ext);

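	/* If the skb may need more BDs than a non-LSO packet is allowed,
	 * linearize it so it fits.
	 */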
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
	if (qede_pkt_req_lin(skb, xmit_type)) {
		if (skb_linearize(skb)) {
			txq->tx_mem_alloc_err++;

			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod;
	txq->sw_tx_ring.skbs[idx].skb = skb;
	first_bd = (struct eth_tx_1st_bd *)
		   qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		qede_ptp_tx_ts(edev, skb);

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(txq->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(txq->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		qede_free_failed_tx_pkt(txq, first_bd, 0, false);
		qede_update_tx_producer(txq);
		return NETDEV_TX_OK;
	}
	nbd++;
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* In case there is IPv6 with extension headers or LSO we need 2nd and
	 * 3rd BDs.
	 */
	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
		second_bd = (struct eth_tx_2nd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(second_bd, 0, sizeof(*second_bd));

		nbd++;
		third_bd = (struct eth_tx_3rd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(third_bd, 0, sizeof(*third_bd));

		nbd++;
		/* We need to fill in additional data in second_bd... */
		tx_data_bd = (struct eth_tx_bd *)second_bd;
	}

	if (skb_vlan_tag_present(skb)) {
		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
	}

	/* Fill the parsing flags & params according to the requested offload */
	if (xmit_type & XMIT_L4_CSUM) {
		/* We don't re-calculate IP checksum as it is already done by
		 * the upper stack
		 */
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

		if (xmit_type & XMIT_ENC) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;

			val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
		}

		/* Legacy FW had inverted behavior for this bit, i.e. it had
		 * to be set to prevent FW from touching encapsulated packets
		 * when it didn't need to.
		 */
		if (unlikely(txq->is_legacy))
			val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);

		/* If the packet is IPv6 with extension header, indicate that
		 * to FW and pass a few params, since the device cracker
		 * doesn't support parsing IPv6 with extension header/s.
		 */
		if (unlikely(ipv6_ext))
			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
	}

	if (xmit_type & XMIT_LSO) {
		first_bd->data.bd_flags.bitfields |=
			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
		third_bd->data.lso_mss =
			cpu_to_le16(skb_shinfo(skb)->gso_size);

		if (unlikely(xmit_type & XMIT_ENC)) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;

			if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
				u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;

				first_bd->data.bd_flags.bitfields |= 1 << tmp;
			}
			hlen = qede_get_skb_hlen(skb, true);
		} else {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			hlen = qede_get_skb_hlen(skb, false);
		}

		/* @@@TBD - check whether this is still needed if it is not
		 * removed
		 */
		third_bd->data.bitfields |=
			cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);

		/* Make life easier for FW, which can't handle header and
		 * data on the same BD. If we need to split, use the second
		 * bd...
		 */
		if (unlikely(skb_headlen(skb) > hlen)) {
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "TSO split header size is %d (%x:%x)\n",
				   first_bd->nbytes, first_bd->addr.hi,
				   first_bd->addr.lo);

			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
					   le32_to_cpu(first_bd->addr.lo)) +
					   hlen;

			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
					      le16_to_cpu(first_bd->nbytes) -
					      hlen);

			/* this marks the BD as one that has no
			 * individual mapping
			 */
			txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;

			first_bd->nbytes = cpu_to_le16(hlen);

			tx_data_bd = (struct eth_tx_bd *)third_bd;
			data_split = true;
		}
	} else {
		if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
			DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
			qede_free_failed_tx_pkt(txq, first_bd, 0, false);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}

		val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
			 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
	}

	first_bd->data.bitfields = cpu_to_le16(val);

	/* Handle a fragmented skb; frags that land in the 2nd and 3rd BDs
	 * need special handling.
	 */
	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}

		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
			tx_data_bd = (struct eth_tx_bd *)third_bd;
		else
			tx_data_bd = NULL;

		frag_idx++;
	}

	/* map remaining frags into the 4th, 5th, ... BDs */
	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
		tx_data_bd = (struct eth_tx_bd *)
			     qed_chain_produce(&txq->tx_pbl);

		memset(tx_data_bd, 0, sizeof(*tx_data_bd));

		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}
	}

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = nbd;

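	/* Account the bytes with BQL before ringing the doorbell */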
	netdev_tx_sent_queue(netdev_txq, skb->len);

	skb_tx_timestamp(skb);

	/* Advance the packet producer only now, since mapping of pages may
	 * have failed earlier, and it must not advance for a dropped packet.
	 */
	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;

	/* 'next page' entries are counted in the producer value */
	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

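	/* Defer the doorbell while the stack signals more frames are coming
	 * (xmit_more); the last skb of the batch, or a stopped queue, rings it.
	 */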
	if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
		qede_update_tx_producer(txq);

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) <
		     (MAX_SKB_FRAGS + 1))) {
		if (netdev_xmit_more())
			qede_update_tx_producer(txq);

		netif_tx_stop_queue(netdev_txq);
		txq->stopped_cnt++;
		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
			   "Stop queue was called\n");
		/* paired memory barrier is in qede_tx_int(); we have to keep
		 * the ordering of set_bit() in netif_tx_stop_queue() and the
		 * read of fp->bd_tx_cons
		 */
		smp_mb();

		if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
		     (MAX_SKB_FRAGS + 1)) &&
		    (edev->state == QEDE_STATE_OPEN)) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "Wake queue was called\n");
		}
	}

	return NETDEV_TX_OK;
}

u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
		      struct net_device *sb_dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	int total_txq;

	total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;

	return QEDE_TSS_COUNT(edev) ?
		netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
}

/* 8B udp header + 8B base tunnel header + 32B option length */
#define QEDE_MAX_TUN_HDR_LEN 48

netdev_features_t qede_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	if (skb->encapsulation) {
		u8 l4_proto = 0;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			l4_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			l4_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return features;
		}

		/* Disable offloads for geneve tunnels, as HW can't parse
		 * a geneve header whose option length exceeds 32 bytes, and
		 * for UDP ports that are not offloaded.
		 */
		if (l4_proto == IPPROTO_UDP) {
			struct qede_dev *edev = netdev_priv(dev);
			u16 hdrlen, vxln_port, gnv_port;

			hdrlen = QEDE_MAX_TUN_HDR_LEN;
			vxln_port = edev->vxlan_dst_port;
			gnv_port = edev->geneve_dst_port;

			if ((skb_inner_mac_header(skb) -
			     skb_transport_header(skb)) > hdrlen ||
			     (ntohs(udp_hdr(skb)->dest) != vxln_port &&
			      ntohs(udp_hdr(skb)->dest) != gnv_port))
				return features & ~(NETIF_F_CSUM_MASK |
						    NETIF_F_GSO_MASK);
		} else if (l4_proto == IPPROTO_IPIP) {
			/* IPIP tunnels are unknown to the device or at least
			 * unsupported natively; offloads for them can't be
			 * done trivially, so disable them for such skbs.
			 */
			return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
		}
	}

	return features;
}