// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bpf_trace.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include "qede_ptp.h"

#include <linux/qed/qed_if.h>
#include "qede.h"
/*********************************
 * Content also used by slowpath *
 *********************************/

int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
{
	struct sw_rx_data *sw_rx_data;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	struct page *data;

	/* In case lazy-allocation is allowed, postpone allocation until the
	 * end of the NAPI run. We'd still need to make sure the Rx ring has
	 * sufficient buffers to guarantee an additional Rx interrupt.
	 */
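	/* Note: the '12' used below is a driver heuristic - keeping at least
	 * that many buffers posted is assumed sufficient to defer the refill.
	 */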
	if (allow_lazy && likely(rxq->filled_buffers > 12)) {
		rxq->filled_buffers--;
		return 0;
	}

	data = alloc_pages(GFP_ATOMIC, 0);
	if (unlikely(!data))
		return -ENOMEM;

	/* Map the entire page, as it may later be split into
	 * multiple Rx buffer segments.
	 */
	mapping = dma_map_page(rxq->dev, data, 0,
			       PAGE_SIZE, rxq->data_direction);
	if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
		__free_page(data);
		return -ENOMEM;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	sw_rx_data->page_offset = 0;
	sw_rx_data->data = data;
	sw_rx_data->mapping = mapping;

	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
	WARN_ON(!rx_bd);
	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
				     rxq->rx_headroom);

	rxq->sw_rx_prod++;
	rxq->filled_buffers++;

	return 0;
}

/* Unmap the data and free skb */
int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
{
	u16 idx = txq->sw_tx_cons;
	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_bd *tx_data_bd;
	int bds_consumed = 0;
	int nbds;
	bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
	int i, split_bd_len = 0;

	if (unlikely(!skb)) {
		DP_ERR(edev,
		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
		return -1;
	}

	*len = skb->len;

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

	bds_consumed++;

	nbds = first_bd->data.nbds;

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		bds_consumed++;
	}
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

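	/* Any remaining BDs (e.g. the parsing BDs of an offloaded packet)
	 * carry no DMA mapping of their own; just consume them.
	 */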
	while (bds_consumed++ < nbds)
		qed_chain_consume(&txq->tx_pbl);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring.skbs[idx].skb = NULL;
	txq->sw_tx_ring.skbs[idx].flags = 0;

	return 0;
}

/* Unmap the data and free skb when mapping failed during start_xmit */
static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd, bool data_split)
{
	u16 idx = txq->sw_tx_prod;
	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
	struct eth_tx_bd *tx_data_bd;
	int i, split_bd_len = 0;

	/* Return prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
					  qed_chain_produce(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		nbd--;
	}

	dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < nbd; i++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		if (tx_data_bd->nbytes)
			dma_unmap_page(txq->dev,
				       BD_UNMAP_ADDR(tx_data_bd),
				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	/* Return again prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring.skbs[idx].skb = NULL;
	txq->sw_tx_ring.skbs[idx].flags = 0;
}

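/* Classify an skb into a bitmask of XMIT_* flags (plain/L4-csum, LSO,
 * encapsulation) which drives the BD setup in qede_start_xmit().
 */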
static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
{
	u32 rc = XMIT_L4_CSUM;
	__be16 l3_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	l3_proto = vlan_get_protocol(skb);
	if (l3_proto == htons(ETH_P_IPV6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*ipv6_ext = 1;

	if (skb->encapsulation) {
		rc |= XMIT_ENC;
		if (skb_is_gso(skb)) {
			unsigned short gso_type = skb_shinfo(skb)->gso_type;

			if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
			    (gso_type & SKB_GSO_GRE_CSUM))
				rc |= XMIT_ENC_GSO_L4_CSUM;

			rc |= XMIT_LSO;
			return rc;
		}
	}

	if (skb_is_gso(skb))
		rc |= XMIT_LSO;

	return rc;
}

static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
{
	u8 l4_proto;
	u16 bd2_bits1 = 0, bd2_bits2 = 0;

	bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
		     ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		    << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

	bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
		l4_proto = ipv6_hdr(skb)->nexthdr;
	else
		l4_proto = ip_hdr(skb)->protocol;

	if (l4_proto == IPPROTO_UDP)
		bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

	if (third_bd)
		third_bd->data.bitfields |=
			cpu_to_le16(((tcp_hdrlen(skb) / 4) &
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);

	second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}

static int map_frag_to_bd(struct qede_tx_queue *txq,
			  skb_frag_t *frag, struct eth_tx_bd *bd)
{
	dma_addr_t mapping;

	/* Map skb non-linear frag data for DMA */
	mapping = skb_frag_dma_map(txq->dev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(txq->dev, mapping)))
		return -ENOMEM;

	/* Setup the data pointer of the frag data */
	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

	return 0;
}

static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
	if (is_encap_pkt)
		return (skb_inner_transport_header(skb) +
			inner_tcp_hdrlen(skb) - skb->data);
	else
		return (skb_transport_header(skb) +
			tcp_hdrlen(skb) - skb->data);
}

/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
{
	int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;

	if (xmit_type & XMIT_LSO) {
		int hlen;

		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);

		/* linear payload would require its own BD */
		if (skb_headlen(skb) > hlen)
			allowed_frags--;
	}

	return (skb_shinfo(skb)->nr_frags > allowed_frags);
}
#endif

static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
{
	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* Fence required to flush the write combined buffer, since another
	 * CPU may write to the same doorbell address and data may be lost
	 * due to relaxed order nature of write combined bar.
	 */
	wmb();
}

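/* Build a single-BD Tx descriptor for an XDP buffer. 'page' is set when
 * recycling a local Rx page (XDP_TX), while 'xdpf' is set for frames sent
 * via .ndo_xdp_xmit(); completion handling differs accordingly.
 */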
static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
			 u16 len, struct page *page, struct xdp_frame *xdpf)
{
	struct eth_tx_1st_bd *bd;
	struct sw_tx_xdp *xdp;
	u16 val;

	if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
		     txq->num_tx_buffers)) {
		txq->stopped_cnt++;
		return -ENOMEM;
	}

	bd = qed_chain_produce(&txq->tx_pbl);
	bd->data.nbds = 1;
	bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);

	val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
	       ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;

	bd->data.bitfields = cpu_to_le16(val);

	/* We can safely ignore the offset, as it's 0 for XDP */
	BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);

	xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
	xdp->mapping = dma;
	xdp->page = page;
	xdp->xdpf = xdpf;

	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;

	return 0;
}

int qede_xdp_transmit(struct net_device *dev, int n_frames,
		      struct xdp_frame **frames, u32 flags)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct device *dmadev = &edev->pdev->dev;
	struct qede_tx_queue *xdp_tx;
	struct xdp_frame *xdpf;
	dma_addr_t mapping;
	int i, drops = 0;
	u16 xdp_prod;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (unlikely(!netif_running(dev)))
		return -ENETDOWN;

	i = smp_processor_id() % edev->total_xdp_queues;
	xdp_tx = edev->fp_array[i].xdp_tx;

	spin_lock(&xdp_tx->xdp_tx_lock);

	for (i = 0; i < n_frames; i++) {
		xdpf = frames[i];

		mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
					 DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dmadev, mapping))) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;

			continue;
		}

		if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
					   NULL, xdpf))) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (flags & XDP_XMIT_FLUSH) {
		xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);

		xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
		qede_update_tx_producer(xdp_tx);
	}

	spin_unlock(&xdp_tx->xdp_tx_lock);

	return n_frames - drops;
}

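/* Returns non-zero when the HW consumer index indicates completed Tx BDs
 * that the driver has not yet reaped.
 */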
int qede_txq_has_work(struct qede_tx_queue *txq)
{
	u16 hw_bd_cons;

	/* Tell compiler that consumer and producer can change */
	barrier();
	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
		return 0;

	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}

static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
	struct device *dev = &edev->pdev->dev;
	struct xdp_frame *xdpf;
	u16 hw_bd_cons;

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		xdp_info = xdp_arr + txq->sw_tx_cons;
		xdpf = xdp_info->xdpf;

		if (xdpf) {
			dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
					 DMA_TO_DEVICE);
			xdp_return_frame(xdpf);

			xdp_info->xdpf = NULL;
		} else {
			dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
			__free_page(xdp_info->page);
		}

		qed_chain_consume(&txq->tx_pbl);
		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
		txq->xmit_pkts++;
	}
}

static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct netdev_queue *netdev_txq;
	u16 hw_bd_cons;
	int rc;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		int len = 0;

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
				  hw_bd_cons,
				  qed_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
		txq->xmit_pkts++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
		/* Taking tx_lock is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * got suspended in qede_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		      >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");
		}

		__netif_tx_unlock(netdev_txq);
	}

	return 0;
}

bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
	u16 hw_comp_cons, sw_comp_cons;

	/* Tell compiler that status block fields can change */
	barrier();

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	return hw_comp_cons != sw_comp_cons;
}

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;
}

/* This function reuses the buffer (from an offset) from the
 * consumer index to the producer index in the bd ring
 */
static inline void qede_reuse_page(struct qede_rx_queue *rxq,
				   struct sw_rx_data *curr_cons)
{
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	*curr_prod = *curr_cons;

	new_mapping = curr_prod->mapping + curr_prod->page_offset;

	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
					  rxq->rx_headroom);

	rxq->sw_rx_prod++;
	curr_cons->data = NULL;
}

/* In case of allocation failures reuse buffers
 * from consumer index to produce buffers for firmware
 */
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
{
	struct sw_rx_data *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
		qede_reuse_page(rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);
	}
}

static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
					 struct sw_rx_data *curr_cons)
{
	/* Move to the next segment in the page */
	curr_cons->page_offset += rxq->rx_buf_seg_size;

	if (curr_cons->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			/* Since we failed to allocate new buffer
			 * current buffer can be used again.
			 */
			curr_cons->page_offset -= rxq->rx_buf_seg_size;

			return -ENOMEM;
		}

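		/* Page fully consumed and a replacement was posted; unmap
		 * it so the stack can take ownership of the data.
		 */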
		dma_unmap_page(rxq->dev, curr_cons->mapping,
			       PAGE_SIZE, rxq->data_direction);
	} else {
		/* Increment refcount of the page as we don't want the
		 * network stack to take ownership of the page, which can
		 * be recycled multiple times by the driver.
		 */
		page_ref_inc(curr_cons->data);
		qede_reuse_page(rxq, curr_cons);
	}

	return 0;
}

void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);
}

static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	enum rss_hash_type htype;
	u32 hash = 0;

	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
	if (htype) {
		hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
			     (htype == RSS_HASH_TYPE_IPV6)) ?
			    PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
		hash = le32_to_cpu(rss_hash);
	}
	skb_set_hash(skb, hash, hash_type);
}

static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{
	skb_checksum_none_assert(skb);

	if (csum_flag & QEDE_CSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
		skb->csum_level = 1;
		skb->encapsulation = 1;
	}
}

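/* Hand a completed skb (with any VLAN tag restored) to GRO. */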
static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct qede_rx_queue *rxq,
				    struct sk_buff *skb, u16 vlan_tag)
{
	if (vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&fp->napi, skb);
}

static void qede_set_gro_params(struct qede_dev *edev,
				struct sk_buff *skb,
				struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);

	if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
	    PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
				    cqe->header_len;
}

static int qede_fill_frag_skb(struct qede_dev *edev,
			      struct qede_rx_queue *rxq,
			      u8 tpa_agg_index, u16 len_on_bd)
{
	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
							 NUM_RX_BDS_MAX];
	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
	struct sk_buff *skb = tpa_info->skb;

	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
		goto out;

	/* Add one frag and update the appropriate fields in the skb */
	skb_fill_page_desc(skb, tpa_info->frag_id++,
			   current_bd->data,
			   current_bd->page_offset + rxq->rx_headroom,
			   len_on_bd);

	if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
		/* Incr page ref count to reuse on allocation failure
		 * so that it doesn't get freed while freeing SKB.
		 */
		page_ref_inc(current_bd->data);
		goto out;
	}

	qede_rx_bd_ring_consume(rxq);

	skb->data_len += len_on_bd;
	skb->truesize += rxq->rx_buf_seg_size;
	skb->len += len_on_bd;

	return 0;

out:
	tpa_info->state = QEDE_AGG_STATE_ERROR;
	qede_recycle_rx_bd_ring(rxq, 1);

	return -ENOMEM;
}

static bool qede_tunn_exist(u16 flag)
{
	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
}

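/* Translate the CQE parsing flags of a tunnelled packet into QEDE_CSUM_*
 * bits; returns QEDE_CSUM_ERROR if a validated checksum turned out bad.
 */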
static u8 qede_check_tunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 tcsum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return QEDE_CSUM_UNNECESSARY | tcsum;
}

static inline struct sk_buff *
qede_build_skb(struct qede_rx_queue *rxq,
	       struct sw_rx_data *bd, u16 len, u16 pad)
{
	struct sk_buff *skb;
	void *buf;

	buf = page_address(bd->data) + bd->page_offset;
	skb = build_skb(buf, rxq->rx_buf_seg_size);

	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, pad);
	skb_put(skb, len);

	return skb;
}

static struct sk_buff *
qede_tpa_rx_build_skb(struct qede_dev *edev,
		      struct qede_rx_queue *rxq,
		      struct sw_rx_data *bd, u16 len, u16 pad,
		      bool alloc_skb)
{
	struct sk_buff *skb;

	skb = qede_build_skb(rxq, bd, len, pad);
	bd->page_offset += rxq->rx_buf_seg_size;

	if (bd->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			DP_NOTICE(edev,
				  "Failed to allocate RX buffer for tpa start\n");
			bd->page_offset -= rxq->rx_buf_seg_size;
			page_ref_inc(bd->data);
			dev_kfree_skb_any(skb);
			return NULL;
		}
	} else {
		page_ref_inc(bd->data);
		qede_reuse_page(rxq, bd);
	}

	/* We've consumed the first BD and prepared an SKB */
	qede_rx_bd_ring_consume(rxq);

	return skb;
}

static struct sk_buff *
qede_rx_build_skb(struct qede_dev *edev,
		  struct qede_rx_queue *rxq,
		  struct sw_rx_data *bd, u16 len, u16 pad)
{
	struct sk_buff *skb = NULL;

	/* For smaller frames we still allocate an skb and memcpy the
	 * data, benefiting from reusing the page segment instead of
	 * un-mapping it.
	 */
	if ((len + pad <= edev->rx_copybreak)) {
		unsigned int offset = bd->page_offset + pad;

		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
		if (unlikely(!skb))
			return NULL;

		skb_reserve(skb, pad);
		skb_put_data(skb, page_address(bd->data) + offset, len);
		qede_reuse_page(rxq, bd);
		goto out;
	}

	skb = qede_build_skb(rxq, bd, len, pad);

	if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
		/* Incr page ref count to reuse on allocation failure so
		 * that it doesn't get freed while freeing SKB [as its
		 * already mapped there].
		 */
		page_ref_inc(bd->data);
		dev_kfree_skb_any(skb);
		return NULL;
	}
out:
	/* We've consumed the first BD and prepared an SKB */
	qede_rx_bd_ring_consume(rxq);

	return skb;
}

static void qede_tpa_start(struct qede_dev *edev,
			   struct qede_rx_queue *rxq,
			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	struct sw_rx_data *sw_rx_data_cons;
	u16 pad;

	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
	pad = cqe->placement_offset + rxq->rx_headroom;

	tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
					      le16_to_cpu(cqe->len_on_first_bd),
					      pad, false);
	tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset;
	tpa_info->buffer.mapping = sw_rx_data_cons->mapping;

	if (unlikely(!tpa_info->skb)) {
		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");

		/* Consume from ring but do not produce, since this buffer
		 * might still be used by FW; it will be re-used at TPA end.
		 */
		tpa_info->tpa_start_fail = true;
		qede_rx_bd_ring_consume(rxq);
		tpa_info->state = QEDE_AGG_STATE_ERROR;
		goto cons_buf;
	}

	tpa_info->frag_id = 0;
	tpa_info->state = QEDE_AGG_STATE_START;

	if ((le16_to_cpu(cqe->pars_flags.flags) >>
	     PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
	    PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
		tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	else
		tpa_info->vlan_tag = 0;

	qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);

	/* This is needed in order to enable forwarding support */
	qede_set_gro_params(edev, tpa_info->skb, cqe);

cons_buf: /* We still need to handle bd_len_list to consume buffers */
	if (likely(cqe->bw_ext_bd_len_list[0]))
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->bw_ext_bd_len_list[0]));

	if (unlikely(cqe->bw_ext_bd_len_list[1])) {
		DP_ERR(edev,
		       "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
		tpa_info->state = QEDE_AGG_STATE_ERROR;
	}
}

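/* For FW-aggregated (GRO) packets, recompute the TCP pseudo-header
 * checksum and let tcp_gro_complete() fill in the GRO metadata.
 */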
#ifdef CONFIG_INET
static void qede_gro_ip_csum(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);

	tcp_gro_complete(skb);
}

static void qede_gro_ipv6_csum(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	tcp_gro_complete(skb);
}
#endif

static void qede_gro_receive(struct qede_dev *edev,
			     struct qede_fastpath *fp,
			     struct sk_buff *skb,
			     u16 vlan_tag)
{
	/* FW can send a single MTU-sized packet from the gro flow due to
	 * aggregation timeout/last segment etc., which is not expected to
	 * be a gro packet. If an skb has zero frags then simply push it
	 * into the stack as a non-gso skb.
	 */
	if (unlikely(!skb->data_len)) {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
		goto send_skb;
	}

#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		skb_reset_network_header(skb);

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			qede_gro_ip_csum(skb);
			break;
		case htons(ETH_P_IPV6):
			qede_gro_ipv6_csum(skb);
			break;
		default:
			DP_ERR(edev,
			       "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
			       ntohs(skb->protocol));
		}
	}
#endif

send_skb:
	skb_record_rx_queue(skb, fp->rxq->rxq_id);
	qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
}

static inline void qede_tpa_cont(struct qede_dev *edev,
				 struct qede_rx_queue *rxq,
				 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
	int i;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));

	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA cont with more than a single len_list entry\n");
}

static int qede_tpa_end(struct qede_dev *edev,
			struct qede_fastpath *fp,
			struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_agg_info *tpa_info;
	struct sk_buff *skb;
	int i;

	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	skb = tpa_info->skb;

	if (tpa_info->buffer.page_offset == PAGE_SIZE)
		dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
			       PAGE_SIZE, rxq->data_direction);

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));
	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA end with more than a single len_list entry\n");

	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
		goto err;

	/* Sanity */
	if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
		DP_ERR(edev,
		       "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
		       cqe->num_of_bds, tpa_info->frag_id);
	if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
		DP_ERR(edev,
		       "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
		       le16_to_cpu(cqe->total_packet_len), skb->len);

	/* Finalize the SKB */
	skb->protocol = eth_type_trans(skb, edev->ndev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);

	qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);

	tpa_info->state = QEDE_AGG_STATE_NONE;

	return 1;
err:
	tpa_info->state = QEDE_AGG_STATE_NONE;

	if (tpa_info->tpa_start_fail) {
		qede_reuse_page(rxq, &tpa_info->buffer);
		tpa_info->tpa_start_fail = false;
	}

	dev_kfree_skb_any(tpa_info->skb);
	tpa_info->skb = NULL;
	return 0;
}

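/* Same translation as qede_check_tunn_csum(), for non-tunnelled packets. */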
static u8 qede_check_notunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 csum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return csum;
}

static u8 qede_check_csum(u16 flag)
{
	if (!qede_tunn_exist(flag))
		return qede_check_notunn_csum(flag);
	else
		return qede_check_tunn_csum(flag);
}

static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
				      u16 flag)
{
	u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;

	if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
			     ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
	    (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
		     PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
		return true;

	return false;
}

/* Return true iff packet is to be passed to stack */
static bool qede_rx_xdp(struct qede_dev *edev,
			struct qede_fastpath *fp,
			struct qede_rx_queue *rxq,
			struct bpf_prog *prog,
			struct sw_rx_data *bd,
			struct eth_fast_path_rx_reg_cqe *cqe,
			u16 *data_offset, u16 *len)
{
	struct xdp_buff xdp;
	enum xdp_action act;

	xdp.data_hard_start = page_address(bd->data);
	xdp.data = xdp.data_hard_start + *data_offset;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = xdp.data + *len;
	xdp.rxq = &rxq->xdp_rxq;
	xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */

	/* Queues always have a full reset currently, so for the time
	 * being until there's atomic program replace just mark read
	 * side for map helpers.
	 */
	rcu_read_lock();
	act = bpf_prog_run_xdp(prog, &xdp);
	rcu_read_unlock();

	/* Recalculate, as XDP might have changed the headers */
	*data_offset = xdp.data - xdp.data_hard_start;
	*len = xdp.data_end - xdp.data;

	if (act == XDP_PASS)
		return true;

	/* Count number of packets not to be passed to stack */
	rxq->xdp_no_pass++;

	switch (act) {
	case XDP_TX:
		/* We need the replacement buffer before transmit. */
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			qede_recycle_rx_bd_ring(rxq, 1);

			trace_xdp_exception(edev->ndev, prog, act);
			break;
		}

		/* Now if there's a transmission problem, we'd still have to
		 * throw away the current buffer, as the replacement was
		 * already allocated.
		 */
		if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
					   *data_offset, *len, bd->data,
					   NULL))) {
			dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
				       rxq->data_direction);
			__free_page(bd->data);

			trace_xdp_exception(edev->ndev, prog, act);
		} else {
			dma_sync_single_for_device(rxq->dev,
						   bd->mapping + *data_offset,
						   *len, rxq->data_direction);
			fp->xdp_xmit |= QEDE_XDP_TX;
		}

		/* Regardless, we've consumed an Rx BD */
		qede_rx_bd_ring_consume(rxq);
		break;
	case XDP_REDIRECT:
		/* We need the replacement buffer before transmit. */
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			qede_recycle_rx_bd_ring(rxq, 1);

			trace_xdp_exception(edev->ndev, prog, act);
			break;
		}

		dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
			       rxq->data_direction);

		if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog)))
			DP_NOTICE(edev, "Failed to redirect the packet\n");
		else
			fp->xdp_xmit |= QEDE_XDP_REDIRECT;

		qede_rx_bd_ring_consume(rxq);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(edev->ndev, prog, act);
		fallthrough;
	case XDP_DROP:
		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
	}

	return false;
}

static int qede_rx_build_jumbo(struct qede_dev *edev,
			       struct qede_rx_queue *rxq,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_reg_cqe *cqe,
			       u16 first_bd_len)
{
	u16 pkt_len = le16_to_cpu(cqe->pkt_len);
	struct sw_rx_data *bd;
	u16 bd_cons_idx;
	u8 num_frags;

	pkt_len -= first_bd_len;

	/* We've already used one BD for the SKB. Now take care of the rest */
	for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
		u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
		    pkt_len;

		if (unlikely(!cur_size)) {
			DP_ERR(edev,
			       "Still got %d BDs for mapping jumbo, but length became 0\n",
			       num_frags);
			goto out;
		}

		/* We need a replacement buffer for each BD */
		if (unlikely(qede_alloc_rx_buffer(rxq, true)))
			goto out;

		/* Now that we've allocated the replacement buffer,
		 * we can safely consume the next BD and map it to the SKB.
		 */
		bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
		bd = &rxq->sw_rx_ring[bd_cons_idx];
		qede_rx_bd_ring_consume(rxq);

		dma_unmap_page(rxq->dev, bd->mapping,
			       PAGE_SIZE, DMA_FROM_DEVICE);

		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
				   bd->data, rxq->rx_headroom, cur_size);

		skb->truesize += PAGE_SIZE;
		skb->data_len += cur_size;
		skb->len += cur_size;
		pkt_len -= cur_size;
	}

	if (unlikely(pkt_len))
		DP_ERR(edev,
		       "Mapped all BDs of jumbo, but still have %d bytes\n",
		       pkt_len);

out:
	return num_frags;
}

static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
				   struct qede_fastpath *fp,
				   struct qede_rx_queue *rxq,
				   union eth_rx_cqe *cqe,
				   enum eth_rx_cqe_type type)
{
	switch (type) {
	case ETH_RX_CQE_TYPE_TPA_START:
		qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
		return 0;
	case ETH_RX_CQE_TYPE_TPA_CONT:
		qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
		return 0;
	case ETH_RX_CQE_TYPE_TPA_END:
		return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
	default:
		return 0;
	}
}

static int qede_rx_process_cqe(struct qede_dev *edev,
			       struct qede_fastpath *fp,
			       struct qede_rx_queue *rxq)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	u16 len, pad, bd_cons_idx, parse_flag;
	enum eth_rx_cqe_type cqe_type;
	union eth_rx_cqe *cqe;
	struct sw_rx_data *bd;
	struct sk_buff *skb;
	__le16 flags;
	u8 csum_flag;

	/* Get the CQE from the completion ring */
	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
	cqe_type = cqe->fast_path_regular.type;

	/* Process an unlikely slowpath event */
	if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
		struct eth_slow_path_rx_cqe *sp_cqe;

		sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
		edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
		return 0;
	}

	/* Handle TPA cqes */
	if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
		return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);

	/* Get the data from the SW ring; Consume it only after it's evident
	 * we wouldn't recycle it.
	 */
	bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
	bd = &rxq->sw_rx_ring[bd_cons_idx];

	fp_cqe = &cqe->fast_path_regular;
	len = le16_to_cpu(fp_cqe->len_on_first_bd);
	pad = fp_cqe->placement_offset + rxq->rx_headroom;

	/* Run eBPF program if one is attached */
	if (xdp_prog)
		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
				 &pad, &len))
			return 0;

	/* If this is an error packet then drop it */
	flags = cqe->fast_path_regular.pars_flags.flags;
	parse_flag = le16_to_cpu(flags);

	csum_flag = qede_check_csum(parse_flag);
	if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
		if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag))
			rxq->rx_ip_frags++;
		else
			rxq->rx_hw_errors++;
	}

	/* Basic validation passed; Need to prepare an SKB. This would also
	 * guarantee to finally consume the first BD upon success.
	 */
	skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
	if (!skb) {
		rxq->rx_alloc_errors++;
		qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
		return 0;
	}

	/* In case of a jumbo packet, several PAGE_SIZEd buffers will be
	 * pointed to by a single cqe.
	 */
	if (fp_cqe->bd_num > 1) {
		u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
							 fp_cqe, len);

		if (unlikely(unmapped_frags > 0)) {
			qede_recycle_rx_bd_ring(rxq, unmapped_frags);
			dev_kfree_skb_any(skb);
			return 0;
		}
	}

	/* The SKB contains all the data. Now prepare meta-magic */
	skb->protocol = eth_type_trans(skb, edev->ndev);
	qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
	qede_set_skb_csum(skb, csum_flag);
	skb_record_rx_queue(skb, rxq->rxq_id);
	qede_ptp_record_rx_ts(edev, cqe, skb);

	/* SKB is prepared - pass it to stack */
	qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));

	return 1;
}

static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_dev *edev = fp->edev;
	int work_done = 0, rcv_pkts = 0;
	u16 hw_comp_cons, sw_comp_cons;

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from doing speculative reads of CQE
	 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
	 * read before it is written by FW, then FW writes CQE and SB, and then
	 * the CPU reads the hw_comp_cons, it will use an old CQE.
	 */
	rmb();

	/* Loop to complete all indicated BDs */
	while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
		rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
		work_done++;
	}

	rxq->rcv_pkts += rcv_pkts;

	/* Allocate replacement buffers */
	while (rxq->num_rx_buffers - rxq->filled_buffers)
		if (qede_alloc_rx_buffer(rxq, false))
			break;

	/* Update producers */
	qede_update_rx_prod(edev, rxq);

	return work_done;
}

static bool qede_poll_is_more_work(struct qede_fastpath *fp)
{
	qed_sb_update_sb_idx(fp->sb_info);

	/* *_has_*_work() reads the status block, thus we need to ensure that
	 * status block indices have been actually read (qed_sb_update_sb_idx)
	 * prior to this check (*_has_*_work) so that we won't write the
	 * "newer" value of the status block to HW (if there was a DMA right
	 * after qede_has_rx_work and if there is no rmb, the memory reading
	 * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb).
	 * In this case there will never be another interrupt until there is
	 * another update of the status block, while there is still unhandled
	 * work.
	 */
	rmb();

	if (likely(fp->type & QEDE_FASTPATH_RX))
		if (qede_has_rx_work(fp->rxq))
			return true;

	if (fp->type & QEDE_FASTPATH_XDP)
		if (qede_txq_has_work(fp->xdp_tx))
			return true;

	if (likely(fp->type & QEDE_FASTPATH_TX)) {
		int cos;

		for_each_cos_in_txq(fp->edev, cos) {
			if (qede_txq_has_work(&fp->txq[cos]))
				return true;
		}
	}

	return false;
}

/*********************
 * NDO & API related *
 *********************/
int qede_poll(struct napi_struct *napi, int budget)
{
	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
						napi);
	struct qede_dev *edev = fp->edev;
	int rx_work_done = 0;
	u16 xdp_prod;

	fp->xdp_xmit = 0;

	if (likely(fp->type & QEDE_FASTPATH_TX)) {
		int cos;

		for_each_cos_in_txq(fp->edev, cos) {
			if (qede_txq_has_work(&fp->txq[cos]))
				qede_tx_int(edev, &fp->txq[cos]);
		}
	}

	if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
		qede_xdp_tx_int(edev, fp->xdp_tx);

	rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
			qede_has_rx_work(fp->rxq)) ?
			qede_rx_int(fp, budget) : 0;

	if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
		xdp_do_flush();

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (rx_work_done < budget || !budget) {
		if (!qede_poll_is_more_work(fp)) {
			napi_complete_done(napi, rx_work_done);

			/* Update and reenable interrupts */
			qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
		} else {
			rx_work_done = budget;
		}
	}

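	/* Kick the XDP Tx doorbell once per poll rather than per packet. */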
	if (fp->xdp_xmit & QEDE_XDP_TX) {
		xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);

		fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
		qede_update_tx_producer(fp->xdp_tx);
	}

	return rx_work_done;
}

irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
{
	struct qede_fastpath *fp = fp_cookie;

	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);

	napi_schedule_irqoff(&fp->napi);
	return IRQ_HANDLED;
}

/* Main transmit function */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct netdev_queue *netdev_txq;
	struct qede_tx_queue *txq;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_2nd_bd *second_bd = NULL;
	struct eth_tx_3rd_bd *third_bd = NULL;
	struct eth_tx_bd *tx_data_bd = NULL;
	u16 txq_index, val = 0;
	u8 nbd = 0;
	dma_addr_t mapping;
	int rc, frag_idx = 0, ipv6_ext = 0;
	u8 xmit_type;
	u16 idx;
	u16 hlen;
	bool data_split = false;

	/* Get tx-queue context and netdev index */
	txq_index = skb_get_queue_mapping(skb);
	WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc);
	txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index);
	netdev_txq = netdev_get_tx_queue(ndev, txq_index);

	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));

	xmit_type = qede_xmit_type(skb, &ipv6_ext);

#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
	if (qede_pkt_req_lin(skb, xmit_type)) {
		if (skb_linearize(skb)) {
			txq->tx_mem_alloc_err++;

			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod;
	txq->sw_tx_ring.skbs[idx].skb = skb;
	first_bd = (struct eth_tx_1st_bd *)
		   qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		qede_ptp_tx_ts(edev, skb);

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(txq->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(txq->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		qede_free_failed_tx_pkt(txq, first_bd, 0, false);
		qede_update_tx_producer(txq);
		return NETDEV_TX_OK;
	}
	nbd++;
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* In case there is IPv6 with extension headers or LSO we need 2nd and
	 * 3rd BDs.
	 */
	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
		second_bd = (struct eth_tx_2nd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(second_bd, 0, sizeof(*second_bd));

		nbd++;
		third_bd = (struct eth_tx_3rd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(third_bd, 0, sizeof(*third_bd));

		nbd++;
		/* We need to fill in additional data in second_bd... */
		tx_data_bd = (struct eth_tx_bd *)second_bd;
	}

	if (skb_vlan_tag_present(skb)) {
		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
	}

	/* Fill the parsing flags & params according to the requested offload */
	if (xmit_type & XMIT_L4_CSUM) {
		/* We don't re-calculate IP checksum as it is already done by
		 * the upper stack
		 */
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

		if (xmit_type & XMIT_ENC) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;

			val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
		}

		/* Legacy FW had flipped behavior in regard to this bit -
		 * i.e., it needed to be set in order to prevent FW from
		 * touching encapsulated packets when it didn't need to.
		 */
		if (unlikely(txq->is_legacy))
			val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);

		/* If the packet is IPv6 with extension header, indicate that
		 * to FW and pass a few params, since the device cracker
		 * doesn't support parsing IPv6 with extension header/s.
		 */
		if (unlikely(ipv6_ext))
			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
	}

	if (xmit_type & XMIT_LSO) {
		first_bd->data.bd_flags.bitfields |=
			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
		third_bd->data.lso_mss =
			cpu_to_le16(skb_shinfo(skb)->gso_size);

		if (unlikely(xmit_type & XMIT_ENC)) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;

			if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
				u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;

				first_bd->data.bd_flags.bitfields |= 1 << tmp;
			}
			hlen = qede_get_skb_hlen(skb, true);
		} else {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			hlen = qede_get_skb_hlen(skb, false);
		}

		/* @@@TBD - if will not be removed need to check */
		third_bd->data.bitfields |=
			cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);

		/* Make life easier for FW guys who can't deal with header and
		 * data on same BD. If we need to split, use the second bd...
		 */
		if (unlikely(skb_headlen(skb) > hlen)) {
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "TSO split header size is %d (%x:%x)\n",
				   first_bd->nbytes, first_bd->addr.hi,
				   first_bd->addr.lo);

			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
					   le32_to_cpu(first_bd->addr.lo)) +
					   hlen;

			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
					      le16_to_cpu(first_bd->nbytes) -
					      hlen);

			/* this marks the BD as one that has no
			 * individual mapping
			 */
			txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;

			first_bd->nbytes = cpu_to_le16(hlen);

			tx_data_bd = (struct eth_tx_bd *)third_bd;
			data_split = true;
		}
	} else {
		if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
			DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
			qede_free_failed_tx_pkt(txq, first_bd, 0, false);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}

		val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
			 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
	}

	first_bd->data.bitfields = cpu_to_le16(val);

	/* Handle fragmented skb */
	/* special handle for frags inside 2nd and 3rd bds.. */
	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}

		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
			tx_data_bd = (struct eth_tx_bd *)third_bd;
		else
			tx_data_bd = NULL;

		frag_idx++;
	}

	/* map last frags into 4th, 5th .... */
	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
		tx_data_bd = (struct eth_tx_bd *)
			     qed_chain_produce(&txq->tx_pbl);

		memset(tx_data_bd, 0, sizeof(*tx_data_bd));

		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}
	}

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = nbd;

	netdev_tx_sent_queue(netdev_txq, skb->len);

	skb_tx_timestamp(skb);

	/* Advance packet producer only before sending the packet since mapping
	 * of pages may fail.
	 */
	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;

	/* 'next page' entries are counted in the producer value */
	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

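	/* Ring the doorbell only on the last skb of an xmit_more batch,
	 * unless the stack already considers the queue stopped.
	 */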
	if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
		qede_update_tx_producer(txq);

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
		      < (MAX_SKB_FRAGS + 1))) {
		if (netdev_xmit_more())
			qede_update_tx_producer(txq);

		netif_tx_stop_queue(netdev_txq);
		txq->stopped_cnt++;
		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
			   "Stop queue was called\n");
		/* paired memory barrier is in qede_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
		     (MAX_SKB_FRAGS + 1)) &&
		    (edev->state == QEDE_STATE_OPEN)) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "Wake queue was called\n");
		}
	}

	return NETDEV_TX_OK;
}

u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
		      struct net_device *sb_dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	int total_txq;

	total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;

	return QEDE_TSS_COUNT(edev) ?
		netdev_pick_tx(dev, skb, NULL) % total_txq :  0;
}

/* 8B udp header + 8B base tunnel header + 32B option length */
#define QEDE_MAX_TUN_HDR_LEN 48

netdev_features_t qede_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	if (skb->encapsulation) {
		u8 l4_proto = 0;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			l4_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			l4_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return features;
		}

		/* Disable offloads for geneve tunnels, as HW can't parse
		 * a geneve header with option length greater than 32 bytes,
		 * and disable offloads for ports that are not offloaded.
		 */
		if (l4_proto == IPPROTO_UDP) {
			struct qede_dev *edev = netdev_priv(dev);
			u16 hdrlen, vxln_port, gnv_port;

			hdrlen = QEDE_MAX_TUN_HDR_LEN;
			vxln_port = edev->vxlan_dst_port;
			gnv_port = edev->geneve_dst_port;

			if ((skb_inner_mac_header(skb) -
			     skb_transport_header(skb)) > hdrlen ||
			     (ntohs(udp_hdr(skb)->dest) != vxln_port &&
			      ntohs(udp_hdr(skb)->dest) != gnv_port))
				return features & ~(NETIF_F_CSUM_MASK |
						    NETIF_F_GSO_MASK);
		} else if (l4_proto == IPPROTO_IPIP) {
			/* IPIP tunnels are unknown to the device, or at least
			 * not natively supported; their offloads can't be done
			 * trivially, so disable them for such skbs.
			 */
			return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
		}
	}

	return features;
}