18c2ecf20Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0
28c2ecf20Sopenharmony_ci/* Copyright(c) 2013 - 2019 Intel Corporation. */
38c2ecf20Sopenharmony_ci
48c2ecf20Sopenharmony_ci#include <linux/types.h>
58c2ecf20Sopenharmony_ci#include <linux/module.h>
68c2ecf20Sopenharmony_ci#include <net/ipv6.h>
78c2ecf20Sopenharmony_ci#include <net/ip.h>
88c2ecf20Sopenharmony_ci#include <net/tcp.h>
98c2ecf20Sopenharmony_ci#include <linux/if_macvlan.h>
108c2ecf20Sopenharmony_ci#include <linux/prefetch.h>
118c2ecf20Sopenharmony_ci
128c2ecf20Sopenharmony_ci#include "fm10k.h"
138c2ecf20Sopenharmony_ci
#define DRV_SUMMARY	"Intel(R) Ethernet Switch Host Interface Driver"
/* non-static: other translation units of the driver reference this name */
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
	"Copyright(c) 2013 - 2019 Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");

/* single workqueue for entire fm10k driver */
struct workqueue_struct *fm10k_workqueue;
268c2ecf20Sopenharmony_ci
278c2ecf20Sopenharmony_ci/**
288c2ecf20Sopenharmony_ci * fm10k_init_module - Driver Registration Routine
298c2ecf20Sopenharmony_ci *
308c2ecf20Sopenharmony_ci * fm10k_init_module is the first routine called when the driver is
318c2ecf20Sopenharmony_ci * loaded.  All it does is register with the PCI subsystem.
328c2ecf20Sopenharmony_ci **/
338c2ecf20Sopenharmony_cistatic int __init fm10k_init_module(void)
348c2ecf20Sopenharmony_ci{
358c2ecf20Sopenharmony_ci	int ret;
368c2ecf20Sopenharmony_ci
378c2ecf20Sopenharmony_ci	pr_info("%s\n", fm10k_driver_string);
388c2ecf20Sopenharmony_ci	pr_info("%s\n", fm10k_copyright);
398c2ecf20Sopenharmony_ci
408c2ecf20Sopenharmony_ci	/* create driver workqueue */
418c2ecf20Sopenharmony_ci	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
428c2ecf20Sopenharmony_ci					  fm10k_driver_name);
438c2ecf20Sopenharmony_ci	if (!fm10k_workqueue)
448c2ecf20Sopenharmony_ci		return -ENOMEM;
458c2ecf20Sopenharmony_ci
468c2ecf20Sopenharmony_ci	fm10k_dbg_init();
478c2ecf20Sopenharmony_ci
488c2ecf20Sopenharmony_ci	ret = fm10k_register_pci_driver();
498c2ecf20Sopenharmony_ci	if (ret) {
508c2ecf20Sopenharmony_ci		fm10k_dbg_exit();
518c2ecf20Sopenharmony_ci		destroy_workqueue(fm10k_workqueue);
528c2ecf20Sopenharmony_ci	}
538c2ecf20Sopenharmony_ci
548c2ecf20Sopenharmony_ci	return ret;
558c2ecf20Sopenharmony_ci}
568c2ecf20Sopenharmony_cimodule_init(fm10k_init_module);
578c2ecf20Sopenharmony_ci
/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.  Teardown happens in reverse order of fm10k_init_module():
 * PCI driver first, then debugfs, then the driver workqueue.
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();

	fm10k_dbg_exit();

	/* destroy driver workqueue */
	destroy_workqueue(fm10k_workqueue);
}
module_exit(fm10k_exit_module);
748c2ecf20Sopenharmony_ci
758c2ecf20Sopenharmony_cistatic bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
768c2ecf20Sopenharmony_ci				    struct fm10k_rx_buffer *bi)
778c2ecf20Sopenharmony_ci{
788c2ecf20Sopenharmony_ci	struct page *page = bi->page;
798c2ecf20Sopenharmony_ci	dma_addr_t dma;
808c2ecf20Sopenharmony_ci
818c2ecf20Sopenharmony_ci	/* Only page will be NULL if buffer was consumed */
828c2ecf20Sopenharmony_ci	if (likely(page))
838c2ecf20Sopenharmony_ci		return true;
848c2ecf20Sopenharmony_ci
858c2ecf20Sopenharmony_ci	/* alloc new page for storage */
868c2ecf20Sopenharmony_ci	page = dev_alloc_page();
878c2ecf20Sopenharmony_ci	if (unlikely(!page)) {
888c2ecf20Sopenharmony_ci		rx_ring->rx_stats.alloc_failed++;
898c2ecf20Sopenharmony_ci		return false;
908c2ecf20Sopenharmony_ci	}
918c2ecf20Sopenharmony_ci
928c2ecf20Sopenharmony_ci	/* map page for use */
938c2ecf20Sopenharmony_ci	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
948c2ecf20Sopenharmony_ci
958c2ecf20Sopenharmony_ci	/* if mapping failed free memory back to system since
968c2ecf20Sopenharmony_ci	 * there isn't much point in holding memory we can't use
978c2ecf20Sopenharmony_ci	 */
988c2ecf20Sopenharmony_ci	if (dma_mapping_error(rx_ring->dev, dma)) {
998c2ecf20Sopenharmony_ci		__free_page(page);
1008c2ecf20Sopenharmony_ci
1018c2ecf20Sopenharmony_ci		rx_ring->rx_stats.alloc_failed++;
1028c2ecf20Sopenharmony_ci		return false;
1038c2ecf20Sopenharmony_ci	}
1048c2ecf20Sopenharmony_ci
1058c2ecf20Sopenharmony_ci	bi->dma = dma;
1068c2ecf20Sopenharmony_ci	bi->page = page;
1078c2ecf20Sopenharmony_ci	bi->page_offset = 0;
1088c2ecf20Sopenharmony_ci
1098c2ecf20Sopenharmony_ci	return true;
1108c2ecf20Sopenharmony_ci}
1118c2ecf20Sopenharmony_ci
/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Walks the ring from next_to_use, populating each descriptor with the
 * DMA address of a mapped page, then advances the tail register so the
 * hardware can see the new descriptors.
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
	union fm10k_rx_desc *rx_desc;
	struct fm10k_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = FM10K_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer[i];
	/* bias i negative (u16 modular arithmetic) so the wrap test in the
	 * loop below is a cheap compare against zero
	 */
	i -= rx_ring->count;

	do {
		if (!fm10k_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			/* reached the end of the ring; wrap back to entry 0 */
			rx_desc = FM10K_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->d.staterr = 0;

		cleaned_count--;
	} while (cleaned_count);

	/* undo the earlier bias to recover the true ring index */
	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}
1758c2ecf20Sopenharmony_ci
1768c2ecf20Sopenharmony_ci/**
1778c2ecf20Sopenharmony_ci * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
1788c2ecf20Sopenharmony_ci * @rx_ring: rx descriptor ring to store buffers on
1798c2ecf20Sopenharmony_ci * @old_buff: donor buffer to have page reused
1808c2ecf20Sopenharmony_ci *
1818c2ecf20Sopenharmony_ci * Synchronizes page for reuse by the interface
1828c2ecf20Sopenharmony_ci **/
1838c2ecf20Sopenharmony_cistatic void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
1848c2ecf20Sopenharmony_ci				struct fm10k_rx_buffer *old_buff)
1858c2ecf20Sopenharmony_ci{
1868c2ecf20Sopenharmony_ci	struct fm10k_rx_buffer *new_buff;
1878c2ecf20Sopenharmony_ci	u16 nta = rx_ring->next_to_alloc;
1888c2ecf20Sopenharmony_ci
1898c2ecf20Sopenharmony_ci	new_buff = &rx_ring->rx_buffer[nta];
1908c2ecf20Sopenharmony_ci
1918c2ecf20Sopenharmony_ci	/* update, and store next to alloc */
1928c2ecf20Sopenharmony_ci	nta++;
1938c2ecf20Sopenharmony_ci	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1948c2ecf20Sopenharmony_ci
1958c2ecf20Sopenharmony_ci	/* transfer page from old buffer to new buffer */
1968c2ecf20Sopenharmony_ci	*new_buff = *old_buff;
1978c2ecf20Sopenharmony_ci
1988c2ecf20Sopenharmony_ci	/* sync the buffer for use by the device */
1998c2ecf20Sopenharmony_ci	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
2008c2ecf20Sopenharmony_ci					 old_buff->page_offset,
2018c2ecf20Sopenharmony_ci					 FM10K_RX_BUFSZ,
2028c2ecf20Sopenharmony_ci					 DMA_FROM_DEVICE);
2038c2ecf20Sopenharmony_ci}
2048c2ecf20Sopenharmony_ci
2058c2ecf20Sopenharmony_cistatic inline bool fm10k_page_is_reserved(struct page *page)
2068c2ecf20Sopenharmony_ci{
2078c2ecf20Sopenharmony_ci	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
2088c2ecf20Sopenharmony_ci}
2098c2ecf20Sopenharmony_ci
2108c2ecf20Sopenharmony_cistatic bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
2118c2ecf20Sopenharmony_ci				    struct page *page,
2128c2ecf20Sopenharmony_ci				    unsigned int __maybe_unused truesize)
2138c2ecf20Sopenharmony_ci{
2148c2ecf20Sopenharmony_ci	/* avoid re-using remote pages */
2158c2ecf20Sopenharmony_ci	if (unlikely(fm10k_page_is_reserved(page)))
2168c2ecf20Sopenharmony_ci		return false;
2178c2ecf20Sopenharmony_ci
2188c2ecf20Sopenharmony_ci#if (PAGE_SIZE < 8192)
2198c2ecf20Sopenharmony_ci	/* if we are only owner of page we can reuse it */
2208c2ecf20Sopenharmony_ci	if (unlikely(page_count(page) != 1))
2218c2ecf20Sopenharmony_ci		return false;
2228c2ecf20Sopenharmony_ci
2238c2ecf20Sopenharmony_ci	/* flip page offset to other buffer */
2248c2ecf20Sopenharmony_ci	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
2258c2ecf20Sopenharmony_ci#else
2268c2ecf20Sopenharmony_ci	/* move offset up to the next cache line */
2278c2ecf20Sopenharmony_ci	rx_buffer->page_offset += truesize;
2288c2ecf20Sopenharmony_ci
2298c2ecf20Sopenharmony_ci	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
2308c2ecf20Sopenharmony_ci		return false;
2318c2ecf20Sopenharmony_ci#endif
2328c2ecf20Sopenharmony_ci
2338c2ecf20Sopenharmony_ci	/* Even if we own the page, we are not allowed to use atomic_set()
2348c2ecf20Sopenharmony_ci	 * This would break get_page_unless_zero() users.
2358c2ecf20Sopenharmony_ci	 */
2368c2ecf20Sopenharmony_ci	page_ref_inc(page);
2378c2ecf20Sopenharmony_ci
2388c2ecf20Sopenharmony_ci	return true;
2398c2ecf20Sopenharmony_ci}
2408c2ecf20Sopenharmony_ci
/**
 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buffer: buffer containing page to add
 * @size: packet size from rx_desc
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the interface.
 **/
static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
			      unsigned int size,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = FM10K_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, 512);
#endif
	unsigned int pull_len;

	/* non-linear skb (mid-frame buffer): just append another frag */
	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= FM10K_RX_HDR_LEN)) {
		/* small frame: copy everything into the linear area; copy
		 * length is rounded up to sizeof(long) for memcpy speed,
		 * while __skb_put accounts only 'size' bytes
		 */
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!fm10k_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(skb->dev, va, FM10K_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	/* attach the (remaining) payload as a page fragment */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}
3038c2ecf20Sopenharmony_ci
/* Retrieve the buffer for the current next_to_clean descriptor, allocate
 * an skb if needed, pull the data into it, and either recycle the page
 * back onto the ring or unmap it.  Returns the skb (possibly still
 * incomplete for multi-buffer frames) or NULL on skb allocation failure.
 */
static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int size = le16_to_cpu(rx_desc->w.length);
	struct fm10k_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	/* skb is NULL unless we are continuing a multi-buffer frame */
	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		net_prefetch(page_addr);

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     FM10K_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (fm10k_add_rx_frag(rx_buffer, size, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		fm10k_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;

	return skb;
}
3608c2ecf20Sopenharmony_ci
3618c2ecf20Sopenharmony_cistatic inline void fm10k_rx_checksum(struct fm10k_ring *ring,
3628c2ecf20Sopenharmony_ci				     union fm10k_rx_desc *rx_desc,
3638c2ecf20Sopenharmony_ci				     struct sk_buff *skb)
3648c2ecf20Sopenharmony_ci{
3658c2ecf20Sopenharmony_ci	skb_checksum_none_assert(skb);
3668c2ecf20Sopenharmony_ci
3678c2ecf20Sopenharmony_ci	/* Rx checksum disabled via ethtool */
3688c2ecf20Sopenharmony_ci	if (!(ring->netdev->features & NETIF_F_RXCSUM))
3698c2ecf20Sopenharmony_ci		return;
3708c2ecf20Sopenharmony_ci
3718c2ecf20Sopenharmony_ci	/* TCP/UDP checksum error bit is set */
3728c2ecf20Sopenharmony_ci	if (fm10k_test_staterr(rx_desc,
3738c2ecf20Sopenharmony_ci			       FM10K_RXD_STATUS_L4E |
3748c2ecf20Sopenharmony_ci			       FM10K_RXD_STATUS_L4E2 |
3758c2ecf20Sopenharmony_ci			       FM10K_RXD_STATUS_IPE |
3768c2ecf20Sopenharmony_ci			       FM10K_RXD_STATUS_IPE2)) {
3778c2ecf20Sopenharmony_ci		ring->rx_stats.csum_err++;
3788c2ecf20Sopenharmony_ci		return;
3798c2ecf20Sopenharmony_ci	}
3808c2ecf20Sopenharmony_ci
3818c2ecf20Sopenharmony_ci	/* It must be a TCP or UDP packet with a valid checksum */
3828c2ecf20Sopenharmony_ci	if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
3838c2ecf20Sopenharmony_ci		skb->encapsulation = true;
3848c2ecf20Sopenharmony_ci	else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
3858c2ecf20Sopenharmony_ci		return;
3868c2ecf20Sopenharmony_ci
3878c2ecf20Sopenharmony_ci	skb->ip_summed = CHECKSUM_UNNECESSARY;
3888c2ecf20Sopenharmony_ci
3898c2ecf20Sopenharmony_ci	ring->rx_stats.csum_good++;
3908c2ecf20Sopenharmony_ci}
3918c2ecf20Sopenharmony_ci
3928c2ecf20Sopenharmony_ci#define FM10K_RSS_L4_TYPES_MASK \
3938c2ecf20Sopenharmony_ci	(BIT(FM10K_RSSTYPE_IPV4_TCP) | \
3948c2ecf20Sopenharmony_ci	 BIT(FM10K_RSSTYPE_IPV4_UDP) | \
3958c2ecf20Sopenharmony_ci	 BIT(FM10K_RSSTYPE_IPV6_TCP) | \
3968c2ecf20Sopenharmony_ci	 BIT(FM10K_RSSTYPE_IPV6_UDP))
3978c2ecf20Sopenharmony_ci
3988c2ecf20Sopenharmony_cistatic inline void fm10k_rx_hash(struct fm10k_ring *ring,
3998c2ecf20Sopenharmony_ci				 union fm10k_rx_desc *rx_desc,
4008c2ecf20Sopenharmony_ci				 struct sk_buff *skb)
4018c2ecf20Sopenharmony_ci{
4028c2ecf20Sopenharmony_ci	u16 rss_type;
4038c2ecf20Sopenharmony_ci
4048c2ecf20Sopenharmony_ci	if (!(ring->netdev->features & NETIF_F_RXHASH))
4058c2ecf20Sopenharmony_ci		return;
4068c2ecf20Sopenharmony_ci
4078c2ecf20Sopenharmony_ci	rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
4088c2ecf20Sopenharmony_ci	if (!rss_type)
4098c2ecf20Sopenharmony_ci		return;
4108c2ecf20Sopenharmony_ci
4118c2ecf20Sopenharmony_ci	skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
4128c2ecf20Sopenharmony_ci		     (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
4138c2ecf20Sopenharmony_ci		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
4148c2ecf20Sopenharmony_ci}
4158c2ecf20Sopenharmony_ci
/* Set skb->protocol and the receiving net_device, redirecting the packet
 * to a MACVLAN offload device when the DGLORT maps into the l2_accel
 * table.  Also records the Rx queue or updates macvlan statistics.
 */
static void fm10k_type_trans(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc __maybe_unused *rx_desc,
			     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);

	/* check to see if DGLORT belongs to a MACVLAN */
	if (l2_accel) {
		u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;

		/* idx is u16, so an out-of-range dglort underflows to a
		 * large value and fails the bounds check below
		 */
		idx -= l2_accel->dglort;
		if (idx < l2_accel->size && l2_accel->macvlan[idx])
			dev = l2_accel->macvlan[idx];
		else
			l2_accel = NULL;
	}

	/* Record Rx queue, or update macvlan statistics */
	if (!l2_accel)
		skb_record_rx_queue(skb, rx_ring->queue_index);
	else
		macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true,
				 false);

	skb->protocol = eth_type_trans(skb, dev);
}
4438c2ecf20Sopenharmony_ci
/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 *
 * Returns the packet length for byte accounting.
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	/* sample skb->len before fm10k_type_trans() below — it calls
	 * eth_type_trans(), which presumably pulls the Ethernet header
	 * and shrinks skb->len (NOTE: behavior defined outside this file)
	 */
	unsigned int len = skb->len;

	fm10k_rx_hash(rx_ring, rx_desc, skb);

	fm10k_rx_checksum(rx_ring, rx_desc, skb);

	/* stash descriptor metadata in the skb control block */
	FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;

	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;

	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;

	if (rx_desc->w.vlan) {
		u16 vid = le16_to_cpu(rx_desc->w.vlan);

		/* tag only when the VLAN differs from the ring's default,
		 * or when priority bits are present on the default VLAN
		 */
		if ((vid & VLAN_VID_MASK) != rx_ring->vid)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		else if (vid & VLAN_PRIO_MASK)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vid & VLAN_PRIO_MASK);
	}

	fm10k_type_trans(rx_ring, rx_desc, skb);

	return len;
}
4848c2ecf20Sopenharmony_ci
4858c2ecf20Sopenharmony_ci/**
4868c2ecf20Sopenharmony_ci * fm10k_is_non_eop - process handling of non-EOP buffers
4878c2ecf20Sopenharmony_ci * @rx_ring: Rx ring being processed
4888c2ecf20Sopenharmony_ci * @rx_desc: Rx descriptor for current buffer
4898c2ecf20Sopenharmony_ci *
4908c2ecf20Sopenharmony_ci * This function updates next to clean.  If the buffer is an EOP buffer
4918c2ecf20Sopenharmony_ci * this function exits returning false, otherwise it will place the
4928c2ecf20Sopenharmony_ci * sk_buff in the next buffer to be chained and return true indicating
4938c2ecf20Sopenharmony_ci * that this is in fact a non-EOP buffer.
4948c2ecf20Sopenharmony_ci **/
4958c2ecf20Sopenharmony_cistatic bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
4968c2ecf20Sopenharmony_ci			     union fm10k_rx_desc *rx_desc)
4978c2ecf20Sopenharmony_ci{
4988c2ecf20Sopenharmony_ci	u32 ntc = rx_ring->next_to_clean + 1;
4998c2ecf20Sopenharmony_ci
5008c2ecf20Sopenharmony_ci	/* fetch, update, and store next to clean */
5018c2ecf20Sopenharmony_ci	ntc = (ntc < rx_ring->count) ? ntc : 0;
5028c2ecf20Sopenharmony_ci	rx_ring->next_to_clean = ntc;
5038c2ecf20Sopenharmony_ci
5048c2ecf20Sopenharmony_ci	prefetch(FM10K_RX_DESC(rx_ring, ntc));
5058c2ecf20Sopenharmony_ci
5068c2ecf20Sopenharmony_ci	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
5078c2ecf20Sopenharmony_ci		return false;
5088c2ecf20Sopenharmony_ci
5098c2ecf20Sopenharmony_ci	return true;
5108c2ecf20Sopenharmony_ci}
5118c2ecf20Sopenharmony_ci
/**
 * fm10k_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
				  union fm10k_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	if (unlikely((fm10k_test_staterr(rx_desc,
					 FM10K_RXD_STATUS_RXE)))) {
/* test an individual error bit in the descriptor's csum_err field */
#define FM10K_TEST_RXD_BIT(rxd, bit) \
	((rxd)->w.csum_err & cpu_to_le16(bit))
		/* attribute the error to a specific per-cause counter */
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
			rx_ring->rx_stats.switch_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
			rx_ring->rx_stats.drops++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
			rx_ring->rx_stats.pp_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
			rx_ring->rx_stats.link_errors++;
		if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
			rx_ring->rx_stats.length_errors++;
		dev_kfree_skb_any(skb);
		rx_ring->rx_stats.errors++;
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
5558c2ecf20Sopenharmony_ci
/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 *
 * Hands the completed skb to the stack via NAPI GRO.
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}
5668c2ecf20Sopenharmony_ci
/* NAPI Rx cleanup routine: process up to @budget completed descriptors on
 * @rx_ring, refilling buffers along the way and pushing finished frames
 * to the stack.  Returns the number of packets processed.
 */
static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
			      struct fm10k_ring *rx_ring,
			      int budget)
{
	/* carry over any partially-assembled frame from the last poll */
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = fm10k_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union fm10k_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
			fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* descriptor not yet written back by hardware */
		if (!rx_desc->d.staterr)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		/* retrieve a buffer from the ring */
		skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (fm10k_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* populate checksum, timestamp, VLAN, and protocol */
		total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);

		fm10k_receive_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	/* publish stats under the u64 seqcount for 32-bit readers */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_packets;
	rx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	return total_packets;
}
6388c2ecf20Sopenharmony_ci
6398c2ecf20Sopenharmony_ci#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
6408c2ecf20Sopenharmony_cistatic struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
6418c2ecf20Sopenharmony_ci{
6428c2ecf20Sopenharmony_ci	struct fm10k_intfc *interface = netdev_priv(skb->dev);
6438c2ecf20Sopenharmony_ci
6448c2ecf20Sopenharmony_ci	if (interface->vxlan_port != udp_hdr(skb)->dest)
6458c2ecf20Sopenharmony_ci		return NULL;
6468c2ecf20Sopenharmony_ci
6478c2ecf20Sopenharmony_ci	/* return offset of udp_hdr plus 8 bytes for VXLAN header */
6488c2ecf20Sopenharmony_ci	return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
6498c2ecf20Sopenharmony_ci}
6508c2ecf20Sopenharmony_ci
6518c2ecf20Sopenharmony_ci#define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF)
6528c2ecf20Sopenharmony_ci#define NVGRE_TNI htons(0x2000)
6538c2ecf20Sopenharmony_cistruct fm10k_nvgre_hdr {
6548c2ecf20Sopenharmony_ci	__be16 flags;
6558c2ecf20Sopenharmony_ci	__be16 proto;
6568c2ecf20Sopenharmony_ci	__be32 tni;
6578c2ecf20Sopenharmony_ci};
6588c2ecf20Sopenharmony_ci
6598c2ecf20Sopenharmony_cistatic struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
6608c2ecf20Sopenharmony_ci{
6618c2ecf20Sopenharmony_ci	struct fm10k_nvgre_hdr *nvgre_hdr;
6628c2ecf20Sopenharmony_ci	int hlen = ip_hdrlen(skb);
6638c2ecf20Sopenharmony_ci
6648c2ecf20Sopenharmony_ci	/* currently only IPv4 is supported due to hlen above */
6658c2ecf20Sopenharmony_ci	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
6668c2ecf20Sopenharmony_ci		return NULL;
6678c2ecf20Sopenharmony_ci
6688c2ecf20Sopenharmony_ci	/* our transport header should be NVGRE */
6698c2ecf20Sopenharmony_ci	nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);
6708c2ecf20Sopenharmony_ci
6718c2ecf20Sopenharmony_ci	/* verify all reserved flags are 0 */
6728c2ecf20Sopenharmony_ci	if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
6738c2ecf20Sopenharmony_ci		return NULL;
6748c2ecf20Sopenharmony_ci
6758c2ecf20Sopenharmony_ci	/* report start of ethernet header */
6768c2ecf20Sopenharmony_ci	if (nvgre_hdr->flags & NVGRE_TNI)
6778c2ecf20Sopenharmony_ci		return (struct ethhdr *)(nvgre_hdr + 1);
6788c2ecf20Sopenharmony_ci
6798c2ecf20Sopenharmony_ci	return (struct ethhdr *)(&nvgre_hdr->tni);
6808c2ecf20Sopenharmony_ci}
6818c2ecf20Sopenharmony_ci
6828c2ecf20Sopenharmony_ci__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
6838c2ecf20Sopenharmony_ci{
6848c2ecf20Sopenharmony_ci	u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
6858c2ecf20Sopenharmony_ci	struct ethhdr *eth_hdr;
6868c2ecf20Sopenharmony_ci
6878c2ecf20Sopenharmony_ci	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
6888c2ecf20Sopenharmony_ci	    skb->inner_protocol != htons(ETH_P_TEB))
6898c2ecf20Sopenharmony_ci		return 0;
6908c2ecf20Sopenharmony_ci
6918c2ecf20Sopenharmony_ci	switch (vlan_get_protocol(skb)) {
6928c2ecf20Sopenharmony_ci	case htons(ETH_P_IP):
6938c2ecf20Sopenharmony_ci		l4_hdr = ip_hdr(skb)->protocol;
6948c2ecf20Sopenharmony_ci		break;
6958c2ecf20Sopenharmony_ci	case htons(ETH_P_IPV6):
6968c2ecf20Sopenharmony_ci		l4_hdr = ipv6_hdr(skb)->nexthdr;
6978c2ecf20Sopenharmony_ci		break;
6988c2ecf20Sopenharmony_ci	default:
6998c2ecf20Sopenharmony_ci		return 0;
7008c2ecf20Sopenharmony_ci	}
7018c2ecf20Sopenharmony_ci
7028c2ecf20Sopenharmony_ci	switch (l4_hdr) {
7038c2ecf20Sopenharmony_ci	case IPPROTO_UDP:
7048c2ecf20Sopenharmony_ci		eth_hdr = fm10k_port_is_vxlan(skb);
7058c2ecf20Sopenharmony_ci		break;
7068c2ecf20Sopenharmony_ci	case IPPROTO_GRE:
7078c2ecf20Sopenharmony_ci		eth_hdr = fm10k_gre_is_nvgre(skb);
7088c2ecf20Sopenharmony_ci		break;
7098c2ecf20Sopenharmony_ci	default:
7108c2ecf20Sopenharmony_ci		return 0;
7118c2ecf20Sopenharmony_ci	}
7128c2ecf20Sopenharmony_ci
7138c2ecf20Sopenharmony_ci	if (!eth_hdr)
7148c2ecf20Sopenharmony_ci		return 0;
7158c2ecf20Sopenharmony_ci
7168c2ecf20Sopenharmony_ci	switch (eth_hdr->h_proto) {
7178c2ecf20Sopenharmony_ci	case htons(ETH_P_IP):
7188c2ecf20Sopenharmony_ci		inner_l4_hdr = inner_ip_hdr(skb)->protocol;
7198c2ecf20Sopenharmony_ci		break;
7208c2ecf20Sopenharmony_ci	case htons(ETH_P_IPV6):
7218c2ecf20Sopenharmony_ci		inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
7228c2ecf20Sopenharmony_ci		break;
7238c2ecf20Sopenharmony_ci	default:
7248c2ecf20Sopenharmony_ci		return 0;
7258c2ecf20Sopenharmony_ci	}
7268c2ecf20Sopenharmony_ci
7278c2ecf20Sopenharmony_ci	switch (inner_l4_hdr) {
7288c2ecf20Sopenharmony_ci	case IPPROTO_TCP:
7298c2ecf20Sopenharmony_ci		inner_l4_hlen = inner_tcp_hdrlen(skb);
7308c2ecf20Sopenharmony_ci		break;
7318c2ecf20Sopenharmony_ci	case IPPROTO_UDP:
7328c2ecf20Sopenharmony_ci		inner_l4_hlen = 8;
7338c2ecf20Sopenharmony_ci		break;
7348c2ecf20Sopenharmony_ci	default:
7358c2ecf20Sopenharmony_ci		return 0;
7368c2ecf20Sopenharmony_ci	}
7378c2ecf20Sopenharmony_ci
7388c2ecf20Sopenharmony_ci	/* The hardware allows tunnel offloads only if the combined inner and
7398c2ecf20Sopenharmony_ci	 * outer header is 184 bytes or less
7408c2ecf20Sopenharmony_ci	 */
7418c2ecf20Sopenharmony_ci	if (skb_inner_transport_header(skb) + inner_l4_hlen -
7428c2ecf20Sopenharmony_ci	    skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
7438c2ecf20Sopenharmony_ci		return 0;
7448c2ecf20Sopenharmony_ci
7458c2ecf20Sopenharmony_ci	return eth_hdr->h_proto;
7468c2ecf20Sopenharmony_ci}
7478c2ecf20Sopenharmony_ci
7488c2ecf20Sopenharmony_cistatic int fm10k_tso(struct fm10k_ring *tx_ring,
7498c2ecf20Sopenharmony_ci		     struct fm10k_tx_buffer *first)
7508c2ecf20Sopenharmony_ci{
7518c2ecf20Sopenharmony_ci	struct sk_buff *skb = first->skb;
7528c2ecf20Sopenharmony_ci	struct fm10k_tx_desc *tx_desc;
7538c2ecf20Sopenharmony_ci	unsigned char *th;
7548c2ecf20Sopenharmony_ci	u8 hdrlen;
7558c2ecf20Sopenharmony_ci
7568c2ecf20Sopenharmony_ci	if (skb->ip_summed != CHECKSUM_PARTIAL)
7578c2ecf20Sopenharmony_ci		return 0;
7588c2ecf20Sopenharmony_ci
7598c2ecf20Sopenharmony_ci	if (!skb_is_gso(skb))
7608c2ecf20Sopenharmony_ci		return 0;
7618c2ecf20Sopenharmony_ci
7628c2ecf20Sopenharmony_ci	/* compute header lengths */
7638c2ecf20Sopenharmony_ci	if (skb->encapsulation) {
7648c2ecf20Sopenharmony_ci		if (!fm10k_tx_encap_offload(skb))
7658c2ecf20Sopenharmony_ci			goto err_vxlan;
7668c2ecf20Sopenharmony_ci		th = skb_inner_transport_header(skb);
7678c2ecf20Sopenharmony_ci	} else {
7688c2ecf20Sopenharmony_ci		th = skb_transport_header(skb);
7698c2ecf20Sopenharmony_ci	}
7708c2ecf20Sopenharmony_ci
7718c2ecf20Sopenharmony_ci	/* compute offset from SOF to transport header and add header len */
7728c2ecf20Sopenharmony_ci	hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);
7738c2ecf20Sopenharmony_ci
7748c2ecf20Sopenharmony_ci	first->tx_flags |= FM10K_TX_FLAGS_CSUM;
7758c2ecf20Sopenharmony_ci
7768c2ecf20Sopenharmony_ci	/* update gso size and bytecount with header size */
7778c2ecf20Sopenharmony_ci	first->gso_segs = skb_shinfo(skb)->gso_segs;
7788c2ecf20Sopenharmony_ci	first->bytecount += (first->gso_segs - 1) * hdrlen;
7798c2ecf20Sopenharmony_ci
7808c2ecf20Sopenharmony_ci	/* populate Tx descriptor header size and mss */
7818c2ecf20Sopenharmony_ci	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
7828c2ecf20Sopenharmony_ci	tx_desc->hdrlen = hdrlen;
7838c2ecf20Sopenharmony_ci	tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
7848c2ecf20Sopenharmony_ci
7858c2ecf20Sopenharmony_ci	return 1;
7868c2ecf20Sopenharmony_ci
7878c2ecf20Sopenharmony_cierr_vxlan:
7888c2ecf20Sopenharmony_ci	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
7898c2ecf20Sopenharmony_ci	if (net_ratelimit())
7908c2ecf20Sopenharmony_ci		netdev_err(tx_ring->netdev,
7918c2ecf20Sopenharmony_ci			   "TSO requested for unsupported tunnel, disabling offload\n");
7928c2ecf20Sopenharmony_ci	return -1;
7938c2ecf20Sopenharmony_ci}
7948c2ecf20Sopenharmony_ci
7958c2ecf20Sopenharmony_cistatic void fm10k_tx_csum(struct fm10k_ring *tx_ring,
7968c2ecf20Sopenharmony_ci			  struct fm10k_tx_buffer *first)
7978c2ecf20Sopenharmony_ci{
7988c2ecf20Sopenharmony_ci	struct sk_buff *skb = first->skb;
7998c2ecf20Sopenharmony_ci	struct fm10k_tx_desc *tx_desc;
8008c2ecf20Sopenharmony_ci	union {
8018c2ecf20Sopenharmony_ci		struct iphdr *ipv4;
8028c2ecf20Sopenharmony_ci		struct ipv6hdr *ipv6;
8038c2ecf20Sopenharmony_ci		u8 *raw;
8048c2ecf20Sopenharmony_ci	} network_hdr;
8058c2ecf20Sopenharmony_ci	u8 *transport_hdr;
8068c2ecf20Sopenharmony_ci	__be16 frag_off;
8078c2ecf20Sopenharmony_ci	__be16 protocol;
8088c2ecf20Sopenharmony_ci	u8 l4_hdr = 0;
8098c2ecf20Sopenharmony_ci
8108c2ecf20Sopenharmony_ci	if (skb->ip_summed != CHECKSUM_PARTIAL)
8118c2ecf20Sopenharmony_ci		goto no_csum;
8128c2ecf20Sopenharmony_ci
8138c2ecf20Sopenharmony_ci	if (skb->encapsulation) {
8148c2ecf20Sopenharmony_ci		protocol = fm10k_tx_encap_offload(skb);
8158c2ecf20Sopenharmony_ci		if (!protocol) {
8168c2ecf20Sopenharmony_ci			if (skb_checksum_help(skb)) {
8178c2ecf20Sopenharmony_ci				dev_warn(tx_ring->dev,
8188c2ecf20Sopenharmony_ci					 "failed to offload encap csum!\n");
8198c2ecf20Sopenharmony_ci				tx_ring->tx_stats.csum_err++;
8208c2ecf20Sopenharmony_ci			}
8218c2ecf20Sopenharmony_ci			goto no_csum;
8228c2ecf20Sopenharmony_ci		}
8238c2ecf20Sopenharmony_ci		network_hdr.raw = skb_inner_network_header(skb);
8248c2ecf20Sopenharmony_ci		transport_hdr = skb_inner_transport_header(skb);
8258c2ecf20Sopenharmony_ci	} else {
8268c2ecf20Sopenharmony_ci		protocol = vlan_get_protocol(skb);
8278c2ecf20Sopenharmony_ci		network_hdr.raw = skb_network_header(skb);
8288c2ecf20Sopenharmony_ci		transport_hdr = skb_transport_header(skb);
8298c2ecf20Sopenharmony_ci	}
8308c2ecf20Sopenharmony_ci
8318c2ecf20Sopenharmony_ci	switch (protocol) {
8328c2ecf20Sopenharmony_ci	case htons(ETH_P_IP):
8338c2ecf20Sopenharmony_ci		l4_hdr = network_hdr.ipv4->protocol;
8348c2ecf20Sopenharmony_ci		break;
8358c2ecf20Sopenharmony_ci	case htons(ETH_P_IPV6):
8368c2ecf20Sopenharmony_ci		l4_hdr = network_hdr.ipv6->nexthdr;
8378c2ecf20Sopenharmony_ci		if (likely((transport_hdr - network_hdr.raw) ==
8388c2ecf20Sopenharmony_ci			   sizeof(struct ipv6hdr)))
8398c2ecf20Sopenharmony_ci			break;
8408c2ecf20Sopenharmony_ci		ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
8418c2ecf20Sopenharmony_ci				      sizeof(struct ipv6hdr),
8428c2ecf20Sopenharmony_ci				 &l4_hdr, &frag_off);
8438c2ecf20Sopenharmony_ci		if (unlikely(frag_off))
8448c2ecf20Sopenharmony_ci			l4_hdr = NEXTHDR_FRAGMENT;
8458c2ecf20Sopenharmony_ci		break;
8468c2ecf20Sopenharmony_ci	default:
8478c2ecf20Sopenharmony_ci		break;
8488c2ecf20Sopenharmony_ci	}
8498c2ecf20Sopenharmony_ci
8508c2ecf20Sopenharmony_ci	switch (l4_hdr) {
8518c2ecf20Sopenharmony_ci	case IPPROTO_TCP:
8528c2ecf20Sopenharmony_ci	case IPPROTO_UDP:
8538c2ecf20Sopenharmony_ci		break;
8548c2ecf20Sopenharmony_ci	case IPPROTO_GRE:
8558c2ecf20Sopenharmony_ci		if (skb->encapsulation)
8568c2ecf20Sopenharmony_ci			break;
8578c2ecf20Sopenharmony_ci		fallthrough;
8588c2ecf20Sopenharmony_ci	default:
8598c2ecf20Sopenharmony_ci		if (unlikely(net_ratelimit())) {
8608c2ecf20Sopenharmony_ci			dev_warn(tx_ring->dev,
8618c2ecf20Sopenharmony_ci				 "partial checksum, version=%d l4 proto=%x\n",
8628c2ecf20Sopenharmony_ci				 protocol, l4_hdr);
8638c2ecf20Sopenharmony_ci		}
8648c2ecf20Sopenharmony_ci		skb_checksum_help(skb);
8658c2ecf20Sopenharmony_ci		tx_ring->tx_stats.csum_err++;
8668c2ecf20Sopenharmony_ci		goto no_csum;
8678c2ecf20Sopenharmony_ci	}
8688c2ecf20Sopenharmony_ci
8698c2ecf20Sopenharmony_ci	/* update TX checksum flag */
8708c2ecf20Sopenharmony_ci	first->tx_flags |= FM10K_TX_FLAGS_CSUM;
8718c2ecf20Sopenharmony_ci	tx_ring->tx_stats.csum_good++;
8728c2ecf20Sopenharmony_ci
8738c2ecf20Sopenharmony_cino_csum:
8748c2ecf20Sopenharmony_ci	/* populate Tx descriptor header size and mss */
8758c2ecf20Sopenharmony_ci	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
8768c2ecf20Sopenharmony_ci	tx_desc->hdrlen = 0;
8778c2ecf20Sopenharmony_ci	tx_desc->mss = 0;
8788c2ecf20Sopenharmony_ci}
8798c2ecf20Sopenharmony_ci
8808c2ecf20Sopenharmony_ci#define FM10K_SET_FLAG(_input, _flag, _result) \
8818c2ecf20Sopenharmony_ci	((_flag <= _result) ? \
8828c2ecf20Sopenharmony_ci	 ((u32)(_input & _flag) * (_result / _flag)) : \
8838c2ecf20Sopenharmony_ci	 ((u32)(_input & _flag) / (_flag / _result)))
8848c2ecf20Sopenharmony_ci
8858c2ecf20Sopenharmony_cistatic u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
8868c2ecf20Sopenharmony_ci{
8878c2ecf20Sopenharmony_ci	/* set type for advanced descriptor with frame checksum insertion */
8888c2ecf20Sopenharmony_ci	u32 desc_flags = 0;
8898c2ecf20Sopenharmony_ci
8908c2ecf20Sopenharmony_ci	/* set checksum offload bits */
8918c2ecf20Sopenharmony_ci	desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
8928c2ecf20Sopenharmony_ci				     FM10K_TXD_FLAG_CSUM);
8938c2ecf20Sopenharmony_ci
8948c2ecf20Sopenharmony_ci	return desc_flags;
8958c2ecf20Sopenharmony_ci}
8968c2ecf20Sopenharmony_ci
8978c2ecf20Sopenharmony_cistatic bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
8988c2ecf20Sopenharmony_ci			       struct fm10k_tx_desc *tx_desc, u16 i,
8998c2ecf20Sopenharmony_ci			       dma_addr_t dma, unsigned int size, u8 desc_flags)
9008c2ecf20Sopenharmony_ci{
9018c2ecf20Sopenharmony_ci	/* set RS and INT for last frame in a cache line */
9028c2ecf20Sopenharmony_ci	if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
9038c2ecf20Sopenharmony_ci		desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;
9048c2ecf20Sopenharmony_ci
9058c2ecf20Sopenharmony_ci	/* record values to descriptor */
9068c2ecf20Sopenharmony_ci	tx_desc->buffer_addr = cpu_to_le64(dma);
9078c2ecf20Sopenharmony_ci	tx_desc->flags = desc_flags;
9088c2ecf20Sopenharmony_ci	tx_desc->buflen = cpu_to_le16(size);
9098c2ecf20Sopenharmony_ci
9108c2ecf20Sopenharmony_ci	/* return true if we just wrapped the ring */
9118c2ecf20Sopenharmony_ci	return i == tx_ring->count;
9128c2ecf20Sopenharmony_ci}
9138c2ecf20Sopenharmony_ci
9148c2ecf20Sopenharmony_cistatic int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
9158c2ecf20Sopenharmony_ci{
9168c2ecf20Sopenharmony_ci	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
9178c2ecf20Sopenharmony_ci
9188c2ecf20Sopenharmony_ci	/* Memory barrier before checking head and tail */
9198c2ecf20Sopenharmony_ci	smp_mb();
9208c2ecf20Sopenharmony_ci
9218c2ecf20Sopenharmony_ci	/* Check again in a case another CPU has just made room available */
9228c2ecf20Sopenharmony_ci	if (likely(fm10k_desc_unused(tx_ring) < size))
9238c2ecf20Sopenharmony_ci		return -EBUSY;
9248c2ecf20Sopenharmony_ci
9258c2ecf20Sopenharmony_ci	/* A reprieve! - use start_queue because it doesn't call schedule */
9268c2ecf20Sopenharmony_ci	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
9278c2ecf20Sopenharmony_ci	++tx_ring->tx_stats.restart_queue;
9288c2ecf20Sopenharmony_ci	return 0;
9298c2ecf20Sopenharmony_ci}
9308c2ecf20Sopenharmony_ci
9318c2ecf20Sopenharmony_cistatic inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
9328c2ecf20Sopenharmony_ci{
9338c2ecf20Sopenharmony_ci	if (likely(fm10k_desc_unused(tx_ring) >= size))
9348c2ecf20Sopenharmony_ci		return 0;
9358c2ecf20Sopenharmony_ci	return __fm10k_maybe_stop_tx(tx_ring, size);
9368c2ecf20Sopenharmony_ci}
9378c2ecf20Sopenharmony_ci
9388c2ecf20Sopenharmony_cistatic void fm10k_tx_map(struct fm10k_ring *tx_ring,
9398c2ecf20Sopenharmony_ci			 struct fm10k_tx_buffer *first)
9408c2ecf20Sopenharmony_ci{
9418c2ecf20Sopenharmony_ci	struct sk_buff *skb = first->skb;
9428c2ecf20Sopenharmony_ci	struct fm10k_tx_buffer *tx_buffer;
9438c2ecf20Sopenharmony_ci	struct fm10k_tx_desc *tx_desc;
9448c2ecf20Sopenharmony_ci	skb_frag_t *frag;
9458c2ecf20Sopenharmony_ci	unsigned char *data;
9468c2ecf20Sopenharmony_ci	dma_addr_t dma;
9478c2ecf20Sopenharmony_ci	unsigned int data_len, size;
9488c2ecf20Sopenharmony_ci	u32 tx_flags = first->tx_flags;
9498c2ecf20Sopenharmony_ci	u16 i = tx_ring->next_to_use;
9508c2ecf20Sopenharmony_ci	u8 flags = fm10k_tx_desc_flags(skb, tx_flags);
9518c2ecf20Sopenharmony_ci
9528c2ecf20Sopenharmony_ci	tx_desc = FM10K_TX_DESC(tx_ring, i);
9538c2ecf20Sopenharmony_ci
9548c2ecf20Sopenharmony_ci	/* add HW VLAN tag */
9558c2ecf20Sopenharmony_ci	if (skb_vlan_tag_present(skb))
9568c2ecf20Sopenharmony_ci		tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
9578c2ecf20Sopenharmony_ci	else
9588c2ecf20Sopenharmony_ci		tx_desc->vlan = 0;
9598c2ecf20Sopenharmony_ci
9608c2ecf20Sopenharmony_ci	size = skb_headlen(skb);
9618c2ecf20Sopenharmony_ci	data = skb->data;
9628c2ecf20Sopenharmony_ci
9638c2ecf20Sopenharmony_ci	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
9648c2ecf20Sopenharmony_ci
9658c2ecf20Sopenharmony_ci	data_len = skb->data_len;
9668c2ecf20Sopenharmony_ci	tx_buffer = first;
9678c2ecf20Sopenharmony_ci
9688c2ecf20Sopenharmony_ci	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
9698c2ecf20Sopenharmony_ci		if (dma_mapping_error(tx_ring->dev, dma))
9708c2ecf20Sopenharmony_ci			goto dma_error;
9718c2ecf20Sopenharmony_ci
9728c2ecf20Sopenharmony_ci		/* record length, and DMA address */
9738c2ecf20Sopenharmony_ci		dma_unmap_len_set(tx_buffer, len, size);
9748c2ecf20Sopenharmony_ci		dma_unmap_addr_set(tx_buffer, dma, dma);
9758c2ecf20Sopenharmony_ci
9768c2ecf20Sopenharmony_ci		while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
9778c2ecf20Sopenharmony_ci			if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
9788c2ecf20Sopenharmony_ci					       FM10K_MAX_DATA_PER_TXD, flags)) {
9798c2ecf20Sopenharmony_ci				tx_desc = FM10K_TX_DESC(tx_ring, 0);
9808c2ecf20Sopenharmony_ci				i = 0;
9818c2ecf20Sopenharmony_ci			}
9828c2ecf20Sopenharmony_ci
9838c2ecf20Sopenharmony_ci			dma += FM10K_MAX_DATA_PER_TXD;
9848c2ecf20Sopenharmony_ci			size -= FM10K_MAX_DATA_PER_TXD;
9858c2ecf20Sopenharmony_ci		}
9868c2ecf20Sopenharmony_ci
9878c2ecf20Sopenharmony_ci		if (likely(!data_len))
9888c2ecf20Sopenharmony_ci			break;
9898c2ecf20Sopenharmony_ci
9908c2ecf20Sopenharmony_ci		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
9918c2ecf20Sopenharmony_ci				       dma, size, flags)) {
9928c2ecf20Sopenharmony_ci			tx_desc = FM10K_TX_DESC(tx_ring, 0);
9938c2ecf20Sopenharmony_ci			i = 0;
9948c2ecf20Sopenharmony_ci		}
9958c2ecf20Sopenharmony_ci
9968c2ecf20Sopenharmony_ci		size = skb_frag_size(frag);
9978c2ecf20Sopenharmony_ci		data_len -= size;
9988c2ecf20Sopenharmony_ci
9998c2ecf20Sopenharmony_ci		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
10008c2ecf20Sopenharmony_ci				       DMA_TO_DEVICE);
10018c2ecf20Sopenharmony_ci
10028c2ecf20Sopenharmony_ci		tx_buffer = &tx_ring->tx_buffer[i];
10038c2ecf20Sopenharmony_ci	}
10048c2ecf20Sopenharmony_ci
10058c2ecf20Sopenharmony_ci	/* write last descriptor with LAST bit set */
10068c2ecf20Sopenharmony_ci	flags |= FM10K_TXD_FLAG_LAST;
10078c2ecf20Sopenharmony_ci
10088c2ecf20Sopenharmony_ci	if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
10098c2ecf20Sopenharmony_ci		i = 0;
10108c2ecf20Sopenharmony_ci
10118c2ecf20Sopenharmony_ci	/* record bytecount for BQL */
10128c2ecf20Sopenharmony_ci	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
10138c2ecf20Sopenharmony_ci
10148c2ecf20Sopenharmony_ci	/* record SW timestamp if HW timestamp is not available */
10158c2ecf20Sopenharmony_ci	skb_tx_timestamp(first->skb);
10168c2ecf20Sopenharmony_ci
10178c2ecf20Sopenharmony_ci	/* Force memory writes to complete before letting h/w know there
10188c2ecf20Sopenharmony_ci	 * are new descriptors to fetch.  (Only applicable for weak-ordered
10198c2ecf20Sopenharmony_ci	 * memory model archs, such as IA-64).
10208c2ecf20Sopenharmony_ci	 *
10218c2ecf20Sopenharmony_ci	 * We also need this memory barrier to make certain all of the
10228c2ecf20Sopenharmony_ci	 * status bits have been updated before next_to_watch is written.
10238c2ecf20Sopenharmony_ci	 */
10248c2ecf20Sopenharmony_ci	wmb();
10258c2ecf20Sopenharmony_ci
10268c2ecf20Sopenharmony_ci	/* set next_to_watch value indicating a packet is present */
10278c2ecf20Sopenharmony_ci	first->next_to_watch = tx_desc;
10288c2ecf20Sopenharmony_ci
10298c2ecf20Sopenharmony_ci	tx_ring->next_to_use = i;
10308c2ecf20Sopenharmony_ci
10318c2ecf20Sopenharmony_ci	/* Make sure there is space in the ring for the next send. */
10328c2ecf20Sopenharmony_ci	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
10338c2ecf20Sopenharmony_ci
10348c2ecf20Sopenharmony_ci	/* notify HW of packet */
10358c2ecf20Sopenharmony_ci	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
10368c2ecf20Sopenharmony_ci		writel(i, tx_ring->tail);
10378c2ecf20Sopenharmony_ci	}
10388c2ecf20Sopenharmony_ci
10398c2ecf20Sopenharmony_ci	return;
10408c2ecf20Sopenharmony_cidma_error:
10418c2ecf20Sopenharmony_ci	dev_err(tx_ring->dev, "TX DMA map failed\n");
10428c2ecf20Sopenharmony_ci
10438c2ecf20Sopenharmony_ci	/* clear dma mappings for failed tx_buffer map */
10448c2ecf20Sopenharmony_ci	for (;;) {
10458c2ecf20Sopenharmony_ci		tx_buffer = &tx_ring->tx_buffer[i];
10468c2ecf20Sopenharmony_ci		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
10478c2ecf20Sopenharmony_ci		if (tx_buffer == first)
10488c2ecf20Sopenharmony_ci			break;
10498c2ecf20Sopenharmony_ci		if (i == 0)
10508c2ecf20Sopenharmony_ci			i = tx_ring->count;
10518c2ecf20Sopenharmony_ci		i--;
10528c2ecf20Sopenharmony_ci	}
10538c2ecf20Sopenharmony_ci
10548c2ecf20Sopenharmony_ci	tx_ring->next_to_use = i;
10558c2ecf20Sopenharmony_ci}
10568c2ecf20Sopenharmony_ci
10578c2ecf20Sopenharmony_cinetdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
10588c2ecf20Sopenharmony_ci				  struct fm10k_ring *tx_ring)
10598c2ecf20Sopenharmony_ci{
10608c2ecf20Sopenharmony_ci	u16 count = TXD_USE_COUNT(skb_headlen(skb));
10618c2ecf20Sopenharmony_ci	struct fm10k_tx_buffer *first;
10628c2ecf20Sopenharmony_ci	unsigned short f;
10638c2ecf20Sopenharmony_ci	u32 tx_flags = 0;
10648c2ecf20Sopenharmony_ci	int tso;
10658c2ecf20Sopenharmony_ci
10668c2ecf20Sopenharmony_ci	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
10678c2ecf20Sopenharmony_ci	 *       + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
10688c2ecf20Sopenharmony_ci	 *       + 2 desc gap to keep tail from touching head
10698c2ecf20Sopenharmony_ci	 * otherwise try next time
10708c2ecf20Sopenharmony_ci	 */
10718c2ecf20Sopenharmony_ci	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
10728c2ecf20Sopenharmony_ci		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
10738c2ecf20Sopenharmony_ci
10748c2ecf20Sopenharmony_ci		count += TXD_USE_COUNT(skb_frag_size(frag));
10758c2ecf20Sopenharmony_ci	}
10768c2ecf20Sopenharmony_ci
10778c2ecf20Sopenharmony_ci	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
10788c2ecf20Sopenharmony_ci		tx_ring->tx_stats.tx_busy++;
10798c2ecf20Sopenharmony_ci		return NETDEV_TX_BUSY;
10808c2ecf20Sopenharmony_ci	}
10818c2ecf20Sopenharmony_ci
10828c2ecf20Sopenharmony_ci	/* record the location of the first descriptor for this packet */
10838c2ecf20Sopenharmony_ci	first = &tx_ring->tx_buffer[tx_ring->next_to_use];
10848c2ecf20Sopenharmony_ci	first->skb = skb;
10858c2ecf20Sopenharmony_ci	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
10868c2ecf20Sopenharmony_ci	first->gso_segs = 1;
10878c2ecf20Sopenharmony_ci
10888c2ecf20Sopenharmony_ci	/* record initial flags and protocol */
10898c2ecf20Sopenharmony_ci	first->tx_flags = tx_flags;
10908c2ecf20Sopenharmony_ci
10918c2ecf20Sopenharmony_ci	tso = fm10k_tso(tx_ring, first);
10928c2ecf20Sopenharmony_ci	if (tso < 0)
10938c2ecf20Sopenharmony_ci		goto out_drop;
10948c2ecf20Sopenharmony_ci	else if (!tso)
10958c2ecf20Sopenharmony_ci		fm10k_tx_csum(tx_ring, first);
10968c2ecf20Sopenharmony_ci
10978c2ecf20Sopenharmony_ci	fm10k_tx_map(tx_ring, first);
10988c2ecf20Sopenharmony_ci
10998c2ecf20Sopenharmony_ci	return NETDEV_TX_OK;
11008c2ecf20Sopenharmony_ci
11018c2ecf20Sopenharmony_ciout_drop:
11028c2ecf20Sopenharmony_ci	dev_kfree_skb_any(first->skb);
11038c2ecf20Sopenharmony_ci	first->skb = NULL;
11048c2ecf20Sopenharmony_ci
11058c2ecf20Sopenharmony_ci	return NETDEV_TX_OK;
11068c2ecf20Sopenharmony_ci}
11078c2ecf20Sopenharmony_ci
11088c2ecf20Sopenharmony_cistatic u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
11098c2ecf20Sopenharmony_ci{
11108c2ecf20Sopenharmony_ci	return ring->stats.packets;
11118c2ecf20Sopenharmony_ci}
11128c2ecf20Sopenharmony_ci
11138c2ecf20Sopenharmony_ci/**
11148c2ecf20Sopenharmony_ci * fm10k_get_tx_pending - how many Tx descriptors not processed
11158c2ecf20Sopenharmony_ci * @ring: the ring structure
11168c2ecf20Sopenharmony_ci * @in_sw: is tx_pending being checked in SW or in HW?
11178c2ecf20Sopenharmony_ci */
11188c2ecf20Sopenharmony_ciu64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw)
11198c2ecf20Sopenharmony_ci{
11208c2ecf20Sopenharmony_ci	struct fm10k_intfc *interface = ring->q_vector->interface;
11218c2ecf20Sopenharmony_ci	struct fm10k_hw *hw = &interface->hw;
11228c2ecf20Sopenharmony_ci	u32 head, tail;
11238c2ecf20Sopenharmony_ci
11248c2ecf20Sopenharmony_ci	if (likely(in_sw)) {
11258c2ecf20Sopenharmony_ci		head = ring->next_to_clean;
11268c2ecf20Sopenharmony_ci		tail = ring->next_to_use;
11278c2ecf20Sopenharmony_ci	} else {
11288c2ecf20Sopenharmony_ci		head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
11298c2ecf20Sopenharmony_ci		tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
11308c2ecf20Sopenharmony_ci	}
11318c2ecf20Sopenharmony_ci
11328c2ecf20Sopenharmony_ci	return ((head <= tail) ? tail : tail + ring->count) - head;
11338c2ecf20Sopenharmony_ci}
11348c2ecf20Sopenharmony_ci
11358c2ecf20Sopenharmony_cibool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
11368c2ecf20Sopenharmony_ci{
11378c2ecf20Sopenharmony_ci	u32 tx_done = fm10k_get_tx_completed(tx_ring);
11388c2ecf20Sopenharmony_ci	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
11398c2ecf20Sopenharmony_ci	u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);
11408c2ecf20Sopenharmony_ci
11418c2ecf20Sopenharmony_ci	clear_check_for_tx_hang(tx_ring);
11428c2ecf20Sopenharmony_ci
11438c2ecf20Sopenharmony_ci	/* Check for a hung queue, but be thorough. This verifies
11448c2ecf20Sopenharmony_ci	 * that a transmit has been completed since the previous
11458c2ecf20Sopenharmony_ci	 * check AND there is at least one packet pending. By
11468c2ecf20Sopenharmony_ci	 * requiring this to fail twice we avoid races with
11478c2ecf20Sopenharmony_ci	 * clearing the ARMED bit and conditions where we
11488c2ecf20Sopenharmony_ci	 * run the check_tx_hang logic with a transmit completion
11498c2ecf20Sopenharmony_ci	 * pending but without time to complete it yet.
11508c2ecf20Sopenharmony_ci	 */
11518c2ecf20Sopenharmony_ci	if (!tx_pending || (tx_done_old != tx_done)) {
11528c2ecf20Sopenharmony_ci		/* update completed stats and continue */
11538c2ecf20Sopenharmony_ci		tx_ring->tx_stats.tx_done_old = tx_done;
11548c2ecf20Sopenharmony_ci		/* reset the countdown */
11558c2ecf20Sopenharmony_ci		clear_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
11568c2ecf20Sopenharmony_ci
11578c2ecf20Sopenharmony_ci		return false;
11588c2ecf20Sopenharmony_ci	}
11598c2ecf20Sopenharmony_ci
11608c2ecf20Sopenharmony_ci	/* make sure it is true for two checks in a row */
11618c2ecf20Sopenharmony_ci	return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, tx_ring->state);
11628c2ecf20Sopenharmony_ci}
11638c2ecf20Sopenharmony_ci
11648c2ecf20Sopenharmony_ci/**
11658c2ecf20Sopenharmony_ci * fm10k_tx_timeout_reset - initiate reset due to Tx timeout
11668c2ecf20Sopenharmony_ci * @interface: driver private struct
11678c2ecf20Sopenharmony_ci **/
11688c2ecf20Sopenharmony_civoid fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
11698c2ecf20Sopenharmony_ci{
11708c2ecf20Sopenharmony_ci	/* Do the reset outside of interrupt context */
11718c2ecf20Sopenharmony_ci	if (!test_bit(__FM10K_DOWN, interface->state)) {
11728c2ecf20Sopenharmony_ci		interface->tx_timeout_count++;
11738c2ecf20Sopenharmony_ci		set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
11748c2ecf20Sopenharmony_ci		fm10k_service_event_schedule(interface);
11758c2ecf20Sopenharmony_ci	}
11768c2ecf20Sopenharmony_ci}
11778c2ecf20Sopenharmony_ci
11788c2ecf20Sopenharmony_ci/**
11798c2ecf20Sopenharmony_ci * fm10k_clean_tx_irq - Reclaim resources after transmit completes
11808c2ecf20Sopenharmony_ci * @q_vector: structure containing interrupt and ring information
11818c2ecf20Sopenharmony_ci * @tx_ring: tx ring to clean
11828c2ecf20Sopenharmony_ci * @napi_budget: Used to determine if we are in netpoll
11838c2ecf20Sopenharmony_ci **/
11848c2ecf20Sopenharmony_cistatic bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
11858c2ecf20Sopenharmony_ci			       struct fm10k_ring *tx_ring, int napi_budget)
11868c2ecf20Sopenharmony_ci{
11878c2ecf20Sopenharmony_ci	struct fm10k_intfc *interface = q_vector->interface;
11888c2ecf20Sopenharmony_ci	struct fm10k_tx_buffer *tx_buffer;
11898c2ecf20Sopenharmony_ci	struct fm10k_tx_desc *tx_desc;
11908c2ecf20Sopenharmony_ci	unsigned int total_bytes = 0, total_packets = 0;
11918c2ecf20Sopenharmony_ci	unsigned int budget = q_vector->tx.work_limit;
11928c2ecf20Sopenharmony_ci	unsigned int i = tx_ring->next_to_clean;
11938c2ecf20Sopenharmony_ci
11948c2ecf20Sopenharmony_ci	if (test_bit(__FM10K_DOWN, interface->state))
11958c2ecf20Sopenharmony_ci		return true;
11968c2ecf20Sopenharmony_ci
11978c2ecf20Sopenharmony_ci	tx_buffer = &tx_ring->tx_buffer[i];
11988c2ecf20Sopenharmony_ci	tx_desc = FM10K_TX_DESC(tx_ring, i);
11998c2ecf20Sopenharmony_ci	i -= tx_ring->count;
12008c2ecf20Sopenharmony_ci
12018c2ecf20Sopenharmony_ci	do {
12028c2ecf20Sopenharmony_ci		struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch;
12038c2ecf20Sopenharmony_ci
12048c2ecf20Sopenharmony_ci		/* if next_to_watch is not set then there is no work pending */
12058c2ecf20Sopenharmony_ci		if (!eop_desc)
12068c2ecf20Sopenharmony_ci			break;
12078c2ecf20Sopenharmony_ci
12088c2ecf20Sopenharmony_ci		/* prevent any other reads prior to eop_desc */
12098c2ecf20Sopenharmony_ci		smp_rmb();
12108c2ecf20Sopenharmony_ci
12118c2ecf20Sopenharmony_ci		/* if DD is not set pending work has not been completed */
12128c2ecf20Sopenharmony_ci		if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
12138c2ecf20Sopenharmony_ci			break;
12148c2ecf20Sopenharmony_ci
12158c2ecf20Sopenharmony_ci		/* clear next_to_watch to prevent false hangs */
12168c2ecf20Sopenharmony_ci		tx_buffer->next_to_watch = NULL;
12178c2ecf20Sopenharmony_ci
12188c2ecf20Sopenharmony_ci		/* update the statistics for this packet */
12198c2ecf20Sopenharmony_ci		total_bytes += tx_buffer->bytecount;
12208c2ecf20Sopenharmony_ci		total_packets += tx_buffer->gso_segs;
12218c2ecf20Sopenharmony_ci
12228c2ecf20Sopenharmony_ci		/* free the skb */
12238c2ecf20Sopenharmony_ci		napi_consume_skb(tx_buffer->skb, napi_budget);
12248c2ecf20Sopenharmony_ci
12258c2ecf20Sopenharmony_ci		/* unmap skb header data */
12268c2ecf20Sopenharmony_ci		dma_unmap_single(tx_ring->dev,
12278c2ecf20Sopenharmony_ci				 dma_unmap_addr(tx_buffer, dma),
12288c2ecf20Sopenharmony_ci				 dma_unmap_len(tx_buffer, len),
12298c2ecf20Sopenharmony_ci				 DMA_TO_DEVICE);
12308c2ecf20Sopenharmony_ci
12318c2ecf20Sopenharmony_ci		/* clear tx_buffer data */
12328c2ecf20Sopenharmony_ci		tx_buffer->skb = NULL;
12338c2ecf20Sopenharmony_ci		dma_unmap_len_set(tx_buffer, len, 0);
12348c2ecf20Sopenharmony_ci
12358c2ecf20Sopenharmony_ci		/* unmap remaining buffers */
12368c2ecf20Sopenharmony_ci		while (tx_desc != eop_desc) {
12378c2ecf20Sopenharmony_ci			tx_buffer++;
12388c2ecf20Sopenharmony_ci			tx_desc++;
12398c2ecf20Sopenharmony_ci			i++;
12408c2ecf20Sopenharmony_ci			if (unlikely(!i)) {
12418c2ecf20Sopenharmony_ci				i -= tx_ring->count;
12428c2ecf20Sopenharmony_ci				tx_buffer = tx_ring->tx_buffer;
12438c2ecf20Sopenharmony_ci				tx_desc = FM10K_TX_DESC(tx_ring, 0);
12448c2ecf20Sopenharmony_ci			}
12458c2ecf20Sopenharmony_ci
12468c2ecf20Sopenharmony_ci			/* unmap any remaining paged data */
12478c2ecf20Sopenharmony_ci			if (dma_unmap_len(tx_buffer, len)) {
12488c2ecf20Sopenharmony_ci				dma_unmap_page(tx_ring->dev,
12498c2ecf20Sopenharmony_ci					       dma_unmap_addr(tx_buffer, dma),
12508c2ecf20Sopenharmony_ci					       dma_unmap_len(tx_buffer, len),
12518c2ecf20Sopenharmony_ci					       DMA_TO_DEVICE);
12528c2ecf20Sopenharmony_ci				dma_unmap_len_set(tx_buffer, len, 0);
12538c2ecf20Sopenharmony_ci			}
12548c2ecf20Sopenharmony_ci		}
12558c2ecf20Sopenharmony_ci
12568c2ecf20Sopenharmony_ci		/* move us one more past the eop_desc for start of next pkt */
12578c2ecf20Sopenharmony_ci		tx_buffer++;
12588c2ecf20Sopenharmony_ci		tx_desc++;
12598c2ecf20Sopenharmony_ci		i++;
12608c2ecf20Sopenharmony_ci		if (unlikely(!i)) {
12618c2ecf20Sopenharmony_ci			i -= tx_ring->count;
12628c2ecf20Sopenharmony_ci			tx_buffer = tx_ring->tx_buffer;
12638c2ecf20Sopenharmony_ci			tx_desc = FM10K_TX_DESC(tx_ring, 0);
12648c2ecf20Sopenharmony_ci		}
12658c2ecf20Sopenharmony_ci
12668c2ecf20Sopenharmony_ci		/* issue prefetch for next Tx descriptor */
12678c2ecf20Sopenharmony_ci		prefetch(tx_desc);
12688c2ecf20Sopenharmony_ci
12698c2ecf20Sopenharmony_ci		/* update budget accounting */
12708c2ecf20Sopenharmony_ci		budget--;
12718c2ecf20Sopenharmony_ci	} while (likely(budget));
12728c2ecf20Sopenharmony_ci
12738c2ecf20Sopenharmony_ci	i += tx_ring->count;
12748c2ecf20Sopenharmony_ci	tx_ring->next_to_clean = i;
12758c2ecf20Sopenharmony_ci	u64_stats_update_begin(&tx_ring->syncp);
12768c2ecf20Sopenharmony_ci	tx_ring->stats.bytes += total_bytes;
12778c2ecf20Sopenharmony_ci	tx_ring->stats.packets += total_packets;
12788c2ecf20Sopenharmony_ci	u64_stats_update_end(&tx_ring->syncp);
12798c2ecf20Sopenharmony_ci	q_vector->tx.total_bytes += total_bytes;
12808c2ecf20Sopenharmony_ci	q_vector->tx.total_packets += total_packets;
12818c2ecf20Sopenharmony_ci
12828c2ecf20Sopenharmony_ci	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
12838c2ecf20Sopenharmony_ci		/* schedule immediate reset if we believe we hung */
12848c2ecf20Sopenharmony_ci		struct fm10k_hw *hw = &interface->hw;
12858c2ecf20Sopenharmony_ci
12868c2ecf20Sopenharmony_ci		netif_err(interface, drv, tx_ring->netdev,
12878c2ecf20Sopenharmony_ci			  "Detected Tx Unit Hang\n"
12888c2ecf20Sopenharmony_ci			  "  Tx Queue             <%d>\n"
12898c2ecf20Sopenharmony_ci			  "  TDH, TDT             <%x>, <%x>\n"
12908c2ecf20Sopenharmony_ci			  "  next_to_use          <%x>\n"
12918c2ecf20Sopenharmony_ci			  "  next_to_clean        <%x>\n",
12928c2ecf20Sopenharmony_ci			  tx_ring->queue_index,
12938c2ecf20Sopenharmony_ci			  fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
12948c2ecf20Sopenharmony_ci			  fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
12958c2ecf20Sopenharmony_ci			  tx_ring->next_to_use, i);
12968c2ecf20Sopenharmony_ci
12978c2ecf20Sopenharmony_ci		netif_stop_subqueue(tx_ring->netdev,
12988c2ecf20Sopenharmony_ci				    tx_ring->queue_index);
12998c2ecf20Sopenharmony_ci
13008c2ecf20Sopenharmony_ci		netif_info(interface, probe, tx_ring->netdev,
13018c2ecf20Sopenharmony_ci			   "tx hang %d detected on queue %d, resetting interface\n",
13028c2ecf20Sopenharmony_ci			   interface->tx_timeout_count + 1,
13038c2ecf20Sopenharmony_ci			   tx_ring->queue_index);
13048c2ecf20Sopenharmony_ci
13058c2ecf20Sopenharmony_ci		fm10k_tx_timeout_reset(interface);
13068c2ecf20Sopenharmony_ci
13078c2ecf20Sopenharmony_ci		/* the netdev is about to reset, no point in enabling stuff */
13088c2ecf20Sopenharmony_ci		return true;
13098c2ecf20Sopenharmony_ci	}
13108c2ecf20Sopenharmony_ci
13118c2ecf20Sopenharmony_ci	/* notify netdev of completed buffers */
13128c2ecf20Sopenharmony_ci	netdev_tx_completed_queue(txring_txq(tx_ring),
13138c2ecf20Sopenharmony_ci				  total_packets, total_bytes);
13148c2ecf20Sopenharmony_ci
13158c2ecf20Sopenharmony_ci#define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2)
13168c2ecf20Sopenharmony_ci	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
13178c2ecf20Sopenharmony_ci		     (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
13188c2ecf20Sopenharmony_ci		/* Make sure that anybody stopping the queue after this
13198c2ecf20Sopenharmony_ci		 * sees the new next_to_clean.
13208c2ecf20Sopenharmony_ci		 */
13218c2ecf20Sopenharmony_ci		smp_mb();
13228c2ecf20Sopenharmony_ci		if (__netif_subqueue_stopped(tx_ring->netdev,
13238c2ecf20Sopenharmony_ci					     tx_ring->queue_index) &&
13248c2ecf20Sopenharmony_ci		    !test_bit(__FM10K_DOWN, interface->state)) {
13258c2ecf20Sopenharmony_ci			netif_wake_subqueue(tx_ring->netdev,
13268c2ecf20Sopenharmony_ci					    tx_ring->queue_index);
13278c2ecf20Sopenharmony_ci			++tx_ring->tx_stats.restart_queue;
13288c2ecf20Sopenharmony_ci		}
13298c2ecf20Sopenharmony_ci	}
13308c2ecf20Sopenharmony_ci
13318c2ecf20Sopenharmony_ci	return !!budget;
13328c2ecf20Sopenharmony_ci}
13338c2ecf20Sopenharmony_ci
13348c2ecf20Sopenharmony_ci/**
13358c2ecf20Sopenharmony_ci * fm10k_update_itr - update the dynamic ITR value based on packet size
13368c2ecf20Sopenharmony_ci *
13378c2ecf20Sopenharmony_ci *      Stores a new ITR value based on strictly on packet size.  The
13388c2ecf20Sopenharmony_ci *      divisors and thresholds used by this function were determined based
13398c2ecf20Sopenharmony_ci *      on theoretical maximum wire speed and testing data, in order to
13408c2ecf20Sopenharmony_ci *      minimize response time while increasing bulk throughput.
13418c2ecf20Sopenharmony_ci *
13428c2ecf20Sopenharmony_ci * @ring_container: Container for rings to have ITR updated
13438c2ecf20Sopenharmony_ci **/
13448c2ecf20Sopenharmony_cistatic void fm10k_update_itr(struct fm10k_ring_container *ring_container)
13458c2ecf20Sopenharmony_ci{
13468c2ecf20Sopenharmony_ci	unsigned int avg_wire_size, packets, itr_round;
13478c2ecf20Sopenharmony_ci
13488c2ecf20Sopenharmony_ci	/* Only update ITR if we are using adaptive setting */
13498c2ecf20Sopenharmony_ci	if (!ITR_IS_ADAPTIVE(ring_container->itr))
13508c2ecf20Sopenharmony_ci		goto clear_counts;
13518c2ecf20Sopenharmony_ci
13528c2ecf20Sopenharmony_ci	packets = ring_container->total_packets;
13538c2ecf20Sopenharmony_ci	if (!packets)
13548c2ecf20Sopenharmony_ci		goto clear_counts;
13558c2ecf20Sopenharmony_ci
13568c2ecf20Sopenharmony_ci	avg_wire_size = ring_container->total_bytes / packets;
13578c2ecf20Sopenharmony_ci
13588c2ecf20Sopenharmony_ci	/* The following is a crude approximation of:
13598c2ecf20Sopenharmony_ci	 *  wmem_default / (size + overhead) = desired_pkts_per_int
13608c2ecf20Sopenharmony_ci	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
13618c2ecf20Sopenharmony_ci	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
13628c2ecf20Sopenharmony_ci	 *
13638c2ecf20Sopenharmony_ci	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
13648c2ecf20Sopenharmony_ci	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
13658c2ecf20Sopenharmony_ci	 * formula down to
13668c2ecf20Sopenharmony_ci	 *
13678c2ecf20Sopenharmony_ci	 *  (34 * (size + 24)) / (size + 640) = ITR
13688c2ecf20Sopenharmony_ci	 *
13698c2ecf20Sopenharmony_ci	 * We first do some math on the packet size and then finally bitshift
13708c2ecf20Sopenharmony_ci	 * by 8 after rounding up. We also have to account for PCIe link speed
13718c2ecf20Sopenharmony_ci	 * difference as ITR scales based on this.
13728c2ecf20Sopenharmony_ci	 */
13738c2ecf20Sopenharmony_ci	if (avg_wire_size <= 360) {
13748c2ecf20Sopenharmony_ci		/* Start at 250K ints/sec and gradually drop to 77K ints/sec */
13758c2ecf20Sopenharmony_ci		avg_wire_size *= 8;
13768c2ecf20Sopenharmony_ci		avg_wire_size += 376;
13778c2ecf20Sopenharmony_ci	} else if (avg_wire_size <= 1152) {
13788c2ecf20Sopenharmony_ci		/* 77K ints/sec to 45K ints/sec */
13798c2ecf20Sopenharmony_ci		avg_wire_size *= 3;
13808c2ecf20Sopenharmony_ci		avg_wire_size += 2176;
13818c2ecf20Sopenharmony_ci	} else if (avg_wire_size <= 1920) {
13828c2ecf20Sopenharmony_ci		/* 45K ints/sec to 38K ints/sec */
13838c2ecf20Sopenharmony_ci		avg_wire_size += 4480;
13848c2ecf20Sopenharmony_ci	} else {
13858c2ecf20Sopenharmony_ci		/* plateau at a limit of 38K ints/sec */
13868c2ecf20Sopenharmony_ci		avg_wire_size = 6656;
13878c2ecf20Sopenharmony_ci	}
13888c2ecf20Sopenharmony_ci
13898c2ecf20Sopenharmony_ci	/* Perform final bitshift for division after rounding up to ensure
13908c2ecf20Sopenharmony_ci	 * that the calculation will never get below a 1. The bit shift
13918c2ecf20Sopenharmony_ci	 * accounts for changes in the ITR due to PCIe link speed.
13928c2ecf20Sopenharmony_ci	 */
13938c2ecf20Sopenharmony_ci	itr_round = READ_ONCE(ring_container->itr_scale) + 8;
13948c2ecf20Sopenharmony_ci	avg_wire_size += BIT(itr_round) - 1;
13958c2ecf20Sopenharmony_ci	avg_wire_size >>= itr_round;
13968c2ecf20Sopenharmony_ci
13978c2ecf20Sopenharmony_ci	/* write back value and retain adaptive flag */
13988c2ecf20Sopenharmony_ci	ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;
13998c2ecf20Sopenharmony_ci
14008c2ecf20Sopenharmony_ciclear_counts:
14018c2ecf20Sopenharmony_ci	ring_container->total_bytes = 0;
14028c2ecf20Sopenharmony_ci	ring_container->total_packets = 0;
14038c2ecf20Sopenharmony_ci}
14048c2ecf20Sopenharmony_ci
14058c2ecf20Sopenharmony_cistatic void fm10k_qv_enable(struct fm10k_q_vector *q_vector)
14068c2ecf20Sopenharmony_ci{
14078c2ecf20Sopenharmony_ci	/* Enable auto-mask and clear the current mask */
14088c2ecf20Sopenharmony_ci	u32 itr = FM10K_ITR_ENABLE;
14098c2ecf20Sopenharmony_ci
14108c2ecf20Sopenharmony_ci	/* Update Tx ITR */
14118c2ecf20Sopenharmony_ci	fm10k_update_itr(&q_vector->tx);
14128c2ecf20Sopenharmony_ci
14138c2ecf20Sopenharmony_ci	/* Update Rx ITR */
14148c2ecf20Sopenharmony_ci	fm10k_update_itr(&q_vector->rx);
14158c2ecf20Sopenharmony_ci
14168c2ecf20Sopenharmony_ci	/* Store Tx itr in timer slot 0 */
14178c2ecf20Sopenharmony_ci	itr |= (q_vector->tx.itr & FM10K_ITR_MAX);
14188c2ecf20Sopenharmony_ci
14198c2ecf20Sopenharmony_ci	/* Shift Rx itr to timer slot 1 */
14208c2ecf20Sopenharmony_ci	itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT;
14218c2ecf20Sopenharmony_ci
14228c2ecf20Sopenharmony_ci	/* Write the final value to the ITR register */
14238c2ecf20Sopenharmony_ci	writel(itr, q_vector->itr);
14248c2ecf20Sopenharmony_ci}
14258c2ecf20Sopenharmony_ci
/**
 * fm10k_poll - NAPI polling routine for this q_vector
 * @napi: NAPI structure embedded in the q_vector
 * @budget: maximum number of Rx packets this poll may clean
 *
 * Cleans every Tx ring first (Tx work is bounded separately by the
 * ring container's work_limit, not by @budget), then splits @budget
 * fairly across the Rx rings.  Returns @budget while work remains so
 * the stack keeps polling; on completion, reports the work done to
 * NAPI and re-enables the vector's interrupt.
 **/
static int fm10k_poll(struct napi_struct *napi, int budget)
{
	struct fm10k_q_vector *q_vector =
			       container_of(napi, struct fm10k_q_vector, napi);
	struct fm10k_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	/* Tx is cleaned even for a zero budget so netpoll can reclaim
	 * completed descriptors.
	 */
	fm10k_for_each_ring(ring, q_vector->tx) {
		if (!fm10k_clean_tx_irq(q_vector, ring, budget))
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		return budget;

	/* attempt to distribute budget to each queue fairly, but don't
	 * allow the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget / q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	fm10k_for_each_ring(ring, q_vector->rx) {
		int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);

		work_done += work;
		/* a ring that exhausted its share may still have packets */
		if (work >= per_ring_budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		fm10k_qv_enable(q_vector);

	/* NAPI contract: a completed poll must report less than budget */
	return min(work_done, budget - 1);
}
14718c2ecf20Sopenharmony_ci
14728c2ecf20Sopenharmony_ci/**
14738c2ecf20Sopenharmony_ci * fm10k_set_qos_queues: Allocate queues for a QOS-enabled device
14748c2ecf20Sopenharmony_ci * @interface: board private structure to initialize
14758c2ecf20Sopenharmony_ci *
14768c2ecf20Sopenharmony_ci * When QoS (Quality of Service) is enabled, allocate queues for
14778c2ecf20Sopenharmony_ci * each traffic class.  If multiqueue isn't available,then abort QoS
14788c2ecf20Sopenharmony_ci * initialization.
14798c2ecf20Sopenharmony_ci *
14808c2ecf20Sopenharmony_ci * This function handles all combinations of Qos and RSS.
14818c2ecf20Sopenharmony_ci *
14828c2ecf20Sopenharmony_ci **/
14838c2ecf20Sopenharmony_cistatic bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
14848c2ecf20Sopenharmony_ci{
14858c2ecf20Sopenharmony_ci	struct net_device *dev = interface->netdev;
14868c2ecf20Sopenharmony_ci	struct fm10k_ring_feature *f;
14878c2ecf20Sopenharmony_ci	int rss_i, i;
14888c2ecf20Sopenharmony_ci	int pcs;
14898c2ecf20Sopenharmony_ci
14908c2ecf20Sopenharmony_ci	/* Map queue offset and counts onto allocated tx queues */
14918c2ecf20Sopenharmony_ci	pcs = netdev_get_num_tc(dev);
14928c2ecf20Sopenharmony_ci
14938c2ecf20Sopenharmony_ci	if (pcs <= 1)
14948c2ecf20Sopenharmony_ci		return false;
14958c2ecf20Sopenharmony_ci
14968c2ecf20Sopenharmony_ci	/* set QoS mask and indices */
14978c2ecf20Sopenharmony_ci	f = &interface->ring_feature[RING_F_QOS];
14988c2ecf20Sopenharmony_ci	f->indices = pcs;
14998c2ecf20Sopenharmony_ci	f->mask = BIT(fls(pcs - 1)) - 1;
15008c2ecf20Sopenharmony_ci
15018c2ecf20Sopenharmony_ci	/* determine the upper limit for our current DCB mode */
15028c2ecf20Sopenharmony_ci	rss_i = interface->hw.mac.max_queues / pcs;
15038c2ecf20Sopenharmony_ci	rss_i = BIT(fls(rss_i) - 1);
15048c2ecf20Sopenharmony_ci
15058c2ecf20Sopenharmony_ci	/* set RSS mask and indices */
15068c2ecf20Sopenharmony_ci	f = &interface->ring_feature[RING_F_RSS];
15078c2ecf20Sopenharmony_ci	rss_i = min_t(u16, rss_i, f->limit);
15088c2ecf20Sopenharmony_ci	f->indices = rss_i;
15098c2ecf20Sopenharmony_ci	f->mask = BIT(fls(rss_i - 1)) - 1;
15108c2ecf20Sopenharmony_ci
15118c2ecf20Sopenharmony_ci	/* configure pause class to queue mapping */
15128c2ecf20Sopenharmony_ci	for (i = 0; i < pcs; i++)
15138c2ecf20Sopenharmony_ci		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
15148c2ecf20Sopenharmony_ci
15158c2ecf20Sopenharmony_ci	interface->num_rx_queues = rss_i * pcs;
15168c2ecf20Sopenharmony_ci	interface->num_tx_queues = rss_i * pcs;
15178c2ecf20Sopenharmony_ci
15188c2ecf20Sopenharmony_ci	return true;
15198c2ecf20Sopenharmony_ci}
15208c2ecf20Sopenharmony_ci
15218c2ecf20Sopenharmony_ci/**
15228c2ecf20Sopenharmony_ci * fm10k_set_rss_queues: Allocate queues for RSS
15238c2ecf20Sopenharmony_ci * @interface: board private structure to initialize
15248c2ecf20Sopenharmony_ci *
15258c2ecf20Sopenharmony_ci * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
15268c2ecf20Sopenharmony_ci * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
15278c2ecf20Sopenharmony_ci *
15288c2ecf20Sopenharmony_ci **/
15298c2ecf20Sopenharmony_cistatic bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
15308c2ecf20Sopenharmony_ci{
15318c2ecf20Sopenharmony_ci	struct fm10k_ring_feature *f;
15328c2ecf20Sopenharmony_ci	u16 rss_i;
15338c2ecf20Sopenharmony_ci
15348c2ecf20Sopenharmony_ci	f = &interface->ring_feature[RING_F_RSS];
15358c2ecf20Sopenharmony_ci	rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit);
15368c2ecf20Sopenharmony_ci
15378c2ecf20Sopenharmony_ci	/* record indices and power of 2 mask for RSS */
15388c2ecf20Sopenharmony_ci	f->indices = rss_i;
15398c2ecf20Sopenharmony_ci	f->mask = BIT(fls(rss_i - 1)) - 1;
15408c2ecf20Sopenharmony_ci
15418c2ecf20Sopenharmony_ci	interface->num_rx_queues = rss_i;
15428c2ecf20Sopenharmony_ci	interface->num_tx_queues = rss_i;
15438c2ecf20Sopenharmony_ci
15448c2ecf20Sopenharmony_ci	return true;
15458c2ecf20Sopenharmony_ci}
15468c2ecf20Sopenharmony_ci
/**
 * fm10k_set_num_queues: Allocate queues for device, feature dependent
 * @interface: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fall through conditions.
 *
 **/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
{
	/* Try QoS + RSS first; without QoS, fall back to plain RSS */
	if (!fm10k_set_qos_queues(interface))
		fm10k_set_rss_queues(interface);
}
15678c2ecf20Sopenharmony_ci
15688c2ecf20Sopenharmony_ci/**
15698c2ecf20Sopenharmony_ci * fm10k_reset_num_queues - Reset the number of queues to zero
15708c2ecf20Sopenharmony_ci * @interface: board private structure
15718c2ecf20Sopenharmony_ci *
15728c2ecf20Sopenharmony_ci * This function should be called whenever we need to reset the number of
15738c2ecf20Sopenharmony_ci * queues after an error condition.
15748c2ecf20Sopenharmony_ci */
15758c2ecf20Sopenharmony_cistatic void fm10k_reset_num_queues(struct fm10k_intfc *interface)
15768c2ecf20Sopenharmony_ci{
15778c2ecf20Sopenharmony_ci	interface->num_tx_queues = 0;
15788c2ecf20Sopenharmony_ci	interface->num_rx_queues = 0;
15798c2ecf20Sopenharmony_ci	interface->num_q_vectors = 0;
15808c2ecf20Sopenharmony_ci}
15818c2ecf20Sopenharmony_ci
15828c2ecf20Sopenharmony_ci/**
15838c2ecf20Sopenharmony_ci * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
15848c2ecf20Sopenharmony_ci * @interface: board private structure to initialize
15858c2ecf20Sopenharmony_ci * @v_count: q_vectors allocated on interface, used for ring interleaving
15868c2ecf20Sopenharmony_ci * @v_idx: index of vector in interface struct
15878c2ecf20Sopenharmony_ci * @txr_count: total number of Tx rings to allocate
15888c2ecf20Sopenharmony_ci * @txr_idx: index of first Tx ring to allocate
15898c2ecf20Sopenharmony_ci * @rxr_count: total number of Rx rings to allocate
15908c2ecf20Sopenharmony_ci * @rxr_idx: index of first Rx ring to allocate
15918c2ecf20Sopenharmony_ci *
15928c2ecf20Sopenharmony_ci * We allocate one q_vector.  If allocation fails we return -ENOMEM.
15938c2ecf20Sopenharmony_ci **/
15948c2ecf20Sopenharmony_cistatic int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
15958c2ecf20Sopenharmony_ci				unsigned int v_count, unsigned int v_idx,
15968c2ecf20Sopenharmony_ci				unsigned int txr_count, unsigned int txr_idx,
15978c2ecf20Sopenharmony_ci				unsigned int rxr_count, unsigned int rxr_idx)
15988c2ecf20Sopenharmony_ci{
15998c2ecf20Sopenharmony_ci	struct fm10k_q_vector *q_vector;
16008c2ecf20Sopenharmony_ci	struct fm10k_ring *ring;
16018c2ecf20Sopenharmony_ci	int ring_count;
16028c2ecf20Sopenharmony_ci
16038c2ecf20Sopenharmony_ci	ring_count = txr_count + rxr_count;
16048c2ecf20Sopenharmony_ci
16058c2ecf20Sopenharmony_ci	/* allocate q_vector and rings */
16068c2ecf20Sopenharmony_ci	q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);
16078c2ecf20Sopenharmony_ci	if (!q_vector)
16088c2ecf20Sopenharmony_ci		return -ENOMEM;
16098c2ecf20Sopenharmony_ci
16108c2ecf20Sopenharmony_ci	/* initialize NAPI */
16118c2ecf20Sopenharmony_ci	netif_napi_add(interface->netdev, &q_vector->napi,
16128c2ecf20Sopenharmony_ci		       fm10k_poll, NAPI_POLL_WEIGHT);
16138c2ecf20Sopenharmony_ci
16148c2ecf20Sopenharmony_ci	/* tie q_vector and interface together */
16158c2ecf20Sopenharmony_ci	interface->q_vector[v_idx] = q_vector;
16168c2ecf20Sopenharmony_ci	q_vector->interface = interface;
16178c2ecf20Sopenharmony_ci	q_vector->v_idx = v_idx;
16188c2ecf20Sopenharmony_ci
16198c2ecf20Sopenharmony_ci	/* initialize pointer to rings */
16208c2ecf20Sopenharmony_ci	ring = q_vector->ring;
16218c2ecf20Sopenharmony_ci
16228c2ecf20Sopenharmony_ci	/* save Tx ring container info */
16238c2ecf20Sopenharmony_ci	q_vector->tx.ring = ring;
16248c2ecf20Sopenharmony_ci	q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
16258c2ecf20Sopenharmony_ci	q_vector->tx.itr = interface->tx_itr;
16268c2ecf20Sopenharmony_ci	q_vector->tx.itr_scale = interface->hw.mac.itr_scale;
16278c2ecf20Sopenharmony_ci	q_vector->tx.count = txr_count;
16288c2ecf20Sopenharmony_ci
16298c2ecf20Sopenharmony_ci	while (txr_count) {
16308c2ecf20Sopenharmony_ci		/* assign generic ring traits */
16318c2ecf20Sopenharmony_ci		ring->dev = &interface->pdev->dev;
16328c2ecf20Sopenharmony_ci		ring->netdev = interface->netdev;
16338c2ecf20Sopenharmony_ci
16348c2ecf20Sopenharmony_ci		/* configure backlink on ring */
16358c2ecf20Sopenharmony_ci		ring->q_vector = q_vector;
16368c2ecf20Sopenharmony_ci
16378c2ecf20Sopenharmony_ci		/* apply Tx specific ring traits */
16388c2ecf20Sopenharmony_ci		ring->count = interface->tx_ring_count;
16398c2ecf20Sopenharmony_ci		ring->queue_index = txr_idx;
16408c2ecf20Sopenharmony_ci
16418c2ecf20Sopenharmony_ci		/* assign ring to interface */
16428c2ecf20Sopenharmony_ci		interface->tx_ring[txr_idx] = ring;
16438c2ecf20Sopenharmony_ci
16448c2ecf20Sopenharmony_ci		/* update count and index */
16458c2ecf20Sopenharmony_ci		txr_count--;
16468c2ecf20Sopenharmony_ci		txr_idx += v_count;
16478c2ecf20Sopenharmony_ci
16488c2ecf20Sopenharmony_ci		/* push pointer to next ring */
16498c2ecf20Sopenharmony_ci		ring++;
16508c2ecf20Sopenharmony_ci	}
16518c2ecf20Sopenharmony_ci
16528c2ecf20Sopenharmony_ci	/* save Rx ring container info */
16538c2ecf20Sopenharmony_ci	q_vector->rx.ring = ring;
16548c2ecf20Sopenharmony_ci	q_vector->rx.itr = interface->rx_itr;
16558c2ecf20Sopenharmony_ci	q_vector->rx.itr_scale = interface->hw.mac.itr_scale;
16568c2ecf20Sopenharmony_ci	q_vector->rx.count = rxr_count;
16578c2ecf20Sopenharmony_ci
16588c2ecf20Sopenharmony_ci	while (rxr_count) {
16598c2ecf20Sopenharmony_ci		/* assign generic ring traits */
16608c2ecf20Sopenharmony_ci		ring->dev = &interface->pdev->dev;
16618c2ecf20Sopenharmony_ci		ring->netdev = interface->netdev;
16628c2ecf20Sopenharmony_ci		rcu_assign_pointer(ring->l2_accel, interface->l2_accel);
16638c2ecf20Sopenharmony_ci
16648c2ecf20Sopenharmony_ci		/* configure backlink on ring */
16658c2ecf20Sopenharmony_ci		ring->q_vector = q_vector;
16668c2ecf20Sopenharmony_ci
16678c2ecf20Sopenharmony_ci		/* apply Rx specific ring traits */
16688c2ecf20Sopenharmony_ci		ring->count = interface->rx_ring_count;
16698c2ecf20Sopenharmony_ci		ring->queue_index = rxr_idx;
16708c2ecf20Sopenharmony_ci
16718c2ecf20Sopenharmony_ci		/* assign ring to interface */
16728c2ecf20Sopenharmony_ci		interface->rx_ring[rxr_idx] = ring;
16738c2ecf20Sopenharmony_ci
16748c2ecf20Sopenharmony_ci		/* update count and index */
16758c2ecf20Sopenharmony_ci		rxr_count--;
16768c2ecf20Sopenharmony_ci		rxr_idx += v_count;
16778c2ecf20Sopenharmony_ci
16788c2ecf20Sopenharmony_ci		/* push pointer to next ring */
16798c2ecf20Sopenharmony_ci		ring++;
16808c2ecf20Sopenharmony_ci	}
16818c2ecf20Sopenharmony_ci
16828c2ecf20Sopenharmony_ci	fm10k_dbg_q_vector_init(q_vector);
16838c2ecf20Sopenharmony_ci
16848c2ecf20Sopenharmony_ci	return 0;
16858c2ecf20Sopenharmony_ci}
16868c2ecf20Sopenharmony_ci
16878c2ecf20Sopenharmony_ci/**
16888c2ecf20Sopenharmony_ci * fm10k_free_q_vector - Free memory allocated for specific interrupt vector
16898c2ecf20Sopenharmony_ci * @interface: board private structure to initialize
16908c2ecf20Sopenharmony_ci * @v_idx: Index of vector to be freed
16918c2ecf20Sopenharmony_ci *
16928c2ecf20Sopenharmony_ci * This function frees the memory allocated to the q_vector.  In addition if
16938c2ecf20Sopenharmony_ci * NAPI is enabled it will delete any references to the NAPI struct prior
16948c2ecf20Sopenharmony_ci * to freeing the q_vector.
16958c2ecf20Sopenharmony_ci **/
16968c2ecf20Sopenharmony_cistatic void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
16978c2ecf20Sopenharmony_ci{
16988c2ecf20Sopenharmony_ci	struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
16998c2ecf20Sopenharmony_ci	struct fm10k_ring *ring;
17008c2ecf20Sopenharmony_ci
17018c2ecf20Sopenharmony_ci	fm10k_dbg_q_vector_exit(q_vector);
17028c2ecf20Sopenharmony_ci
17038c2ecf20Sopenharmony_ci	fm10k_for_each_ring(ring, q_vector->tx)
17048c2ecf20Sopenharmony_ci		interface->tx_ring[ring->queue_index] = NULL;
17058c2ecf20Sopenharmony_ci
17068c2ecf20Sopenharmony_ci	fm10k_for_each_ring(ring, q_vector->rx)
17078c2ecf20Sopenharmony_ci		interface->rx_ring[ring->queue_index] = NULL;
17088c2ecf20Sopenharmony_ci
17098c2ecf20Sopenharmony_ci	interface->q_vector[v_idx] = NULL;
17108c2ecf20Sopenharmony_ci	netif_napi_del(&q_vector->napi);
17118c2ecf20Sopenharmony_ci	kfree_rcu(q_vector, rcu);
17128c2ecf20Sopenharmony_ci}
17138c2ecf20Sopenharmony_ci
17148c2ecf20Sopenharmony_ci/**
17158c2ecf20Sopenharmony_ci * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors
17168c2ecf20Sopenharmony_ci * @interface: board private structure to initialize
17178c2ecf20Sopenharmony_ci *
17188c2ecf20Sopenharmony_ci * We allocate one q_vector per queue interrupt.  If allocation fails we
17198c2ecf20Sopenharmony_ci * return -ENOMEM.
17208c2ecf20Sopenharmony_ci **/
17218c2ecf20Sopenharmony_cistatic int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
17228c2ecf20Sopenharmony_ci{
17238c2ecf20Sopenharmony_ci	unsigned int q_vectors = interface->num_q_vectors;
17248c2ecf20Sopenharmony_ci	unsigned int rxr_remaining = interface->num_rx_queues;
17258c2ecf20Sopenharmony_ci	unsigned int txr_remaining = interface->num_tx_queues;
17268c2ecf20Sopenharmony_ci	unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
17278c2ecf20Sopenharmony_ci	int err;
17288c2ecf20Sopenharmony_ci
17298c2ecf20Sopenharmony_ci	if (q_vectors >= (rxr_remaining + txr_remaining)) {
17308c2ecf20Sopenharmony_ci		for (; rxr_remaining; v_idx++) {
17318c2ecf20Sopenharmony_ci			err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
17328c2ecf20Sopenharmony_ci						   0, 0, 1, rxr_idx);
17338c2ecf20Sopenharmony_ci			if (err)
17348c2ecf20Sopenharmony_ci				goto err_out;
17358c2ecf20Sopenharmony_ci
17368c2ecf20Sopenharmony_ci			/* update counts and index */
17378c2ecf20Sopenharmony_ci			rxr_remaining--;
17388c2ecf20Sopenharmony_ci			rxr_idx++;
17398c2ecf20Sopenharmony_ci		}
17408c2ecf20Sopenharmony_ci	}
17418c2ecf20Sopenharmony_ci
17428c2ecf20Sopenharmony_ci	for (; v_idx < q_vectors; v_idx++) {
17438c2ecf20Sopenharmony_ci		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
17448c2ecf20Sopenharmony_ci		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
17458c2ecf20Sopenharmony_ci
17468c2ecf20Sopenharmony_ci		err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
17478c2ecf20Sopenharmony_ci					   tqpv, txr_idx,
17488c2ecf20Sopenharmony_ci					   rqpv, rxr_idx);
17498c2ecf20Sopenharmony_ci
17508c2ecf20Sopenharmony_ci		if (err)
17518c2ecf20Sopenharmony_ci			goto err_out;
17528c2ecf20Sopenharmony_ci
17538c2ecf20Sopenharmony_ci		/* update counts and index */
17548c2ecf20Sopenharmony_ci		rxr_remaining -= rqpv;
17558c2ecf20Sopenharmony_ci		txr_remaining -= tqpv;
17568c2ecf20Sopenharmony_ci		rxr_idx++;
17578c2ecf20Sopenharmony_ci		txr_idx++;
17588c2ecf20Sopenharmony_ci	}
17598c2ecf20Sopenharmony_ci
17608c2ecf20Sopenharmony_ci	return 0;
17618c2ecf20Sopenharmony_ci
17628c2ecf20Sopenharmony_cierr_out:
17638c2ecf20Sopenharmony_ci	fm10k_reset_num_queues(interface);
17648c2ecf20Sopenharmony_ci
17658c2ecf20Sopenharmony_ci	while (v_idx--)
17668c2ecf20Sopenharmony_ci		fm10k_free_q_vector(interface, v_idx);
17678c2ecf20Sopenharmony_ci
17688c2ecf20Sopenharmony_ci	return -ENOMEM;
17698c2ecf20Sopenharmony_ci}
17708c2ecf20Sopenharmony_ci
17718c2ecf20Sopenharmony_ci/**
17728c2ecf20Sopenharmony_ci * fm10k_free_q_vectors - Free memory allocated for interrupt vectors
17738c2ecf20Sopenharmony_ci * @interface: board private structure to initialize
17748c2ecf20Sopenharmony_ci *
17758c2ecf20Sopenharmony_ci * This function frees the memory allocated to the q_vectors.  In addition if
17768c2ecf20Sopenharmony_ci * NAPI is enabled it will delete any references to the NAPI struct prior
17778c2ecf20Sopenharmony_ci * to freeing the q_vector.
17788c2ecf20Sopenharmony_ci **/
17798c2ecf20Sopenharmony_cistatic void fm10k_free_q_vectors(struct fm10k_intfc *interface)
17808c2ecf20Sopenharmony_ci{
17818c2ecf20Sopenharmony_ci	int v_idx = interface->num_q_vectors;
17828c2ecf20Sopenharmony_ci
17838c2ecf20Sopenharmony_ci	fm10k_reset_num_queues(interface);
17848c2ecf20Sopenharmony_ci
17858c2ecf20Sopenharmony_ci	while (v_idx--)
17868c2ecf20Sopenharmony_ci		fm10k_free_q_vector(interface, v_idx);
17878c2ecf20Sopenharmony_ci}
17888c2ecf20Sopenharmony_ci
/**
 * fm10k_reset_msix_capability - reset MSI-X capability
 * @interface: board private structure to initialize
 *
 * Reset the MSI-X capability back to its starting state
 **/
static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
{
	/* release the device's MSI-X vectors and free the entry table */
	pci_disable_msix(interface->pdev);
	kfree(interface->msix_entries);
	interface->msix_entries = NULL;
}
18018c2ecf20Sopenharmony_ci
/**
 * fm10k_init_msix_capability - configure MSI-X capability
 * @interface: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
18098c2ecf20Sopenharmony_cistatic int fm10k_init_msix_capability(struct fm10k_intfc *interface)
18108c2ecf20Sopenharmony_ci{
18118c2ecf20Sopenharmony_ci	struct fm10k_hw *hw = &interface->hw;
18128c2ecf20Sopenharmony_ci	int v_budget, vector;
18138c2ecf20Sopenharmony_ci
18148c2ecf20Sopenharmony_ci	/* It's easy to be greedy for MSI-X vectors, but it really
18158c2ecf20Sopenharmony_ci	 * doesn't do us much good if we have a lot more vectors
18168c2ecf20Sopenharmony_ci	 * than CPU's.  So let's be conservative and only ask for
18178c2ecf20Sopenharmony_ci	 * (roughly) the same number of vectors as there are CPU's.
18188c2ecf20Sopenharmony_ci	 * the default is to use pairs of vectors
18198c2ecf20Sopenharmony_ci	 */
18208c2ecf20Sopenharmony_ci	v_budget = max(interface->num_rx_queues, interface->num_tx_queues);
18218c2ecf20Sopenharmony_ci	v_budget = min_t(u16, v_budget, num_online_cpus());
18228c2ecf20Sopenharmony_ci
18238c2ecf20Sopenharmony_ci	/* account for vectors not related to queues */
18248c2ecf20Sopenharmony_ci	v_budget += NON_Q_VECTORS;
18258c2ecf20Sopenharmony_ci
18268c2ecf20Sopenharmony_ci	/* At the same time, hardware can only support a maximum of
18278c2ecf20Sopenharmony_ci	 * hw.mac->max_msix_vectors vectors.  With features
18288c2ecf20Sopenharmony_ci	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
18298c2ecf20Sopenharmony_ci	 * descriptor queues supported by our device.  Thus, we cap it off in
18308c2ecf20Sopenharmony_ci	 * those rare cases where the cpu count also exceeds our vector limit.
18318c2ecf20Sopenharmony_ci	 */
18328c2ecf20Sopenharmony_ci	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
18338c2ecf20Sopenharmony_ci
18348c2ecf20Sopenharmony_ci	/* A failure in MSI-X entry allocation is fatal. */
18358c2ecf20Sopenharmony_ci	interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
18368c2ecf20Sopenharmony_ci					  GFP_KERNEL);
18378c2ecf20Sopenharmony_ci	if (!interface->msix_entries)
18388c2ecf20Sopenharmony_ci		return -ENOMEM;
18398c2ecf20Sopenharmony_ci
18408c2ecf20Sopenharmony_ci	/* populate entry values */
18418c2ecf20Sopenharmony_ci	for (vector = 0; vector < v_budget; vector++)
18428c2ecf20Sopenharmony_ci		interface->msix_entries[vector].entry = vector;
18438c2ecf20Sopenharmony_ci
18448c2ecf20Sopenharmony_ci	/* Attempt to enable MSI-X with requested value */
18458c2ecf20Sopenharmony_ci	v_budget = pci_enable_msix_range(interface->pdev,
18468c2ecf20Sopenharmony_ci					 interface->msix_entries,
18478c2ecf20Sopenharmony_ci					 MIN_MSIX_COUNT(hw),
18488c2ecf20Sopenharmony_ci					 v_budget);
18498c2ecf20Sopenharmony_ci	if (v_budget < 0) {
18508c2ecf20Sopenharmony_ci		kfree(interface->msix_entries);
18518c2ecf20Sopenharmony_ci		interface->msix_entries = NULL;
18528c2ecf20Sopenharmony_ci		return v_budget;
18538c2ecf20Sopenharmony_ci	}
18548c2ecf20Sopenharmony_ci
18558c2ecf20Sopenharmony_ci	/* record the number of queues available for q_vectors */
18568c2ecf20Sopenharmony_ci	interface->num_q_vectors = v_budget - NON_Q_VECTORS;
18578c2ecf20Sopenharmony_ci
18588c2ecf20Sopenharmony_ci	return 0;
18598c2ecf20Sopenharmony_ci}
18608c2ecf20Sopenharmony_ci
18618c2ecf20Sopenharmony_ci/**
18628c2ecf20Sopenharmony_ci * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS
18638c2ecf20Sopenharmony_ci * @interface: Interface structure continaining rings and devices
18648c2ecf20Sopenharmony_ci *
18658c2ecf20Sopenharmony_ci * Cache the descriptor ring offsets for Qos
18668c2ecf20Sopenharmony_ci **/
18678c2ecf20Sopenharmony_cistatic bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
18688c2ecf20Sopenharmony_ci{
18698c2ecf20Sopenharmony_ci	struct net_device *dev = interface->netdev;
18708c2ecf20Sopenharmony_ci	int pc, offset, rss_i, i;
18718c2ecf20Sopenharmony_ci	u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
18728c2ecf20Sopenharmony_ci	u8 num_pcs = netdev_get_num_tc(dev);
18738c2ecf20Sopenharmony_ci
18748c2ecf20Sopenharmony_ci	if (num_pcs <= 1)
18758c2ecf20Sopenharmony_ci		return false;
18768c2ecf20Sopenharmony_ci
18778c2ecf20Sopenharmony_ci	rss_i = interface->ring_feature[RING_F_RSS].indices;
18788c2ecf20Sopenharmony_ci
18798c2ecf20Sopenharmony_ci	for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
18808c2ecf20Sopenharmony_ci		int q_idx = pc;
18818c2ecf20Sopenharmony_ci
18828c2ecf20Sopenharmony_ci		for (i = 0; i < rss_i; i++) {
18838c2ecf20Sopenharmony_ci			interface->tx_ring[offset + i]->reg_idx = q_idx;
18848c2ecf20Sopenharmony_ci			interface->tx_ring[offset + i]->qos_pc = pc;
18858c2ecf20Sopenharmony_ci			interface->rx_ring[offset + i]->reg_idx = q_idx;
18868c2ecf20Sopenharmony_ci			interface->rx_ring[offset + i]->qos_pc = pc;
18878c2ecf20Sopenharmony_ci			q_idx += pc_stride;
18888c2ecf20Sopenharmony_ci		}
18898c2ecf20Sopenharmony_ci	}
18908c2ecf20Sopenharmony_ci
18918c2ecf20Sopenharmony_ci	return true;
18928c2ecf20Sopenharmony_ci}
18938c2ecf20Sopenharmony_ci
18948c2ecf20Sopenharmony_ci/**
18958c2ecf20Sopenharmony_ci * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS
18968c2ecf20Sopenharmony_ci * @interface: Interface structure continaining rings and devices
18978c2ecf20Sopenharmony_ci *
18988c2ecf20Sopenharmony_ci * Cache the descriptor ring offsets for RSS
18998c2ecf20Sopenharmony_ci **/
19008c2ecf20Sopenharmony_cistatic void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
19018c2ecf20Sopenharmony_ci{
19028c2ecf20Sopenharmony_ci	int i;
19038c2ecf20Sopenharmony_ci
19048c2ecf20Sopenharmony_ci	for (i = 0; i < interface->num_rx_queues; i++)
19058c2ecf20Sopenharmony_ci		interface->rx_ring[i]->reg_idx = i;
19068c2ecf20Sopenharmony_ci
19078c2ecf20Sopenharmony_ci	for (i = 0; i < interface->num_tx_queues; i++)
19088c2ecf20Sopenharmony_ci		interface->tx_ring[i]->reg_idx = i;
19098c2ecf20Sopenharmony_ci}
19108c2ecf20Sopenharmony_ci
/**
 * fm10k_assign_rings - Map rings to network devices
 * @interface: Interface structure containing rings and devices
 *
 * This function is meant to go though and configure both the network
 * devices so that they contain rings, and configure the rings so that
 * they function with their network devices.
 **/
static void fm10k_assign_rings(struct fm10k_intfc *interface)
{
	/* a QoS (multi-TC) mapping takes precedence; fall back to RSS */
	if (!fm10k_cache_ring_qos(interface))
		fm10k_cache_ring_rss(interface);
}
19268c2ecf20Sopenharmony_ci
19278c2ecf20Sopenharmony_cistatic void fm10k_init_reta(struct fm10k_intfc *interface)
19288c2ecf20Sopenharmony_ci{
19298c2ecf20Sopenharmony_ci	u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
19308c2ecf20Sopenharmony_ci	u32 reta;
19318c2ecf20Sopenharmony_ci
19328c2ecf20Sopenharmony_ci	/* If the Rx flow indirection table has been configured manually, we
19338c2ecf20Sopenharmony_ci	 * need to maintain it when possible.
19348c2ecf20Sopenharmony_ci	 */
19358c2ecf20Sopenharmony_ci	if (netif_is_rxfh_configured(interface->netdev)) {
19368c2ecf20Sopenharmony_ci		for (i = FM10K_RETA_SIZE; i--;) {
19378c2ecf20Sopenharmony_ci			reta = interface->reta[i];
19388c2ecf20Sopenharmony_ci			if ((((reta << 24) >> 24) < rss_i) &&
19398c2ecf20Sopenharmony_ci			    (((reta << 16) >> 24) < rss_i) &&
19408c2ecf20Sopenharmony_ci			    (((reta <<  8) >> 24) < rss_i) &&
19418c2ecf20Sopenharmony_ci			    (((reta)       >> 24) < rss_i))
19428c2ecf20Sopenharmony_ci				continue;
19438c2ecf20Sopenharmony_ci
19448c2ecf20Sopenharmony_ci			/* this should never happen */
19458c2ecf20Sopenharmony_ci			dev_err(&interface->pdev->dev,
19468c2ecf20Sopenharmony_ci				"RSS indirection table assigned flows out of queue bounds. Reconfiguring.\n");
19478c2ecf20Sopenharmony_ci			goto repopulate_reta;
19488c2ecf20Sopenharmony_ci		}
19498c2ecf20Sopenharmony_ci
19508c2ecf20Sopenharmony_ci		/* do nothing if all of the elements are in bounds */
19518c2ecf20Sopenharmony_ci		return;
19528c2ecf20Sopenharmony_ci	}
19538c2ecf20Sopenharmony_ci
19548c2ecf20Sopenharmony_cirepopulate_reta:
19558c2ecf20Sopenharmony_ci	fm10k_write_reta(interface, NULL);
19568c2ecf20Sopenharmony_ci}
19578c2ecf20Sopenharmony_ci
/**
 * fm10k_init_queueing_scheme - Determine proper queueing scheme
 * @interface: board private structure to initialize
 *
 * We determine which queueing scheme to use based on...
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 *
 * Return: 0 on success, or a negative errno when MSI-X setup or q_vector
 * allocation fails; all partially acquired state is unwound on error.
 **/
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
{
	int err;

	/* Number of supported queues */
	fm10k_set_num_queues(interface);

	/* Configure MSI-X capability */
	err = fm10k_init_msix_capability(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"Unable to initialize MSI-X capability\n");
		goto err_init_msix;
	}

	/* Allocate memory for queues */
	err = fm10k_alloc_q_vectors(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"Unable to allocate queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* Map rings to devices, and map devices to physical queues */
	fm10k_assign_rings(interface);

	/* Initialize RSS redirection table */
	fm10k_init_reta(interface);

	return 0;

	/* unwind in reverse order of acquisition */
err_alloc_q_vectors:
	fm10k_reset_msix_capability(interface);
err_init_msix:
	fm10k_reset_num_queues(interface);
	return err;
}
20038c2ecf20Sopenharmony_ci
/**
 * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings
 * @interface: board private structure to clear queueing scheme on
 *
 * We go through and clear queueing specific resources and reset the structure
 * to pre-load conditions
 **/
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface)
{
	/* free q_vectors before tearing down the MSI-X entries they map to */
	fm10k_free_q_vectors(interface);
	fm10k_reset_msix_capability(interface);
}
2016