// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EF4_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EF4_RECYCLE_RING_SIZE_IOMMU 4096
#define EF4_RECYCLE_RING_SIZE_NOIOMMU (2 * EF4_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EF4_SKB_HEADERS 128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EF4_RX_MAX_FRAGS DIV_ROUND_UP(EF4_MAX_FRAME_LEN(EF4_MAX_MTU), \
				      EF4_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EF4_RXD_HEAD_ROOM (1 + EF4_RX_MAX_FRAGS)

static inline u8 *ef4_rx_buf_va(struct ef4_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

static inline u32 ef4_rx_buf_hash(struct ef4_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

static inline struct ef4_rx_buffer *
ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return ef4_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

static inline void ef4_sync_rx_buffer(struct ef4_nic *efx,
				      struct ef4_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

void ef4_rx_config_page_split(struct ef4_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
				      EF4_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct ef4_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EF4_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}
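
/* Worked example of the split above (illustrative only; real values
 * depend on platform and MTU): with 4KiB pages, rx_buffer_order == 0
 * and a buffer step of about 1.8KiB for a 1500-byte MTU, two buffers
 * fit in each page after the struct ef4_rx_page_state header,
 * rx_buffer_truesize is 4096 / 2 == 2048, and the preferred batch of
 * 8 buffers maps to DIV_ROUND_UP(8, 2) == 4 pages.
 */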

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct page *page;
	struct ef4_rx_page_state *state;
	unsigned index;

	if (unlikely(!rx_queue->page_ring))
		return NULL;
	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/**
 * ef4_init_rx_buffers - create a batch of page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 * @atomic:		control memory allocation flags
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct ef4_rx_buffer for each one. Returns a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct ef4_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = ef4_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ?
					    GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct ef4_rx_page_state);
		page_offset = sizeof(struct ef4_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = ef4_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EF4_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}
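
/* Illustrative layout of a page filled by ef4_init_rx_buffers() (not
 * to scale; exact offsets depend on rx_page_buf_step and rx_ip_align):
 *
 *	+--------------------------+----------+-------+----------+-------+
 *	| struct ef4_rx_page_state | ip_align | buf 0 | ip_align | buf 1 |
 *	+--------------------------+----------+-------+----------+-------+
 *
 * Each buffer starts rx_page_buf_step bytes after the previous one,
 * and only the final buffer carries EF4_RX_BUF_LAST_IN_PAGE, so the
 * page is recycled (or unmapped) exactly once.
 */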

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void ef4_unmap_rx_buffer(struct ef4_nic *efx,
				struct ef4_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct ef4_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue,
				struct ef4_rx_buffer *rx_buf,
				unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}
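
/* Recycle-ring bookkeeping, summarised (descriptive note): page_add
 * and page_remove are free-running counters masked by page_ptr_mask,
 * and a NULL slot is free.  Pages enter the ring with their DMA
 * mapping still live, which is what lets ef4_reuse_page() skip
 * dma_map_page() on the fast path.
 */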

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void ef4_recycle_rx_page(struct ef4_channel *channel,
				struct ef4_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
	struct ef4_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	ef4_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
			       struct ef4_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE) {
		ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
		ef4_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void ef4_recycle_rx_pages(struct ef4_channel *channel,
				 struct ef4_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

	if (unlikely(!rx_queue->page_ring))
		return;

	do {
		ef4_recycle_rx_page(channel, rx_buf);
		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

static void ef4_discard_rx_packet(struct ef4_channel *channel,
				  struct ef4_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

	ef4_recycle_rx_pages(channel, rx_buf, n_frags);

	ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

/**
 * ef4_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * @atomic:		control memory allocation flags
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EF4_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EF4_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   ef4_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = ef4_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				ef4_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", ef4_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		ef4_nic_notify_rx_desc(rx_queue);
}
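
/* Refill arithmetic, by way of example (illustrative; actual values
 * are configuration-dependent): with two buffers per page and four
 * pages per batch, batch_size is 8, so a queue sitting 100
 * descriptors below max_fill is topped up in batches of 8 until less
 * than one batch of space remains, and the NIC is notified once at
 * "out".
 */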

void ef4_rx_slow_fill(struct timer_list *t)
{
	struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	ef4_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void ef4_rx_packet__check_len(struct ef4_rx_queue *rx_queue,
				     struct ef4_rx_buffer *rx_buf,
				     int len)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was.
	 */
	rx_buf->flags |= EF4_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EF4_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  ef4_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		ef4_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  ef4_rx_queue_index(rx_queue), len, max_len);
	}

	ef4_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	struct ef4_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct ef4_rx_queue *rx_queue;

		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, ef4_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	skb->ip_summed = ((rx_buf->flags & EF4_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}
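
/* Delivery-path note (summary of the selection made in
 * __ef4_rx_packet() below): packets the hardware marked as TCP take
 * the frag-based GRO path above; everything else is rebuilt into a
 * full skb by ef4_rx_mk_skb() and handed to netif_receive_skb().
 */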

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *ef4_rx_mk_skb(struct ef4_channel *channel,
				     struct ef4_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct ef4_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EF4_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}
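
/* Header/payload split, summarised (descriptive note): hdr_len is
 * capped at EF4_SKB_HEADERS (128 bytes) by ef4_rx_deliver(), so the
 * skb's linear area holds at most the protocol headers; the rest of
 * the payload stays in page fragments.
 */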

void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = ef4_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EF4_RX_PKT_PREFIX_LEN))
			ef4_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EF4_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EF4_RX_PKT_DISCARD));
		rx_buf->flags |= EF4_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   ef4_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EF4_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EF4_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
		ef4_rx_flush_packet(channel);
		ef4_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EF4_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(ef4_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = ef4_rx_buffer(rx_queue, index);
	ef4_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	ef4_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}
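
/* RX is handled in two halves to hide cache-miss latency (descriptive
 * note): ef4_rx_packet() above validates the completion and issues a
 * prefetch, while __ef4_rx_packet() below runs for the *previous*
 * packet (via ef4_rx_flush_packet()) once its headers are likely in
 * cache.
 */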

static void ef4_rx_deliver(struct ef4_channel *channel, u8 *eh,
			   struct ef4_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EF4_SKB_HEADERS);

	skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct ef4_rx_queue *rx_queue;

		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EF4_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: touches packet payload. */
void __ef4_rx_packet(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	struct ef4_rx_buffer *rx_buf =
		ef4_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = ef4_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct ef4_rx_queue *rx_queue;

		ef4_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb)
		ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EF4_MIN_DMAQ_SIZE);
	EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  ef4_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = ef4_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
				     struct ef4_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	if (!rx_queue->page_ring)
		rx_queue->page_ptr_mask = 0;
	else
		rx_queue->page_ptr_mask = page_ring_size - 1;
}
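
/* Sizing example (illustrative; depends on rx_bufs_per_page): with an
 * IOMMU present, 4096 recycle buffers at two buffers per page round
 * up to a 2048-entry page ring; without an IOMMU only
 * 2 * EF4_RX_PREFERRED_BATCH buffers are kept, i.e. a handful of
 * pages, since remapping is cheap in that case.
 */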

void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", ef4_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	ef4_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EF4_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	ef4_nic_init_rx(rx_queue);
}

void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
{
	int i;
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;

			rx_buf = ef4_rx_buffer(rx_queue, index);
			ef4_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring.  Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct ef4_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", ef4_rx_queue_index(rx_queue));

	ef4_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL
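
/* ef4_filter_rfs() below implements the ndo_rx_flow_steer hook for
 * accelerated RFS (descriptive note): it dissects the packet into an
 * IPv4/IPv6 + TCP/UDP 5-tuple, inserts a matching hardware filter
 * steering the flow to @rxq_index, and records the flow ID so
 * __ef4_filter_rfs_expire() can age the filter out later.
 */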

int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_channel *channel;
	struct ef4_filter_spec spec;
	struct flow_keys fk;
	int rc;

	if (flow_id == RPS_FLOW_ID_INVALID)
		return -EINVAL;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
		return -EPROTONOSUPPORT;

	ef4_filter_init_rx(&spec, EF4_FILTER_PRI_HINT,
			   efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	spec.match_flags =
		EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
		EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
		EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT;
	spec.ether_type = fk.basic.n_proto;
	spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		spec.rem_host[0] = fk.addrs.v4addrs.src;
		spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
		memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
	}

	spec.rem_port = fk.ports.src;
	spec.loc_port = fk.ports.dst;

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	channel = ef4_get_channel(efx, rxq_index);
	channel->rps_flow_id[rc] = flow_id;
	++channel->rfs_filters_added;

	if (spec.ether_type == htons(ETH_P_IP))
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
			   ntohs(spec.loc_port), rxq_index, flow_id, rc);
	else
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
"TCP" : "UDP", 9008c2ecf20Sopenharmony_ci spec.rem_host, ntohs(spec.rem_port), spec.loc_host, 9018c2ecf20Sopenharmony_ci ntohs(spec.loc_port), rxq_index, flow_id, rc); 9028c2ecf20Sopenharmony_ci 9038c2ecf20Sopenharmony_ci return rc; 9048c2ecf20Sopenharmony_ci} 9058c2ecf20Sopenharmony_ci 9068c2ecf20Sopenharmony_cibool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned int quota) 9078c2ecf20Sopenharmony_ci{ 9088c2ecf20Sopenharmony_ci bool (*expire_one)(struct ef4_nic *efx, u32 flow_id, unsigned int index); 9098c2ecf20Sopenharmony_ci unsigned int channel_idx, index, size; 9108c2ecf20Sopenharmony_ci u32 flow_id; 9118c2ecf20Sopenharmony_ci 9128c2ecf20Sopenharmony_ci if (!spin_trylock_bh(&efx->filter_lock)) 9138c2ecf20Sopenharmony_ci return false; 9148c2ecf20Sopenharmony_ci 9158c2ecf20Sopenharmony_ci expire_one = efx->type->filter_rfs_expire_one; 9168c2ecf20Sopenharmony_ci channel_idx = efx->rps_expire_channel; 9178c2ecf20Sopenharmony_ci index = efx->rps_expire_index; 9188c2ecf20Sopenharmony_ci size = efx->type->max_rx_ip_filters; 9198c2ecf20Sopenharmony_ci while (quota--) { 9208c2ecf20Sopenharmony_ci struct ef4_channel *channel = ef4_get_channel(efx, channel_idx); 9218c2ecf20Sopenharmony_ci flow_id = channel->rps_flow_id[index]; 9228c2ecf20Sopenharmony_ci 9238c2ecf20Sopenharmony_ci if (flow_id != RPS_FLOW_ID_INVALID && 9248c2ecf20Sopenharmony_ci expire_one(efx, flow_id, index)) { 9258c2ecf20Sopenharmony_ci netif_info(efx, rx_status, efx->net_dev, 9268c2ecf20Sopenharmony_ci "expired filter %d [queue %u flow %u]\n", 9278c2ecf20Sopenharmony_ci index, channel_idx, flow_id); 9288c2ecf20Sopenharmony_ci channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; 9298c2ecf20Sopenharmony_ci } 9308c2ecf20Sopenharmony_ci if (++index == size) { 9318c2ecf20Sopenharmony_ci if (++channel_idx == efx->n_channels) 9328c2ecf20Sopenharmony_ci channel_idx = 0; 9338c2ecf20Sopenharmony_ci index = 0; 9348c2ecf20Sopenharmony_ci } 9358c2ecf20Sopenharmony_ci } 9368c2ecf20Sopenharmony_ci efx->rps_expire_channel = channel_idx; 9378c2ecf20Sopenharmony_ci efx->rps_expire_index = index; 9388c2ecf20Sopenharmony_ci 9398c2ecf20Sopenharmony_ci spin_unlock_bh(&efx->filter_lock); 9408c2ecf20Sopenharmony_ci return true; 9418c2ecf20Sopenharmony_ci} 9428c2ecf20Sopenharmony_ci 9438c2ecf20Sopenharmony_ci#endif /* CONFIG_RFS_ACCEL */ 9448c2ecf20Sopenharmony_ci 9458c2ecf20Sopenharmony_ci/** 9468c2ecf20Sopenharmony_ci * ef4_filter_is_mc_recipient - test whether spec is a multicast recipient 9478c2ecf20Sopenharmony_ci * @spec: Specification to test 9488c2ecf20Sopenharmony_ci * 9498c2ecf20Sopenharmony_ci * Return: %true if the specification is a non-drop RX filter that 9508c2ecf20Sopenharmony_ci * matches a local MAC address I/G bit value of 1 or matches a local 9518c2ecf20Sopenharmony_ci * IPv4 or IPv6 address value in the respective multicast address 9528c2ecf20Sopenharmony_ci * range. Otherwise %false. 

/**
 * ef4_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec)
{
	if (!(spec->flags & EF4_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) ==
	    (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}