// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/bpf_trace.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include "funeth_txrx.h"
#include "funeth.h"
#include "fun_queue.h"

#define CREATE_TRACE_POINTS
#include "funeth_trace.h"

/* Given the device's max supported MTU and pages of at least 4KB, a packet can
 * be scattered into at most 4 buffers.
 */
#define RX_MAX_FRAGS 4

/* Per packet headroom in non-XDP mode. Present only for 1-frag packets. */
#define FUN_RX_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)

/* We try to reuse pages for our buffers. To avoid frequent page ref writes we
 * take EXTRA_PAGE_REFS references at once and then hand them out one per packet
 * occupying the buffer.
 */
#define EXTRA_PAGE_REFS 1000000
#define MIN_PAGE_REFS 1000

enum {
	FUN_XDP_FLUSH_REDIR = 1,
	FUN_XDP_FLUSH_TX = 2,
};

/* See if a page is running low on refs we are holding and if so take more. */
static void refresh_refs(struct funeth_rxbuf *buf)
{
	if (unlikely(buf->pg_refs < MIN_PAGE_REFS)) {
		buf->pg_refs += EXTRA_PAGE_REFS;
		page_ref_add(buf->page, EXTRA_PAGE_REFS);
	}
}

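/* Worked example of the ref amortization: a freshly allocated page starts
 * with pg_refs = 1 and is immediately topped up to 1000001 by refresh_refs().
 * Each packet occupying the buffer consumes one of these pre-taken refs, so
 * the atomic page refcount is written roughly once per EXTRA_PAGE_REFS
 * packets rather than once per packet.
 */
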
/* Offer a buffer to the Rx buffer cache. The cache will hold the buffer if its
 * page is worth retaining and there's room for it. Otherwise the page is
 * unmapped and our references released.
 */
static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
{
	struct funeth_rx_cache *c = &q->cache;

	if (c->prod_cnt - c->cons_cnt <= c->mask && buf->node == numa_mem_id()) {
		c->bufs[c->prod_cnt & c->mask] = *buf;
		c->prod_cnt++;
	} else {
		dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
				     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		__page_frag_cache_drain(buf->page, buf->pg_refs);
	}
}

/* Get a page from the Rx buffer cache. We only consider the next available
 * page and return it if we own all its references.
 */
static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
{
	struct funeth_rx_cache *c = &q->cache;
	struct funeth_rxbuf *buf;

	if (c->prod_cnt == c->cons_cnt)
		return false;		/* empty cache */

	buf = &c->bufs[c->cons_cnt & c->mask];
	if (page_ref_count(buf->page) == buf->pg_refs) {
		dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
					   PAGE_SIZE, DMA_FROM_DEVICE);
		*rb = *buf;
		buf->page = NULL;
		refresh_refs(rb);
		c->cons_cnt++;
		return true;
	}

	/* Page can't be reused. If the cache is full, drop this page. */
	if (c->prod_cnt - c->cons_cnt > c->mask) {
		dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
				     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		__page_frag_cache_drain(buf->page, buf->pg_refs);
		buf->page = NULL;
		c->cons_cnt++;
	}
	return false;
}

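/* Note on the recycling test above: pg_refs is the number of references this
 * driver still holds on the page, so page_ref_count() == pg_refs means no
 * skb, XDP frame, or other external user holds a reference and the page may
 * safely be reposted to the RQ.
 */
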
/* Allocate and DMA-map a page for receive. */
static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
			     int node, gfp_t gfp)
{
	struct page *p;

	if (cache_get(q, rb))
		return 0;

	p = __alloc_pages_node(node, gfp | __GFP_NOWARN, 0);
	if (unlikely(!p))
		return -ENOMEM;

	rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
				    DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) {
		FUN_QSTAT_INC(q, rx_map_err);
		__free_page(p);
		return -ENOMEM;
	}

	FUN_QSTAT_INC(q, rx_page_alloc);

	rb->page = p;
	rb->pg_refs = 1;
	refresh_refs(rb);
	rb->node = page_is_pfmemalloc(p) ? -1 : page_to_nid(p);
	return 0;
}

static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb)
{
	if (rb->page) {
		dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		__page_frag_cache_drain(rb->page, rb->pg_refs);
		rb->page = NULL;
	}
}

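/* funeth_alloc_page() records node = -1 for pfmemalloc (emergency reserve)
 * pages. The value doubles as a sentinel: it never equals numa_mem_id(), so
 * such pages are never recycled, and fun_gather_pkt() folds it into its
 * return value so that skbs built from these frags set skb->pfmemalloc.
 */
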
/* Run the XDP program assigned to an Rx queue.
 * Return %NULL if the buffer is consumed, or the virtual address of the packet
 * to turn into an skb.
 */
static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
			 int ref_ok, struct funeth_txq *xdp_q)
{
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	struct xdp_buff xdp;
	u32 act;

	/* VA includes the headroom, frag size includes headroom + tailroom */
	xdp_init_buff(&xdp, ALIGN(skb_frag_size(frags), FUN_EPRQ_PKT_ALIGN),
		      &q->xdp_rxq);
	xdp_prepare_buff(&xdp, buf_va, FUN_XDP_HEADROOM, skb_frag_size(frags) -
			 (FUN_RX_TAILROOM + FUN_XDP_HEADROOM), false);

	xdp_prog = READ_ONCE(q->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		/* remove headroom, which may not be FUN_XDP_HEADROOM now */
		skb_frag_size_set(frags, xdp.data_end - xdp.data);
		skb_frag_off_add(frags, xdp.data - xdp.data_hard_start);
		goto pass;
	case XDP_TX:
		if (unlikely(!ref_ok))
			goto pass;

		xdpf = xdp_convert_buff_to_frame(&xdp);
		if (!xdpf || !fun_xdp_tx(xdp_q, xdpf))
			goto xdp_error;
		FUN_QSTAT_INC(q, xdp_tx);
		q->xdp_flush |= FUN_XDP_FLUSH_TX;
		break;
	case XDP_REDIRECT:
		if (unlikely(!ref_ok))
			goto pass;
		if (unlikely(xdp_do_redirect(q->netdev, &xdp, xdp_prog)))
			goto xdp_error;
		FUN_QSTAT_INC(q, xdp_redir);
		q->xdp_flush |= FUN_XDP_FLUSH_REDIR;
		break;
	default:
		bpf_warn_invalid_xdp_action(q->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(q->netdev, xdp_prog, act);
xdp_error:
		q->cur_buf->pg_refs++; /* return frags' page reference */
		FUN_QSTAT_INC(q, xdp_err);
		break;
	case XDP_DROP:
		q->cur_buf->pg_refs++;
		FUN_QSTAT_INC(q, xdp_drops);
		break;
	}
	return NULL;

pass:
	return xdp.data;
}

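/* Reference accounting across the XDP verdicts above: fun_gather_pkt() has
 * already charged one pg_ref for the frag. XDP_TX and XDP_REDIRECT keep it
 * (the page now belongs to the transmit/redirect path), XDP_PASS keeps it for
 * the skb the caller builds, while XDP_DROP and the error paths hand it back
 * by bumping cur_buf->pg_refs.
 */
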
/* A CQE contains a fixed completion structure along with optional metadata and
 * even packet data. Given the start address of a CQE return the start of the
 * contained fixed structure, which lies at the end.
 */
static const void *cqe_to_info(const void *cqe)
{
	return cqe + FUNETH_CQE_INFO_OFFSET;
}

/* The inverse of cqe_to_info(). */
static const void *info_to_cqe(const void *cqe_info)
{
	return cqe_info - FUNETH_CQE_INFO_OFFSET;
}

/* Return the type of hash provided by the device based on the L3 and L4
 * protocols it parsed for the packet.
 */
static enum pkt_hash_types cqe_to_pkt_hash_type(u16 pkt_parse)
{
	static const enum pkt_hash_types htype_map[] = {
		PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
		PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L4,
		PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
		PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3
	};
	u16 key;

	/* Build the key from the TCP/UDP and IP/IPv6 bits */
	key = ((pkt_parse >> FUN_ETH_RX_CV_OL4_PROT_S) & 6) |
	      ((pkt_parse >> (FUN_ETH_RX_CV_OL3_PROT_S + 1)) & 1);

	return htype_map[key];
}

/* Each received packet can be scattered across several Rx buffers or can
 * share a buffer with previously received packets depending on the buffer
 * and packet sizes and the room available in the most recently used buffer.
 *
 * The rules are:
 * - If the buffer at the head of an RQ has not been used it gets (part of) the
 *   next incoming packet.
 * - Otherwise, if the packet fully fits in the buffer's remaining space the
 *   packet is written there.
 * - Otherwise, the packet goes into the next Rx buffer.
 *
 * This function returns the Rx buffer for a packet or fragment thereof of the
 * given length. If it isn't @buf it either recycles or frees that buffer
 * before advancing the queue to the next buffer.
 *
 * If called repeatedly with the remaining length of a packet it will walk
 * through all the buffers containing the packet.
 */
static struct funeth_rxbuf *
get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len)
{
	if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset)
		return buf;		/* @buf holds (part of) the packet */

	/* The packet occupies part of the next buffer. Move there after
	 * replenishing the current buffer slot either with the spare page or
	 * by reusing the slot's existing page. Note that if a spare page isn't
	 * available and the current packet occupies @buf it is a multi-frag
	 * packet that will be dropped leaving @buf available for reuse.
	 */
	if ((page_ref_count(buf->page) == buf->pg_refs &&
	     buf->node == numa_mem_id()) || !q->spare_buf.page) {
		dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
					   PAGE_SIZE, DMA_FROM_DEVICE);
		refresh_refs(buf);
	} else {
		cache_offer(q, buf);
		*buf = q->spare_buf;
		q->spare_buf.page = NULL;
		q->rqes[q->rq_cons & q->rq_mask] =
			FUN_EPRQ_RQBUF_INIT(buf->dma_addr);
	}
	q->buf_offset = 0;
	q->rq_cons++;
	return &q->bufs[q->rq_cons & q->rq_mask];
}

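/* get_buf() walk-through on 4KB pages: with q->buf_offset = 3072, a request
 * for len = 2048 does not fit (3072 + 2048 > 4096), so the current buffer is
 * recycled or cached and the fragment starts at offset 0 of the next RQ
 * buffer; with q->buf_offset = 0 the head buffer is still unused and is
 * returned as-is.
 */
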
/* Gather the page fragments making up the first Rx packet on @q. Its total
 * length @tot_len includes optional head- and tail-rooms.
 *
 * Return 0 if the device retains ownership of at least some of the pages.
 * In this case the caller may only copy the packet.
 *
 * A non-zero return value gives the caller permission to use references to the
 * pages, e.g., attach them to skbs. Additionally, if the value is <0 at least
 * one of the pages is PF_MEMALLOC.
 *
 * Regardless of outcome the caller is granted a reference to each of the pages.
 */
static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len,
			  skb_frag_t *frags)
{
	struct funeth_rxbuf *buf = q->cur_buf;
	unsigned int frag_len;
	int ref_ok = 1;

	for (;;) {
		buf = get_buf(q, buf, tot_len);

		/* We always keep the RQ full of buffers so before we can give
		 * one of our pages to the stack we require that we can obtain
		 * a replacement page. If we can't the packet will either be
		 * copied or dropped so we can retain ownership of the page and
		 * reuse it.
		 */
		if (!q->spare_buf.page &&
		    funeth_alloc_page(q, &q->spare_buf, numa_mem_id(),
				      GFP_ATOMIC | __GFP_MEMALLOC))
			ref_ok = 0;

		frag_len = min_t(unsigned int, tot_len,
				 PAGE_SIZE - q->buf_offset);
		dma_sync_single_for_cpu(q->dma_dev,
					buf->dma_addr + q->buf_offset,
					frag_len, DMA_FROM_DEVICE);
		buf->pg_refs--;
		if (ref_ok)
			ref_ok |= buf->node;

		skb_frag_fill_page_desc(frags++, buf->page, q->buf_offset,
					frag_len);

		tot_len -= frag_len;
		if (!tot_len)
			break;

		q->buf_offset = PAGE_SIZE;
	}
	q->buf_offset = ALIGN(q->buf_offset + frag_len, FUN_EPRQ_PKT_ALIGN);
	q->cur_buf = buf;
	return ref_ok;
}

static bool rx_hwtstamp_enabled(const struct net_device *dev)
{
	const struct funeth_priv *d = netdev_priv(dev);

	return d->hwtstamp_cfg.rx_filter == HWTSTAMP_FILTER_ALL;
}

/* Advance the CQ pointers and phase tag to the next CQE. */
static void advance_cq(struct funeth_rxq *q)
{
	if (unlikely(q->cq_head == q->cq_mask)) {
		q->cq_head = 0;
		q->phase ^= 1;
		q->next_cqe_info = cqe_to_info(q->cqes);
	} else {
		q->cq_head++;
		q->next_cqe_info += FUNETH_CQE_SIZE;
	}
	prefetch(q->next_cqe_info);
}

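/* Phase-tag note: the CQ depth is a power of two and q->phase flips each time
 * cq_head wraps to 0. A CQE written on the previous lap still carries the old
 * phase bit, so cqe_phase_mismatch() below rejects it until the device
 * overwrites it on the current lap.
 */
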
/* Process the packet represented by the head CQE of @q. Gather the packet's
 * fragments, run it through the optional XDP program, and if needed construct
 * an skb and pass it to the stack.
 */
static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q)
{
	const struct fun_eth_cqe *rxreq = info_to_cqe(q->next_cqe_info);
	unsigned int i, tot_len, pkt_len = be32_to_cpu(rxreq->pkt_len);
	struct net_device *ndev = q->netdev;
	skb_frag_t frags[RX_MAX_FRAGS];
	struct skb_shared_info *si;
	unsigned int headroom;
	gro_result_t gro_res;
	struct sk_buff *skb;
	int ref_ok;
	void *va;
	u16 cv;

	u64_stats_update_begin(&q->syncp);
	q->stats.rx_pkts++;
	q->stats.rx_bytes += pkt_len;
	u64_stats_update_end(&q->syncp);

	advance_cq(q);

	/* account for head- and tail-room, present only for 1-buffer packets */
	tot_len = pkt_len;
	headroom = be16_to_cpu(rxreq->headroom);
	if (likely(headroom))
		tot_len += FUN_RX_TAILROOM + headroom;

	ref_ok = fun_gather_pkt(q, tot_len, frags);
	va = skb_frag_address(frags);
	if (xdp_q && headroom == FUN_XDP_HEADROOM) {
		va = fun_run_xdp(q, frags, va, ref_ok, xdp_q);
		if (!va)
			return;
		headroom = 0;	/* XDP_PASS trims it */
	}
	if (unlikely(!ref_ok))
		goto no_mem;

	if (likely(headroom)) {
		/* headroom is either FUN_RX_HEADROOM or FUN_XDP_HEADROOM */
		prefetch(va + headroom);
		skb = napi_build_skb(va, ALIGN(tot_len, FUN_EPRQ_PKT_ALIGN));
		if (unlikely(!skb))
			goto no_mem;

		skb_reserve(skb, headroom);
		__skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
	} else {
		prefetch(va);
		skb = napi_get_frags(q->napi);
		if (unlikely(!skb))
			goto no_mem;

		if (ref_ok < 0)
			skb->pfmemalloc = 1;

		si = skb_shinfo(skb);
		si->nr_frags = rxreq->nsgl;
		for (i = 0; i < si->nr_frags; i++)
			si->frags[i] = frags[i];

		skb->len = pkt_len;
		skb->data_len = pkt_len;
		skb->truesize += round_up(pkt_len, FUN_EPRQ_PKT_ALIGN);
	}

	skb_record_rx_queue(skb, q->qidx);
	cv = be16_to_cpu(rxreq->pkt_cv);
	if (likely((q->netdev->features & NETIF_F_RXHASH) && rxreq->hash))
		skb_set_hash(skb, be32_to_cpu(rxreq->hash),
			     cqe_to_pkt_hash_type(cv));
	if (likely((q->netdev->features & NETIF_F_RXCSUM) && rxreq->csum)) {
		FUN_QSTAT_INC(q, rx_cso);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = be16_to_cpu(rxreq->csum) - 1;
	}
	if (unlikely(rx_hwtstamp_enabled(q->netdev)))
		skb_hwtstamps(skb)->hwtstamp = be64_to_cpu(rxreq->timestamp);

	trace_funeth_rx(q, rxreq->nsgl, pkt_len, skb->hash, cv);

	gro_res = skb->data_len ? napi_gro_frags(q->napi) :
				  napi_gro_receive(q->napi, skb);
	if (gro_res == GRO_MERGED || gro_res == GRO_MERGED_FREE)
		FUN_QSTAT_INC(q, gro_merged);
	else if (gro_res == GRO_HELD)
		FUN_QSTAT_INC(q, gro_pkts);
	return;

no_mem:
	FUN_QSTAT_INC(q, rx_mem_drops);

	/* Release the references we've been granted for the frag pages.
	 * We return the ref of the last frag and free the rest.
	 */
	q->cur_buf->pg_refs++;
	for (i = 0; i < rxreq->nsgl - 1; i++)
		__free_page(skb_frag_page(frags + i));
}

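/* Checksum note for fun_handle_cqe_pkt(): rxreq->csum carries the number of
 * checksums the device validated, e.g., 2 for a tunneled packet with outer
 * and inner checksums. CHECKSUM_UNNECESSARY's csum_level is zero-based, hence
 * the "- 1" in the conversion.
 */
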
/* Return 0 if the phase tag of the CQE at the CQ's head matches expectations
 * indicating the CQE is new.
 */
static u16 cqe_phase_mismatch(const struct fun_cqe_info *ci, u16 phase)
{
	u16 sf_p = be16_to_cpu(ci->sf_p);

	return (sf_p & 1) ^ phase;
}

/* Walk through a CQ identifying and processing fresh CQEs up to the given
 * budget. Return the remaining budget.
 */
static int fun_process_cqes(struct funeth_rxq *q, int budget)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	struct funeth_txq **xdpqs, *xdp_q = NULL;

	xdpqs = rcu_dereference_bh(fp->xdpqs);
	if (xdpqs)
		xdp_q = xdpqs[smp_processor_id()];

	while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) {
		/* access other descriptor fields after the phase check */
		dma_rmb();

		fun_handle_cqe_pkt(q, xdp_q);
		budget--;
	}

	if (unlikely(q->xdp_flush)) {
		if (q->xdp_flush & FUN_XDP_FLUSH_TX)
			fun_txq_wr_db(xdp_q);
		if (q->xdp_flush & FUN_XDP_FLUSH_REDIR)
			xdp_do_flush();
		q->xdp_flush = 0;
	}

	return budget;
}

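/* Doorbell batching: XDP_TX and XDP_REDIRECT verdicts only set bits in
 * q->xdp_flush, so the Tx doorbell write and xdp_do_flush() above run once
 * per fun_process_cqes() invocation rather than once per packet.
 */
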
/* NAPI handler for Rx queues. Calls the CQE processing loop and writes RQ/CQ
 * doorbells as needed.
 */
int fun_rxq_napi_poll(struct napi_struct *napi, int budget)
{
	struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
	struct funeth_rxq *q = irq->rxq;
	int work_done = budget - fun_process_cqes(q, budget);
	u32 cq_db_val = q->cq_head;

	if (unlikely(work_done >= budget))
		FUN_QSTAT_INC(q, rx_budget);
	else if (napi_complete_done(napi, work_done))
		cq_db_val |= q->irq_db_val;

	/* check whether to post new Rx buffers */
	if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) {
		u64_stats_update_begin(&q->syncp);
		q->stats.rx_bufs += q->rq_cons - q->rq_cons_db;
		u64_stats_update_end(&q->syncp);
		q->rq_cons_db = q->rq_cons;
		writel((q->rq_cons - 1) & q->rq_mask, q->rq_db);
	}

	writel(cq_db_val, q->cq_db);
	return work_done;
}

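/* RQ doorbell example: rq_db_thres is nrqe / 4, so with nrqe = 1024 buffers
 * are returned to the device in batches of at least 256. The value written is
 * the ring index of the last replenished RQE, (rq_cons - 1) & rq_mask, not a
 * count.
 */
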
/* Free the Rx buffers of an Rx queue. */
static void fun_rxq_free_bufs(struct funeth_rxq *q)
{
	struct funeth_rxbuf *b = q->bufs;
	unsigned int i;

	for (i = 0; i <= q->rq_mask; i++, b++)
		funeth_free_page(q, b);

	funeth_free_page(q, &q->spare_buf);
	q->cur_buf = NULL;
}

/* Initially provision an Rx queue with Rx buffers. */
static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
{
	struct funeth_rxbuf *b = q->bufs;
	unsigned int i;

	for (i = 0; i <= q->rq_mask; i++, b++) {
		if (funeth_alloc_page(q, b, node, GFP_KERNEL)) {
			fun_rxq_free_bufs(q);
			return -ENOMEM;
		}
		q->rqes[i] = FUN_EPRQ_RQBUF_INIT(b->dma_addr);
	}
	q->cur_buf = q->bufs;
	return 0;
}

/* Initialize a used-buffer cache of the given depth. */
static int fun_rxq_init_cache(struct funeth_rx_cache *c, unsigned int depth,
			      int node)
{
	c->mask = depth - 1;
	c->bufs = kvzalloc_node(depth * sizeof(*c->bufs), GFP_KERNEL, node);
	return c->bufs ? 0 : -ENOMEM;
}

/* Deallocate an Rx queue's used-buffer cache and its contents. */
static void fun_rxq_free_cache(struct funeth_rxq *q)
{
	struct funeth_rxbuf *b = q->cache.bufs;
	unsigned int i;

	for (i = 0; i <= q->cache.mask; i++, b++)
		funeth_free_page(q, b);

	kvfree(q->cache.bufs);
	q->cache.bufs = NULL;
}

int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	struct fun_admin_epcq_req cmd;
	u16 headroom;
	int err;

	headroom = prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;
	if (headroom != q->headroom) {
		cmd.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ,
							sizeof(cmd));
		cmd.u.modify =
			FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(FUN_ADMIN_SUBOP_MODIFY,
						       0, q->hw_cqid, headroom);
		err = fun_submit_admin_sync_cmd(fp->fdev, &cmd.common, NULL, 0,
						0);
		if (err)
			return err;
		q->headroom = headroom;
	}

	WRITE_ONCE(q->xdp_prog, prog);
	return 0;
}

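/* Note on fun_rxq_set_bpf(): packet headroom is provisioned by the device
 * when it places packets, so switching between FUN_RX_HEADROOM and
 * FUN_XDP_HEADROOM takes a synchronous admin EPCQ modify command before the
 * new program (or NULL) is published with WRITE_ONCE().
 */
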
/* Create an Rx queue, allocating the host memory it needs. */
static struct funeth_rxq *fun_rxq_create_sw(struct net_device *dev,
					    unsigned int qidx,
					    unsigned int ncqe,
					    unsigned int nrqe,
					    struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_rxq *q;
	int err = -ENOMEM;
	int numa_node;

	numa_node = fun_irq_node(irq);
	q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
	if (!q)
		goto err;

	q->qidx = qidx;
	q->netdev = dev;
	q->cq_mask = ncqe - 1;
	q->rq_mask = nrqe - 1;
	q->numa_node = numa_node;
	q->rq_db_thres = nrqe / 4;
	u64_stats_init(&q->syncp);
	q->dma_dev = &fp->pdev->dev;

	q->rqes = fun_alloc_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes),
				     sizeof(*q->bufs), false, numa_node,
				     &q->rq_dma_addr, (void **)&q->bufs, NULL);
	if (!q->rqes)
		goto free_q;

	q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0,
				     false, numa_node, &q->cq_dma_addr, NULL,
				     NULL);
	if (!q->cqes)
		goto free_rqes;

	err = fun_rxq_init_cache(&q->cache, nrqe, numa_node);
	if (err)
		goto free_cqes;

	err = fun_rxq_alloc_bufs(q, numa_node);
	if (err)
		goto free_cache;

	q->stats.rx_bufs = q->rq_mask;
	q->init_state = FUN_QSTATE_INIT_SW;
	return q;

free_cache:
	fun_rxq_free_cache(q);
free_cqes:
	dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes,
			  q->cq_dma_addr);
free_rqes:
	fun_free_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), false, q->rqes,
			  q->rq_dma_addr, q->bufs);
free_q:
	kfree(q);
err:
	netdev_err(dev, "Unable to allocate memory for Rx queue %u\n", qidx);
	return ERR_PTR(err);
}

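/* Sizing note: ncqe and nrqe are assumed to be powers of two since they feed
 * the cq_mask/rq_mask ring masks, and the recycling cache is sized to nrqe
 * entries so that in the best case every posted buffer has a cache slot.
 */
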
static void fun_rxq_free_sw(struct funeth_rxq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	fun_rxq_free_cache(q);
	fun_rxq_free_bufs(q);
	fun_free_ring_mem(q->dma_dev, q->rq_mask + 1, sizeof(*q->rqes), false,
			  q->rqes, q->rq_dma_addr, q->bufs);
	dma_free_coherent(q->dma_dev, (q->cq_mask + 1) * FUNETH_CQE_SIZE,
			  q->cqes, q->cq_dma_addr);

	/* Before freeing the queue fold its key counters into the netdev totals. */
	fp->rx_packets += q->stats.rx_pkts;
	fp->rx_bytes += q->stats.rx_bytes;
	fp->rx_dropped += q->stats.rx_map_err + q->stats.rx_mem_drops;

	kfree(q);
}

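/* Queues are destroyed and recreated on reconfiguration (e.g., ring
 * resizing), so fun_rxq_free_sw() above rolls the per-queue counters into the
 * funeth_priv totals to keep the interface statistics monotonic.
 */
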
/* Create an Rx queue's resources on the device. */
int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	unsigned int ncqe = q->cq_mask + 1;
	unsigned int nrqe = q->rq_mask + 1;
	int err;

	err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx,
			       irq->napi.napi_id);
	if (err)
		goto out;

	err = xdp_rxq_info_reg_mem_model(&q->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					 NULL);
	if (err)
		goto xdp_unreg;

	q->phase = 1;
	q->irq_cnt = 0;
	q->cq_head = 0;
	q->rq_cons = 0;
	q->rq_cons_db = 0;
	q->buf_offset = 0;
	q->napi = &irq->napi;
	q->irq_db_val = fp->cq_irq_db;
	q->next_cqe_info = cqe_to_info(q->cqes);

	q->xdp_prog = fp->xdp_prog;
	q->headroom = fp->xdp_prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;

	err = fun_sq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
			    FUN_ADMIN_EPSQ_CREATE_FLAG_RQ, 0,
			    FUN_HCI_ID_INVALID, 0, nrqe, q->rq_dma_addr, 0, 0,
			    0, 0, fp->fdev->kern_end_qid, PAGE_SHIFT,
			    &q->hw_sqid, &q->rq_db);
	if (err)
		goto xdp_unreg;

	err = fun_cq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
			    FUN_ADMIN_EPCQ_CREATE_FLAG_RQ, 0,
			    q->hw_sqid, ilog2(FUNETH_CQE_SIZE), ncqe,
			    q->cq_dma_addr, q->headroom, FUN_RX_TAILROOM, 0, 0,
			    irq->irq_idx, 0, fp->fdev->kern_end_qid,
			    &q->hw_cqid, &q->cq_db);
	if (err)
		goto free_rq;

	irq->rxq = q;
	writel(q->rq_mask, q->rq_db);
	q->init_state = FUN_QSTATE_INIT_FULL;

	netif_info(fp, ifup, q->netdev,
		   "Rx queue %u, depth %u/%u, HW qid %u/%u, IRQ idx %u, node %d, headroom %u\n",
		   q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx,
		   q->numa_node, q->headroom);
	return 0;

free_rq:
	fun_destroy_sq(fp->fdev, q->hw_sqid);
xdp_unreg:
	xdp_rxq_info_unreg(&q->xdp_rxq);
out:
	netdev_err(q->netdev,
		   "Failed to create Rx queue %u on device, error %d\n",
		   q->qidx, err);
	return err;
}

static void fun_rxq_free_dev(struct funeth_rxq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	struct fun_irq *irq;

	if (q->init_state < FUN_QSTATE_INIT_FULL)
		return;

	irq = container_of(q->napi, struct fun_irq, napi);
	netif_info(fp, ifdown, q->netdev,
		   "Freeing Rx queue %u (id %u/%u), IRQ %u\n",
		   q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx);

	irq->rxq = NULL;
	xdp_rxq_info_unreg(&q->xdp_rxq);
	fun_destroy_sq(fp->fdev, q->hw_sqid);
	fun_destroy_cq(fp->fdev, q->hw_cqid);
	q->init_state = FUN_QSTATE_INIT_SW;
}

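/* Queue state machine: FUN_QSTATE_INIT_SW means only host-side memory exists,
 * FUN_QSTATE_INIT_FULL additionally has the device-side SQ/CQ objects, and
 * FUN_QSTATE_DESTROYED has released everything. funeth_rxq_create() and
 * funeth_rxq_free() below move a queue up or down between these states.
 */
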
/* Create or advance an Rx queue, allocating all the host and device resources
 * needed to reach the target state.
 */
int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
		      unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
		      int state, struct funeth_rxq **qp)
{
	struct funeth_rxq *q = *qp;
	int err;

	if (!q) {
		q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq);
		if (IS_ERR(q))
			return PTR_ERR(q);
	}

	if (q->init_state >= state)
		goto out;

	err = fun_rxq_create_dev(q, irq);
	if (err) {
		if (!*qp)
			fun_rxq_free_sw(q);
		return err;
	}

out:
	*qp = q;
	return 0;
}

/* Free Rx queue resources until it reaches the target state. */
struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state)
{
	if (state < FUN_QSTATE_INIT_FULL)
		fun_rxq_free_dev(q);

	if (state == FUN_QSTATE_DESTROYED) {
		fun_rxq_free_sw(q);
		q = NULL;
	}

	return q;
}