// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 * Copyright (C) 2008-2023, VMware, Inc. All Rights Reserved.
 * Maintained by: pv-drivers@vmware.com
 *
 */

#include "vmxnet3_int.h"
#include "vmxnet3_xdp.h"

static void
vmxnet3_xdp_exchange_program(struct vmxnet3_adapter *adapter,
			     struct bpf_prog *prog)
{
	rcu_assign_pointer(adapter->xdp_bpf_prog, prog);
}

/* Pick a TX queue for the current CPU; CPUs beyond the number of TX queues
 * are folded back into range with reciprocal_scale().
 */
static inline struct vmxnet3_tx_queue *
vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_tx_queue *tq;
	int tq_number;
	int cpu;

	tq_number = adapter->num_tx_queues;
	cpu = smp_processor_id();
	if (likely(cpu < tq_number))
		tq = &adapter->tx_queue[cpu];
	else
		tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];

	return tq;
}

static int
vmxnet3_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf,
		struct netlink_ext_ack *extack)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *new_bpf_prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	bool need_update;
	bool running;
	int err;

	if (new_bpf_prog && netdev->mtu > VMXNET3_XDP_MAX_MTU) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "MTU %u too large for XDP",
				       netdev->mtu);
		return -EOPNOTSUPP;
	}

	if (adapter->netdev->features & NETIF_F_LRO) {
		NL_SET_ERR_MSG_MOD(extack, "LRO is not supported with XDP");
		adapter->netdev->features &= ~NETIF_F_LRO;
	}

	old_bpf_prog = rcu_dereference(adapter->xdp_bpf_prog);
	if (!new_bpf_prog && !old_bpf_prog)
		return 0;

	running = netif_running(netdev);
	need_update = !!old_bpf_prog != !!new_bpf_prog;

	if (running && need_update)
		vmxnet3_quiesce_dev(adapter);

	vmxnet3_xdp_exchange_program(adapter, new_bpf_prog);
	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);

	/* If the device is down, or one XDP program merely replaced another,
	 * the rx rings are already set up correctly and no reset is needed.
	 */
	if (!running || !need_update)
		return 0;

	if (new_bpf_prog)
		xdp_features_set_redirect_target(netdev, false);
	else
		xdp_features_clear_redirect_target(netdev);

	vmxnet3_reset_dev(adapter);
	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_adjust_rx_ring_size(adapter);
	err = vmxnet3_rq_create_all(adapter);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "failed to re-create rx queues for XDP.");
		return -EOPNOTSUPP;
	}
	err = vmxnet3_activate_dev(adapter);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "failed to activate device for XDP.");
		return -EOPNOTSUPP;
	}
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}

/* Main XDP callback used by the kernel to set/unset an eBPF program. */
int
vmxnet3_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return vmxnet3_xdp_set(netdev, bpf, bpf->extack);
	default:
		return -EINVAL;
	}

	return 0;
}

static int
vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
		       struct xdp_frame *xdpf,
		       struct vmxnet3_tx_queue *tq, bool dma_map)
{
	struct vmxnet3_tx_buf_info *tbi = NULL;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_ctx ctx;
	int tx_num_deferred;
	struct page *page;
	u32 buf_size;
	u32 dw2;

	/* The descriptor is written with the inverted generation bit and only
	 * flipped to the ring's current generation after dma_wmb(), so the
	 * device never sees a partially written descriptor.
	 */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
	dw2 |= xdpf->len;
	ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx.sop_txd;

	buf_size = xdpf->len;
	tbi = tq->buf_info + tq->tx_ring.next2fill;

	if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
		tq->stats.tx_ring_full++;
		return -ENOSPC;
	}

	tbi->map_type = VMXNET3_MAP_XDP;
	if (dma_map) { /* ndo_xdp_xmit */
		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
					       xdpf->data, buf_size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
			return -EFAULT;
		tbi->map_type |= VMXNET3_MAP_SINGLE;
	} else { /* XDP buffer from page pool */
		page = virt_to_page(xdpf->data);
		tbi->dma_addr = page_pool_get_dma_addr(page) +
				VMXNET3_XDP_HEADROOM;
		dma_sync_single_for_device(&adapter->pdev->dev,
					   tbi->dma_addr, buf_size,
					   DMA_TO_DEVICE);
	}
	tbi->xdpf = xdpf;
	tbi->len = buf_size;

	gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
	WARN_ON_ONCE(gdesc->txd.gen == tq->tx_ring.gen);

	gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
	gdesc->dword[2] = cpu_to_le32(dw2);

	/* Setup the EOP desc */
	gdesc->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	gdesc->txd.om = 0;
	gdesc->txd.msscof = 0;
	gdesc->txd.hlen = 0;
	gdesc->txd.ti = 0;

	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
	le32_add_cpu(&tq->shared->txNumDeferred, 1);
	tx_num_deferred++;

	vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

	/* set the last buf_info for the pkt */
	tbi->sop_idx = ctx.sop_txd - tq->tx_ring.base;

	dma_wmb();
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
				      VMXNET3_TXD_GEN);

	/* No need to handle the case when tx_num_deferred doesn't reach
	 * threshold. Backend driver at hypervisor side will poll and reset
	 * tq->shared->txNumDeferred to 0.
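	 * Once that threshold is crossed, the VMXNET3_REG_TXPROD write below
	 * notifies the device of the newly filled descriptors.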
	 */
	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return 0;
}

static int
vmxnet3_xdp_xmit_back(struct vmxnet3_adapter *adapter,
		      struct xdp_frame *xdpf)
{
	struct vmxnet3_tx_queue *tq;
	struct netdev_queue *nq;
	int err;

	tq = vmxnet3_xdp_get_tq(adapter);
	if (tq->stopped)
		return -ENETDOWN;

	nq = netdev_get_tx_queue(adapter->netdev, tq->qid);

	__netif_tx_lock(nq, smp_processor_id());
	err = vmxnet3_xdp_xmit_frame(adapter, xdpf, tq, false);
	__netif_tx_unlock(nq);

	return err;
}

/* ndo_xdp_xmit */
int
vmxnet3_xdp_xmit(struct net_device *dev,
		 int n, struct xdp_frame **frames, u32 flags)
{
	struct vmxnet3_adapter *adapter = netdev_priv(dev);
	struct vmxnet3_tx_queue *tq;
	int i;

	if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)))
		return -ENETDOWN;
	if (unlikely(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)))
		return -EINVAL;

	tq = vmxnet3_xdp_get_tq(adapter);
	if (tq->stopped)
		return -ENETDOWN;

	for (i = 0; i < n; i++) {
		if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
			tq->stats.xdp_xmit_err++;
			break;
		}
	}
	tq->stats.xdp_xmit += i;

	return i;
}

/* Run the XDP program on one received buffer and carry out its verdict.
 * Returns the XDP action so the caller knows whether to build an skb
 * (XDP_PASS) or whether the buffer has already been consumed.
 */
static int
vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp,
		struct bpf_prog *prog)
{
	struct xdp_frame *xdpf;
	struct page *page;
	int err;
	u32 act;

	rq->stats.xdp_packets++;
	act = bpf_prog_run_xdp(prog, xdp);
	page = virt_to_page(xdp->data_hard_start);

	switch (act) {
	case XDP_PASS:
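		/* The caller wraps the buffer in an skb and passes it up. */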
		return act;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rq->adapter->netdev, xdp, prog);
		if (!err) {
			rq->stats.xdp_redirects++;
		} else {
			rq->stats.xdp_drops++;
			page_pool_recycle_direct(rq->page_pool, page);
		}
		return act;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf ||
			     vmxnet3_xdp_xmit_back(rq->adapter, xdpf))) {
			rq->stats.xdp_drops++;
			page_pool_recycle_direct(rq->page_pool, page);
		} else {
			rq->stats.xdp_tx++;
		}
		return act;
	default:
		bpf_warn_invalid_xdp_action(rq->adapter->netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rq->adapter->netdev, prog, act);
		rq->stats.xdp_aborted++;
		break;
	case XDP_DROP:
		rq->stats.xdp_drops++;
		break;
	}

	page_pool_recycle_direct(rq->page_pool, page);

	return act;
}

static struct sk_buff *
vmxnet3_build_skb(struct vmxnet3_rx_queue *rq, struct page *page,
		  const struct xdp_buff *xdp)
{
	struct sk_buff *skb;

	skb = build_skb(page_address(page), PAGE_SIZE);
	if (unlikely(!skb)) {
		page_pool_recycle_direct(rq->page_pool, page);
		rq->stats.rx_buf_alloc_failure++;
		return NULL;
	}

	/* bpf prog might change len and data position. */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);
	skb_mark_for_recycle(skb);

	return skb;
}

/* Handle packets from DataRing.
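 * The device delivers small packets through a per-queue data ring rather
 * than through a page-pool backed rx buffer, so the payload is first
 * copied into a freshly allocated page-pool page and the XDP program is
 * run on that copy.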
 */
int
vmxnet3_process_xdp_small(struct vmxnet3_adapter *adapter,
			  struct vmxnet3_rx_queue *rq,
			  void *data, int len,
			  struct sk_buff **skb_xdp_pass)
{
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	struct page *page;
	int act;

	page = page_pool_alloc_pages(rq->page_pool, GFP_ATOMIC);
	if (unlikely(!page)) {
		rq->stats.rx_buf_alloc_failure++;
		return XDP_DROP;
	}

	xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
			 len, false);
	xdp_buff_clear_frags_flag(&xdp);

	/* Must copy the data because it's in the dataring. */
	memcpy(xdp.data, data, len);

	xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
	if (!xdp_prog) {
		act = XDP_PASS;
		goto out_skb;
	}
	act = vmxnet3_run_xdp(rq, &xdp, xdp_prog);
	if (act != XDP_PASS)
		return act;

out_skb:
	*skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp);
	if (!*skb_xdp_pass)
		return XDP_DROP;

	/* No need to refill. */
	return likely(*skb_xdp_pass) ? act : XDP_DROP;
}

/* Handle a packet that arrived in a regular, page-pool backed rx buffer. */
int
vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
		    struct vmxnet3_rx_queue *rq,
		    struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi,
		    struct Vmxnet3_RxDesc *rxd,
		    struct sk_buff **skb_xdp_pass)
{
	struct bpf_prog *xdp_prog;
	dma_addr_t new_dma_addr;
	struct xdp_buff xdp;
	struct page *page;
	void *new_data;
	int act;

	page = rbi->page;
	dma_sync_single_for_cpu(&adapter->pdev->dev,
				page_pool_get_dma_addr(page) +
				rq->page_pool->p.offset, rbi->len,
				page_pool_get_dma_dir(rq->page_pool));

	xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
			 rbi->len, false);
	xdp_buff_clear_frags_flag(&xdp);

	xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
	if (!xdp_prog) {
		act = XDP_PASS;
		goto out_skb;
	}
	act = vmxnet3_run_xdp(rq, &xdp, xdp_prog);

	if (act == XDP_PASS) {
out_skb:
		*skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp);
		if (!*skb_xdp_pass)
			act = XDP_DROP;
	}

	/* Refill the rx descriptor with a fresh page-pool buffer; the old
	 * page now belongs to either the skb, the XDP TX/redirect path, or
	 * the page pool (when the frame was dropped and recycled).
	 */
	new_data = vmxnet3_pp_get_buff(rq->page_pool, &new_dma_addr,
				       GFP_ATOMIC);
	if (!new_data) {
		rq->stats.rx_buf_alloc_failure++;
		return XDP_DROP;
	}
	rbi->page = virt_to_page(new_data);
	rbi->dma_addr = new_dma_addr;
	rxd->addr = cpu_to_le64(rbi->dma_addr);
	rxd->len = rbi->len;

	return act;
}