18c2ecf20Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0 28c2ecf20Sopenharmony_ci/* Copyright (c) 2019, Intel Corporation. */ 38c2ecf20Sopenharmony_ci 48c2ecf20Sopenharmony_ci#include <linux/bpf_trace.h> 58c2ecf20Sopenharmony_ci#include <net/xdp_sock_drv.h> 68c2ecf20Sopenharmony_ci#include <net/xdp.h> 78c2ecf20Sopenharmony_ci#include "ice.h" 88c2ecf20Sopenharmony_ci#include "ice_base.h" 98c2ecf20Sopenharmony_ci#include "ice_type.h" 108c2ecf20Sopenharmony_ci#include "ice_xsk.h" 118c2ecf20Sopenharmony_ci#include "ice_txrx.h" 128c2ecf20Sopenharmony_ci#include "ice_txrx_lib.h" 138c2ecf20Sopenharmony_ci#include "ice_lib.h" 148c2ecf20Sopenharmony_ci 158c2ecf20Sopenharmony_ci/** 168c2ecf20Sopenharmony_ci * ice_qp_reset_stats - Resets all stats for rings of given index 178c2ecf20Sopenharmony_ci * @vsi: VSI that contains rings of interest 188c2ecf20Sopenharmony_ci * @q_idx: ring index in array 198c2ecf20Sopenharmony_ci */ 208c2ecf20Sopenharmony_cistatic void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) 218c2ecf20Sopenharmony_ci{ 228c2ecf20Sopenharmony_ci memset(&vsi->rx_rings[q_idx]->rx_stats, 0, 238c2ecf20Sopenharmony_ci sizeof(vsi->rx_rings[q_idx]->rx_stats)); 248c2ecf20Sopenharmony_ci memset(&vsi->tx_rings[q_idx]->stats, 0, 258c2ecf20Sopenharmony_ci sizeof(vsi->tx_rings[q_idx]->stats)); 268c2ecf20Sopenharmony_ci if (ice_is_xdp_ena_vsi(vsi)) 278c2ecf20Sopenharmony_ci memset(&vsi->xdp_rings[q_idx]->stats, 0, 288c2ecf20Sopenharmony_ci sizeof(vsi->xdp_rings[q_idx]->stats)); 298c2ecf20Sopenharmony_ci} 308c2ecf20Sopenharmony_ci 318c2ecf20Sopenharmony_ci/** 328c2ecf20Sopenharmony_ci * ice_qp_clean_rings - Cleans all the rings of a given index 338c2ecf20Sopenharmony_ci * @vsi: VSI that contains rings of interest 348c2ecf20Sopenharmony_ci * @q_idx: ring index in array 358c2ecf20Sopenharmony_ci */ 368c2ecf20Sopenharmony_cistatic void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) 378c2ecf20Sopenharmony_ci{ 388c2ecf20Sopenharmony_ci ice_clean_tx_ring(vsi->tx_rings[q_idx]); 398c2ecf20Sopenharmony_ci if (ice_is_xdp_ena_vsi(vsi)) { 408c2ecf20Sopenharmony_ci synchronize_rcu(); 418c2ecf20Sopenharmony_ci ice_clean_tx_ring(vsi->xdp_rings[q_idx]); 428c2ecf20Sopenharmony_ci } 438c2ecf20Sopenharmony_ci ice_clean_rx_ring(vsi->rx_rings[q_idx]); 448c2ecf20Sopenharmony_ci} 458c2ecf20Sopenharmony_ci 468c2ecf20Sopenharmony_ci/** 478c2ecf20Sopenharmony_ci * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector 488c2ecf20Sopenharmony_ci * @vsi: VSI that has netdev 498c2ecf20Sopenharmony_ci * @q_vector: q_vector that has NAPI context 508c2ecf20Sopenharmony_ci * @enable: true for enable, false for disable 518c2ecf20Sopenharmony_ci */ 528c2ecf20Sopenharmony_cistatic void 538c2ecf20Sopenharmony_ciice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector, 548c2ecf20Sopenharmony_ci bool enable) 558c2ecf20Sopenharmony_ci{ 568c2ecf20Sopenharmony_ci if (!vsi->netdev || !q_vector) 578c2ecf20Sopenharmony_ci return; 588c2ecf20Sopenharmony_ci 598c2ecf20Sopenharmony_ci if (enable) 608c2ecf20Sopenharmony_ci napi_enable(&q_vector->napi); 618c2ecf20Sopenharmony_ci else 628c2ecf20Sopenharmony_ci napi_disable(&q_vector->napi); 638c2ecf20Sopenharmony_ci} 648c2ecf20Sopenharmony_ci 658c2ecf20Sopenharmony_ci/** 668c2ecf20Sopenharmony_ci * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring 678c2ecf20Sopenharmony_ci * @vsi: the VSI that contains queue vector being un-configured 688c2ecf20Sopenharmony_ci * @rx_ring: Rx ring that will have its IRQ disabled 698c2ecf20Sopenharmony_ci * @q_vector: queue vector 708c2ecf20Sopenharmony_ci */ 718c2ecf20Sopenharmony_cistatic void 728c2ecf20Sopenharmony_ciice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring, 738c2ecf20Sopenharmony_ci struct ice_q_vector *q_vector) 748c2ecf20Sopenharmony_ci{ 758c2ecf20Sopenharmony_ci struct ice_pf *pf = vsi->back; 768c2ecf20Sopenharmony_ci struct ice_hw *hw = &pf->hw; 778c2ecf20Sopenharmony_ci int base = vsi->base_vector; 788c2ecf20Sopenharmony_ci u16 reg; 798c2ecf20Sopenharmony_ci u32 val; 808c2ecf20Sopenharmony_ci 818c2ecf20Sopenharmony_ci /* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so handle 828c2ecf20Sopenharmony_ci * here only QINT_RQCTL 838c2ecf20Sopenharmony_ci */ 848c2ecf20Sopenharmony_ci reg = rx_ring->reg_idx; 858c2ecf20Sopenharmony_ci val = rd32(hw, QINT_RQCTL(reg)); 868c2ecf20Sopenharmony_ci val &= ~QINT_RQCTL_CAUSE_ENA_M; 878c2ecf20Sopenharmony_ci wr32(hw, QINT_RQCTL(reg), val); 888c2ecf20Sopenharmony_ci 898c2ecf20Sopenharmony_ci if (q_vector) { 908c2ecf20Sopenharmony_ci u16 v_idx = q_vector->v_idx; 918c2ecf20Sopenharmony_ci 928c2ecf20Sopenharmony_ci wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0); 938c2ecf20Sopenharmony_ci ice_flush(hw); 948c2ecf20Sopenharmony_ci synchronize_irq(pf->msix_entries[v_idx + base].vector); 958c2ecf20Sopenharmony_ci } 968c2ecf20Sopenharmony_ci} 978c2ecf20Sopenharmony_ci 988c2ecf20Sopenharmony_ci/** 998c2ecf20Sopenharmony_ci * ice_qvec_cfg_msix - Enable IRQ for given queue vector 1008c2ecf20Sopenharmony_ci * @vsi: the VSI that contains queue vector 1018c2ecf20Sopenharmony_ci * @q_vector: queue vector 1028c2ecf20Sopenharmony_ci */ 1038c2ecf20Sopenharmony_cistatic void 1048c2ecf20Sopenharmony_ciice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector) 1058c2ecf20Sopenharmony_ci{ 1068c2ecf20Sopenharmony_ci u16 reg_idx = q_vector->reg_idx; 1078c2ecf20Sopenharmony_ci struct ice_pf *pf = vsi->back; 1088c2ecf20Sopenharmony_ci struct ice_hw *hw = &pf->hw; 1098c2ecf20Sopenharmony_ci struct ice_ring *ring; 1108c2ecf20Sopenharmony_ci 1118c2ecf20Sopenharmony_ci ice_cfg_itr(hw, q_vector); 1128c2ecf20Sopenharmony_ci 1138c2ecf20Sopenharmony_ci wr32(hw, GLINT_RATE(reg_idx), 1148c2ecf20Sopenharmony_ci ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); 1158c2ecf20Sopenharmony_ci 1168c2ecf20Sopenharmony_ci ice_for_each_ring(ring, q_vector->tx) 1178c2ecf20Sopenharmony_ci ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx, 1188c2ecf20Sopenharmony_ci q_vector->tx.itr_idx); 1198c2ecf20Sopenharmony_ci 1208c2ecf20Sopenharmony_ci ice_for_each_ring(ring, q_vector->rx) 1218c2ecf20Sopenharmony_ci ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx, 1228c2ecf20Sopenharmony_ci q_vector->rx.itr_idx); 1238c2ecf20Sopenharmony_ci 1248c2ecf20Sopenharmony_ci ice_flush(hw); 1258c2ecf20Sopenharmony_ci} 1268c2ecf20Sopenharmony_ci 1278c2ecf20Sopenharmony_ci/** 1288c2ecf20Sopenharmony_ci * ice_qvec_ena_irq - Enable IRQ for given queue vector 1298c2ecf20Sopenharmony_ci * @vsi: the VSI that contains queue vector 1308c2ecf20Sopenharmony_ci * @q_vector: queue vector 1318c2ecf20Sopenharmony_ci */ 1328c2ecf20Sopenharmony_cistatic void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector) 1338c2ecf20Sopenharmony_ci{ 1348c2ecf20Sopenharmony_ci struct ice_pf *pf = vsi->back; 1358c2ecf20Sopenharmony_ci struct ice_hw *hw = &pf->hw; 1368c2ecf20Sopenharmony_ci 1378c2ecf20Sopenharmony_ci ice_irq_dynamic_ena(hw, vsi, q_vector); 1388c2ecf20Sopenharmony_ci 1398c2ecf20Sopenharmony_ci ice_flush(hw); 1408c2ecf20Sopenharmony_ci} 1418c2ecf20Sopenharmony_ci 1428c2ecf20Sopenharmony_ci/** 1438c2ecf20Sopenharmony_ci * ice_qp_dis - Disables a queue pair 1448c2ecf20Sopenharmony_ci * @vsi: VSI of interest 1458c2ecf20Sopenharmony_ci * @q_idx: ring index in array 1468c2ecf20Sopenharmony_ci * 1478c2ecf20Sopenharmony_ci * Returns 0 on success, negative on failure. 1488c2ecf20Sopenharmony_ci */ 1498c2ecf20Sopenharmony_cistatic int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) 1508c2ecf20Sopenharmony_ci{ 1518c2ecf20Sopenharmony_ci struct ice_txq_meta txq_meta = { }; 1528c2ecf20Sopenharmony_ci struct ice_ring *tx_ring, *rx_ring; 1538c2ecf20Sopenharmony_ci struct ice_q_vector *q_vector; 1548c2ecf20Sopenharmony_ci int timeout = 50; 1558c2ecf20Sopenharmony_ci int err; 1568c2ecf20Sopenharmony_ci 1578c2ecf20Sopenharmony_ci if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq) 1588c2ecf20Sopenharmony_ci return -EINVAL; 1598c2ecf20Sopenharmony_ci 1608c2ecf20Sopenharmony_ci tx_ring = vsi->tx_rings[q_idx]; 1618c2ecf20Sopenharmony_ci rx_ring = vsi->rx_rings[q_idx]; 1628c2ecf20Sopenharmony_ci q_vector = rx_ring->q_vector; 1638c2ecf20Sopenharmony_ci 1648c2ecf20Sopenharmony_ci while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) { 1658c2ecf20Sopenharmony_ci timeout--; 1668c2ecf20Sopenharmony_ci if (!timeout) 1678c2ecf20Sopenharmony_ci return -EBUSY; 1688c2ecf20Sopenharmony_ci usleep_range(1000, 2000); 1698c2ecf20Sopenharmony_ci } 1708c2ecf20Sopenharmony_ci netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); 1718c2ecf20Sopenharmony_ci 1728c2ecf20Sopenharmony_ci ice_fill_txq_meta(vsi, tx_ring, &txq_meta); 1738c2ecf20Sopenharmony_ci err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta); 1748c2ecf20Sopenharmony_ci if (err) 1758c2ecf20Sopenharmony_ci return err; 1768c2ecf20Sopenharmony_ci if (ice_is_xdp_ena_vsi(vsi)) { 1778c2ecf20Sopenharmony_ci struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx]; 1788c2ecf20Sopenharmony_ci 1798c2ecf20Sopenharmony_ci memset(&txq_meta, 0, sizeof(txq_meta)); 1808c2ecf20Sopenharmony_ci ice_fill_txq_meta(vsi, xdp_ring, &txq_meta); 1818c2ecf20Sopenharmony_ci err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring, 1828c2ecf20Sopenharmony_ci &txq_meta); 1838c2ecf20Sopenharmony_ci if (err) 1848c2ecf20Sopenharmony_ci return err; 1858c2ecf20Sopenharmony_ci } 1868c2ecf20Sopenharmony_ci ice_qvec_dis_irq(vsi, rx_ring, q_vector); 1878c2ecf20Sopenharmony_ci 1888c2ecf20Sopenharmony_ci err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true); 1898c2ecf20Sopenharmony_ci if (err) 1908c2ecf20Sopenharmony_ci return err; 1918c2ecf20Sopenharmony_ci 1928c2ecf20Sopenharmony_ci ice_qvec_toggle_napi(vsi, q_vector, false); 1938c2ecf20Sopenharmony_ci ice_qp_clean_rings(vsi, q_idx); 1948c2ecf20Sopenharmony_ci ice_qp_reset_stats(vsi, q_idx); 1958c2ecf20Sopenharmony_ci 1968c2ecf20Sopenharmony_ci return 0; 1978c2ecf20Sopenharmony_ci} 1988c2ecf20Sopenharmony_ci 1998c2ecf20Sopenharmony_ci/** 2008c2ecf20Sopenharmony_ci * ice_qp_ena - Enables a queue pair 2018c2ecf20Sopenharmony_ci * @vsi: VSI of interest 2028c2ecf20Sopenharmony_ci * @q_idx: ring index in array 2038c2ecf20Sopenharmony_ci * 2048c2ecf20Sopenharmony_ci * Returns 0 on success, negative on failure. 2058c2ecf20Sopenharmony_ci */ 2068c2ecf20Sopenharmony_cistatic int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) 2078c2ecf20Sopenharmony_ci{ 2088c2ecf20Sopenharmony_ci struct ice_aqc_add_tx_qgrp *qg_buf; 2098c2ecf20Sopenharmony_ci struct ice_ring *tx_ring, *rx_ring; 2108c2ecf20Sopenharmony_ci struct ice_q_vector *q_vector; 2118c2ecf20Sopenharmony_ci u16 size; 2128c2ecf20Sopenharmony_ci int err; 2138c2ecf20Sopenharmony_ci 2148c2ecf20Sopenharmony_ci if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq) 2158c2ecf20Sopenharmony_ci return -EINVAL; 2168c2ecf20Sopenharmony_ci 2178c2ecf20Sopenharmony_ci size = struct_size(qg_buf, txqs, 1); 2188c2ecf20Sopenharmony_ci qg_buf = kzalloc(size, GFP_KERNEL); 2198c2ecf20Sopenharmony_ci if (!qg_buf) 2208c2ecf20Sopenharmony_ci return -ENOMEM; 2218c2ecf20Sopenharmony_ci 2228c2ecf20Sopenharmony_ci qg_buf->num_txqs = 1; 2238c2ecf20Sopenharmony_ci 2248c2ecf20Sopenharmony_ci tx_ring = vsi->tx_rings[q_idx]; 2258c2ecf20Sopenharmony_ci rx_ring = vsi->rx_rings[q_idx]; 2268c2ecf20Sopenharmony_ci q_vector = rx_ring->q_vector; 2278c2ecf20Sopenharmony_ci 2288c2ecf20Sopenharmony_ci err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf); 2298c2ecf20Sopenharmony_ci if (err) 2308c2ecf20Sopenharmony_ci goto free_buf; 2318c2ecf20Sopenharmony_ci 2328c2ecf20Sopenharmony_ci if (ice_is_xdp_ena_vsi(vsi)) { 2338c2ecf20Sopenharmony_ci struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx]; 2348c2ecf20Sopenharmony_ci 2358c2ecf20Sopenharmony_ci memset(qg_buf, 0, size); 2368c2ecf20Sopenharmony_ci qg_buf->num_txqs = 1; 2378c2ecf20Sopenharmony_ci err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf); 2388c2ecf20Sopenharmony_ci if (err) 2398c2ecf20Sopenharmony_ci goto free_buf; 2408c2ecf20Sopenharmony_ci ice_set_ring_xdp(xdp_ring); 2418c2ecf20Sopenharmony_ci xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring); 2428c2ecf20Sopenharmony_ci } 2438c2ecf20Sopenharmony_ci 2448c2ecf20Sopenharmony_ci err = ice_setup_rx_ctx(rx_ring); 2458c2ecf20Sopenharmony_ci if (err) 2468c2ecf20Sopenharmony_ci goto free_buf; 2478c2ecf20Sopenharmony_ci 2488c2ecf20Sopenharmony_ci ice_qvec_cfg_msix(vsi, q_vector); 2498c2ecf20Sopenharmony_ci 2508c2ecf20Sopenharmony_ci err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true); 2518c2ecf20Sopenharmony_ci if (err) 2528c2ecf20Sopenharmony_ci goto free_buf; 2538c2ecf20Sopenharmony_ci 2548c2ecf20Sopenharmony_ci clear_bit(__ICE_CFG_BUSY, vsi->state); 2558c2ecf20Sopenharmony_ci ice_qvec_toggle_napi(vsi, q_vector, true); 2568c2ecf20Sopenharmony_ci ice_qvec_ena_irq(vsi, q_vector); 2578c2ecf20Sopenharmony_ci 2588c2ecf20Sopenharmony_ci netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); 2598c2ecf20Sopenharmony_cifree_buf: 2608c2ecf20Sopenharmony_ci kfree(qg_buf); 2618c2ecf20Sopenharmony_ci return err; 2628c2ecf20Sopenharmony_ci} 2638c2ecf20Sopenharmony_ci 2648c2ecf20Sopenharmony_ci/** 2658c2ecf20Sopenharmony_ci * ice_xsk_alloc_pools - allocate a buffer pool for an XDP socket 2668c2ecf20Sopenharmony_ci * @vsi: VSI to allocate the buffer pool on 2678c2ecf20Sopenharmony_ci * 2688c2ecf20Sopenharmony_ci * Returns 0 on success, negative on error 2698c2ecf20Sopenharmony_ci */ 2708c2ecf20Sopenharmony_cistatic int ice_xsk_alloc_pools(struct ice_vsi *vsi) 2718c2ecf20Sopenharmony_ci{ 2728c2ecf20Sopenharmony_ci if (vsi->xsk_pools) 2738c2ecf20Sopenharmony_ci return 0; 2748c2ecf20Sopenharmony_ci 2758c2ecf20Sopenharmony_ci vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools), 2768c2ecf20Sopenharmony_ci GFP_KERNEL); 2778c2ecf20Sopenharmony_ci 2788c2ecf20Sopenharmony_ci if (!vsi->xsk_pools) { 2798c2ecf20Sopenharmony_ci vsi->num_xsk_pools = 0; 2808c2ecf20Sopenharmony_ci return -ENOMEM; 2818c2ecf20Sopenharmony_ci } 2828c2ecf20Sopenharmony_ci 2838c2ecf20Sopenharmony_ci return 0; 2848c2ecf20Sopenharmony_ci} 2858c2ecf20Sopenharmony_ci 2868c2ecf20Sopenharmony_ci/** 2878c2ecf20Sopenharmony_ci * ice_xsk_remove_pool - Remove an buffer pool for a certain ring/qid 2888c2ecf20Sopenharmony_ci * @vsi: VSI from which the VSI will be removed 2898c2ecf20Sopenharmony_ci * @qid: Ring/qid associated with the buffer pool 2908c2ecf20Sopenharmony_ci */ 2918c2ecf20Sopenharmony_cistatic void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid) 2928c2ecf20Sopenharmony_ci{ 2938c2ecf20Sopenharmony_ci vsi->xsk_pools[qid] = NULL; 2948c2ecf20Sopenharmony_ci vsi->num_xsk_pools_used--; 2958c2ecf20Sopenharmony_ci 2968c2ecf20Sopenharmony_ci if (vsi->num_xsk_pools_used == 0) { 2978c2ecf20Sopenharmony_ci kfree(vsi->xsk_pools); 2988c2ecf20Sopenharmony_ci vsi->xsk_pools = NULL; 2998c2ecf20Sopenharmony_ci vsi->num_xsk_pools = 0; 3008c2ecf20Sopenharmony_ci } 3018c2ecf20Sopenharmony_ci} 3028c2ecf20Sopenharmony_ci 3038c2ecf20Sopenharmony_ci/** 3048c2ecf20Sopenharmony_ci * ice_xsk_pool_disable - disable a buffer pool region 3058c2ecf20Sopenharmony_ci * @vsi: Current VSI 3068c2ecf20Sopenharmony_ci * @qid: queue ID 3078c2ecf20Sopenharmony_ci * 3088c2ecf20Sopenharmony_ci * Returns 0 on success, negative on failure 3098c2ecf20Sopenharmony_ci */ 3108c2ecf20Sopenharmony_cistatic int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid) 3118c2ecf20Sopenharmony_ci{ 3128c2ecf20Sopenharmony_ci if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools || 3138c2ecf20Sopenharmony_ci !vsi->xsk_pools[qid]) 3148c2ecf20Sopenharmony_ci return -EINVAL; 3158c2ecf20Sopenharmony_ci 3168c2ecf20Sopenharmony_ci xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR); 3178c2ecf20Sopenharmony_ci ice_xsk_remove_pool(vsi, qid); 3188c2ecf20Sopenharmony_ci 3198c2ecf20Sopenharmony_ci return 0; 3208c2ecf20Sopenharmony_ci} 3218c2ecf20Sopenharmony_ci 3228c2ecf20Sopenharmony_ci/** 3238c2ecf20Sopenharmony_ci * ice_xsk_pool_enable - enable a buffer pool region 3248c2ecf20Sopenharmony_ci * @vsi: Current VSI 3258c2ecf20Sopenharmony_ci * @pool: pointer to a requested buffer pool region 3268c2ecf20Sopenharmony_ci * @qid: queue ID 3278c2ecf20Sopenharmony_ci * 3288c2ecf20Sopenharmony_ci * Returns 0 on success, negative on failure 3298c2ecf20Sopenharmony_ci */ 3308c2ecf20Sopenharmony_cistatic int 3318c2ecf20Sopenharmony_ciice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) 3328c2ecf20Sopenharmony_ci{ 3338c2ecf20Sopenharmony_ci int err; 3348c2ecf20Sopenharmony_ci 3358c2ecf20Sopenharmony_ci if (vsi->type != ICE_VSI_PF) 3368c2ecf20Sopenharmony_ci return -EINVAL; 3378c2ecf20Sopenharmony_ci 3388c2ecf20Sopenharmony_ci if (!vsi->num_xsk_pools) 3398c2ecf20Sopenharmony_ci vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq); 3408c2ecf20Sopenharmony_ci if (qid >= vsi->num_xsk_pools) 3418c2ecf20Sopenharmony_ci return -EINVAL; 3428c2ecf20Sopenharmony_ci 3438c2ecf20Sopenharmony_ci err = ice_xsk_alloc_pools(vsi); 3448c2ecf20Sopenharmony_ci if (err) 3458c2ecf20Sopenharmony_ci return err; 3468c2ecf20Sopenharmony_ci 3478c2ecf20Sopenharmony_ci if (vsi->xsk_pools && vsi->xsk_pools[qid]) 3488c2ecf20Sopenharmony_ci return -EBUSY; 3498c2ecf20Sopenharmony_ci 3508c2ecf20Sopenharmony_ci vsi->xsk_pools[qid] = pool; 3518c2ecf20Sopenharmony_ci vsi->num_xsk_pools_used++; 3528c2ecf20Sopenharmony_ci 3538c2ecf20Sopenharmony_ci err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back), 3548c2ecf20Sopenharmony_ci ICE_RX_DMA_ATTR); 3558c2ecf20Sopenharmony_ci if (err) 3568c2ecf20Sopenharmony_ci return err; 3578c2ecf20Sopenharmony_ci 3588c2ecf20Sopenharmony_ci return 0; 3598c2ecf20Sopenharmony_ci} 3608c2ecf20Sopenharmony_ci 3618c2ecf20Sopenharmony_ci/** 3628c2ecf20Sopenharmony_ci * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state 3638c2ecf20Sopenharmony_ci * @vsi: Current VSI 3648c2ecf20Sopenharmony_ci * @pool: buffer pool to enable/associate to a ring, NULL to disable 3658c2ecf20Sopenharmony_ci * @qid: queue ID 3668c2ecf20Sopenharmony_ci * 3678c2ecf20Sopenharmony_ci * Returns 0 on success, negative on failure 3688c2ecf20Sopenharmony_ci */ 3698c2ecf20Sopenharmony_ciint ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) 3708c2ecf20Sopenharmony_ci{ 3718c2ecf20Sopenharmony_ci bool if_running, pool_present = !!pool; 3728c2ecf20Sopenharmony_ci int ret = 0, pool_failure = 0; 3738c2ecf20Sopenharmony_ci 3748c2ecf20Sopenharmony_ci if (qid >= vsi->num_rxq || qid >= vsi->num_txq) { 3758c2ecf20Sopenharmony_ci netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n"); 3768c2ecf20Sopenharmony_ci pool_failure = -EINVAL; 3778c2ecf20Sopenharmony_ci goto failure; 3788c2ecf20Sopenharmony_ci } 3798c2ecf20Sopenharmony_ci 3808c2ecf20Sopenharmony_ci if (!is_power_of_2(vsi->rx_rings[qid]->count) || 3818c2ecf20Sopenharmony_ci !is_power_of_2(vsi->tx_rings[qid]->count)) { 3828c2ecf20Sopenharmony_ci netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n"); 3838c2ecf20Sopenharmony_ci pool_failure = -EINVAL; 3848c2ecf20Sopenharmony_ci goto failure; 3858c2ecf20Sopenharmony_ci } 3868c2ecf20Sopenharmony_ci 3878c2ecf20Sopenharmony_ci if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi); 3888c2ecf20Sopenharmony_ci 3898c2ecf20Sopenharmony_ci if (if_running) { 3908c2ecf20Sopenharmony_ci ret = ice_qp_dis(vsi, qid); 3918c2ecf20Sopenharmony_ci if (ret) { 3928c2ecf20Sopenharmony_ci netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret); 3938c2ecf20Sopenharmony_ci goto xsk_pool_if_up; 3948c2ecf20Sopenharmony_ci } 3958c2ecf20Sopenharmony_ci } 3968c2ecf20Sopenharmony_ci 3978c2ecf20Sopenharmony_ci pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) : 3988c2ecf20Sopenharmony_ci ice_xsk_pool_disable(vsi, qid); 3998c2ecf20Sopenharmony_ci 4008c2ecf20Sopenharmony_cixsk_pool_if_up: 4018c2ecf20Sopenharmony_ci if (if_running) { 4028c2ecf20Sopenharmony_ci ret = ice_qp_ena(vsi, qid); 4038c2ecf20Sopenharmony_ci if (!ret && pool_present) 4048c2ecf20Sopenharmony_ci napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi); 4058c2ecf20Sopenharmony_ci else if (ret) 4068c2ecf20Sopenharmony_ci netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret); 4078c2ecf20Sopenharmony_ci } 4088c2ecf20Sopenharmony_ci 4098c2ecf20Sopenharmony_cifailure: 4108c2ecf20Sopenharmony_ci if (pool_failure) { 4118c2ecf20Sopenharmony_ci netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n", 4128c2ecf20Sopenharmony_ci pool_present ? "en" : "dis", pool_failure); 4138c2ecf20Sopenharmony_ci return pool_failure; 4148c2ecf20Sopenharmony_ci } 4158c2ecf20Sopenharmony_ci 4168c2ecf20Sopenharmony_ci return ret; 4178c2ecf20Sopenharmony_ci} 4188c2ecf20Sopenharmony_ci 4198c2ecf20Sopenharmony_ci/** 4208c2ecf20Sopenharmony_ci * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers 4218c2ecf20Sopenharmony_ci * @rx_ring: Rx ring 4228c2ecf20Sopenharmony_ci * @count: The number of buffers to allocate 4238c2ecf20Sopenharmony_ci * 4248c2ecf20Sopenharmony_ci * This function allocates a number of Rx buffers from the fill ring 4258c2ecf20Sopenharmony_ci * or the internal recycle mechanism and places them on the Rx ring. 4268c2ecf20Sopenharmony_ci * 4278c2ecf20Sopenharmony_ci * Returns false if all allocations were successful, true if any fail. 4288c2ecf20Sopenharmony_ci */ 4298c2ecf20Sopenharmony_cibool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) 4308c2ecf20Sopenharmony_ci{ 4318c2ecf20Sopenharmony_ci union ice_32b_rx_flex_desc *rx_desc; 4328c2ecf20Sopenharmony_ci u16 ntu = rx_ring->next_to_use; 4338c2ecf20Sopenharmony_ci struct ice_rx_buf *rx_buf; 4348c2ecf20Sopenharmony_ci bool ret = false; 4358c2ecf20Sopenharmony_ci dma_addr_t dma; 4368c2ecf20Sopenharmony_ci 4378c2ecf20Sopenharmony_ci if (!count) 4388c2ecf20Sopenharmony_ci return false; 4398c2ecf20Sopenharmony_ci 4408c2ecf20Sopenharmony_ci rx_desc = ICE_RX_DESC(rx_ring, ntu); 4418c2ecf20Sopenharmony_ci rx_buf = &rx_ring->rx_buf[ntu]; 4428c2ecf20Sopenharmony_ci 4438c2ecf20Sopenharmony_ci do { 4448c2ecf20Sopenharmony_ci rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool); 4458c2ecf20Sopenharmony_ci if (!rx_buf->xdp) { 4468c2ecf20Sopenharmony_ci ret = true; 4478c2ecf20Sopenharmony_ci break; 4488c2ecf20Sopenharmony_ci } 4498c2ecf20Sopenharmony_ci 4508c2ecf20Sopenharmony_ci dma = xsk_buff_xdp_get_dma(rx_buf->xdp); 4518c2ecf20Sopenharmony_ci rx_desc->read.pkt_addr = cpu_to_le64(dma); 4528c2ecf20Sopenharmony_ci rx_desc->wb.status_error0 = 0; 4538c2ecf20Sopenharmony_ci 4548c2ecf20Sopenharmony_ci rx_desc++; 4558c2ecf20Sopenharmony_ci rx_buf++; 4568c2ecf20Sopenharmony_ci ntu++; 4578c2ecf20Sopenharmony_ci 4588c2ecf20Sopenharmony_ci if (unlikely(ntu == rx_ring->count)) { 4598c2ecf20Sopenharmony_ci rx_desc = ICE_RX_DESC(rx_ring, 0); 4608c2ecf20Sopenharmony_ci rx_buf = rx_ring->rx_buf; 4618c2ecf20Sopenharmony_ci ntu = 0; 4628c2ecf20Sopenharmony_ci } 4638c2ecf20Sopenharmony_ci } while (--count); 4648c2ecf20Sopenharmony_ci 4658c2ecf20Sopenharmony_ci if (rx_ring->next_to_use != ntu) { 4668c2ecf20Sopenharmony_ci /* clear the status bits for the next_to_use descriptor */ 4678c2ecf20Sopenharmony_ci rx_desc->wb.status_error0 = 0; 4688c2ecf20Sopenharmony_ci ice_release_rx_desc(rx_ring, ntu); 4698c2ecf20Sopenharmony_ci } 4708c2ecf20Sopenharmony_ci 4718c2ecf20Sopenharmony_ci return ret; 4728c2ecf20Sopenharmony_ci} 4738c2ecf20Sopenharmony_ci 4748c2ecf20Sopenharmony_ci/** 4758c2ecf20Sopenharmony_ci * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring 4768c2ecf20Sopenharmony_ci * @rx_ring: Rx ring 4778c2ecf20Sopenharmony_ci */ 4788c2ecf20Sopenharmony_cistatic void ice_bump_ntc(struct ice_ring *rx_ring) 4798c2ecf20Sopenharmony_ci{ 4808c2ecf20Sopenharmony_ci int ntc = rx_ring->next_to_clean + 1; 4818c2ecf20Sopenharmony_ci 4828c2ecf20Sopenharmony_ci ntc = (ntc < rx_ring->count) ? ntc : 0; 4838c2ecf20Sopenharmony_ci rx_ring->next_to_clean = ntc; 4848c2ecf20Sopenharmony_ci prefetch(ICE_RX_DESC(rx_ring, ntc)); 4858c2ecf20Sopenharmony_ci} 4868c2ecf20Sopenharmony_ci 4878c2ecf20Sopenharmony_ci/** 4888c2ecf20Sopenharmony_ci * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer 4898c2ecf20Sopenharmony_ci * @rx_ring: Rx ring 4908c2ecf20Sopenharmony_ci * @rx_buf: zero-copy Rx buffer 4918c2ecf20Sopenharmony_ci * 4928c2ecf20Sopenharmony_ci * This function allocates a new skb from a zero-copy Rx buffer. 4938c2ecf20Sopenharmony_ci * 4948c2ecf20Sopenharmony_ci * Returns the skb on success, NULL on failure. 4958c2ecf20Sopenharmony_ci */ 4968c2ecf20Sopenharmony_cistatic struct sk_buff * 4978c2ecf20Sopenharmony_ciice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) 4988c2ecf20Sopenharmony_ci{ 4998c2ecf20Sopenharmony_ci unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta; 5008c2ecf20Sopenharmony_ci unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data; 5018c2ecf20Sopenharmony_ci unsigned int datasize_hard = rx_buf->xdp->data_end - 5028c2ecf20Sopenharmony_ci rx_buf->xdp->data_hard_start; 5038c2ecf20Sopenharmony_ci struct sk_buff *skb; 5048c2ecf20Sopenharmony_ci 5058c2ecf20Sopenharmony_ci skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard, 5068c2ecf20Sopenharmony_ci GFP_ATOMIC | __GFP_NOWARN); 5078c2ecf20Sopenharmony_ci if (unlikely(!skb)) 5088c2ecf20Sopenharmony_ci return NULL; 5098c2ecf20Sopenharmony_ci 5108c2ecf20Sopenharmony_ci skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start); 5118c2ecf20Sopenharmony_ci memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize); 5128c2ecf20Sopenharmony_ci if (metasize) 5138c2ecf20Sopenharmony_ci skb_metadata_set(skb, metasize); 5148c2ecf20Sopenharmony_ci 5158c2ecf20Sopenharmony_ci xsk_buff_free(rx_buf->xdp); 5168c2ecf20Sopenharmony_ci rx_buf->xdp = NULL; 5178c2ecf20Sopenharmony_ci return skb; 5188c2ecf20Sopenharmony_ci} 5198c2ecf20Sopenharmony_ci 5208c2ecf20Sopenharmony_ci/** 5218c2ecf20Sopenharmony_ci * ice_run_xdp_zc - Executes an XDP program in zero-copy path 5228c2ecf20Sopenharmony_ci * @rx_ring: Rx ring 5238c2ecf20Sopenharmony_ci * @xdp: xdp_buff used as input to the XDP program 5248c2ecf20Sopenharmony_ci * 5258c2ecf20Sopenharmony_ci * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} 5268c2ecf20Sopenharmony_ci */ 5278c2ecf20Sopenharmony_cistatic int 5288c2ecf20Sopenharmony_ciice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) 5298c2ecf20Sopenharmony_ci{ 5308c2ecf20Sopenharmony_ci int err, result = ICE_XDP_PASS; 5318c2ecf20Sopenharmony_ci struct bpf_prog *xdp_prog; 5328c2ecf20Sopenharmony_ci struct ice_ring *xdp_ring; 5338c2ecf20Sopenharmony_ci u32 act; 5348c2ecf20Sopenharmony_ci 5358c2ecf20Sopenharmony_ci rcu_read_lock(); 5368c2ecf20Sopenharmony_ci xdp_prog = READ_ONCE(rx_ring->xdp_prog); 5378c2ecf20Sopenharmony_ci if (!xdp_prog) { 5388c2ecf20Sopenharmony_ci rcu_read_unlock(); 5398c2ecf20Sopenharmony_ci return ICE_XDP_PASS; 5408c2ecf20Sopenharmony_ci } 5418c2ecf20Sopenharmony_ci 5428c2ecf20Sopenharmony_ci act = bpf_prog_run_xdp(xdp_prog, xdp); 5438c2ecf20Sopenharmony_ci 5448c2ecf20Sopenharmony_ci if (likely(act == XDP_REDIRECT)) { 5458c2ecf20Sopenharmony_ci err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); 5468c2ecf20Sopenharmony_ci if (err) 5478c2ecf20Sopenharmony_ci goto out_failure; 5488c2ecf20Sopenharmony_ci rcu_read_unlock(); 5498c2ecf20Sopenharmony_ci return ICE_XDP_REDIR; 5508c2ecf20Sopenharmony_ci } 5518c2ecf20Sopenharmony_ci 5528c2ecf20Sopenharmony_ci switch (act) { 5538c2ecf20Sopenharmony_ci case XDP_PASS: 5548c2ecf20Sopenharmony_ci break; 5558c2ecf20Sopenharmony_ci case XDP_TX: 5568c2ecf20Sopenharmony_ci xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index]; 5578c2ecf20Sopenharmony_ci result = ice_xmit_xdp_buff(xdp, xdp_ring); 5588c2ecf20Sopenharmony_ci if (result == ICE_XDP_CONSUMED) 5598c2ecf20Sopenharmony_ci goto out_failure; 5608c2ecf20Sopenharmony_ci break; 5618c2ecf20Sopenharmony_ci default: 5628c2ecf20Sopenharmony_ci bpf_warn_invalid_xdp_action(act); 5638c2ecf20Sopenharmony_ci fallthrough; 5648c2ecf20Sopenharmony_ci case XDP_ABORTED: 5658c2ecf20Sopenharmony_ciout_failure: 5668c2ecf20Sopenharmony_ci trace_xdp_exception(rx_ring->netdev, xdp_prog, act); 5678c2ecf20Sopenharmony_ci fallthrough; 5688c2ecf20Sopenharmony_ci case XDP_DROP: 5698c2ecf20Sopenharmony_ci result = ICE_XDP_CONSUMED; 5708c2ecf20Sopenharmony_ci break; 5718c2ecf20Sopenharmony_ci } 5728c2ecf20Sopenharmony_ci 5738c2ecf20Sopenharmony_ci rcu_read_unlock(); 5748c2ecf20Sopenharmony_ci return result; 5758c2ecf20Sopenharmony_ci} 5768c2ecf20Sopenharmony_ci 5778c2ecf20Sopenharmony_ci/** 5788c2ecf20Sopenharmony_ci * ice_clean_rx_irq_zc - consumes packets from the hardware ring 5798c2ecf20Sopenharmony_ci * @rx_ring: AF_XDP Rx ring 5808c2ecf20Sopenharmony_ci * @budget: NAPI budget 5818c2ecf20Sopenharmony_ci * 5828c2ecf20Sopenharmony_ci * Returns number of processed packets on success, remaining budget on failure. 5838c2ecf20Sopenharmony_ci */ 5848c2ecf20Sopenharmony_ciint ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) 5858c2ecf20Sopenharmony_ci{ 5868c2ecf20Sopenharmony_ci unsigned int total_rx_bytes = 0, total_rx_packets = 0; 5878c2ecf20Sopenharmony_ci u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); 5888c2ecf20Sopenharmony_ci unsigned int xdp_xmit = 0; 5898c2ecf20Sopenharmony_ci bool failure = false; 5908c2ecf20Sopenharmony_ci 5918c2ecf20Sopenharmony_ci while (likely(total_rx_packets < (unsigned int)budget)) { 5928c2ecf20Sopenharmony_ci union ice_32b_rx_flex_desc *rx_desc; 5938c2ecf20Sopenharmony_ci unsigned int size, xdp_res = 0; 5948c2ecf20Sopenharmony_ci struct ice_rx_buf *rx_buf; 5958c2ecf20Sopenharmony_ci struct sk_buff *skb; 5968c2ecf20Sopenharmony_ci u16 stat_err_bits; 5978c2ecf20Sopenharmony_ci u16 vlan_tag = 0; 5988c2ecf20Sopenharmony_ci u8 rx_ptype; 5998c2ecf20Sopenharmony_ci 6008c2ecf20Sopenharmony_ci if (cleaned_count >= ICE_RX_BUF_WRITE) { 6018c2ecf20Sopenharmony_ci failure |= ice_alloc_rx_bufs_zc(rx_ring, 6028c2ecf20Sopenharmony_ci cleaned_count); 6038c2ecf20Sopenharmony_ci cleaned_count = 0; 6048c2ecf20Sopenharmony_ci } 6058c2ecf20Sopenharmony_ci 6068c2ecf20Sopenharmony_ci rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); 6078c2ecf20Sopenharmony_ci 6088c2ecf20Sopenharmony_ci stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); 6098c2ecf20Sopenharmony_ci if (!ice_test_staterr(rx_desc, stat_err_bits)) 6108c2ecf20Sopenharmony_ci break; 6118c2ecf20Sopenharmony_ci 6128c2ecf20Sopenharmony_ci /* This memory barrier is needed to keep us from reading 6138c2ecf20Sopenharmony_ci * any other fields out of the rx_desc until we have 6148c2ecf20Sopenharmony_ci * verified the descriptor has been written back. 6158c2ecf20Sopenharmony_ci */ 6168c2ecf20Sopenharmony_ci dma_rmb(); 6178c2ecf20Sopenharmony_ci 6188c2ecf20Sopenharmony_ci size = le16_to_cpu(rx_desc->wb.pkt_len) & 6198c2ecf20Sopenharmony_ci ICE_RX_FLX_DESC_PKT_LEN_M; 6208c2ecf20Sopenharmony_ci if (!size) 6218c2ecf20Sopenharmony_ci break; 6228c2ecf20Sopenharmony_ci 6238c2ecf20Sopenharmony_ci rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; 6248c2ecf20Sopenharmony_ci rx_buf->xdp->data_end = rx_buf->xdp->data + size; 6258c2ecf20Sopenharmony_ci xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool); 6268c2ecf20Sopenharmony_ci 6278c2ecf20Sopenharmony_ci xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp); 6288c2ecf20Sopenharmony_ci if (xdp_res) { 6298c2ecf20Sopenharmony_ci if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) 6308c2ecf20Sopenharmony_ci xdp_xmit |= xdp_res; 6318c2ecf20Sopenharmony_ci else 6328c2ecf20Sopenharmony_ci xsk_buff_free(rx_buf->xdp); 6338c2ecf20Sopenharmony_ci 6348c2ecf20Sopenharmony_ci rx_buf->xdp = NULL; 6358c2ecf20Sopenharmony_ci total_rx_bytes += size; 6368c2ecf20Sopenharmony_ci total_rx_packets++; 6378c2ecf20Sopenharmony_ci cleaned_count++; 6388c2ecf20Sopenharmony_ci 6398c2ecf20Sopenharmony_ci ice_bump_ntc(rx_ring); 6408c2ecf20Sopenharmony_ci continue; 6418c2ecf20Sopenharmony_ci } 6428c2ecf20Sopenharmony_ci 6438c2ecf20Sopenharmony_ci /* XDP_PASS path */ 6448c2ecf20Sopenharmony_ci skb = ice_construct_skb_zc(rx_ring, rx_buf); 6458c2ecf20Sopenharmony_ci if (!skb) { 6468c2ecf20Sopenharmony_ci rx_ring->rx_stats.alloc_buf_failed++; 6478c2ecf20Sopenharmony_ci break; 6488c2ecf20Sopenharmony_ci } 6498c2ecf20Sopenharmony_ci 6508c2ecf20Sopenharmony_ci cleaned_count++; 6518c2ecf20Sopenharmony_ci ice_bump_ntc(rx_ring); 6528c2ecf20Sopenharmony_ci 6538c2ecf20Sopenharmony_ci if (eth_skb_pad(skb)) { 6548c2ecf20Sopenharmony_ci skb = NULL; 6558c2ecf20Sopenharmony_ci continue; 6568c2ecf20Sopenharmony_ci } 6578c2ecf20Sopenharmony_ci 6588c2ecf20Sopenharmony_ci total_rx_bytes += skb->len; 6598c2ecf20Sopenharmony_ci total_rx_packets++; 6608c2ecf20Sopenharmony_ci 6618c2ecf20Sopenharmony_ci stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S); 6628c2ecf20Sopenharmony_ci if (ice_test_staterr(rx_desc, stat_err_bits)) 6638c2ecf20Sopenharmony_ci vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1); 6648c2ecf20Sopenharmony_ci 6658c2ecf20Sopenharmony_ci rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) & 6668c2ecf20Sopenharmony_ci ICE_RX_FLEX_DESC_PTYPE_M; 6678c2ecf20Sopenharmony_ci 6688c2ecf20Sopenharmony_ci ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); 6698c2ecf20Sopenharmony_ci ice_receive_skb(rx_ring, skb, vlan_tag); 6708c2ecf20Sopenharmony_ci } 6718c2ecf20Sopenharmony_ci 6728c2ecf20Sopenharmony_ci ice_finalize_xdp_rx(rx_ring, xdp_xmit); 6738c2ecf20Sopenharmony_ci ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); 6748c2ecf20Sopenharmony_ci 6758c2ecf20Sopenharmony_ci if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { 6768c2ecf20Sopenharmony_ci if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) 6778c2ecf20Sopenharmony_ci xsk_set_rx_need_wakeup(rx_ring->xsk_pool); 6788c2ecf20Sopenharmony_ci else 6798c2ecf20Sopenharmony_ci xsk_clear_rx_need_wakeup(rx_ring->xsk_pool); 6808c2ecf20Sopenharmony_ci 6818c2ecf20Sopenharmony_ci return (int)total_rx_packets; 6828c2ecf20Sopenharmony_ci } 6838c2ecf20Sopenharmony_ci 6848c2ecf20Sopenharmony_ci return failure ? budget : (int)total_rx_packets; 6858c2ecf20Sopenharmony_ci} 6868c2ecf20Sopenharmony_ci 6878c2ecf20Sopenharmony_ci/** 6888c2ecf20Sopenharmony_ci * ice_xmit_zc - Completes AF_XDP entries, and cleans XDP entries 6898c2ecf20Sopenharmony_ci * @xdp_ring: XDP Tx ring 6908c2ecf20Sopenharmony_ci * @budget: max number of frames to xmit 6918c2ecf20Sopenharmony_ci * 6928c2ecf20Sopenharmony_ci * Returns true if cleanup/transmission is done. 6938c2ecf20Sopenharmony_ci */ 6948c2ecf20Sopenharmony_cistatic bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget) 6958c2ecf20Sopenharmony_ci{ 6968c2ecf20Sopenharmony_ci struct ice_tx_desc *tx_desc = NULL; 6978c2ecf20Sopenharmony_ci bool work_done = true; 6988c2ecf20Sopenharmony_ci struct xdp_desc desc; 6998c2ecf20Sopenharmony_ci dma_addr_t dma; 7008c2ecf20Sopenharmony_ci 7018c2ecf20Sopenharmony_ci while (likely(budget-- > 0)) { 7028c2ecf20Sopenharmony_ci struct ice_tx_buf *tx_buf; 7038c2ecf20Sopenharmony_ci 7048c2ecf20Sopenharmony_ci if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) { 7058c2ecf20Sopenharmony_ci xdp_ring->tx_stats.tx_busy++; 7068c2ecf20Sopenharmony_ci work_done = false; 7078c2ecf20Sopenharmony_ci break; 7088c2ecf20Sopenharmony_ci } 7098c2ecf20Sopenharmony_ci 7108c2ecf20Sopenharmony_ci tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use]; 7118c2ecf20Sopenharmony_ci 7128c2ecf20Sopenharmony_ci if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc)) 7138c2ecf20Sopenharmony_ci break; 7148c2ecf20Sopenharmony_ci 7158c2ecf20Sopenharmony_ci dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr); 7168c2ecf20Sopenharmony_ci xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, 7178c2ecf20Sopenharmony_ci desc.len); 7188c2ecf20Sopenharmony_ci 7198c2ecf20Sopenharmony_ci tx_buf->bytecount = desc.len; 7208c2ecf20Sopenharmony_ci 7218c2ecf20Sopenharmony_ci tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use); 7228c2ecf20Sopenharmony_ci tx_desc->buf_addr = cpu_to_le64(dma); 7238c2ecf20Sopenharmony_ci tx_desc->cmd_type_offset_bsz = 7248c2ecf20Sopenharmony_ci ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0); 7258c2ecf20Sopenharmony_ci 7268c2ecf20Sopenharmony_ci xdp_ring->next_to_use++; 7278c2ecf20Sopenharmony_ci if (xdp_ring->next_to_use == xdp_ring->count) 7288c2ecf20Sopenharmony_ci xdp_ring->next_to_use = 0; 7298c2ecf20Sopenharmony_ci } 7308c2ecf20Sopenharmony_ci 7318c2ecf20Sopenharmony_ci if (tx_desc) { 7328c2ecf20Sopenharmony_ci ice_xdp_ring_update_tail(xdp_ring); 7338c2ecf20Sopenharmony_ci xsk_tx_release(xdp_ring->xsk_pool); 7348c2ecf20Sopenharmony_ci } 7358c2ecf20Sopenharmony_ci 7368c2ecf20Sopenharmony_ci return budget > 0 && work_done; 7378c2ecf20Sopenharmony_ci} 7388c2ecf20Sopenharmony_ci 7398c2ecf20Sopenharmony_ci/** 7408c2ecf20Sopenharmony_ci * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer 7418c2ecf20Sopenharmony_ci * @xdp_ring: XDP Tx ring 7428c2ecf20Sopenharmony_ci * @tx_buf: Tx buffer to clean 7438c2ecf20Sopenharmony_ci */ 7448c2ecf20Sopenharmony_cistatic void 7458c2ecf20Sopenharmony_ciice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf) 7468c2ecf20Sopenharmony_ci{ 7478c2ecf20Sopenharmony_ci xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf); 7488c2ecf20Sopenharmony_ci dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma), 7498c2ecf20Sopenharmony_ci dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); 7508c2ecf20Sopenharmony_ci dma_unmap_len_set(tx_buf, len, 0); 7518c2ecf20Sopenharmony_ci} 7528c2ecf20Sopenharmony_ci 7538c2ecf20Sopenharmony_ci/** 7548c2ecf20Sopenharmony_ci * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries 7558c2ecf20Sopenharmony_ci * @xdp_ring: XDP Tx ring 7568c2ecf20Sopenharmony_ci * @budget: NAPI budget 7578c2ecf20Sopenharmony_ci * 7588c2ecf20Sopenharmony_ci * Returns true if cleanup/tranmission is done. 7598c2ecf20Sopenharmony_ci */ 7608c2ecf20Sopenharmony_cibool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget) 7618c2ecf20Sopenharmony_ci{ 7628c2ecf20Sopenharmony_ci int total_packets = 0, total_bytes = 0; 7638c2ecf20Sopenharmony_ci s16 ntc = xdp_ring->next_to_clean; 7648c2ecf20Sopenharmony_ci struct ice_tx_desc *tx_desc; 7658c2ecf20Sopenharmony_ci struct ice_tx_buf *tx_buf; 7668c2ecf20Sopenharmony_ci u32 xsk_frames = 0; 7678c2ecf20Sopenharmony_ci bool xmit_done; 7688c2ecf20Sopenharmony_ci 7698c2ecf20Sopenharmony_ci tx_desc = ICE_TX_DESC(xdp_ring, ntc); 7708c2ecf20Sopenharmony_ci tx_buf = &xdp_ring->tx_buf[ntc]; 7718c2ecf20Sopenharmony_ci ntc -= xdp_ring->count; 7728c2ecf20Sopenharmony_ci 7738c2ecf20Sopenharmony_ci do { 7748c2ecf20Sopenharmony_ci if (!(tx_desc->cmd_type_offset_bsz & 7758c2ecf20Sopenharmony_ci cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) 7768c2ecf20Sopenharmony_ci break; 7778c2ecf20Sopenharmony_ci 7788c2ecf20Sopenharmony_ci total_bytes += tx_buf->bytecount; 7798c2ecf20Sopenharmony_ci total_packets++; 7808c2ecf20Sopenharmony_ci 7818c2ecf20Sopenharmony_ci if (tx_buf->raw_buf) { 7828c2ecf20Sopenharmony_ci ice_clean_xdp_tx_buf(xdp_ring, tx_buf); 7838c2ecf20Sopenharmony_ci tx_buf->raw_buf = NULL; 7848c2ecf20Sopenharmony_ci } else { 7858c2ecf20Sopenharmony_ci xsk_frames++; 7868c2ecf20Sopenharmony_ci } 7878c2ecf20Sopenharmony_ci 7888c2ecf20Sopenharmony_ci tx_desc->cmd_type_offset_bsz = 0; 7898c2ecf20Sopenharmony_ci tx_buf++; 7908c2ecf20Sopenharmony_ci tx_desc++; 7918c2ecf20Sopenharmony_ci ntc++; 7928c2ecf20Sopenharmony_ci 7938c2ecf20Sopenharmony_ci if (unlikely(!ntc)) { 7948c2ecf20Sopenharmony_ci ntc -= xdp_ring->count; 7958c2ecf20Sopenharmony_ci tx_buf = xdp_ring->tx_buf; 7968c2ecf20Sopenharmony_ci tx_desc = ICE_TX_DESC(xdp_ring, 0); 7978c2ecf20Sopenharmony_ci } 7988c2ecf20Sopenharmony_ci 7998c2ecf20Sopenharmony_ci prefetch(tx_desc); 8008c2ecf20Sopenharmony_ci 8018c2ecf20Sopenharmony_ci } while (likely(--budget)); 8028c2ecf20Sopenharmony_ci 8038c2ecf20Sopenharmony_ci ntc += xdp_ring->count; 8048c2ecf20Sopenharmony_ci xdp_ring->next_to_clean = ntc; 8058c2ecf20Sopenharmony_ci 8068c2ecf20Sopenharmony_ci if (xsk_frames) 8078c2ecf20Sopenharmony_ci xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames); 8088c2ecf20Sopenharmony_ci 8098c2ecf20Sopenharmony_ci if (xsk_uses_need_wakeup(xdp_ring->xsk_pool)) 8108c2ecf20Sopenharmony_ci xsk_set_tx_need_wakeup(xdp_ring->xsk_pool); 8118c2ecf20Sopenharmony_ci 8128c2ecf20Sopenharmony_ci ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes); 8138c2ecf20Sopenharmony_ci xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK); 8148c2ecf20Sopenharmony_ci 8158c2ecf20Sopenharmony_ci return budget > 0 && xmit_done; 8168c2ecf20Sopenharmony_ci} 8178c2ecf20Sopenharmony_ci 8188c2ecf20Sopenharmony_ci/** 8198c2ecf20Sopenharmony_ci * ice_xsk_wakeup - Implements ndo_xsk_wakeup 8208c2ecf20Sopenharmony_ci * @netdev: net_device 8218c2ecf20Sopenharmony_ci * @queue_id: queue to wake up 8228c2ecf20Sopenharmony_ci * @flags: ignored in our case, since we have Rx and Tx in the same NAPI 8238c2ecf20Sopenharmony_ci * 8248c2ecf20Sopenharmony_ci * Returns negative on error, zero otherwise. 8258c2ecf20Sopenharmony_ci */ 8268c2ecf20Sopenharmony_ciint 8278c2ecf20Sopenharmony_ciice_xsk_wakeup(struct net_device *netdev, u32 queue_id, 8288c2ecf20Sopenharmony_ci u32 __always_unused flags) 8298c2ecf20Sopenharmony_ci{ 8308c2ecf20Sopenharmony_ci struct ice_netdev_priv *np = netdev_priv(netdev); 8318c2ecf20Sopenharmony_ci struct ice_q_vector *q_vector; 8328c2ecf20Sopenharmony_ci struct ice_vsi *vsi = np->vsi; 8338c2ecf20Sopenharmony_ci struct ice_ring *ring; 8348c2ecf20Sopenharmony_ci 8358c2ecf20Sopenharmony_ci if (test_bit(__ICE_DOWN, vsi->state)) 8368c2ecf20Sopenharmony_ci return -ENETDOWN; 8378c2ecf20Sopenharmony_ci 8388c2ecf20Sopenharmony_ci if (!ice_is_xdp_ena_vsi(vsi)) 8398c2ecf20Sopenharmony_ci return -ENXIO; 8408c2ecf20Sopenharmony_ci 8418c2ecf20Sopenharmony_ci if (queue_id >= vsi->num_txq) 8428c2ecf20Sopenharmony_ci return -ENXIO; 8438c2ecf20Sopenharmony_ci 8448c2ecf20Sopenharmony_ci if (!vsi->xdp_rings[queue_id]->xsk_pool) 8458c2ecf20Sopenharmony_ci return -ENXIO; 8468c2ecf20Sopenharmony_ci 8478c2ecf20Sopenharmony_ci ring = vsi->xdp_rings[queue_id]; 8488c2ecf20Sopenharmony_ci 8498c2ecf20Sopenharmony_ci /* The idea here is that if NAPI is running, mark a miss, so 8508c2ecf20Sopenharmony_ci * it will run again. If not, trigger an interrupt and 8518c2ecf20Sopenharmony_ci * schedule the NAPI from interrupt context. If NAPI would be 8528c2ecf20Sopenharmony_ci * scheduled here, the interrupt affinity would not be 8538c2ecf20Sopenharmony_ci * honored. 8548c2ecf20Sopenharmony_ci */ 8558c2ecf20Sopenharmony_ci q_vector = ring->q_vector; 8568c2ecf20Sopenharmony_ci if (!napi_if_scheduled_mark_missed(&q_vector->napi)) 8578c2ecf20Sopenharmony_ci ice_trigger_sw_intr(&vsi->back->hw, q_vector); 8588c2ecf20Sopenharmony_ci 8598c2ecf20Sopenharmony_ci return 0; 8608c2ecf20Sopenharmony_ci} 8618c2ecf20Sopenharmony_ci 8628c2ecf20Sopenharmony_ci/** 8638c2ecf20Sopenharmony_ci * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached 8648c2ecf20Sopenharmony_ci * @vsi: VSI to be checked 8658c2ecf20Sopenharmony_ci * 8668c2ecf20Sopenharmony_ci * Returns true if any of the Rx rings has an AF_XDP buff pool attached 8678c2ecf20Sopenharmony_ci */ 8688c2ecf20Sopenharmony_cibool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi) 8698c2ecf20Sopenharmony_ci{ 8708c2ecf20Sopenharmony_ci int i; 8718c2ecf20Sopenharmony_ci 8728c2ecf20Sopenharmony_ci if (!vsi->xsk_pools) 8738c2ecf20Sopenharmony_ci return false; 8748c2ecf20Sopenharmony_ci 8758c2ecf20Sopenharmony_ci for (i = 0; i < vsi->num_xsk_pools; i++) { 8768c2ecf20Sopenharmony_ci if (vsi->xsk_pools[i]) 8778c2ecf20Sopenharmony_ci return true; 8788c2ecf20Sopenharmony_ci } 8798c2ecf20Sopenharmony_ci 8808c2ecf20Sopenharmony_ci return false; 8818c2ecf20Sopenharmony_ci} 8828c2ecf20Sopenharmony_ci 8838c2ecf20Sopenharmony_ci/** 8848c2ecf20Sopenharmony_ci * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring 8858c2ecf20Sopenharmony_ci * @rx_ring: ring to be cleaned 8868c2ecf20Sopenharmony_ci */ 8878c2ecf20Sopenharmony_civoid ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) 8888c2ecf20Sopenharmony_ci{ 8898c2ecf20Sopenharmony_ci u16 i; 8908c2ecf20Sopenharmony_ci 8918c2ecf20Sopenharmony_ci for (i = 0; i < rx_ring->count; i++) { 8928c2ecf20Sopenharmony_ci struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; 8938c2ecf20Sopenharmony_ci 8948c2ecf20Sopenharmony_ci if (!rx_buf->xdp) 8958c2ecf20Sopenharmony_ci continue; 8968c2ecf20Sopenharmony_ci 8978c2ecf20Sopenharmony_ci rx_buf->xdp = NULL; 8988c2ecf20Sopenharmony_ci } 8998c2ecf20Sopenharmony_ci} 9008c2ecf20Sopenharmony_ci 9018c2ecf20Sopenharmony_ci/** 9028c2ecf20Sopenharmony_ci * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues 9038c2ecf20Sopenharmony_ci * @xdp_ring: XDP_Tx ring 9048c2ecf20Sopenharmony_ci */ 9058c2ecf20Sopenharmony_civoid ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring) 9068c2ecf20Sopenharmony_ci{ 9078c2ecf20Sopenharmony_ci u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use; 9088c2ecf20Sopenharmony_ci u32 xsk_frames = 0; 9098c2ecf20Sopenharmony_ci 9108c2ecf20Sopenharmony_ci while (ntc != ntu) { 9118c2ecf20Sopenharmony_ci struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc]; 9128c2ecf20Sopenharmony_ci 9138c2ecf20Sopenharmony_ci if (tx_buf->raw_buf) 9148c2ecf20Sopenharmony_ci ice_clean_xdp_tx_buf(xdp_ring, tx_buf); 9158c2ecf20Sopenharmony_ci else 9168c2ecf20Sopenharmony_ci xsk_frames++; 9178c2ecf20Sopenharmony_ci 9188c2ecf20Sopenharmony_ci tx_buf->raw_buf = NULL; 9198c2ecf20Sopenharmony_ci 9208c2ecf20Sopenharmony_ci ntc++; 9218c2ecf20Sopenharmony_ci if (ntc >= xdp_ring->count) 9228c2ecf20Sopenharmony_ci ntc = 0; 9238c2ecf20Sopenharmony_ci } 9248c2ecf20Sopenharmony_ci 9258c2ecf20Sopenharmony_ci if (xsk_frames) 9268c2ecf20Sopenharmony_ci xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames); 9278c2ecf20Sopenharmony_ci} 928