18c2ecf20Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0 28c2ecf20Sopenharmony_ci/* Copyright (c) 2019, Intel Corporation. */ 38c2ecf20Sopenharmony_ci 48c2ecf20Sopenharmony_ci#include <net/xdp_sock_drv.h> 58c2ecf20Sopenharmony_ci#include "ice_base.h" 68c2ecf20Sopenharmony_ci#include "ice_lib.h" 78c2ecf20Sopenharmony_ci#include "ice_dcb_lib.h" 88c2ecf20Sopenharmony_ci 98c2ecf20Sopenharmony_ci/** 108c2ecf20Sopenharmony_ci * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI 118c2ecf20Sopenharmony_ci * @qs_cfg: gathered variables needed for PF->VSI queues assignment 128c2ecf20Sopenharmony_ci * 138c2ecf20Sopenharmony_ci * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap 148c2ecf20Sopenharmony_ci */ 158c2ecf20Sopenharmony_cistatic int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg) 168c2ecf20Sopenharmony_ci{ 178c2ecf20Sopenharmony_ci unsigned int offset, i; 188c2ecf20Sopenharmony_ci 198c2ecf20Sopenharmony_ci mutex_lock(qs_cfg->qs_mutex); 208c2ecf20Sopenharmony_ci offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size, 218c2ecf20Sopenharmony_ci 0, qs_cfg->q_count, 0); 228c2ecf20Sopenharmony_ci if (offset >= qs_cfg->pf_map_size) { 238c2ecf20Sopenharmony_ci mutex_unlock(qs_cfg->qs_mutex); 248c2ecf20Sopenharmony_ci return -ENOMEM; 258c2ecf20Sopenharmony_ci } 268c2ecf20Sopenharmony_ci 278c2ecf20Sopenharmony_ci bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count); 288c2ecf20Sopenharmony_ci for (i = 0; i < qs_cfg->q_count; i++) 298c2ecf20Sopenharmony_ci qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset); 308c2ecf20Sopenharmony_ci mutex_unlock(qs_cfg->qs_mutex); 318c2ecf20Sopenharmony_ci 328c2ecf20Sopenharmony_ci return 0; 338c2ecf20Sopenharmony_ci} 348c2ecf20Sopenharmony_ci 358c2ecf20Sopenharmony_ci/** 368c2ecf20Sopenharmony_ci * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI 378c2ecf20Sopenharmony_ci * @qs_cfg: gathered variables needed for pf->vsi queues assignment 
388c2ecf20Sopenharmony_ci * 398c2ecf20Sopenharmony_ci * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap 408c2ecf20Sopenharmony_ci */ 418c2ecf20Sopenharmony_cistatic int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg) 428c2ecf20Sopenharmony_ci{ 438c2ecf20Sopenharmony_ci unsigned int i, index = 0; 448c2ecf20Sopenharmony_ci 458c2ecf20Sopenharmony_ci mutex_lock(qs_cfg->qs_mutex); 468c2ecf20Sopenharmony_ci for (i = 0; i < qs_cfg->q_count; i++) { 478c2ecf20Sopenharmony_ci index = find_next_zero_bit(qs_cfg->pf_map, 488c2ecf20Sopenharmony_ci qs_cfg->pf_map_size, index); 498c2ecf20Sopenharmony_ci if (index >= qs_cfg->pf_map_size) 508c2ecf20Sopenharmony_ci goto err_scatter; 518c2ecf20Sopenharmony_ci set_bit(index, qs_cfg->pf_map); 528c2ecf20Sopenharmony_ci qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index; 538c2ecf20Sopenharmony_ci } 548c2ecf20Sopenharmony_ci mutex_unlock(qs_cfg->qs_mutex); 558c2ecf20Sopenharmony_ci 568c2ecf20Sopenharmony_ci return 0; 578c2ecf20Sopenharmony_cierr_scatter: 588c2ecf20Sopenharmony_ci for (index = 0; index < i; index++) { 598c2ecf20Sopenharmony_ci clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map); 608c2ecf20Sopenharmony_ci qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0; 618c2ecf20Sopenharmony_ci } 628c2ecf20Sopenharmony_ci mutex_unlock(qs_cfg->qs_mutex); 638c2ecf20Sopenharmony_ci 648c2ecf20Sopenharmony_ci return -ENOMEM; 658c2ecf20Sopenharmony_ci} 668c2ecf20Sopenharmony_ci 678c2ecf20Sopenharmony_ci/** 688c2ecf20Sopenharmony_ci * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled 698c2ecf20Sopenharmony_ci * @pf: the PF being configured 708c2ecf20Sopenharmony_ci * @pf_q: the PF queue 718c2ecf20Sopenharmony_ci * @ena: enable or disable state of the queue 728c2ecf20Sopenharmony_ci * 738c2ecf20Sopenharmony_ci * This routine will wait for the given Rx queue of the PF to reach the 748c2ecf20Sopenharmony_ci * enabled or disabled state. 
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		/* QENA_STAT reflects the hardware's current enable state;
		 * done as soon as it matches the requested state
		 */
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		/* give hardware time to settle between polls */
		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector and set default value for ITR setting associated
 * with this q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;

	/* allocate q_vector; devm-managed, freed via devm_kfree() in
	 * ice_free_q_vector()
	 */
	q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
				GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	/* VF VSIs get neither an affinity mask nor a NAPI handler */
	if (vsi->type == ICE_VSI_VF)
		goto out;
	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	/* detach all rings from the vector before it goes away */
	ice_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;
	ice_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	devm_kfree(dev, q_vector);
	vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set to
	 * ICE_ITR_GRAN_US in all four granularity fields
	 */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
	      GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
	      GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
	      GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
		return;

	/* rebuild the register value with all four fields programmed;
	 * note this clears DIS_AUTOMASK as a side effect of the full rewrite
	 */
	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
		  GLINT_CTL_ITR_GRAN_200_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
		  GLINT_CTL_ITR_GRAN_100_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
		  GLINT_CTL_ITR_GRAN_50_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
		  GLINT_CTL_ITR_GRAN_25_M);
	wr32(hw, GLINT_CTL, regval);
}
1998c2ecf20Sopenharmony_ci 2008c2ecf20Sopenharmony_ci/** 2018c2ecf20Sopenharmony_ci * ice_calc_q_handle - calculate the queue handle 2028c2ecf20Sopenharmony_ci * @vsi: VSI that ring belongs to 2038c2ecf20Sopenharmony_ci * @ring: ring to get the absolute queue index 2048c2ecf20Sopenharmony_ci * @tc: traffic class number 2058c2ecf20Sopenharmony_ci */ 2068c2ecf20Sopenharmony_cistatic u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc) 2078c2ecf20Sopenharmony_ci{ 2088c2ecf20Sopenharmony_ci WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n"); 2098c2ecf20Sopenharmony_ci 2108c2ecf20Sopenharmony_ci /* Idea here for calculation is that we subtract the number of queue 2118c2ecf20Sopenharmony_ci * count from TC that ring belongs to from it's absolute queue index 2128c2ecf20Sopenharmony_ci * and as a result we get the queue's index within TC. 2138c2ecf20Sopenharmony_ci */ 2148c2ecf20Sopenharmony_ci return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset; 2158c2ecf20Sopenharmony_ci} 2168c2ecf20Sopenharmony_ci 2178c2ecf20Sopenharmony_ci/** 2188c2ecf20Sopenharmony_ci * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance 2198c2ecf20Sopenharmony_ci * @ring: The Tx ring to configure 2208c2ecf20Sopenharmony_ci * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized 2218c2ecf20Sopenharmony_ci * @pf_q: queue index in the PF space 2228c2ecf20Sopenharmony_ci * 2238c2ecf20Sopenharmony_ci * Configure the Tx descriptor ring in TLAN context. 
 */
static void
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	/* descriptor ring base address, shifted per hardware layout */
	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	default:
		/* unsupported VSI type: leave the context only partially
		 * initialized; caller programs it as-is
		 */
		return;
	}

	/* make sure the context is associated with the right VSI */
	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
int ice_setup_rx_ctx(struct ice_ring *ring)
{
	struct device *dev = ice_pf_to_dev(ring->vsi->back);
	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
	u16 num_bufs = ICE_DESC_UNUSED(ring);
	struct ice_vsi *vsi = ring->vsi;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	struct ice_hw *hw;
	u16 pf_q;
	int err;

	hw = &vsi->back->hw;

	/* what is Rx queue number in global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;

	/* XDP/XSK memory models are only registered for PF VSIs */
	if (ring->vsi->type == ICE_VSI_PF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
			/* coverity[check_return] */
			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
					 ring->q_index);

		ring->xsk_pool = ice_xsk_pool(ring);
		if (ring->xsk_pool) {
			/* drop any previously registered mem model before
			 * switching this rxq to the XSK buffer pool
			 */
			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

			ring->rx_buf_len =
				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			/* For AF_XDP ZC, we disallow packets to span on
			 * multiple buffers, thus letting us skip that
			 * handling in the fast-path.
			 */
			chain_len = 1;
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_XSK_BUFF_POOL,
							 NULL);
			if (err)
				return err;
			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);

			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
				/* coverity[check_return] */
				xdp_rxq_info_reg(&ring->xdp_rxq,
						 ring->netdev,
						 ring->q_index);

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}
	/* Receive Queue Base Address.
	 * Indicates the starting address of the descriptor queue defined in
	 * 128 Byte units.
	 */
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
				     BIT_ULL(ICE_RLAN_CTX_DBUF_S));

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = 1;

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
	rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
			       chain_len * ring->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context which
	 * allows this driver to select a specific receive descriptor format
	 * increasing context priority to pick up profile ID; default is 0x01;
	 * setting to 0x03 to ensure profile is programming if prev context is
	 * of same priority
	 */
	if (vsi->type != ICE_VSI_VF)
		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3);
	else
		ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3);

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	/* VF queue setup stops here; the remaining steps (buffer alignment,
	 * tail register, buffer allocation) are host-side only
	 */
	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* configure Rx buffer alignment */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		ice_clear_ring_build_skb_ena(ring);
	else
		ice_set_ring_build_skb_ena(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);

	if (ring->xsk_pool) {
		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			/* not fatal: the queue comes up, just underfilled */
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");

			return 0;
		}

		err = ice_alloc_rx_bufs_zc(ring, num_bufs);
		if (err)
			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
				 ring->q_index, pf_q);
		return 0;
	}

	ice_alloc_rx_bufs(ring, num_bufs);

	return 0;
}

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for pf->vsi queues assignment
 *
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
4488c2ecf20Sopenharmony_ci * 4498c2ecf20Sopenharmony_ci * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap 4508c2ecf20Sopenharmony_ci */ 4518c2ecf20Sopenharmony_ciint __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg) 4528c2ecf20Sopenharmony_ci{ 4538c2ecf20Sopenharmony_ci int ret = 0; 4548c2ecf20Sopenharmony_ci 4558c2ecf20Sopenharmony_ci ret = __ice_vsi_get_qs_contig(qs_cfg); 4568c2ecf20Sopenharmony_ci if (ret) { 4578c2ecf20Sopenharmony_ci /* contig failed, so try with scatter approach */ 4588c2ecf20Sopenharmony_ci qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER; 4598c2ecf20Sopenharmony_ci qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count, 4608c2ecf20Sopenharmony_ci qs_cfg->scatter_count); 4618c2ecf20Sopenharmony_ci ret = __ice_vsi_get_qs_sc(qs_cfg); 4628c2ecf20Sopenharmony_ci } 4638c2ecf20Sopenharmony_ci return ret; 4648c2ecf20Sopenharmony_ci} 4658c2ecf20Sopenharmony_ci 4668c2ecf20Sopenharmony_ci/** 4678c2ecf20Sopenharmony_ci * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait 4688c2ecf20Sopenharmony_ci * @vsi: the VSI being configured 4698c2ecf20Sopenharmony_ci * @ena: start or stop the Rx ring 4708c2ecf20Sopenharmony_ci * @rxq_idx: 0-based Rx queue index for the VSI passed in 4718c2ecf20Sopenharmony_ci * @wait: wait or don't wait for configuration to finish in hardware 4728c2ecf20Sopenharmony_ci * 4738c2ecf20Sopenharmony_ci * Return 0 on success and negative on error. 
4748c2ecf20Sopenharmony_ci */ 4758c2ecf20Sopenharmony_ciint 4768c2ecf20Sopenharmony_ciice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait) 4778c2ecf20Sopenharmony_ci{ 4788c2ecf20Sopenharmony_ci int pf_q = vsi->rxq_map[rxq_idx]; 4798c2ecf20Sopenharmony_ci struct ice_pf *pf = vsi->back; 4808c2ecf20Sopenharmony_ci struct ice_hw *hw = &pf->hw; 4818c2ecf20Sopenharmony_ci u32 rx_reg; 4828c2ecf20Sopenharmony_ci 4838c2ecf20Sopenharmony_ci rx_reg = rd32(hw, QRX_CTRL(pf_q)); 4848c2ecf20Sopenharmony_ci 4858c2ecf20Sopenharmony_ci /* Skip if the queue is already in the requested state */ 4868c2ecf20Sopenharmony_ci if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) 4878c2ecf20Sopenharmony_ci return 0; 4888c2ecf20Sopenharmony_ci 4898c2ecf20Sopenharmony_ci /* turn on/off the queue */ 4908c2ecf20Sopenharmony_ci if (ena) 4918c2ecf20Sopenharmony_ci rx_reg |= QRX_CTRL_QENA_REQ_M; 4928c2ecf20Sopenharmony_ci else 4938c2ecf20Sopenharmony_ci rx_reg &= ~QRX_CTRL_QENA_REQ_M; 4948c2ecf20Sopenharmony_ci wr32(hw, QRX_CTRL(pf_q), rx_reg); 4958c2ecf20Sopenharmony_ci 4968c2ecf20Sopenharmony_ci if (!wait) 4978c2ecf20Sopenharmony_ci return 0; 4988c2ecf20Sopenharmony_ci 4998c2ecf20Sopenharmony_ci ice_flush(hw); 5008c2ecf20Sopenharmony_ci return ice_pf_rxq_wait(pf, pf_q, ena); 5018c2ecf20Sopenharmony_ci} 5028c2ecf20Sopenharmony_ci 5038c2ecf20Sopenharmony_ci/** 5048c2ecf20Sopenharmony_ci * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started 5058c2ecf20Sopenharmony_ci * @vsi: the VSI being configured 5068c2ecf20Sopenharmony_ci * @ena: true/false to verify Rx ring has been enabled/disabled respectively 5078c2ecf20Sopenharmony_ci * @rxq_idx: 0-based Rx queue index for the VSI passed in 5088c2ecf20Sopenharmony_ci * 5098c2ecf20Sopenharmony_ci * This routine will wait for the given Rx queue of the VSI to reach the 5108c2ecf20Sopenharmony_ci * enabled or disabled state. 
Returns -ETIMEDOUT in case of failing to reach
 * the requested state after multiple retries; else will return 0 in case of
 * success.
 */
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;

	return ice_pf_rxq_wait(pf, pf_q, ena);
}

/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	u16 v_idx;
	int err;

	/* vector 0 being present means the VSI was already set up */
	if (vsi->q_vectors[0]) {
		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
		return -EEXIST;
	}

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	/* free the vectors allocated before the failure, in reverse order */
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	u16 tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assigning remaining rings count to VSIs num queue value */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		u8 tx_rings_per_v, rx_rings_per_v;
		u16 q_id, q_base;

		/* Tx rings mapping to vector: divide the remaining rings
		 * evenly across the remaining vectors (ceiling division so
		 * earlier vectors absorb any remainder)
		 */
		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		/* link this vector's Tx rings into a singly-linked list */
		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector: same distribution scheme */
		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.ring;
			q_vector->rx.ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}

/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;
6248c2ecf20Sopenharmony_ci 6258c2ecf20Sopenharmony_ci ice_for_each_q_vector(vsi, v_idx) 6268c2ecf20Sopenharmony_ci ice_free_q_vector(vsi, v_idx); 6278c2ecf20Sopenharmony_ci} 6288c2ecf20Sopenharmony_ci 6298c2ecf20Sopenharmony_ci/** 6308c2ecf20Sopenharmony_ci * ice_vsi_cfg_txq - Configure single Tx queue 6318c2ecf20Sopenharmony_ci * @vsi: the VSI that queue belongs to 6328c2ecf20Sopenharmony_ci * @ring: Tx ring to be configured 6338c2ecf20Sopenharmony_ci * @qg_buf: queue group buffer 6348c2ecf20Sopenharmony_ci */ 6358c2ecf20Sopenharmony_ciint 6368c2ecf20Sopenharmony_ciice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, 6378c2ecf20Sopenharmony_ci struct ice_aqc_add_tx_qgrp *qg_buf) 6388c2ecf20Sopenharmony_ci{ 6398c2ecf20Sopenharmony_ci u8 buf_len = struct_size(qg_buf, txqs, 1); 6408c2ecf20Sopenharmony_ci struct ice_tlan_ctx tlan_ctx = { 0 }; 6418c2ecf20Sopenharmony_ci struct ice_aqc_add_txqs_perq *txq; 6428c2ecf20Sopenharmony_ci struct ice_pf *pf = vsi->back; 6438c2ecf20Sopenharmony_ci struct ice_hw *hw = &pf->hw; 6448c2ecf20Sopenharmony_ci enum ice_status status; 6458c2ecf20Sopenharmony_ci u16 pf_q; 6468c2ecf20Sopenharmony_ci u8 tc; 6478c2ecf20Sopenharmony_ci 6488c2ecf20Sopenharmony_ci pf_q = ring->reg_idx; 6498c2ecf20Sopenharmony_ci ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); 6508c2ecf20Sopenharmony_ci /* copy context contents into the qg_buf */ 6518c2ecf20Sopenharmony_ci qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); 6528c2ecf20Sopenharmony_ci ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, 6538c2ecf20Sopenharmony_ci ice_tlan_ctx_info); 6548c2ecf20Sopenharmony_ci 6558c2ecf20Sopenharmony_ci /* init queue specific tail reg. It is referred as 6568c2ecf20Sopenharmony_ci * transmit comm scheduler queue doorbell. 
6578c2ecf20Sopenharmony_ci */ 6588c2ecf20Sopenharmony_ci ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q); 6598c2ecf20Sopenharmony_ci 6608c2ecf20Sopenharmony_ci if (IS_ENABLED(CONFIG_DCB)) 6618c2ecf20Sopenharmony_ci tc = ring->dcb_tc; 6628c2ecf20Sopenharmony_ci else 6638c2ecf20Sopenharmony_ci tc = 0; 6648c2ecf20Sopenharmony_ci 6658c2ecf20Sopenharmony_ci /* Add unique software queue handle of the Tx queue per 6668c2ecf20Sopenharmony_ci * TC into the VSI Tx ring 6678c2ecf20Sopenharmony_ci */ 6688c2ecf20Sopenharmony_ci ring->q_handle = ice_calc_q_handle(vsi, ring, tc); 6698c2ecf20Sopenharmony_ci 6708c2ecf20Sopenharmony_ci status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle, 6718c2ecf20Sopenharmony_ci 1, qg_buf, buf_len, NULL); 6728c2ecf20Sopenharmony_ci if (status) { 6738c2ecf20Sopenharmony_ci dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n", 6748c2ecf20Sopenharmony_ci ice_stat_str(status)); 6758c2ecf20Sopenharmony_ci return -ENODEV; 6768c2ecf20Sopenharmony_ci } 6778c2ecf20Sopenharmony_ci 6788c2ecf20Sopenharmony_ci /* Add Tx Queue TEID into the VSI Tx ring from the 6798c2ecf20Sopenharmony_ci * response. This will complete configuring and 6808c2ecf20Sopenharmony_ci * enabling the queue. 
6818c2ecf20Sopenharmony_ci */ 6828c2ecf20Sopenharmony_ci txq = &qg_buf->txqs[0]; 6838c2ecf20Sopenharmony_ci if (pf_q == le16_to_cpu(txq->txq_id)) 6848c2ecf20Sopenharmony_ci ring->txq_teid = le32_to_cpu(txq->q_teid); 6858c2ecf20Sopenharmony_ci 6868c2ecf20Sopenharmony_ci return 0; 6878c2ecf20Sopenharmony_ci} 6888c2ecf20Sopenharmony_ci 6898c2ecf20Sopenharmony_ci/** 6908c2ecf20Sopenharmony_ci * ice_cfg_itr - configure the initial interrupt throttle values 6918c2ecf20Sopenharmony_ci * @hw: pointer to the HW structure 6928c2ecf20Sopenharmony_ci * @q_vector: interrupt vector that's being configured 6938c2ecf20Sopenharmony_ci * 6948c2ecf20Sopenharmony_ci * Configure interrupt throttling values for the ring containers that are 6958c2ecf20Sopenharmony_ci * associated with the interrupt vector passed in. 6968c2ecf20Sopenharmony_ci */ 6978c2ecf20Sopenharmony_civoid ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector) 6988c2ecf20Sopenharmony_ci{ 6998c2ecf20Sopenharmony_ci ice_cfg_itr_gran(hw); 7008c2ecf20Sopenharmony_ci 7018c2ecf20Sopenharmony_ci if (q_vector->num_ring_rx) { 7028c2ecf20Sopenharmony_ci struct ice_ring_container *rc = &q_vector->rx; 7038c2ecf20Sopenharmony_ci 7048c2ecf20Sopenharmony_ci rc->target_itr = ITR_TO_REG(rc->itr_setting); 7058c2ecf20Sopenharmony_ci rc->next_update = jiffies + 1; 7068c2ecf20Sopenharmony_ci rc->current_itr = rc->target_itr; 7078c2ecf20Sopenharmony_ci wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx), 7088c2ecf20Sopenharmony_ci ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); 7098c2ecf20Sopenharmony_ci } 7108c2ecf20Sopenharmony_ci 7118c2ecf20Sopenharmony_ci if (q_vector->num_ring_tx) { 7128c2ecf20Sopenharmony_ci struct ice_ring_container *rc = &q_vector->tx; 7138c2ecf20Sopenharmony_ci 7148c2ecf20Sopenharmony_ci rc->target_itr = ITR_TO_REG(rc->itr_setting); 7158c2ecf20Sopenharmony_ci rc->next_update = jiffies + 1; 7168c2ecf20Sopenharmony_ci rc->current_itr = rc->target_itr; 7178c2ecf20Sopenharmony_ci wr32(hw, 
GLINT_ITR(rc->itr_idx, q_vector->reg_idx), 7188c2ecf20Sopenharmony_ci ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); 7198c2ecf20Sopenharmony_ci } 7208c2ecf20Sopenharmony_ci} 7218c2ecf20Sopenharmony_ci 7228c2ecf20Sopenharmony_ci/** 7238c2ecf20Sopenharmony_ci * ice_cfg_txq_interrupt - configure interrupt on Tx queue 7248c2ecf20Sopenharmony_ci * @vsi: the VSI being configured 7258c2ecf20Sopenharmony_ci * @txq: Tx queue being mapped to MSI-X vector 7268c2ecf20Sopenharmony_ci * @msix_idx: MSI-X vector index within the function 7278c2ecf20Sopenharmony_ci * @itr_idx: ITR index of the interrupt cause 7288c2ecf20Sopenharmony_ci * 7298c2ecf20Sopenharmony_ci * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector 7308c2ecf20Sopenharmony_ci * within the function space. 7318c2ecf20Sopenharmony_ci */ 7328c2ecf20Sopenharmony_civoid 7338c2ecf20Sopenharmony_ciice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx) 7348c2ecf20Sopenharmony_ci{ 7358c2ecf20Sopenharmony_ci struct ice_pf *pf = vsi->back; 7368c2ecf20Sopenharmony_ci struct ice_hw *hw = &pf->hw; 7378c2ecf20Sopenharmony_ci u32 val; 7388c2ecf20Sopenharmony_ci 7398c2ecf20Sopenharmony_ci itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M; 7408c2ecf20Sopenharmony_ci 7418c2ecf20Sopenharmony_ci val = QINT_TQCTL_CAUSE_ENA_M | itr_idx | 7428c2ecf20Sopenharmony_ci ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M); 7438c2ecf20Sopenharmony_ci 7448c2ecf20Sopenharmony_ci wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); 7458c2ecf20Sopenharmony_ci if (ice_is_xdp_ena_vsi(vsi)) { 7468c2ecf20Sopenharmony_ci u32 xdp_txq = txq + vsi->num_xdp_txq; 7478c2ecf20Sopenharmony_ci 7488c2ecf20Sopenharmony_ci wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 7498c2ecf20Sopenharmony_ci val); 7508c2ecf20Sopenharmony_ci } 7518c2ecf20Sopenharmony_ci ice_flush(hw); 7528c2ecf20Sopenharmony_ci} 7538c2ecf20Sopenharmony_ci 7548c2ecf20Sopenharmony_ci/** 7558c2ecf20Sopenharmony_ci * 
ice_cfg_rxq_interrupt - configure interrupt on Rx queue 7568c2ecf20Sopenharmony_ci * @vsi: the VSI being configured 7578c2ecf20Sopenharmony_ci * @rxq: Rx queue being mapped to MSI-X vector 7588c2ecf20Sopenharmony_ci * @msix_idx: MSI-X vector index within the function 7598c2ecf20Sopenharmony_ci * @itr_idx: ITR index of the interrupt cause 7608c2ecf20Sopenharmony_ci * 7618c2ecf20Sopenharmony_ci * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector 7628c2ecf20Sopenharmony_ci * within the function space. 7638c2ecf20Sopenharmony_ci */ 7648c2ecf20Sopenharmony_civoid 7658c2ecf20Sopenharmony_ciice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx) 7668c2ecf20Sopenharmony_ci{ 7678c2ecf20Sopenharmony_ci struct ice_pf *pf = vsi->back; 7688c2ecf20Sopenharmony_ci struct ice_hw *hw = &pf->hw; 7698c2ecf20Sopenharmony_ci u32 val; 7708c2ecf20Sopenharmony_ci 7718c2ecf20Sopenharmony_ci itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M; 7728c2ecf20Sopenharmony_ci 7738c2ecf20Sopenharmony_ci val = QINT_RQCTL_CAUSE_ENA_M | itr_idx | 7748c2ecf20Sopenharmony_ci ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M); 7758c2ecf20Sopenharmony_ci 7768c2ecf20Sopenharmony_ci wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); 7778c2ecf20Sopenharmony_ci 7788c2ecf20Sopenharmony_ci ice_flush(hw); 7798c2ecf20Sopenharmony_ci} 7808c2ecf20Sopenharmony_ci 7818c2ecf20Sopenharmony_ci/** 7828c2ecf20Sopenharmony_ci * ice_trigger_sw_intr - trigger a software interrupt 7838c2ecf20Sopenharmony_ci * @hw: pointer to the HW structure 7848c2ecf20Sopenharmony_ci * @q_vector: interrupt vector to trigger the software interrupt for 7858c2ecf20Sopenharmony_ci */ 7868c2ecf20Sopenharmony_civoid ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector) 7878c2ecf20Sopenharmony_ci{ 7888c2ecf20Sopenharmony_ci wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 7898c2ecf20Sopenharmony_ci (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) | 7908c2ecf20Sopenharmony_ci 
GLINT_DYN_CTL_SWINT_TRIG_M | 7918c2ecf20Sopenharmony_ci GLINT_DYN_CTL_INTENA_M); 7928c2ecf20Sopenharmony_ci} 7938c2ecf20Sopenharmony_ci 7948c2ecf20Sopenharmony_ci/** 7958c2ecf20Sopenharmony_ci * ice_vsi_stop_tx_ring - Disable single Tx ring 7968c2ecf20Sopenharmony_ci * @vsi: the VSI being configured 7978c2ecf20Sopenharmony_ci * @rst_src: reset source 7988c2ecf20Sopenharmony_ci * @rel_vmvf_num: Relative ID of VF/VM 7998c2ecf20Sopenharmony_ci * @ring: Tx ring to be stopped 8008c2ecf20Sopenharmony_ci * @txq_meta: Meta data of Tx ring to be stopped 8018c2ecf20Sopenharmony_ci */ 8028c2ecf20Sopenharmony_ciint 8038c2ecf20Sopenharmony_ciice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, 8048c2ecf20Sopenharmony_ci u16 rel_vmvf_num, struct ice_ring *ring, 8058c2ecf20Sopenharmony_ci struct ice_txq_meta *txq_meta) 8068c2ecf20Sopenharmony_ci{ 8078c2ecf20Sopenharmony_ci struct ice_pf *pf = vsi->back; 8088c2ecf20Sopenharmony_ci struct ice_q_vector *q_vector; 8098c2ecf20Sopenharmony_ci struct ice_hw *hw = &pf->hw; 8108c2ecf20Sopenharmony_ci enum ice_status status; 8118c2ecf20Sopenharmony_ci u32 val; 8128c2ecf20Sopenharmony_ci 8138c2ecf20Sopenharmony_ci /* clear cause_ena bit for disabled queues */ 8148c2ecf20Sopenharmony_ci val = rd32(hw, QINT_TQCTL(ring->reg_idx)); 8158c2ecf20Sopenharmony_ci val &= ~QINT_TQCTL_CAUSE_ENA_M; 8168c2ecf20Sopenharmony_ci wr32(hw, QINT_TQCTL(ring->reg_idx), val); 8178c2ecf20Sopenharmony_ci 8188c2ecf20Sopenharmony_ci /* software is expected to wait for 100 ns */ 8198c2ecf20Sopenharmony_ci ndelay(100); 8208c2ecf20Sopenharmony_ci 8218c2ecf20Sopenharmony_ci /* trigger a software interrupt for the vector 8228c2ecf20Sopenharmony_ci * associated to the queue to schedule NAPI handler 8238c2ecf20Sopenharmony_ci */ 8248c2ecf20Sopenharmony_ci q_vector = ring->q_vector; 8258c2ecf20Sopenharmony_ci if (q_vector) 8268c2ecf20Sopenharmony_ci ice_trigger_sw_intr(hw, q_vector); 8278c2ecf20Sopenharmony_ci 8288c2ecf20Sopenharmony_ci status = 
ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx, 8298c2ecf20Sopenharmony_ci txq_meta->tc, 1, &txq_meta->q_handle, 8308c2ecf20Sopenharmony_ci &txq_meta->q_id, &txq_meta->q_teid, rst_src, 8318c2ecf20Sopenharmony_ci rel_vmvf_num, NULL); 8328c2ecf20Sopenharmony_ci 8338c2ecf20Sopenharmony_ci /* if the disable queue command was exercised during an 8348c2ecf20Sopenharmony_ci * active reset flow, ICE_ERR_RESET_ONGOING is returned. 8358c2ecf20Sopenharmony_ci * This is not an error as the reset operation disables 8368c2ecf20Sopenharmony_ci * queues at the hardware level anyway. 8378c2ecf20Sopenharmony_ci */ 8388c2ecf20Sopenharmony_ci if (status == ICE_ERR_RESET_ONGOING) { 8398c2ecf20Sopenharmony_ci dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n"); 8408c2ecf20Sopenharmony_ci } else if (status == ICE_ERR_DOES_NOT_EXIST) { 8418c2ecf20Sopenharmony_ci dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n"); 8428c2ecf20Sopenharmony_ci } else if (status) { 8438c2ecf20Sopenharmony_ci dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n", 8448c2ecf20Sopenharmony_ci ice_stat_str(status)); 8458c2ecf20Sopenharmony_ci return -ENODEV; 8468c2ecf20Sopenharmony_ci } 8478c2ecf20Sopenharmony_ci 8488c2ecf20Sopenharmony_ci return 0; 8498c2ecf20Sopenharmony_ci} 8508c2ecf20Sopenharmony_ci 8518c2ecf20Sopenharmony_ci/** 8528c2ecf20Sopenharmony_ci * ice_fill_txq_meta - Prepare the Tx queue's meta data 8538c2ecf20Sopenharmony_ci * @vsi: VSI that ring belongs to 8548c2ecf20Sopenharmony_ci * @ring: ring that txq_meta will be based on 8558c2ecf20Sopenharmony_ci * @txq_meta: a helper struct that wraps Tx queue's information 8568c2ecf20Sopenharmony_ci * 8578c2ecf20Sopenharmony_ci * Set up a helper struct that will contain all the necessary fields that 8588c2ecf20Sopenharmony_ci * are needed for stopping Tx queue 8598c2ecf20Sopenharmony_ci */ 8608c2ecf20Sopenharmony_civoid 
8618c2ecf20Sopenharmony_ciice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring, 8628c2ecf20Sopenharmony_ci struct ice_txq_meta *txq_meta) 8638c2ecf20Sopenharmony_ci{ 8648c2ecf20Sopenharmony_ci u8 tc; 8658c2ecf20Sopenharmony_ci 8668c2ecf20Sopenharmony_ci if (IS_ENABLED(CONFIG_DCB)) 8678c2ecf20Sopenharmony_ci tc = ring->dcb_tc; 8688c2ecf20Sopenharmony_ci else 8698c2ecf20Sopenharmony_ci tc = 0; 8708c2ecf20Sopenharmony_ci 8718c2ecf20Sopenharmony_ci txq_meta->q_id = ring->reg_idx; 8728c2ecf20Sopenharmony_ci txq_meta->q_teid = ring->txq_teid; 8738c2ecf20Sopenharmony_ci txq_meta->q_handle = ring->q_handle; 8748c2ecf20Sopenharmony_ci txq_meta->vsi_idx = vsi->idx; 8758c2ecf20Sopenharmony_ci txq_meta->tc = tc; 8768c2ecf20Sopenharmony_ci} 877