// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_sched.h"

/**
 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
 * @pi: port information structure
 * @info: Scheduler element information from firmware
 *
 * This function inserts the root node of the scheduling tree topology
 * into the SW DB.
 */
static enum ice_status
ice_sched_add_root_node(struct ice_port_info *pi,
			struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *root;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
	if (!root)
		return ICE_ERR_NO_MEMORY;

	/* coverity[suspicious_sizeof] */
	root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
				      sizeof(*root), GFP_KERNEL);
	if (!root->children) {
		devm_kfree(ice_hw_to_dev(hw), root);
		return ICE_ERR_NO_MEMORY;
	}

	memcpy(&root->info, info, sizeof(*info));
	pi->root = root;
	return 0;
}

/**
 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
 * @teid: node TEID to search
 *
 * This function searches for a node matching the TEID in the scheduling tree
 * from the SW DB. The search is recursive and is restricted by the number of
 * layers it has searched through, stopping at the max supported layer.
 *
 * This function needs to be called when holding the port_info->sched_lock
 */
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
	u16 i;

	/* The TEID is the same as that of the start_node */
	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
		return start_node;

	/* The node has no children or is at the max layer */
	if (!start_node->num_children ||
	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
		return NULL;

	/* Check if TEID matches to any of the children nodes */
	for (i = 0; i < start_node->num_children; i++)
		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
			return start_node->children[i];

	/* Search within each child's sub-tree */
	for (i = 0; i < start_node->num_children; i++) {
		struct ice_sched_node *tmp;

		tmp = ice_sched_find_node_by_teid(start_node->children[i],
						  teid);
		if (tmp)
			return tmp;
	}

	return NULL;
}
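
/* Illustrative sketch (not part of the driver): a hypothetical caller that
 * already has an ice_port_info could resolve a firmware TEID to its SW DB
 * node roughly like this, taking sched_lock as the kernel-doc above requires.
 * The variable names (elem, node) are made up for the example.
 *
 *	struct ice_sched_node *node;
 *
 *	mutex_lock(&pi->sched_lock);
 *	node = ice_sched_find_node_by_teid(pi->root,
 *					   le32_to_cpu(elem->node_teid));
 *	if (node)
 *		ice_debug(pi->hw, ICE_DBG_SCHED, "node found at layer %u\n",
 *			  node->tx_sched_layer);
 *	mutex_unlock(&pi->sched_lock);
 */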

/**
 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
 * @hw: pointer to the HW struct
 * @cmd_opc: cmd opcode
 * @elems_req: number of elements to request
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_resp: returns total number of elements response
 * @cd: pointer to command details structure or NULL
 *
 * This function sends a scheduling elements cmd (cmd_opc)
 */
static enum ice_status
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
			    u16 elems_req, void *buf, u16 buf_size,
			    u16 *elems_resp, struct ice_sq_cd *cd)
{
	struct ice_aqc_sched_elem_cmd *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.sched_elem_cmd;
	ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
	cmd->num_elem_req = cpu_to_le16(elems_req);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && elems_resp)
		*elems_resp = le16_to_cpu(cmd->num_elem_resp);

	return status;
}

/**
 * ice_aq_query_sched_elems - query scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements returned
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduling elements (0x0404)
 */
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
			 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
			 u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
 * @pi: port information structure
 * @layer: Scheduler layer of the node
 * @info: Scheduler element information from firmware
 *
 * This function inserts a scheduler node into the SW DB.
 */
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
		   struct ice_aqc_txsched_elem_data *info)
{
	struct ice_aqc_txsched_elem_data elem;
	struct ice_sched_node *parent;
	struct ice_sched_node *node;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	/* A valid parent node should be there */
	parent = ice_sched_find_node_by_teid(pi->root,
					     le32_to_cpu(info->parent_teid));
	if (!parent) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Parent Node not found for parent_teid=0x%x\n",
			  le32_to_cpu(info->parent_teid));
		return ICE_ERR_PARAM;
	}

	/* query the current node information from FW before adding it
	 * to the SW DB
	 */
	status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
	if (status)
		return status;

	node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
	if (!node)
		return ICE_ERR_NO_MEMORY;
	if (hw->max_children[layer]) {
		/* coverity[suspicious_sizeof] */
		node->children = devm_kcalloc(ice_hw_to_dev(hw),
					      hw->max_children[layer],
					      sizeof(*node), GFP_KERNEL);
		if (!node->children) {
			devm_kfree(ice_hw_to_dev(hw), node);
			return ICE_ERR_NO_MEMORY;
		}
	}

	node->in_use = true;
	node->parent = parent;
	node->tx_sched_layer = layer;
	parent->children[parent->num_children++] = node;
	node->info = elem;
	return 0;
}

/**
 * ice_aq_delete_sched_elems - delete scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to delete
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_del: returns total number of elements deleted
 * @cd: pointer to command details structure or NULL
 *
 * Delete scheduling elements (0x040F)
 */
static enum ice_status
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
			  struct ice_aqc_delete_elem *buf, u16 buf_size,
			  u16 *grps_del, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_del, cd);
}

/**
 * ice_sched_remove_elems - remove nodes from HW
 * @hw: pointer to the HW struct
 * @parent: pointer to the parent node
 * @num_nodes: number of nodes
 * @node_teids: array of node TEIDs to be deleted
 *
 * This function removes nodes from HW
 */
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
		       u16 num_nodes, u32 *node_teids)
{
	struct ice_aqc_delete_elem *buf;
	u16 i, num_groups_removed = 0;
	enum ice_status status;
	u16 buf_size;

	buf_size = struct_size(buf, teid, num_nodes);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = cpu_to_le32(node_teids[i]);

	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
					   &num_groups_removed, NULL);
	if (status || num_groups_removed != 1)
		ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
			  hw->adminq.sq_last_status);

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
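
/* Illustrative sketch (not part of the driver): the typical in-file caller
 * removes a single node by TEID, mirroring what ice_free_sched_node() does
 * further below. The AQ buffer carries the parent TEID plus one __le32 TEID
 * entry per node, sized with struct_size() as in the function above.
 *
 *	u32 teid = le32_to_cpu(node->info.node_teid);
 *
 *	ice_sched_remove_elems(hw, node->parent, 1, &teid);
 */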

/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @pi: port information structure
 * @parent: pointer to the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 */
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info *pi,
			 struct ice_sched_node *parent, u8 layer)
{
	return pi->sib_head[parent->tc_num][layer];
}

/**
 * ice_sched_get_tc_node - get pointer to TC node
 * @pi: port information structure
 * @tc: TC number
 *
 * This function returns the TC node pointer
 */
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
{
	u8 i;

	if (!pi || !pi->root)
		return NULL;
	for (i = 0; i < pi->root->num_children; i++)
		if (pi->root->children[i]->tc_num == tc)
			return pi->root->children[i];
	return NULL;
}

/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);

		ice_sched_remove_elems(hw, node->parent, 1, &teid);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p;

		/* update the parent */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}

		/* update the sibling head if head is getting removed */
		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
			pi->sib_head[node->tc_num][node->tx_sched_layer] =
				node->sibling;
	}

	/* leaf nodes have no children */
	if (node->children)
		devm_kfree(ice_hw_to_dev(hw), node->children);
	devm_kfree(ice_hw_to_dev(hw), node);
}

/**
 * ice_aq_get_dflt_topo - gets default scheduler topology
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_branches: returns total number of queue to port branches
 * @cd: pointer to command details structure or NULL
 *
 * Get default scheduler topology (0x400)
 */
static enum ice_status
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
		     u8 *num_branches, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_topo *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_topo;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
	cmd->port_num = lport;
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_branches)
		*num_branches = cmd->num_branches;

	return status;
}

/**
 * ice_aq_add_sched_elems - adds scheduling elements
 * @hw: pointer to the HW struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_added: returns total number of groups added
 * @cd: pointer to command details structure or NULL
 *
 * Add scheduling elements (0x0401)
 */
static enum ice_status
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
		       struct ice_aqc_add_elem *buf, u16 buf_size,
		       u16 *grps_added, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_added, cd);
}

/**
 * ice_aq_cfg_sched_elems - configures scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_cfgd: returns total number of elements configured
 * @cd: pointer to command details structure or NULL
 *
 * Configure scheduling elements (0x0403)
 */
static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
		       struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
		       u16 *elems_cfgd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_cfgd, cd);
}

/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 *
 * Suspend scheduling elements (0x0409)
 */
static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements resumed
 * @cd: pointer to command details structure or NULL
 *
 * resume scheduling elements (0x040A)
 */
static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the HW struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduler resource allocation (0x0412)
 */
static enum ice_status
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
		       struct ice_aqc_query_txsched_res_resp *buf,
		       struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
 * @hw: pointer to the HW struct
 * @num_nodes: number of nodes
 * @node_teids: array of node TEIDs to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes HW nodes
 */
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	u16 i, buf_size, num_elem_ret = 0;
	enum ice_status status;
	__le32 *buf;

	buf_size = sizeof(*buf) * num_nodes;
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_nodes; i++)
		buf[i] = cpu_to_le32(node_teids[i]);

	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	if (status || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
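
/* Illustrative sketch (not part of the driver): suspending and later resuming
 * a set of HW nodes by TEID boils down to two calls with the same CPU-order
 * TEID array; the conversion to little-endian happens inside the helper. The
 * variable names (n0, n1, teids) are made up for the example.
 *
 *	u32 teids[2] = { le32_to_cpu(n0->info.node_teid),
 *			 le32_to_cpu(n1->info.node_teid) };
 *
 *	ice_sched_suspend_resume_elems(hw, 2, teids, true);
 *	...
 *	ice_sched_suspend_resume_elems(hw, 2, teids, false);
 */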

/**
 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */
static enum ice_status
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* allocate LAN queue contexts */
	if (!vsi_ctx->lan_q_ctx[tc]) {
		vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
						      new_numqs,
						      sizeof(*q_ctx),
						      GFP_KERNEL);
		if (!vsi_ctx->lan_q_ctx[tc])
			return ICE_ERR_NO_MEMORY;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
		return 0;
	}
	/* num queues are increased, update the queue contexts */
	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];

		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
				     sizeof(*q_ctx), GFP_KERNEL);
		if (!q_ctx)
			return ICE_ERR_NO_MEMORY;
		memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
		       prev_num * sizeof(*q_ctx));
		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
		vsi_ctx->lan_q_ctx[tc] = q_ctx;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
	}
	return 0;
}

/**
 * ice_aq_rl_profile - performs a rate limiting task
 * @hw: pointer to the HW struct
 * @opcode: opcode for add, query, or remove profile(s)
 * @num_profiles: the number of profiles
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_processed: number of processed add or remove profile(s) to return
 * @cd: pointer to command details structure
 *
 * RL profile function to add, query, or remove profile(s)
 */
static enum ice_status
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
		  u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
		  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
	struct ice_aqc_rl_profile *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.rl_profile;

	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->num_profiles = cpu_to_le16(num_profiles);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_processed)
		*num_processed = le16_to_cpu(cmd->num_processed);
	return status;
}

/**
 * ice_aq_add_rl_profile - adds rate limiting profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_added: total number of profiles added to return
 * @cd: pointer to command details structure
 *
 * Add RL profile (0x0410)
 */
static enum ice_status
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
		      struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
		      u16 *num_profiles_added, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
				 buf, buf_size, num_profiles_added, cd);
}

/**
 * ice_aq_remove_rl_profile - removes RL profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to remove
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_removed: total number of profiles removed to return
 * @cd: pointer to command details structure or NULL
 *
 * Remove RL profile (0x0415)
 */
static enum ice_status
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
			 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
			 u16 *num_profiles_removed, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
				 num_profiles, buf, buf_size,
				 num_profiles_removed, cd);
}

/**
 * ice_sched_del_rl_profile - remove RL profile
 * @hw: pointer to the HW struct
 * @rl_info: rate limit profile information
 *
 * If the profile ID is not referenced anymore, it removes the profile ID with
 * its associated parameters from the HW DB and locally. The caller needs to
 * hold the scheduler lock.
 */
static enum ice_status
ice_sched_del_rl_profile(struct ice_hw *hw,
			 struct ice_aqc_rl_profile_info *rl_info)
{
	struct ice_aqc_rl_profile_elem *buf;
	u16 num_profiles_removed;
	enum ice_status status;
	u16 num_profiles = 1;

	if (rl_info->prof_id_ref != 0)
		return ICE_ERR_IN_USE;

	/* Safe to remove profile ID */
	buf = &rl_info->profile;
	status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
					  &num_profiles_removed, NULL);
	if (status || num_profiles_removed != num_profiles)
		return ICE_ERR_CFG;

	/* Delete stale entry now */
	list_del(&rl_info->list_entry);
	devm_kfree(ice_hw_to_dev(hw), rl_info);
	return status;
}

/**
 * ice_sched_clear_rl_prof - clears RL prof entries
 * @pi: port information structure
 *
 * This function removes all RL profiles from HW as well as from the SW DB.
 */
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
{
	u16 ln;

	for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
					 &pi->rl_prof_list[ln], list_entry) {
			struct ice_hw *hw = pi->hw;
			enum ice_status status;

			rl_prof_elem->prof_id_ref = 0;
			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
			if (status) {
				ice_debug(hw, ICE_DBG_SCHED,
					  "Remove rl profile failed\n");
				/* On error, free mem required */
				list_del(&rl_prof_elem->list_entry);
				devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
			}
		}
	}
}
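
/* Illustrative sketch (not part of the driver): ice_sched_del_rl_profile()
 * only frees a profile whose reference count has dropped to zero, so a
 * hypothetical caller that is done with one reference would decrement
 * prof_id_ref first and treat ICE_ERR_IN_USE as "still shared, nothing to
 * free":
 *
 *	if (rl_info->prof_id_ref)
 *		rl_info->prof_id_ref--;
 *	status = ice_sched_del_rl_profile(hw, rl_info);
 *	if (status == ICE_ERR_IN_USE)
 *		status = 0;
 */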

/**
 * ice_sched_clear_agg - clears the aggregator related information
 * @hw: pointer to the hardware structure
 *
 * This function removes the aggregator list and frees up the aggregator
 * related memory previously allocated.
 */
void ice_sched_clear_agg(struct ice_hw *hw)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list, list_entry) {
			list_del(&agg_vsi_info->list_entry);
			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
		}
		list_del(&agg_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), agg_info);
	}
}

/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
	if (!pi)
		return;
	/* remove RL profiles related lists */
	ice_sched_clear_rl_prof(pi);
	if (pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}
}

/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Cleanup scheduling elements from SW DB
 */
void ice_sched_clear_port(struct ice_port_info *pi)
{
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;

	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
	mutex_lock(&pi->sched_lock);
	ice_sched_clear_tx_topo(pi);
	mutex_unlock(&pi->sched_lock);
	mutex_destroy(&pi->sched_lock);
}

/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the HW struct
 *
 * Cleanup scheduling elements from SW DB for all the ports
 */
void ice_sched_cleanup_all(struct ice_hw *hw)
{
	if (!hw)
		return;

	if (hw->layer_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
		hw->layer_info = NULL;
	}

	ice_sched_clear_port(hw->port_info);

	hw->num_tx_sched_layers = 0;
	hw->num_tx_sched_phys_layers = 0;
	hw->flattened_layers = 0;
	hw->max_cgds = 0;
}

/**
 * ice_sched_add_elems - add nodes to HW and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the TEID of first node
 *
 * This function adds nodes to HW as well as to SW DB for a given layer
 */
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	size_t buf_size;
	u32 teid;

	buf_size = struct_size(buf, generic, num_nodes);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++) {
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->generic[i].data.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}

	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
			  hw->adminq.sq_last_status);
		devm_kfree(ice_hw_to_dev(hw), buf);
		return ICE_ERR_CFG;
	}

	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
		if (status) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}

		teid = le32_to_cpu(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "Node is missing for teid =%d\n", teid);
			break;
		}

		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;

		/* add it to previous node sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(pi, tc_node, layer);
		if (prev && prev != new_node) {
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}

		/* initialize the sibling head */
		if (!pi->sib_head[tc_node->tc_num][layer])
			pi->sib_head[tc_node->tc_num][layer] = new_node;

		if (i == 0)
			*first_node_teid = teid;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
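
/* Illustrative sketch (not part of the driver): a minimal in-file caller,
 * assuming tc_node, parent and layer were already looked up, requests a
 * couple of nodes and receives the TEID of the first one back. The AQ buffer
 * is sized with struct_size(buf, generic, num_nodes), i.e. the fixed header
 * plus one generic element per requested node.
 *
 *	u16 num_added = 0;
 *	u32 first_teid;
 *
 *	status = ice_sched_add_elems(pi, tc_node, parent, layer, 2,
 *				     &num_added, &first_teid);
 */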

/**
 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function adds nodes to a given layer.
 */
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes, max_child_nodes;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 num_added = 0;
	u32 temp;

	*num_nodes_added = 0;

	if (!num_nodes)
		return status;

	if (!parent || layer < hw->sw_entry_point_layer)
		return ICE_ERR_PARAM;

	/* max children per node per layer */
	max_child_nodes = hw->max_children[parent->tx_sched_layer];

	/* current number of children + required nodes exceed max children ? */
	if ((parent->num_children + num_nodes) > max_child_nodes) {
		/* Fail if the parent is a TC node */
		if (parent == tc_node)
			return ICE_ERR_CFG;

		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
			/* this recursion is intentional, and wouldn't
			 * go more than 2 calls
			 */
			status = ice_sched_add_nodes_to_layer(pi, tc_node,
							      parent, layer,
							      new_num_nodes,
							      first_node_teid,
							      &num_added);
			if (status)
				return status;

			*num_nodes_added += num_added;
		}
		/* Don't modify the first node TEID memory if the first node was
		 * added already in the above call. Instead send some temp
		 * memory for all other recursive calls.
		 */
		if (num_added)
			first_teid_ptr = &temp;

		new_num_nodes = num_nodes - num_added;

		/* This parent is full, try the next sibling */
		parent = parent->sibling;

		/* this recursion is intentional, for 1024 queues
		 * per VSI, it goes max of 16 iterations.
		 * 1024 / 8 = 128 layer 8 nodes
		 * 128 / 8 = 16 (add 8 nodes per iteration)
		 */
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      layer, new_num_nodes,
						      first_teid_ptr,
						      &num_added);
		*num_nodes_added += num_added;
		return status;
	}

	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				     num_nodes_added, first_node_teid);
	return status;
}

/**
 * ice_sched_get_qgrp_layer - get the current queue group layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current queue group layer number
 */
static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
{
	/* It's always total layers - 1, the array is 0 relative so -2 */
	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
}

/**
 * ice_sched_get_vsi_layer - get the current VSI layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current VSI layer number
 */
static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
	/* Num Layers       VSI layer
	 *     9               6
	 *     7               4
	 *     5 or less       sw_entry_point_layer
	 */
	/* calculate the VSI layer based on number of layers. */
	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	return hw->sw_entry_point_layer;
}
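
/* Worked example (informational): with the default 9-layer tree, the table in
 * ice_sched_get_vsi_layer() gives the VSI layer as
 * 9 - ICE_VSI_LAYER_OFFSET = 6, and ice_sched_get_qgrp_layer() gives the
 * queue group layer as 9 - ICE_QGRP_LAYER_OFFSET = 7, i.e. one layer above
 * the leaf/queue layer (index 8 in 0-relative terms). The offset values
 * implied by the comments above are therefore 3 and 2 respectively; the
 * actual constants are defined in the scheduler headers.
 */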
10878c2ecf20Sopenharmony_ci * @pi: port info structure for the tree to cleanup 10888c2ecf20Sopenharmony_ci * 10898c2ecf20Sopenharmony_ci * This function is the initial call to find the total number of Tx scheduler 10908c2ecf20Sopenharmony_ci * resources, default topology created by firmware and storing the information 10918c2ecf20Sopenharmony_ci * in SW DB. 10928c2ecf20Sopenharmony_ci */ 10938c2ecf20Sopenharmony_cienum ice_status ice_sched_init_port(struct ice_port_info *pi) 10948c2ecf20Sopenharmony_ci{ 10958c2ecf20Sopenharmony_ci struct ice_aqc_get_topo_elem *buf; 10968c2ecf20Sopenharmony_ci enum ice_status status; 10978c2ecf20Sopenharmony_ci struct ice_hw *hw; 10988c2ecf20Sopenharmony_ci u8 num_branches; 10998c2ecf20Sopenharmony_ci u16 num_elems; 11008c2ecf20Sopenharmony_ci u8 i, j; 11018c2ecf20Sopenharmony_ci 11028c2ecf20Sopenharmony_ci if (!pi) 11038c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 11048c2ecf20Sopenharmony_ci hw = pi->hw; 11058c2ecf20Sopenharmony_ci 11068c2ecf20Sopenharmony_ci /* Query the Default Topology from FW */ 11078c2ecf20Sopenharmony_ci buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 11088c2ecf20Sopenharmony_ci if (!buf) 11098c2ecf20Sopenharmony_ci return ICE_ERR_NO_MEMORY; 11108c2ecf20Sopenharmony_ci 11118c2ecf20Sopenharmony_ci /* Query default scheduling tree topology */ 11128c2ecf20Sopenharmony_ci status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, 11138c2ecf20Sopenharmony_ci &num_branches, NULL); 11148c2ecf20Sopenharmony_ci if (status) 11158c2ecf20Sopenharmony_ci goto err_init_port; 11168c2ecf20Sopenharmony_ci 11178c2ecf20Sopenharmony_ci /* num_branches should be between 1-8 */ 11188c2ecf20Sopenharmony_ci if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) { 11198c2ecf20Sopenharmony_ci ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n", 11208c2ecf20Sopenharmony_ci num_branches); 11218c2ecf20Sopenharmony_ci status = ICE_ERR_PARAM; 11228c2ecf20Sopenharmony_ci goto err_init_port; 11238c2ecf20Sopenharmony_ci } 11248c2ecf20Sopenharmony_ci 11258c2ecf20Sopenharmony_ci /* get the number of elements on the default/first branch */ 11268c2ecf20Sopenharmony_ci num_elems = le16_to_cpu(buf[0].hdr.num_elems); 11278c2ecf20Sopenharmony_ci 11288c2ecf20Sopenharmony_ci /* num_elems should always be between 1-9 */ 11298c2ecf20Sopenharmony_ci if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) { 11308c2ecf20Sopenharmony_ci ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n", 11318c2ecf20Sopenharmony_ci num_elems); 11328c2ecf20Sopenharmony_ci status = ICE_ERR_PARAM; 11338c2ecf20Sopenharmony_ci goto err_init_port; 11348c2ecf20Sopenharmony_ci } 11358c2ecf20Sopenharmony_ci 11368c2ecf20Sopenharmony_ci /* If the last node is a leaf node then the index of the queue group 11378c2ecf20Sopenharmony_ci * layer is two less than the number of elements. 
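	 * For instance, if the branch reports 9 elements and element index 8
	 * is a leaf, last_node_teid is taken from element index 7 (the queue
	 * group node); otherwise the TEID of the last element itself is
	 * cached.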
11388c2ecf20Sopenharmony_ci */ 11398c2ecf20Sopenharmony_ci if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type == 11408c2ecf20Sopenharmony_ci ICE_AQC_ELEM_TYPE_LEAF) 11418c2ecf20Sopenharmony_ci pi->last_node_teid = 11428c2ecf20Sopenharmony_ci le32_to_cpu(buf[0].generic[num_elems - 2].node_teid); 11438c2ecf20Sopenharmony_ci else 11448c2ecf20Sopenharmony_ci pi->last_node_teid = 11458c2ecf20Sopenharmony_ci le32_to_cpu(buf[0].generic[num_elems - 1].node_teid); 11468c2ecf20Sopenharmony_ci 11478c2ecf20Sopenharmony_ci /* Insert the Tx Sched root node */ 11488c2ecf20Sopenharmony_ci status = ice_sched_add_root_node(pi, &buf[0].generic[0]); 11498c2ecf20Sopenharmony_ci if (status) 11508c2ecf20Sopenharmony_ci goto err_init_port; 11518c2ecf20Sopenharmony_ci 11528c2ecf20Sopenharmony_ci /* Parse the default tree and cache the information */ 11538c2ecf20Sopenharmony_ci for (i = 0; i < num_branches; i++) { 11548c2ecf20Sopenharmony_ci num_elems = le16_to_cpu(buf[i].hdr.num_elems); 11558c2ecf20Sopenharmony_ci 11568c2ecf20Sopenharmony_ci /* Skip root element as already inserted */ 11578c2ecf20Sopenharmony_ci for (j = 1; j < num_elems; j++) { 11588c2ecf20Sopenharmony_ci /* update the sw entry point */ 11598c2ecf20Sopenharmony_ci if (buf[0].generic[j].data.elem_type == 11608c2ecf20Sopenharmony_ci ICE_AQC_ELEM_TYPE_ENTRY_POINT) 11618c2ecf20Sopenharmony_ci hw->sw_entry_point_layer = j; 11628c2ecf20Sopenharmony_ci 11638c2ecf20Sopenharmony_ci status = ice_sched_add_node(pi, j, &buf[i].generic[j]); 11648c2ecf20Sopenharmony_ci if (status) 11658c2ecf20Sopenharmony_ci goto err_init_port; 11668c2ecf20Sopenharmony_ci } 11678c2ecf20Sopenharmony_ci } 11688c2ecf20Sopenharmony_ci 11698c2ecf20Sopenharmony_ci /* Remove the default nodes. */ 11708c2ecf20Sopenharmony_ci if (pi->root) 11718c2ecf20Sopenharmony_ci ice_sched_rm_dflt_nodes(pi); 11728c2ecf20Sopenharmony_ci 11738c2ecf20Sopenharmony_ci /* initialize the port for handling the scheduler tree */ 11748c2ecf20Sopenharmony_ci pi->port_state = ICE_SCHED_PORT_STATE_READY; 11758c2ecf20Sopenharmony_ci mutex_init(&pi->sched_lock); 11768c2ecf20Sopenharmony_ci for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) 11778c2ecf20Sopenharmony_ci INIT_LIST_HEAD(&pi->rl_prof_list[i]); 11788c2ecf20Sopenharmony_ci 11798c2ecf20Sopenharmony_cierr_init_port: 11808c2ecf20Sopenharmony_ci if (status && pi->root) { 11818c2ecf20Sopenharmony_ci ice_free_sched_node(pi, pi->root); 11828c2ecf20Sopenharmony_ci pi->root = NULL; 11838c2ecf20Sopenharmony_ci } 11848c2ecf20Sopenharmony_ci 11858c2ecf20Sopenharmony_ci devm_kfree(ice_hw_to_dev(hw), buf); 11868c2ecf20Sopenharmony_ci return status; 11878c2ecf20Sopenharmony_ci} 11888c2ecf20Sopenharmony_ci 11898c2ecf20Sopenharmony_ci/** 11908c2ecf20Sopenharmony_ci * ice_sched_query_res_alloc - query the FW for num of logical sched layers 11918c2ecf20Sopenharmony_ci * @hw: pointer to the HW struct 11928c2ecf20Sopenharmony_ci * 11938c2ecf20Sopenharmony_ci * query FW for allocated scheduler resources and store in HW struct 11948c2ecf20Sopenharmony_ci */ 11958c2ecf20Sopenharmony_cienum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) 11968c2ecf20Sopenharmony_ci{ 11978c2ecf20Sopenharmony_ci struct ice_aqc_query_txsched_res_resp *buf; 11988c2ecf20Sopenharmony_ci enum ice_status status = 0; 11998c2ecf20Sopenharmony_ci __le16 max_sibl; 12008c2ecf20Sopenharmony_ci u16 i; 12018c2ecf20Sopenharmony_ci 12028c2ecf20Sopenharmony_ci if (hw->layer_info) 12038c2ecf20Sopenharmony_ci return status; 12048c2ecf20Sopenharmony_ci 12058c2ecf20Sopenharmony_ci buf = 
devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL); 12068c2ecf20Sopenharmony_ci if (!buf) 12078c2ecf20Sopenharmony_ci return ICE_ERR_NO_MEMORY; 12088c2ecf20Sopenharmony_ci 12098c2ecf20Sopenharmony_ci status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL); 12108c2ecf20Sopenharmony_ci if (status) 12118c2ecf20Sopenharmony_ci goto sched_query_out; 12128c2ecf20Sopenharmony_ci 12138c2ecf20Sopenharmony_ci hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels); 12148c2ecf20Sopenharmony_ci hw->num_tx_sched_phys_layers = 12158c2ecf20Sopenharmony_ci le16_to_cpu(buf->sched_props.phys_levels); 12168c2ecf20Sopenharmony_ci hw->flattened_layers = buf->sched_props.flattening_bitmap; 12178c2ecf20Sopenharmony_ci hw->max_cgds = buf->sched_props.max_pf_cgds; 12188c2ecf20Sopenharmony_ci 12198c2ecf20Sopenharmony_ci /* max sibling group size of current layer refers to the max children 12208c2ecf20Sopenharmony_ci * of the below layer node. 12218c2ecf20Sopenharmony_ci * layer 1 node max children will be layer 2 max sibling group size 12228c2ecf20Sopenharmony_ci * layer 2 node max children will be layer 3 max sibling group size 12238c2ecf20Sopenharmony_ci * and so on. This array will be populated from root (index 0) to 12248c2ecf20Sopenharmony_ci * qgroup layer 7. Leaf node has no children. 12258c2ecf20Sopenharmony_ci */ 12268c2ecf20Sopenharmony_ci for (i = 0; i < hw->num_tx_sched_layers - 1; i++) { 12278c2ecf20Sopenharmony_ci max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz; 12288c2ecf20Sopenharmony_ci hw->max_children[i] = le16_to_cpu(max_sibl); 12298c2ecf20Sopenharmony_ci } 12308c2ecf20Sopenharmony_ci 12318c2ecf20Sopenharmony_ci hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, 12328c2ecf20Sopenharmony_ci (hw->num_tx_sched_layers * 12338c2ecf20Sopenharmony_ci sizeof(*hw->layer_info)), 12348c2ecf20Sopenharmony_ci GFP_KERNEL); 12358c2ecf20Sopenharmony_ci if (!hw->layer_info) { 12368c2ecf20Sopenharmony_ci status = ICE_ERR_NO_MEMORY; 12378c2ecf20Sopenharmony_ci goto sched_query_out; 12388c2ecf20Sopenharmony_ci } 12398c2ecf20Sopenharmony_ci 12408c2ecf20Sopenharmony_cisched_query_out: 12418c2ecf20Sopenharmony_ci devm_kfree(ice_hw_to_dev(hw), buf); 12428c2ecf20Sopenharmony_ci return status; 12438c2ecf20Sopenharmony_ci} 12448c2ecf20Sopenharmony_ci 12458c2ecf20Sopenharmony_ci/** 12468c2ecf20Sopenharmony_ci * ice_sched_find_node_in_subtree - Find node in part of base node subtree 12478c2ecf20Sopenharmony_ci * @hw: pointer to the HW struct 12488c2ecf20Sopenharmony_ci * @base: pointer to the base node 12498c2ecf20Sopenharmony_ci * @node: pointer to the node to search 12508c2ecf20Sopenharmony_ci * 12518c2ecf20Sopenharmony_ci * This function checks whether a given node is part of the base node 12528c2ecf20Sopenharmony_ci * subtree or not 12538c2ecf20Sopenharmony_ci */ 12548c2ecf20Sopenharmony_cistatic bool 12558c2ecf20Sopenharmony_ciice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base, 12568c2ecf20Sopenharmony_ci struct ice_sched_node *node) 12578c2ecf20Sopenharmony_ci{ 12588c2ecf20Sopenharmony_ci u8 i; 12598c2ecf20Sopenharmony_ci 12608c2ecf20Sopenharmony_ci for (i = 0; i < base->num_children; i++) { 12618c2ecf20Sopenharmony_ci struct ice_sched_node *child = base->children[i]; 12628c2ecf20Sopenharmony_ci 12638c2ecf20Sopenharmony_ci if (node == child) 12648c2ecf20Sopenharmony_ci return true; 12658c2ecf20Sopenharmony_ci 12668c2ecf20Sopenharmony_ci if (child->tx_sched_layer > node->tx_sched_layer) 12678c2ecf20Sopenharmony_ci return false; 
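		/* Children sit one layer below @base, so once a child is
		 * already deeper than @node the node cannot be anywhere in
		 * this subtree and the early return above applies.
		 */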
12688c2ecf20Sopenharmony_ci
12698c2ecf20Sopenharmony_ci		/* this recursion is intentional, and wouldn't
12708c2ecf20Sopenharmony_ci		 * go more than 8 calls
12718c2ecf20Sopenharmony_ci		 */
12728c2ecf20Sopenharmony_ci		if (ice_sched_find_node_in_subtree(hw, child, node))
12738c2ecf20Sopenharmony_ci			return true;
12748c2ecf20Sopenharmony_ci	}
12758c2ecf20Sopenharmony_ci	return false;
12768c2ecf20Sopenharmony_ci}
12778c2ecf20Sopenharmony_ci
12788c2ecf20Sopenharmony_ci/**
12798c2ecf20Sopenharmony_ci * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
12808c2ecf20Sopenharmony_ci * @pi: port information structure
12818c2ecf20Sopenharmony_ci * @vsi_node: pointer to the software VSI node
12828c2ecf20Sopenharmony_ci * @qgrp_node: first queue group node identified for scanning
12838c2ecf20Sopenharmony_ci * @owner: LAN or RDMA
12848c2ecf20Sopenharmony_ci *
12858c2ecf20Sopenharmony_ci * This function retrieves a free LAN or RDMA queue group node by scanning
12868c2ecf20Sopenharmony_ci * qgrp_node and its siblings for the queue group with the fewest number
12878c2ecf20Sopenharmony_ci * of queues currently assigned.
12888c2ecf20Sopenharmony_ci */
12898c2ecf20Sopenharmony_cistatic struct ice_sched_node *
12908c2ecf20Sopenharmony_ciice_sched_get_free_qgrp(struct ice_port_info *pi,
12918c2ecf20Sopenharmony_ci			struct ice_sched_node *vsi_node,
12928c2ecf20Sopenharmony_ci			struct ice_sched_node *qgrp_node, u8 owner)
12938c2ecf20Sopenharmony_ci{
12948c2ecf20Sopenharmony_ci	struct ice_sched_node *min_qgrp;
12958c2ecf20Sopenharmony_ci	u8 min_children;
12968c2ecf20Sopenharmony_ci
12978c2ecf20Sopenharmony_ci	if (!qgrp_node)
12988c2ecf20Sopenharmony_ci		return qgrp_node;
12998c2ecf20Sopenharmony_ci	min_children = qgrp_node->num_children;
13008c2ecf20Sopenharmony_ci	if (!min_children)
13018c2ecf20Sopenharmony_ci		return qgrp_node;
13028c2ecf20Sopenharmony_ci	min_qgrp = qgrp_node;
13038c2ecf20Sopenharmony_ci	/* scan the queue group siblings and keep the one with the fewest
13048c2ecf20Sopenharmony_ci	 * children. Always filling the emptiest group keeps the groups
13058c2ecf20Sopenharmony_ci	 * equally loaded and active, so bandwidth ends up distributed
13068c2ecf20Sopenharmony_ci	 * evenly across all queues.
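	 * For example (illustrative only): with two sibling groups currently
	 * holding 3 and 5 queues, the group with 3 is chosen so the next
	 * queue lands in the emptier group.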
13078c2ecf20Sopenharmony_ci */ 13088c2ecf20Sopenharmony_ci while (qgrp_node) { 13098c2ecf20Sopenharmony_ci /* make sure the qgroup node is part of the VSI subtree */ 13108c2ecf20Sopenharmony_ci if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) 13118c2ecf20Sopenharmony_ci if (qgrp_node->num_children < min_children && 13128c2ecf20Sopenharmony_ci qgrp_node->owner == owner) { 13138c2ecf20Sopenharmony_ci /* replace the new min queue group node */ 13148c2ecf20Sopenharmony_ci min_qgrp = qgrp_node; 13158c2ecf20Sopenharmony_ci min_children = min_qgrp->num_children; 13168c2ecf20Sopenharmony_ci /* break if it has no children, */ 13178c2ecf20Sopenharmony_ci if (!min_children) 13188c2ecf20Sopenharmony_ci break; 13198c2ecf20Sopenharmony_ci } 13208c2ecf20Sopenharmony_ci qgrp_node = qgrp_node->sibling; 13218c2ecf20Sopenharmony_ci } 13228c2ecf20Sopenharmony_ci return min_qgrp; 13238c2ecf20Sopenharmony_ci} 13248c2ecf20Sopenharmony_ci 13258c2ecf20Sopenharmony_ci/** 13268c2ecf20Sopenharmony_ci * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node 13278c2ecf20Sopenharmony_ci * @pi: port information structure 13288c2ecf20Sopenharmony_ci * @vsi_handle: software VSI handle 13298c2ecf20Sopenharmony_ci * @tc: branch number 13308c2ecf20Sopenharmony_ci * @owner: LAN or RDMA 13318c2ecf20Sopenharmony_ci * 13328c2ecf20Sopenharmony_ci * This function retrieves a free LAN or RDMA queue group node 13338c2ecf20Sopenharmony_ci */ 13348c2ecf20Sopenharmony_cistruct ice_sched_node * 13358c2ecf20Sopenharmony_ciice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 13368c2ecf20Sopenharmony_ci u8 owner) 13378c2ecf20Sopenharmony_ci{ 13388c2ecf20Sopenharmony_ci struct ice_sched_node *vsi_node, *qgrp_node; 13398c2ecf20Sopenharmony_ci struct ice_vsi_ctx *vsi_ctx; 13408c2ecf20Sopenharmony_ci u16 max_children; 13418c2ecf20Sopenharmony_ci u8 qgrp_layer; 13428c2ecf20Sopenharmony_ci 13438c2ecf20Sopenharmony_ci qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); 13448c2ecf20Sopenharmony_ci max_children = pi->hw->max_children[qgrp_layer]; 13458c2ecf20Sopenharmony_ci 13468c2ecf20Sopenharmony_ci vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 13478c2ecf20Sopenharmony_ci if (!vsi_ctx) 13488c2ecf20Sopenharmony_ci return NULL; 13498c2ecf20Sopenharmony_ci vsi_node = vsi_ctx->sched.vsi_node[tc]; 13508c2ecf20Sopenharmony_ci /* validate invalid VSI ID */ 13518c2ecf20Sopenharmony_ci if (!vsi_node) 13528c2ecf20Sopenharmony_ci return NULL; 13538c2ecf20Sopenharmony_ci 13548c2ecf20Sopenharmony_ci /* get the first queue group node from VSI sub-tree */ 13558c2ecf20Sopenharmony_ci qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); 13568c2ecf20Sopenharmony_ci while (qgrp_node) { 13578c2ecf20Sopenharmony_ci /* make sure the qgroup node is part of the VSI subtree */ 13588c2ecf20Sopenharmony_ci if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) 13598c2ecf20Sopenharmony_ci if (qgrp_node->num_children < max_children && 13608c2ecf20Sopenharmony_ci qgrp_node->owner == owner) 13618c2ecf20Sopenharmony_ci break; 13628c2ecf20Sopenharmony_ci qgrp_node = qgrp_node->sibling; 13638c2ecf20Sopenharmony_ci } 13648c2ecf20Sopenharmony_ci 13658c2ecf20Sopenharmony_ci /* Select the best queue group */ 13668c2ecf20Sopenharmony_ci return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner); 13678c2ecf20Sopenharmony_ci} 13688c2ecf20Sopenharmony_ci 13698c2ecf20Sopenharmony_ci/** 13708c2ecf20Sopenharmony_ci * ice_sched_get_vsi_node - Get a VSI node based on VSI ID 13718c2ecf20Sopenharmony_ci * @hw: pointer to 
the HW struct 13728c2ecf20Sopenharmony_ci * @tc_node: pointer to the TC node 13738c2ecf20Sopenharmony_ci * @vsi_handle: software VSI handle 13748c2ecf20Sopenharmony_ci * 13758c2ecf20Sopenharmony_ci * This function retrieves a VSI node for a given VSI ID from a given 13768c2ecf20Sopenharmony_ci * TC branch 13778c2ecf20Sopenharmony_ci */ 13788c2ecf20Sopenharmony_cistatic struct ice_sched_node * 13798c2ecf20Sopenharmony_ciice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node, 13808c2ecf20Sopenharmony_ci u16 vsi_handle) 13818c2ecf20Sopenharmony_ci{ 13828c2ecf20Sopenharmony_ci struct ice_sched_node *node; 13838c2ecf20Sopenharmony_ci u8 vsi_layer; 13848c2ecf20Sopenharmony_ci 13858c2ecf20Sopenharmony_ci vsi_layer = ice_sched_get_vsi_layer(hw); 13868c2ecf20Sopenharmony_ci node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer); 13878c2ecf20Sopenharmony_ci 13888c2ecf20Sopenharmony_ci /* Check whether it already exists */ 13898c2ecf20Sopenharmony_ci while (node) { 13908c2ecf20Sopenharmony_ci if (node->vsi_handle == vsi_handle) 13918c2ecf20Sopenharmony_ci return node; 13928c2ecf20Sopenharmony_ci node = node->sibling; 13938c2ecf20Sopenharmony_ci } 13948c2ecf20Sopenharmony_ci 13958c2ecf20Sopenharmony_ci return node; 13968c2ecf20Sopenharmony_ci} 13978c2ecf20Sopenharmony_ci 13988c2ecf20Sopenharmony_ci/** 13998c2ecf20Sopenharmony_ci * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes 14008c2ecf20Sopenharmony_ci * @hw: pointer to the HW struct 14018c2ecf20Sopenharmony_ci * @num_qs: number of queues 14028c2ecf20Sopenharmony_ci * @num_nodes: num nodes array 14038c2ecf20Sopenharmony_ci * 14048c2ecf20Sopenharmony_ci * This function calculates the number of VSI child nodes based on the 14058c2ecf20Sopenharmony_ci * number of queues. 14068c2ecf20Sopenharmony_ci */ 14078c2ecf20Sopenharmony_cistatic void 14088c2ecf20Sopenharmony_ciice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) 14098c2ecf20Sopenharmony_ci{ 14108c2ecf20Sopenharmony_ci u16 num = num_qs; 14118c2ecf20Sopenharmony_ci u8 i, qgl, vsil; 14128c2ecf20Sopenharmony_ci 14138c2ecf20Sopenharmony_ci qgl = ice_sched_get_qgrp_layer(hw); 14148c2ecf20Sopenharmony_ci vsil = ice_sched_get_vsi_layer(hw); 14158c2ecf20Sopenharmony_ci 14168c2ecf20Sopenharmony_ci /* calculate num nodes from queue group to VSI layer */ 14178c2ecf20Sopenharmony_ci for (i = qgl; i > vsil; i--) { 14188c2ecf20Sopenharmony_ci /* round to the next integer if there is a remainder */ 14198c2ecf20Sopenharmony_ci num = DIV_ROUND_UP(num, hw->max_children[i]); 14208c2ecf20Sopenharmony_ci 14218c2ecf20Sopenharmony_ci /* need at least one node */ 14228c2ecf20Sopenharmony_ci num_nodes[i] = num ? num : 1; 14238c2ecf20Sopenharmony_ci } 14248c2ecf20Sopenharmony_ci} 14258c2ecf20Sopenharmony_ci 14268c2ecf20Sopenharmony_ci/** 14278c2ecf20Sopenharmony_ci * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree 14288c2ecf20Sopenharmony_ci * @pi: port information structure 14298c2ecf20Sopenharmony_ci * @vsi_handle: software VSI handle 14308c2ecf20Sopenharmony_ci * @tc_node: pointer to the TC node 14318c2ecf20Sopenharmony_ci * @num_nodes: pointer to the num nodes that needs to be added per layer 14328c2ecf20Sopenharmony_ci * @owner: node owner (LAN or RDMA) 14338c2ecf20Sopenharmony_ci * 14348c2ecf20Sopenharmony_ci * This function adds the VSI child nodes to tree. It gets called for 14358c2ecf20Sopenharmony_ci * LAN and RDMA separately. 
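 * The num_nodes array is indexed by layer, as filled in by
 * ice_sched_calc_vsi_child_nodes(). As an illustration (numbers are
 * examples only, not taken from hardware): 128 queues with 8 children
 * per node need DIV_ROUND_UP(128, 8) = 16 queue group nodes, and those
 * 16 groups in turn need DIV_ROUND_UP(16, 8) = 2 nodes one layer up.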
14368c2ecf20Sopenharmony_ci */ 14378c2ecf20Sopenharmony_cistatic enum ice_status 14388c2ecf20Sopenharmony_ciice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, 14398c2ecf20Sopenharmony_ci struct ice_sched_node *tc_node, u16 *num_nodes, 14408c2ecf20Sopenharmony_ci u8 owner) 14418c2ecf20Sopenharmony_ci{ 14428c2ecf20Sopenharmony_ci struct ice_sched_node *parent, *node; 14438c2ecf20Sopenharmony_ci struct ice_hw *hw = pi->hw; 14448c2ecf20Sopenharmony_ci enum ice_status status; 14458c2ecf20Sopenharmony_ci u32 first_node_teid; 14468c2ecf20Sopenharmony_ci u16 num_added = 0; 14478c2ecf20Sopenharmony_ci u8 i, qgl, vsil; 14488c2ecf20Sopenharmony_ci 14498c2ecf20Sopenharmony_ci qgl = ice_sched_get_qgrp_layer(hw); 14508c2ecf20Sopenharmony_ci vsil = ice_sched_get_vsi_layer(hw); 14518c2ecf20Sopenharmony_ci parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); 14528c2ecf20Sopenharmony_ci for (i = vsil + 1; i <= qgl; i++) { 14538c2ecf20Sopenharmony_ci if (!parent) 14548c2ecf20Sopenharmony_ci return ICE_ERR_CFG; 14558c2ecf20Sopenharmony_ci 14568c2ecf20Sopenharmony_ci status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, 14578c2ecf20Sopenharmony_ci num_nodes[i], 14588c2ecf20Sopenharmony_ci &first_node_teid, 14598c2ecf20Sopenharmony_ci &num_added); 14608c2ecf20Sopenharmony_ci if (status || num_nodes[i] != num_added) 14618c2ecf20Sopenharmony_ci return ICE_ERR_CFG; 14628c2ecf20Sopenharmony_ci 14638c2ecf20Sopenharmony_ci /* The newly added node can be a new parent for the next 14648c2ecf20Sopenharmony_ci * layer nodes 14658c2ecf20Sopenharmony_ci */ 14668c2ecf20Sopenharmony_ci if (num_added) { 14678c2ecf20Sopenharmony_ci parent = ice_sched_find_node_by_teid(tc_node, 14688c2ecf20Sopenharmony_ci first_node_teid); 14698c2ecf20Sopenharmony_ci node = parent; 14708c2ecf20Sopenharmony_ci while (node) { 14718c2ecf20Sopenharmony_ci node->owner = owner; 14728c2ecf20Sopenharmony_ci node = node->sibling; 14738c2ecf20Sopenharmony_ci } 14748c2ecf20Sopenharmony_ci } else { 14758c2ecf20Sopenharmony_ci parent = parent->children[0]; 14768c2ecf20Sopenharmony_ci } 14778c2ecf20Sopenharmony_ci } 14788c2ecf20Sopenharmony_ci 14798c2ecf20Sopenharmony_ci return 0; 14808c2ecf20Sopenharmony_ci} 14818c2ecf20Sopenharmony_ci 14828c2ecf20Sopenharmony_ci/** 14838c2ecf20Sopenharmony_ci * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes 14848c2ecf20Sopenharmony_ci * @hw: pointer to the HW struct 14858c2ecf20Sopenharmony_ci * @tc_node: pointer to TC node 14868c2ecf20Sopenharmony_ci * @num_nodes: pointer to num nodes array 14878c2ecf20Sopenharmony_ci * 14888c2ecf20Sopenharmony_ci * This function calculates the number of supported nodes needed to add this 14898c2ecf20Sopenharmony_ci * VSI into Tx tree including the VSI, parent and intermediate nodes in below 14908c2ecf20Sopenharmony_ci * layers 14918c2ecf20Sopenharmony_ci */ 14928c2ecf20Sopenharmony_cistatic void 14938c2ecf20Sopenharmony_ciice_sched_calc_vsi_support_nodes(struct ice_hw *hw, 14948c2ecf20Sopenharmony_ci struct ice_sched_node *tc_node, u16 *num_nodes) 14958c2ecf20Sopenharmony_ci{ 14968c2ecf20Sopenharmony_ci struct ice_sched_node *node; 14978c2ecf20Sopenharmony_ci u8 vsil; 14988c2ecf20Sopenharmony_ci int i; 14998c2ecf20Sopenharmony_ci 15008c2ecf20Sopenharmony_ci vsil = ice_sched_get_vsi_layer(hw); 15018c2ecf20Sopenharmony_ci for (i = vsil; i >= hw->sw_entry_point_layer; i--) 15028c2ecf20Sopenharmony_ci /* Add intermediate nodes if TC has no children and 15038c2ecf20Sopenharmony_ci * need at least one node for VSI 
15048c2ecf20Sopenharmony_ci */ 15058c2ecf20Sopenharmony_ci if (!tc_node->num_children || i == vsil) { 15068c2ecf20Sopenharmony_ci num_nodes[i]++; 15078c2ecf20Sopenharmony_ci } else { 15088c2ecf20Sopenharmony_ci /* If intermediate nodes are reached max children 15098c2ecf20Sopenharmony_ci * then add a new one. 15108c2ecf20Sopenharmony_ci */ 15118c2ecf20Sopenharmony_ci node = ice_sched_get_first_node(hw->port_info, tc_node, 15128c2ecf20Sopenharmony_ci (u8)i); 15138c2ecf20Sopenharmony_ci /* scan all the siblings */ 15148c2ecf20Sopenharmony_ci while (node) { 15158c2ecf20Sopenharmony_ci if (node->num_children < hw->max_children[i]) 15168c2ecf20Sopenharmony_ci break; 15178c2ecf20Sopenharmony_ci node = node->sibling; 15188c2ecf20Sopenharmony_ci } 15198c2ecf20Sopenharmony_ci 15208c2ecf20Sopenharmony_ci /* tree has one intermediate node to add this new VSI. 15218c2ecf20Sopenharmony_ci * So no need to calculate supported nodes for below 15228c2ecf20Sopenharmony_ci * layers. 15238c2ecf20Sopenharmony_ci */ 15248c2ecf20Sopenharmony_ci if (node) 15258c2ecf20Sopenharmony_ci break; 15268c2ecf20Sopenharmony_ci /* all the nodes are full, allocate a new one */ 15278c2ecf20Sopenharmony_ci num_nodes[i]++; 15288c2ecf20Sopenharmony_ci } 15298c2ecf20Sopenharmony_ci} 15308c2ecf20Sopenharmony_ci 15318c2ecf20Sopenharmony_ci/** 15328c2ecf20Sopenharmony_ci * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree 15338c2ecf20Sopenharmony_ci * @pi: port information structure 15348c2ecf20Sopenharmony_ci * @vsi_handle: software VSI handle 15358c2ecf20Sopenharmony_ci * @tc_node: pointer to TC node 15368c2ecf20Sopenharmony_ci * @num_nodes: pointer to num nodes array 15378c2ecf20Sopenharmony_ci * 15388c2ecf20Sopenharmony_ci * This function adds the VSI supported nodes into Tx tree including the 15398c2ecf20Sopenharmony_ci * VSI, its parent and intermediate nodes in below layers 15408c2ecf20Sopenharmony_ci */ 15418c2ecf20Sopenharmony_cistatic enum ice_status 15428c2ecf20Sopenharmony_ciice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, 15438c2ecf20Sopenharmony_ci struct ice_sched_node *tc_node, u16 *num_nodes) 15448c2ecf20Sopenharmony_ci{ 15458c2ecf20Sopenharmony_ci struct ice_sched_node *parent = tc_node; 15468c2ecf20Sopenharmony_ci enum ice_status status; 15478c2ecf20Sopenharmony_ci u32 first_node_teid; 15488c2ecf20Sopenharmony_ci u16 num_added = 0; 15498c2ecf20Sopenharmony_ci u8 i, vsil; 15508c2ecf20Sopenharmony_ci 15518c2ecf20Sopenharmony_ci if (!pi) 15528c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 15538c2ecf20Sopenharmony_ci 15548c2ecf20Sopenharmony_ci vsil = ice_sched_get_vsi_layer(pi->hw); 15558c2ecf20Sopenharmony_ci for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { 15568c2ecf20Sopenharmony_ci status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, 15578c2ecf20Sopenharmony_ci i, num_nodes[i], 15588c2ecf20Sopenharmony_ci &first_node_teid, 15598c2ecf20Sopenharmony_ci &num_added); 15608c2ecf20Sopenharmony_ci if (status || num_nodes[i] != num_added) 15618c2ecf20Sopenharmony_ci return ICE_ERR_CFG; 15628c2ecf20Sopenharmony_ci 15638c2ecf20Sopenharmony_ci /* The newly added node can be a new parent for the next 15648c2ecf20Sopenharmony_ci * layer nodes 15658c2ecf20Sopenharmony_ci */ 15668c2ecf20Sopenharmony_ci if (num_added) 15678c2ecf20Sopenharmony_ci parent = ice_sched_find_node_by_teid(tc_node, 15688c2ecf20Sopenharmony_ci first_node_teid); 15698c2ecf20Sopenharmony_ci else 15708c2ecf20Sopenharmony_ci parent = parent->children[0]; 15718c2ecf20Sopenharmony_ci 
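		/* When nothing was added at this layer, an existing first
		 * child is reused as the parent for the next layer down;
		 * either way a missing parent is treated as a config error.
		 */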
15728c2ecf20Sopenharmony_ci if (!parent) 15738c2ecf20Sopenharmony_ci return ICE_ERR_CFG; 15748c2ecf20Sopenharmony_ci 15758c2ecf20Sopenharmony_ci if (i == vsil) 15768c2ecf20Sopenharmony_ci parent->vsi_handle = vsi_handle; 15778c2ecf20Sopenharmony_ci } 15788c2ecf20Sopenharmony_ci 15798c2ecf20Sopenharmony_ci return 0; 15808c2ecf20Sopenharmony_ci} 15818c2ecf20Sopenharmony_ci 15828c2ecf20Sopenharmony_ci/** 15838c2ecf20Sopenharmony_ci * ice_sched_add_vsi_to_topo - add a new VSI into tree 15848c2ecf20Sopenharmony_ci * @pi: port information structure 15858c2ecf20Sopenharmony_ci * @vsi_handle: software VSI handle 15868c2ecf20Sopenharmony_ci * @tc: TC number 15878c2ecf20Sopenharmony_ci * 15888c2ecf20Sopenharmony_ci * This function adds a new VSI into scheduler tree 15898c2ecf20Sopenharmony_ci */ 15908c2ecf20Sopenharmony_cistatic enum ice_status 15918c2ecf20Sopenharmony_ciice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) 15928c2ecf20Sopenharmony_ci{ 15938c2ecf20Sopenharmony_ci u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; 15948c2ecf20Sopenharmony_ci struct ice_sched_node *tc_node; 15958c2ecf20Sopenharmony_ci struct ice_hw *hw = pi->hw; 15968c2ecf20Sopenharmony_ci 15978c2ecf20Sopenharmony_ci tc_node = ice_sched_get_tc_node(pi, tc); 15988c2ecf20Sopenharmony_ci if (!tc_node) 15998c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 16008c2ecf20Sopenharmony_ci 16018c2ecf20Sopenharmony_ci /* calculate number of supported nodes needed for this VSI */ 16028c2ecf20Sopenharmony_ci ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes); 16038c2ecf20Sopenharmony_ci 16048c2ecf20Sopenharmony_ci /* add VSI supported nodes to TC subtree */ 16058c2ecf20Sopenharmony_ci return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node, 16068c2ecf20Sopenharmony_ci num_nodes); 16078c2ecf20Sopenharmony_ci} 16088c2ecf20Sopenharmony_ci 16098c2ecf20Sopenharmony_ci/** 16108c2ecf20Sopenharmony_ci * ice_sched_update_vsi_child_nodes - update VSI child nodes 16118c2ecf20Sopenharmony_ci * @pi: port information structure 16128c2ecf20Sopenharmony_ci * @vsi_handle: software VSI handle 16138c2ecf20Sopenharmony_ci * @tc: TC number 16148c2ecf20Sopenharmony_ci * @new_numqs: new number of max queues 16158c2ecf20Sopenharmony_ci * @owner: owner of this subtree 16168c2ecf20Sopenharmony_ci * 16178c2ecf20Sopenharmony_ci * This function updates the VSI child nodes based on the number of queues 16188c2ecf20Sopenharmony_ci */ 16198c2ecf20Sopenharmony_cistatic enum ice_status 16208c2ecf20Sopenharmony_ciice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, 16218c2ecf20Sopenharmony_ci u8 tc, u16 new_numqs, u8 owner) 16228c2ecf20Sopenharmony_ci{ 16238c2ecf20Sopenharmony_ci u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; 16248c2ecf20Sopenharmony_ci struct ice_sched_node *vsi_node; 16258c2ecf20Sopenharmony_ci struct ice_sched_node *tc_node; 16268c2ecf20Sopenharmony_ci struct ice_vsi_ctx *vsi_ctx; 16278c2ecf20Sopenharmony_ci enum ice_status status = 0; 16288c2ecf20Sopenharmony_ci struct ice_hw *hw = pi->hw; 16298c2ecf20Sopenharmony_ci u16 prev_numqs; 16308c2ecf20Sopenharmony_ci 16318c2ecf20Sopenharmony_ci tc_node = ice_sched_get_tc_node(pi, tc); 16328c2ecf20Sopenharmony_ci if (!tc_node) 16338c2ecf20Sopenharmony_ci return ICE_ERR_CFG; 16348c2ecf20Sopenharmony_ci 16358c2ecf20Sopenharmony_ci vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); 16368c2ecf20Sopenharmony_ci if (!vsi_node) 16378c2ecf20Sopenharmony_ci return ICE_ERR_CFG; 16388c2ecf20Sopenharmony_ci 16398c2ecf20Sopenharmony_ci vsi_ctx = 
ice_get_vsi_ctx(hw, vsi_handle); 16408c2ecf20Sopenharmony_ci if (!vsi_ctx) 16418c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 16428c2ecf20Sopenharmony_ci 16438c2ecf20Sopenharmony_ci prev_numqs = vsi_ctx->sched.max_lanq[tc]; 16448c2ecf20Sopenharmony_ci /* num queues are not changed or less than the previous number */ 16458c2ecf20Sopenharmony_ci if (new_numqs <= prev_numqs) 16468c2ecf20Sopenharmony_ci return status; 16478c2ecf20Sopenharmony_ci status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs); 16488c2ecf20Sopenharmony_ci if (status) 16498c2ecf20Sopenharmony_ci return status; 16508c2ecf20Sopenharmony_ci 16518c2ecf20Sopenharmony_ci if (new_numqs) 16528c2ecf20Sopenharmony_ci ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); 16538c2ecf20Sopenharmony_ci /* Keep the max number of queue configuration all the time. Update the 16548c2ecf20Sopenharmony_ci * tree only if number of queues > previous number of queues. This may 16558c2ecf20Sopenharmony_ci * leave some extra nodes in the tree if number of queues < previous 16568c2ecf20Sopenharmony_ci * number but that wouldn't harm anything. Removing those extra nodes 16578c2ecf20Sopenharmony_ci * may complicate the code if those nodes are part of SRL or 16588c2ecf20Sopenharmony_ci * individually rate limited. 16598c2ecf20Sopenharmony_ci */ 16608c2ecf20Sopenharmony_ci status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, 16618c2ecf20Sopenharmony_ci new_num_nodes, owner); 16628c2ecf20Sopenharmony_ci if (status) 16638c2ecf20Sopenharmony_ci return status; 16648c2ecf20Sopenharmony_ci vsi_ctx->sched.max_lanq[tc] = new_numqs; 16658c2ecf20Sopenharmony_ci 16668c2ecf20Sopenharmony_ci return 0; 16678c2ecf20Sopenharmony_ci} 16688c2ecf20Sopenharmony_ci 16698c2ecf20Sopenharmony_ci/** 16708c2ecf20Sopenharmony_ci * ice_sched_cfg_vsi - configure the new/existing VSI 16718c2ecf20Sopenharmony_ci * @pi: port information structure 16728c2ecf20Sopenharmony_ci * @vsi_handle: software VSI handle 16738c2ecf20Sopenharmony_ci * @tc: TC number 16748c2ecf20Sopenharmony_ci * @maxqs: max number of queues 16758c2ecf20Sopenharmony_ci * @owner: LAN or RDMA 16768c2ecf20Sopenharmony_ci * @enable: TC enabled or disabled 16778c2ecf20Sopenharmony_ci * 16788c2ecf20Sopenharmony_ci * This function adds/updates VSI nodes based on the number of queues. If TC is 16798c2ecf20Sopenharmony_ci * enabled and VSI is in suspended state then resume the VSI back. If TC is 16808c2ecf20Sopenharmony_ci * disabled then suspend the VSI if it is not already. 
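 * A typical call when enabling a TC for a VSI's LAN queues looks like
 * (illustrative sketch only, not taken from a specific caller):
 *
 *	status = ice_sched_cfg_vsi(pi, vsi_handle, tc, max_lanq,
 *				   ICE_SCHED_NODE_OWNER_LAN, true);
 *
 * With enable == false the VSI node for that TC is only suspended, not
 * removed from the tree.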
16818c2ecf20Sopenharmony_ci */ 16828c2ecf20Sopenharmony_cienum ice_status 16838c2ecf20Sopenharmony_ciice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, 16848c2ecf20Sopenharmony_ci u8 owner, bool enable) 16858c2ecf20Sopenharmony_ci{ 16868c2ecf20Sopenharmony_ci struct ice_sched_node *vsi_node, *tc_node; 16878c2ecf20Sopenharmony_ci struct ice_vsi_ctx *vsi_ctx; 16888c2ecf20Sopenharmony_ci enum ice_status status = 0; 16898c2ecf20Sopenharmony_ci struct ice_hw *hw = pi->hw; 16908c2ecf20Sopenharmony_ci 16918c2ecf20Sopenharmony_ci ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); 16928c2ecf20Sopenharmony_ci tc_node = ice_sched_get_tc_node(pi, tc); 16938c2ecf20Sopenharmony_ci if (!tc_node) 16948c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 16958c2ecf20Sopenharmony_ci vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); 16968c2ecf20Sopenharmony_ci if (!vsi_ctx) 16978c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 16988c2ecf20Sopenharmony_ci vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); 16998c2ecf20Sopenharmony_ci 17008c2ecf20Sopenharmony_ci /* suspend the VSI if TC is not enabled */ 17018c2ecf20Sopenharmony_ci if (!enable) { 17028c2ecf20Sopenharmony_ci if (vsi_node && vsi_node->in_use) { 17038c2ecf20Sopenharmony_ci u32 teid = le32_to_cpu(vsi_node->info.node_teid); 17048c2ecf20Sopenharmony_ci 17058c2ecf20Sopenharmony_ci status = ice_sched_suspend_resume_elems(hw, 1, &teid, 17068c2ecf20Sopenharmony_ci true); 17078c2ecf20Sopenharmony_ci if (!status) 17088c2ecf20Sopenharmony_ci vsi_node->in_use = false; 17098c2ecf20Sopenharmony_ci } 17108c2ecf20Sopenharmony_ci return status; 17118c2ecf20Sopenharmony_ci } 17128c2ecf20Sopenharmony_ci 17138c2ecf20Sopenharmony_ci /* TC is enabled, if it is a new VSI then add it to the tree */ 17148c2ecf20Sopenharmony_ci if (!vsi_node) { 17158c2ecf20Sopenharmony_ci status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc); 17168c2ecf20Sopenharmony_ci if (status) 17178c2ecf20Sopenharmony_ci return status; 17188c2ecf20Sopenharmony_ci 17198c2ecf20Sopenharmony_ci vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); 17208c2ecf20Sopenharmony_ci if (!vsi_node) 17218c2ecf20Sopenharmony_ci return ICE_ERR_CFG; 17228c2ecf20Sopenharmony_ci 17238c2ecf20Sopenharmony_ci vsi_ctx->sched.vsi_node[tc] = vsi_node; 17248c2ecf20Sopenharmony_ci vsi_node->in_use = true; 17258c2ecf20Sopenharmony_ci /* invalidate the max queues whenever VSI gets added first time 17268c2ecf20Sopenharmony_ci * into the scheduler tree (boot or after reset). We need to 17278c2ecf20Sopenharmony_ci * recreate the child nodes all the time in these cases. 
17288c2ecf20Sopenharmony_ci		 */
17298c2ecf20Sopenharmony_ci		vsi_ctx->sched.max_lanq[tc] = 0;
17308c2ecf20Sopenharmony_ci	}
17318c2ecf20Sopenharmony_ci
17328c2ecf20Sopenharmony_ci	/* update the VSI child nodes */
17338c2ecf20Sopenharmony_ci	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
17348c2ecf20Sopenharmony_ci						  owner);
17358c2ecf20Sopenharmony_ci	if (status)
17368c2ecf20Sopenharmony_ci		return status;
17378c2ecf20Sopenharmony_ci
17388c2ecf20Sopenharmony_ci	/* TC is enabled, resume the VSI if it is in the suspend state */
17398c2ecf20Sopenharmony_ci	if (!vsi_node->in_use) {
17408c2ecf20Sopenharmony_ci		u32 teid = le32_to_cpu(vsi_node->info.node_teid);
17418c2ecf20Sopenharmony_ci
17428c2ecf20Sopenharmony_ci		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
17438c2ecf20Sopenharmony_ci		if (!status)
17448c2ecf20Sopenharmony_ci			vsi_node->in_use = true;
17458c2ecf20Sopenharmony_ci	}
17468c2ecf20Sopenharmony_ci
17478c2ecf20Sopenharmony_ci	return status;
17488c2ecf20Sopenharmony_ci}
17498c2ecf20Sopenharmony_ci
17508c2ecf20Sopenharmony_ci/**
17518c2ecf20Sopenharmony_ci * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
17528c2ecf20Sopenharmony_ci * @pi: port information structure
17538c2ecf20Sopenharmony_ci * @vsi_handle: software VSI handle
17548c2ecf20Sopenharmony_ci *
17558c2ecf20Sopenharmony_ci * This function removes a single aggregator VSI info entry from the
17568c2ecf20Sopenharmony_ci * aggregator list.
17578c2ecf20Sopenharmony_ci */
17588c2ecf20Sopenharmony_cistatic void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
17598c2ecf20Sopenharmony_ci{
17608c2ecf20Sopenharmony_ci	struct ice_sched_agg_info *agg_info;
17618c2ecf20Sopenharmony_ci	struct ice_sched_agg_info *atmp;
17628c2ecf20Sopenharmony_ci
17638c2ecf20Sopenharmony_ci	list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
17648c2ecf20Sopenharmony_ci				 list_entry) {
17658c2ecf20Sopenharmony_ci		struct ice_sched_agg_vsi_info *agg_vsi_info;
17668c2ecf20Sopenharmony_ci		struct ice_sched_agg_vsi_info *vtmp;
17678c2ecf20Sopenharmony_ci
17688c2ecf20Sopenharmony_ci		list_for_each_entry_safe(agg_vsi_info, vtmp,
17698c2ecf20Sopenharmony_ci					 &agg_info->agg_vsi_list, list_entry)
17708c2ecf20Sopenharmony_ci			if (agg_vsi_info->vsi_handle == vsi_handle) {
17718c2ecf20Sopenharmony_ci				list_del(&agg_vsi_info->list_entry);
17728c2ecf20Sopenharmony_ci				devm_kfree(ice_hw_to_dev(pi->hw),
17738c2ecf20Sopenharmony_ci					   agg_vsi_info);
17748c2ecf20Sopenharmony_ci				return;
17758c2ecf20Sopenharmony_ci			}
17768c2ecf20Sopenharmony_ci	}
17778c2ecf20Sopenharmony_ci}
17788c2ecf20Sopenharmony_ci
17798c2ecf20Sopenharmony_ci/**
17808c2ecf20Sopenharmony_ci * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
17818c2ecf20Sopenharmony_ci * @node: pointer to the sub-tree node
17828c2ecf20Sopenharmony_ci *
17838c2ecf20Sopenharmony_ci * This function checks for a leaf node presence in a given sub-tree node.
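 * The walk is depth-first and recursive: it returns true if the node
 * itself or any of its descendants is of type ICE_AQC_ELEM_TYPE_LEAF.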
17848c2ecf20Sopenharmony_ci */ 17858c2ecf20Sopenharmony_cistatic bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) 17868c2ecf20Sopenharmony_ci{ 17878c2ecf20Sopenharmony_ci u8 i; 17888c2ecf20Sopenharmony_ci 17898c2ecf20Sopenharmony_ci for (i = 0; i < node->num_children; i++) 17908c2ecf20Sopenharmony_ci if (ice_sched_is_leaf_node_present(node->children[i])) 17918c2ecf20Sopenharmony_ci return true; 17928c2ecf20Sopenharmony_ci /* check for a leaf node */ 17938c2ecf20Sopenharmony_ci return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF); 17948c2ecf20Sopenharmony_ci} 17958c2ecf20Sopenharmony_ci 17968c2ecf20Sopenharmony_ci/** 17978c2ecf20Sopenharmony_ci * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes 17988c2ecf20Sopenharmony_ci * @pi: port information structure 17998c2ecf20Sopenharmony_ci * @vsi_handle: software VSI handle 18008c2ecf20Sopenharmony_ci * @owner: LAN or RDMA 18018c2ecf20Sopenharmony_ci * 18028c2ecf20Sopenharmony_ci * This function removes the VSI and its LAN or RDMA children nodes from the 18038c2ecf20Sopenharmony_ci * scheduler tree. 18048c2ecf20Sopenharmony_ci */ 18058c2ecf20Sopenharmony_cistatic enum ice_status 18068c2ecf20Sopenharmony_ciice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) 18078c2ecf20Sopenharmony_ci{ 18088c2ecf20Sopenharmony_ci enum ice_status status = ICE_ERR_PARAM; 18098c2ecf20Sopenharmony_ci struct ice_vsi_ctx *vsi_ctx; 18108c2ecf20Sopenharmony_ci u8 i; 18118c2ecf20Sopenharmony_ci 18128c2ecf20Sopenharmony_ci ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle); 18138c2ecf20Sopenharmony_ci if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 18148c2ecf20Sopenharmony_ci return status; 18158c2ecf20Sopenharmony_ci mutex_lock(&pi->sched_lock); 18168c2ecf20Sopenharmony_ci vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); 18178c2ecf20Sopenharmony_ci if (!vsi_ctx) 18188c2ecf20Sopenharmony_ci goto exit_sched_rm_vsi_cfg; 18198c2ecf20Sopenharmony_ci 18208c2ecf20Sopenharmony_ci ice_for_each_traffic_class(i) { 18218c2ecf20Sopenharmony_ci struct ice_sched_node *vsi_node, *tc_node; 18228c2ecf20Sopenharmony_ci u8 j = 0; 18238c2ecf20Sopenharmony_ci 18248c2ecf20Sopenharmony_ci tc_node = ice_sched_get_tc_node(pi, i); 18258c2ecf20Sopenharmony_ci if (!tc_node) 18268c2ecf20Sopenharmony_ci continue; 18278c2ecf20Sopenharmony_ci 18288c2ecf20Sopenharmony_ci vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle); 18298c2ecf20Sopenharmony_ci if (!vsi_node) 18308c2ecf20Sopenharmony_ci continue; 18318c2ecf20Sopenharmony_ci 18328c2ecf20Sopenharmony_ci if (ice_sched_is_leaf_node_present(vsi_node)) { 18338c2ecf20Sopenharmony_ci ice_debug(pi->hw, ICE_DBG_SCHED, 18348c2ecf20Sopenharmony_ci "VSI has leaf nodes in TC %d\n", i); 18358c2ecf20Sopenharmony_ci status = ICE_ERR_IN_USE; 18368c2ecf20Sopenharmony_ci goto exit_sched_rm_vsi_cfg; 18378c2ecf20Sopenharmony_ci } 18388c2ecf20Sopenharmony_ci while (j < vsi_node->num_children) { 18398c2ecf20Sopenharmony_ci if (vsi_node->children[j]->owner == owner) { 18408c2ecf20Sopenharmony_ci ice_free_sched_node(pi, vsi_node->children[j]); 18418c2ecf20Sopenharmony_ci 18428c2ecf20Sopenharmony_ci /* reset the counter again since the num 18438c2ecf20Sopenharmony_ci * children will be updated after node removal 18448c2ecf20Sopenharmony_ci */ 18458c2ecf20Sopenharmony_ci j = 0; 18468c2ecf20Sopenharmony_ci } else { 18478c2ecf20Sopenharmony_ci j++; 18488c2ecf20Sopenharmony_ci } 18498c2ecf20Sopenharmony_ci } 18508c2ecf20Sopenharmony_ci /* remove the VSI if it has no children */ 18518c2ecf20Sopenharmony_ci 
if (!vsi_node->num_children) { 18528c2ecf20Sopenharmony_ci ice_free_sched_node(pi, vsi_node); 18538c2ecf20Sopenharmony_ci vsi_ctx->sched.vsi_node[i] = NULL; 18548c2ecf20Sopenharmony_ci 18558c2ecf20Sopenharmony_ci /* clean up aggregator related VSI info if any */ 18568c2ecf20Sopenharmony_ci ice_sched_rm_agg_vsi_info(pi, vsi_handle); 18578c2ecf20Sopenharmony_ci } 18588c2ecf20Sopenharmony_ci if (owner == ICE_SCHED_NODE_OWNER_LAN) 18598c2ecf20Sopenharmony_ci vsi_ctx->sched.max_lanq[i] = 0; 18608c2ecf20Sopenharmony_ci } 18618c2ecf20Sopenharmony_ci status = 0; 18628c2ecf20Sopenharmony_ci 18638c2ecf20Sopenharmony_ciexit_sched_rm_vsi_cfg: 18648c2ecf20Sopenharmony_ci mutex_unlock(&pi->sched_lock); 18658c2ecf20Sopenharmony_ci return status; 18668c2ecf20Sopenharmony_ci} 18678c2ecf20Sopenharmony_ci 18688c2ecf20Sopenharmony_ci/** 18698c2ecf20Sopenharmony_ci * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes 18708c2ecf20Sopenharmony_ci * @pi: port information structure 18718c2ecf20Sopenharmony_ci * @vsi_handle: software VSI handle 18728c2ecf20Sopenharmony_ci * 18738c2ecf20Sopenharmony_ci * This function clears the VSI and its LAN children nodes from scheduler tree 18748c2ecf20Sopenharmony_ci * for all TCs. 18758c2ecf20Sopenharmony_ci */ 18768c2ecf20Sopenharmony_cienum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) 18778c2ecf20Sopenharmony_ci{ 18788c2ecf20Sopenharmony_ci return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN); 18798c2ecf20Sopenharmony_ci} 18808c2ecf20Sopenharmony_ci 18818c2ecf20Sopenharmony_ci/** 18828c2ecf20Sopenharmony_ci * ice_sched_rm_unused_rl_prof - remove unused RL profile 18838c2ecf20Sopenharmony_ci * @pi: port information structure 18848c2ecf20Sopenharmony_ci * 18858c2ecf20Sopenharmony_ci * This function removes unused rate limit profiles from the HW and 18868c2ecf20Sopenharmony_ci * SW DB. The caller needs to hold scheduler lock. 18878c2ecf20Sopenharmony_ci */ 18888c2ecf20Sopenharmony_cistatic void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi) 18898c2ecf20Sopenharmony_ci{ 18908c2ecf20Sopenharmony_ci u16 ln; 18918c2ecf20Sopenharmony_ci 18928c2ecf20Sopenharmony_ci for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { 18938c2ecf20Sopenharmony_ci struct ice_aqc_rl_profile_info *rl_prof_elem; 18948c2ecf20Sopenharmony_ci struct ice_aqc_rl_profile_info *rl_prof_tmp; 18958c2ecf20Sopenharmony_ci 18968c2ecf20Sopenharmony_ci list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, 18978c2ecf20Sopenharmony_ci &pi->rl_prof_list[ln], list_entry) { 18988c2ecf20Sopenharmony_ci if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem)) 18998c2ecf20Sopenharmony_ci ice_debug(pi->hw, ICE_DBG_SCHED, 19008c2ecf20Sopenharmony_ci "Removed rl profile\n"); 19018c2ecf20Sopenharmony_ci } 19028c2ecf20Sopenharmony_ci } 19038c2ecf20Sopenharmony_ci} 19048c2ecf20Sopenharmony_ci 19058c2ecf20Sopenharmony_ci/** 19068c2ecf20Sopenharmony_ci * ice_sched_update_elem - update element 19078c2ecf20Sopenharmony_ci * @hw: pointer to the HW struct 19088c2ecf20Sopenharmony_ci * @node: pointer to node 19098c2ecf20Sopenharmony_ci * @info: node info to update 19108c2ecf20Sopenharmony_ci * 19118c2ecf20Sopenharmony_ci * Update the HW DB, and local SW DB of node. Update the scheduling 19128c2ecf20Sopenharmony_ci * parameters of node from argument info data buffer (Info->data buf) and 19138c2ecf20Sopenharmony_ci * returns success or error on config sched element failure. The caller 19148c2ecf20Sopenharmony_ci * needs to hold scheduler lock. 
19158c2ecf20Sopenharmony_ci */ 19168c2ecf20Sopenharmony_cistatic enum ice_status 19178c2ecf20Sopenharmony_ciice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, 19188c2ecf20Sopenharmony_ci struct ice_aqc_txsched_elem_data *info) 19198c2ecf20Sopenharmony_ci{ 19208c2ecf20Sopenharmony_ci struct ice_aqc_txsched_elem_data buf; 19218c2ecf20Sopenharmony_ci enum ice_status status; 19228c2ecf20Sopenharmony_ci u16 elem_cfgd = 0; 19238c2ecf20Sopenharmony_ci u16 num_elems = 1; 19248c2ecf20Sopenharmony_ci 19258c2ecf20Sopenharmony_ci buf = *info; 19268c2ecf20Sopenharmony_ci /* Parent TEID is reserved field in this aq call */ 19278c2ecf20Sopenharmony_ci buf.parent_teid = 0; 19288c2ecf20Sopenharmony_ci /* Element type is reserved field in this aq call */ 19298c2ecf20Sopenharmony_ci buf.data.elem_type = 0; 19308c2ecf20Sopenharmony_ci /* Flags is reserved field in this aq call */ 19318c2ecf20Sopenharmony_ci buf.data.flags = 0; 19328c2ecf20Sopenharmony_ci 19338c2ecf20Sopenharmony_ci /* Update HW DB */ 19348c2ecf20Sopenharmony_ci /* Configure element node */ 19358c2ecf20Sopenharmony_ci status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf), 19368c2ecf20Sopenharmony_ci &elem_cfgd, NULL); 19378c2ecf20Sopenharmony_ci if (status || elem_cfgd != num_elems) { 19388c2ecf20Sopenharmony_ci ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n"); 19398c2ecf20Sopenharmony_ci return ICE_ERR_CFG; 19408c2ecf20Sopenharmony_ci } 19418c2ecf20Sopenharmony_ci 19428c2ecf20Sopenharmony_ci /* Config success case */ 19438c2ecf20Sopenharmony_ci /* Now update local SW DB */ 19448c2ecf20Sopenharmony_ci /* Only copy the data portion of info buffer */ 19458c2ecf20Sopenharmony_ci node->info.data = info->data; 19468c2ecf20Sopenharmony_ci return status; 19478c2ecf20Sopenharmony_ci} 19488c2ecf20Sopenharmony_ci 19498c2ecf20Sopenharmony_ci/** 19508c2ecf20Sopenharmony_ci * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params 19518c2ecf20Sopenharmony_ci * @hw: pointer to the HW struct 19528c2ecf20Sopenharmony_ci * @node: sched node to configure 19538c2ecf20Sopenharmony_ci * @rl_type: rate limit type CIR, EIR, or shared 19548c2ecf20Sopenharmony_ci * @bw_alloc: BW weight/allocation 19558c2ecf20Sopenharmony_ci * 19568c2ecf20Sopenharmony_ci * This function configures node element's BW allocation. 
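 * Only ICE_MIN_BW (CIR) and ICE_MAX_BW (EIR) are accepted here; any
 * other rl_type is rejected with ICE_ERR_PARAM before the element is
 * touched.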
19578c2ecf20Sopenharmony_ci */ 19588c2ecf20Sopenharmony_cistatic enum ice_status 19598c2ecf20Sopenharmony_ciice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, 19608c2ecf20Sopenharmony_ci enum ice_rl_type rl_type, u16 bw_alloc) 19618c2ecf20Sopenharmony_ci{ 19628c2ecf20Sopenharmony_ci struct ice_aqc_txsched_elem_data buf; 19638c2ecf20Sopenharmony_ci struct ice_aqc_txsched_elem *data; 19648c2ecf20Sopenharmony_ci enum ice_status status; 19658c2ecf20Sopenharmony_ci 19668c2ecf20Sopenharmony_ci buf = node->info; 19678c2ecf20Sopenharmony_ci data = &buf.data; 19688c2ecf20Sopenharmony_ci if (rl_type == ICE_MIN_BW) { 19698c2ecf20Sopenharmony_ci data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; 19708c2ecf20Sopenharmony_ci data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc); 19718c2ecf20Sopenharmony_ci } else if (rl_type == ICE_MAX_BW) { 19728c2ecf20Sopenharmony_ci data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; 19738c2ecf20Sopenharmony_ci data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc); 19748c2ecf20Sopenharmony_ci } else { 19758c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 19768c2ecf20Sopenharmony_ci } 19778c2ecf20Sopenharmony_ci 19788c2ecf20Sopenharmony_ci /* Configure element */ 19798c2ecf20Sopenharmony_ci status = ice_sched_update_elem(hw, node, &buf); 19808c2ecf20Sopenharmony_ci return status; 19818c2ecf20Sopenharmony_ci} 19828c2ecf20Sopenharmony_ci 19838c2ecf20Sopenharmony_ci/** 19848c2ecf20Sopenharmony_ci * ice_set_clear_cir_bw - set or clear CIR BW 19858c2ecf20Sopenharmony_ci * @bw_t_info: bandwidth type information structure 19868c2ecf20Sopenharmony_ci * @bw: bandwidth in Kbps - Kilo bits per sec 19878c2ecf20Sopenharmony_ci * 19888c2ecf20Sopenharmony_ci * Save or clear CIR bandwidth (BW) in the passed param bw_t_info. 19898c2ecf20Sopenharmony_ci */ 19908c2ecf20Sopenharmony_cistatic void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 19918c2ecf20Sopenharmony_ci{ 19928c2ecf20Sopenharmony_ci if (bw == ICE_SCHED_DFLT_BW) { 19938c2ecf20Sopenharmony_ci clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); 19948c2ecf20Sopenharmony_ci bw_t_info->cir_bw.bw = 0; 19958c2ecf20Sopenharmony_ci } else { 19968c2ecf20Sopenharmony_ci /* Save type of BW information */ 19978c2ecf20Sopenharmony_ci set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); 19988c2ecf20Sopenharmony_ci bw_t_info->cir_bw.bw = bw; 19998c2ecf20Sopenharmony_ci } 20008c2ecf20Sopenharmony_ci} 20018c2ecf20Sopenharmony_ci 20028c2ecf20Sopenharmony_ci/** 20038c2ecf20Sopenharmony_ci * ice_set_clear_eir_bw - set or clear EIR BW 20048c2ecf20Sopenharmony_ci * @bw_t_info: bandwidth type information structure 20058c2ecf20Sopenharmony_ci * @bw: bandwidth in Kbps - Kilo bits per sec 20068c2ecf20Sopenharmony_ci * 20078c2ecf20Sopenharmony_ci * Save or clear EIR bandwidth (BW) in the passed param bw_t_info. 20088c2ecf20Sopenharmony_ci */ 20098c2ecf20Sopenharmony_cistatic void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 20108c2ecf20Sopenharmony_ci{ 20118c2ecf20Sopenharmony_ci if (bw == ICE_SCHED_DFLT_BW) { 20128c2ecf20Sopenharmony_ci clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); 20138c2ecf20Sopenharmony_ci bw_t_info->eir_bw.bw = 0; 20148c2ecf20Sopenharmony_ci } else { 20158c2ecf20Sopenharmony_ci /* EIR BW and Shared BW profiles are mutually exclusive and 20168c2ecf20Sopenharmony_ci * hence only one of them may be set for any given element. 20178c2ecf20Sopenharmony_ci * First clear earlier saved shared BW information. 
20188c2ecf20Sopenharmony_ci */ 20198c2ecf20Sopenharmony_ci clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); 20208c2ecf20Sopenharmony_ci bw_t_info->shared_bw = 0; 20218c2ecf20Sopenharmony_ci /* save EIR BW information */ 20228c2ecf20Sopenharmony_ci set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); 20238c2ecf20Sopenharmony_ci bw_t_info->eir_bw.bw = bw; 20248c2ecf20Sopenharmony_ci } 20258c2ecf20Sopenharmony_ci} 20268c2ecf20Sopenharmony_ci 20278c2ecf20Sopenharmony_ci/** 20288c2ecf20Sopenharmony_ci * ice_set_clear_shared_bw - set or clear shared BW 20298c2ecf20Sopenharmony_ci * @bw_t_info: bandwidth type information structure 20308c2ecf20Sopenharmony_ci * @bw: bandwidth in Kbps - Kilo bits per sec 20318c2ecf20Sopenharmony_ci * 20328c2ecf20Sopenharmony_ci * Save or clear shared bandwidth (BW) in the passed param bw_t_info. 20338c2ecf20Sopenharmony_ci */ 20348c2ecf20Sopenharmony_cistatic void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw) 20358c2ecf20Sopenharmony_ci{ 20368c2ecf20Sopenharmony_ci if (bw == ICE_SCHED_DFLT_BW) { 20378c2ecf20Sopenharmony_ci clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); 20388c2ecf20Sopenharmony_ci bw_t_info->shared_bw = 0; 20398c2ecf20Sopenharmony_ci } else { 20408c2ecf20Sopenharmony_ci /* EIR BW and Shared BW profiles are mutually exclusive and 20418c2ecf20Sopenharmony_ci * hence only one of them may be set for any given element. 20428c2ecf20Sopenharmony_ci * First clear earlier saved EIR BW information. 20438c2ecf20Sopenharmony_ci */ 20448c2ecf20Sopenharmony_ci clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); 20458c2ecf20Sopenharmony_ci bw_t_info->eir_bw.bw = 0; 20468c2ecf20Sopenharmony_ci /* save shared BW information */ 20478c2ecf20Sopenharmony_ci set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); 20488c2ecf20Sopenharmony_ci bw_t_info->shared_bw = bw; 20498c2ecf20Sopenharmony_ci } 20508c2ecf20Sopenharmony_ci} 20518c2ecf20Sopenharmony_ci 20528c2ecf20Sopenharmony_ci/** 20538c2ecf20Sopenharmony_ci * ice_sched_calc_wakeup - calculate RL profile wakeup parameter 20548c2ecf20Sopenharmony_ci * @bw: bandwidth in Kbps 20558c2ecf20Sopenharmony_ci * 20568c2ecf20Sopenharmony_ci * This function calculates the wakeup parameter of RL profile. 
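 * In outline (a sketch of the arithmetic below, using the symbolic
 * constants rather than their values):
 *
 *	bytes_per_sec = bw * 1000 / BITS_PER_BYTE
 *	wakeup_int    = ICE_RL_PROF_FREQUENCY / bytes_per_sec
 *
 * If wakeup_int is larger than 63 the result is that integer with bit 15
 * set; otherwise the integer is placed in bits 14:9 and a rounded
 * fraction, scaled by ICE_RL_PROF_FRACTION, fills bits 8:0.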
20578c2ecf20Sopenharmony_ci */ 20588c2ecf20Sopenharmony_cistatic u16 ice_sched_calc_wakeup(s32 bw) 20598c2ecf20Sopenharmony_ci{ 20608c2ecf20Sopenharmony_ci s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f; 20618c2ecf20Sopenharmony_ci s32 wakeup_f_int; 20628c2ecf20Sopenharmony_ci u16 wakeup = 0; 20638c2ecf20Sopenharmony_ci 20648c2ecf20Sopenharmony_ci /* Get the wakeup integer value */ 20658c2ecf20Sopenharmony_ci bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE); 20668c2ecf20Sopenharmony_ci wakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec); 20678c2ecf20Sopenharmony_ci if (wakeup_int > 63) { 20688c2ecf20Sopenharmony_ci wakeup = (u16)((1 << 15) | wakeup_int); 20698c2ecf20Sopenharmony_ci } else { 20708c2ecf20Sopenharmony_ci /* Calculate fraction value up to 4 decimals 20718c2ecf20Sopenharmony_ci * Convert Integer value to a constant multiplier 20728c2ecf20Sopenharmony_ci */ 20738c2ecf20Sopenharmony_ci wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int; 20748c2ecf20Sopenharmony_ci wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER * 20758c2ecf20Sopenharmony_ci ICE_RL_PROF_FREQUENCY, 20768c2ecf20Sopenharmony_ci bytes_per_sec); 20778c2ecf20Sopenharmony_ci 20788c2ecf20Sopenharmony_ci /* Get Fraction value */ 20798c2ecf20Sopenharmony_ci wakeup_f = wakeup_a - wakeup_b; 20808c2ecf20Sopenharmony_ci 20818c2ecf20Sopenharmony_ci /* Round up the Fractional value via Ceil(Fractional value) */ 20828c2ecf20Sopenharmony_ci if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2)) 20838c2ecf20Sopenharmony_ci wakeup_f += 1; 20848c2ecf20Sopenharmony_ci 20858c2ecf20Sopenharmony_ci wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION, 20868c2ecf20Sopenharmony_ci ICE_RL_PROF_MULTIPLIER); 20878c2ecf20Sopenharmony_ci wakeup |= (u16)(wakeup_int << 9); 20888c2ecf20Sopenharmony_ci wakeup |= (u16)(0x1ff & wakeup_f_int); 20898c2ecf20Sopenharmony_ci } 20908c2ecf20Sopenharmony_ci 20918c2ecf20Sopenharmony_ci return wakeup; 20928c2ecf20Sopenharmony_ci} 20938c2ecf20Sopenharmony_ci 20948c2ecf20Sopenharmony_ci/** 20958c2ecf20Sopenharmony_ci * ice_sched_bw_to_rl_profile - convert BW to profile parameters 20968c2ecf20Sopenharmony_ci * @bw: bandwidth in Kbps 20978c2ecf20Sopenharmony_ci * @profile: profile parameters to return 20988c2ecf20Sopenharmony_ci * 20998c2ecf20Sopenharmony_ci * This function converts the BW to profile structure format. 
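 * In outline (a sketch of the search loop below): for each exponent i the
 * candidate timeslice rate is ICE_RL_PROF_FREQUENCY / (2^i *
 * ICE_RL_PROF_TS_MULTIPLIER); the first i whose rounded multiplier
 * exceeds ICE_RL_PROF_ACCURACY_BYTES is stored as rl_encode, together
 * with that multiplier and the wakeup value from ice_sched_calc_wakeup().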
21008c2ecf20Sopenharmony_ci */ 21018c2ecf20Sopenharmony_cistatic enum ice_status 21028c2ecf20Sopenharmony_ciice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile) 21038c2ecf20Sopenharmony_ci{ 21048c2ecf20Sopenharmony_ci enum ice_status status = ICE_ERR_PARAM; 21058c2ecf20Sopenharmony_ci s64 bytes_per_sec, ts_rate, mv_tmp; 21068c2ecf20Sopenharmony_ci bool found = false; 21078c2ecf20Sopenharmony_ci s32 encode = 0; 21088c2ecf20Sopenharmony_ci s64 mv = 0; 21098c2ecf20Sopenharmony_ci s32 i; 21108c2ecf20Sopenharmony_ci 21118c2ecf20Sopenharmony_ci /* Bw settings range is from 0.5Mb/sec to 100Gb/sec */ 21128c2ecf20Sopenharmony_ci if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW) 21138c2ecf20Sopenharmony_ci return status; 21148c2ecf20Sopenharmony_ci 21158c2ecf20Sopenharmony_ci /* Bytes per second from Kbps */ 21168c2ecf20Sopenharmony_ci bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE); 21178c2ecf20Sopenharmony_ci 21188c2ecf20Sopenharmony_ci /* encode is 6 bits but really useful are 5 bits */ 21198c2ecf20Sopenharmony_ci for (i = 0; i < 64; i++) { 21208c2ecf20Sopenharmony_ci u64 pow_result = BIT_ULL(i); 21218c2ecf20Sopenharmony_ci 21228c2ecf20Sopenharmony_ci ts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY, 21238c2ecf20Sopenharmony_ci pow_result * ICE_RL_PROF_TS_MULTIPLIER); 21248c2ecf20Sopenharmony_ci if (ts_rate <= 0) 21258c2ecf20Sopenharmony_ci continue; 21268c2ecf20Sopenharmony_ci 21278c2ecf20Sopenharmony_ci /* Multiplier value */ 21288c2ecf20Sopenharmony_ci mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER, 21298c2ecf20Sopenharmony_ci ts_rate); 21308c2ecf20Sopenharmony_ci 21318c2ecf20Sopenharmony_ci /* Round to the nearest ICE_RL_PROF_MULTIPLIER */ 21328c2ecf20Sopenharmony_ci mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER); 21338c2ecf20Sopenharmony_ci 21348c2ecf20Sopenharmony_ci /* First multiplier value greater than the given 21358c2ecf20Sopenharmony_ci * accuracy bytes 21368c2ecf20Sopenharmony_ci */ 21378c2ecf20Sopenharmony_ci if (mv > ICE_RL_PROF_ACCURACY_BYTES) { 21388c2ecf20Sopenharmony_ci encode = i; 21398c2ecf20Sopenharmony_ci found = true; 21408c2ecf20Sopenharmony_ci break; 21418c2ecf20Sopenharmony_ci } 21428c2ecf20Sopenharmony_ci } 21438c2ecf20Sopenharmony_ci if (found) { 21448c2ecf20Sopenharmony_ci u16 wm; 21458c2ecf20Sopenharmony_ci 21468c2ecf20Sopenharmony_ci wm = ice_sched_calc_wakeup(bw); 21478c2ecf20Sopenharmony_ci profile->rl_multiply = cpu_to_le16(mv); 21488c2ecf20Sopenharmony_ci profile->wake_up_calc = cpu_to_le16(wm); 21498c2ecf20Sopenharmony_ci profile->rl_encode = cpu_to_le16(encode); 21508c2ecf20Sopenharmony_ci status = 0; 21518c2ecf20Sopenharmony_ci } else { 21528c2ecf20Sopenharmony_ci status = ICE_ERR_DOES_NOT_EXIST; 21538c2ecf20Sopenharmony_ci } 21548c2ecf20Sopenharmony_ci 21558c2ecf20Sopenharmony_ci return status; 21568c2ecf20Sopenharmony_ci} 21578c2ecf20Sopenharmony_ci 21588c2ecf20Sopenharmony_ci/** 21598c2ecf20Sopenharmony_ci * ice_sched_add_rl_profile - add RL profile 21608c2ecf20Sopenharmony_ci * @pi: port information structure 21618c2ecf20Sopenharmony_ci * @rl_type: type of rate limit BW - min, max, or shared 21628c2ecf20Sopenharmony_ci * @bw: bandwidth in Kbps - Kilo bits per sec 21638c2ecf20Sopenharmony_ci * @layer_num: specifies in which layer to create profile 21648c2ecf20Sopenharmony_ci * 21658c2ecf20Sopenharmony_ci * This function first checks the existing list for corresponding BW 21668c2ecf20Sopenharmony_ci * parameter. 
If it exists, it returns the associated profile otherwise 21678c2ecf20Sopenharmony_ci * it creates a new rate limit profile for requested BW, and adds it to 21688c2ecf20Sopenharmony_ci * the HW DB and local list. It returns the new profile or null on error. 21698c2ecf20Sopenharmony_ci * The caller needs to hold the scheduler lock. 21708c2ecf20Sopenharmony_ci */ 21718c2ecf20Sopenharmony_cistatic struct ice_aqc_rl_profile_info * 21728c2ecf20Sopenharmony_ciice_sched_add_rl_profile(struct ice_port_info *pi, 21738c2ecf20Sopenharmony_ci enum ice_rl_type rl_type, u32 bw, u8 layer_num) 21748c2ecf20Sopenharmony_ci{ 21758c2ecf20Sopenharmony_ci struct ice_aqc_rl_profile_info *rl_prof_elem; 21768c2ecf20Sopenharmony_ci u16 profiles_added = 0, num_profiles = 1; 21778c2ecf20Sopenharmony_ci struct ice_aqc_rl_profile_elem *buf; 21788c2ecf20Sopenharmony_ci enum ice_status status; 21798c2ecf20Sopenharmony_ci struct ice_hw *hw; 21808c2ecf20Sopenharmony_ci u8 profile_type; 21818c2ecf20Sopenharmony_ci 21828c2ecf20Sopenharmony_ci if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) 21838c2ecf20Sopenharmony_ci return NULL; 21848c2ecf20Sopenharmony_ci switch (rl_type) { 21858c2ecf20Sopenharmony_ci case ICE_MIN_BW: 21868c2ecf20Sopenharmony_ci profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; 21878c2ecf20Sopenharmony_ci break; 21888c2ecf20Sopenharmony_ci case ICE_MAX_BW: 21898c2ecf20Sopenharmony_ci profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; 21908c2ecf20Sopenharmony_ci break; 21918c2ecf20Sopenharmony_ci case ICE_SHARED_BW: 21928c2ecf20Sopenharmony_ci profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; 21938c2ecf20Sopenharmony_ci break; 21948c2ecf20Sopenharmony_ci default: 21958c2ecf20Sopenharmony_ci return NULL; 21968c2ecf20Sopenharmony_ci } 21978c2ecf20Sopenharmony_ci 21988c2ecf20Sopenharmony_ci if (!pi) 21998c2ecf20Sopenharmony_ci return NULL; 22008c2ecf20Sopenharmony_ci hw = pi->hw; 22018c2ecf20Sopenharmony_ci list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], 22028c2ecf20Sopenharmony_ci list_entry) 22038c2ecf20Sopenharmony_ci if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == 22048c2ecf20Sopenharmony_ci profile_type && rl_prof_elem->bw == bw) 22058c2ecf20Sopenharmony_ci /* Return existing profile ID info */ 22068c2ecf20Sopenharmony_ci return rl_prof_elem; 22078c2ecf20Sopenharmony_ci 22088c2ecf20Sopenharmony_ci /* Create new profile ID */ 22098c2ecf20Sopenharmony_ci rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem), 22108c2ecf20Sopenharmony_ci GFP_KERNEL); 22118c2ecf20Sopenharmony_ci 22128c2ecf20Sopenharmony_ci if (!rl_prof_elem) 22138c2ecf20Sopenharmony_ci return NULL; 22148c2ecf20Sopenharmony_ci 22158c2ecf20Sopenharmony_ci status = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile); 22168c2ecf20Sopenharmony_ci if (status) 22178c2ecf20Sopenharmony_ci goto exit_add_rl_prof; 22188c2ecf20Sopenharmony_ci 22198c2ecf20Sopenharmony_ci rl_prof_elem->bw = bw; 22208c2ecf20Sopenharmony_ci /* layer_num is zero relative, and fw expects level from 1 to 9 */ 22218c2ecf20Sopenharmony_ci rl_prof_elem->profile.level = layer_num + 1; 22228c2ecf20Sopenharmony_ci rl_prof_elem->profile.flags = profile_type; 22238c2ecf20Sopenharmony_ci rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size); 22248c2ecf20Sopenharmony_ci 22258c2ecf20Sopenharmony_ci /* Create new entry in HW DB */ 22268c2ecf20Sopenharmony_ci buf = &rl_prof_elem->profile; 22278c2ecf20Sopenharmony_ci status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf), 22288c2ecf20Sopenharmony_ci &profiles_added, NULL); 
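	/* On success the firmware echoes this buffer back with the
	 * profile_id field filled in; that ID is what later gets written
	 * into node elements (see ice_sched_set_node_bw()).
	 */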
22298c2ecf20Sopenharmony_ci if (status || profiles_added != num_profiles) 22308c2ecf20Sopenharmony_ci goto exit_add_rl_prof; 22318c2ecf20Sopenharmony_ci 22328c2ecf20Sopenharmony_ci /* Good entry - add in the list */ 22338c2ecf20Sopenharmony_ci rl_prof_elem->prof_id_ref = 0; 22348c2ecf20Sopenharmony_ci list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]); 22358c2ecf20Sopenharmony_ci return rl_prof_elem; 22368c2ecf20Sopenharmony_ci 22378c2ecf20Sopenharmony_ciexit_add_rl_prof: 22388c2ecf20Sopenharmony_ci devm_kfree(ice_hw_to_dev(hw), rl_prof_elem); 22398c2ecf20Sopenharmony_ci return NULL; 22408c2ecf20Sopenharmony_ci} 22418c2ecf20Sopenharmony_ci 22428c2ecf20Sopenharmony_ci/** 22438c2ecf20Sopenharmony_ci * ice_sched_cfg_node_bw_lmt - configure node sched params 22448c2ecf20Sopenharmony_ci * @hw: pointer to the HW struct 22458c2ecf20Sopenharmony_ci * @node: sched node to configure 22468c2ecf20Sopenharmony_ci * @rl_type: rate limit type CIR, EIR, or shared 22478c2ecf20Sopenharmony_ci * @rl_prof_id: rate limit profile ID 22488c2ecf20Sopenharmony_ci * 22498c2ecf20Sopenharmony_ci * This function configures node element's BW limit. 22508c2ecf20Sopenharmony_ci */ 22518c2ecf20Sopenharmony_cistatic enum ice_status 22528c2ecf20Sopenharmony_ciice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, 22538c2ecf20Sopenharmony_ci enum ice_rl_type rl_type, u16 rl_prof_id) 22548c2ecf20Sopenharmony_ci{ 22558c2ecf20Sopenharmony_ci struct ice_aqc_txsched_elem_data buf; 22568c2ecf20Sopenharmony_ci struct ice_aqc_txsched_elem *data; 22578c2ecf20Sopenharmony_ci 22588c2ecf20Sopenharmony_ci buf = node->info; 22598c2ecf20Sopenharmony_ci data = &buf.data; 22608c2ecf20Sopenharmony_ci switch (rl_type) { 22618c2ecf20Sopenharmony_ci case ICE_MIN_BW: 22628c2ecf20Sopenharmony_ci data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; 22638c2ecf20Sopenharmony_ci data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); 22648c2ecf20Sopenharmony_ci break; 22658c2ecf20Sopenharmony_ci case ICE_MAX_BW: 22668c2ecf20Sopenharmony_ci /* EIR BW and Shared BW profiles are mutually exclusive and 22678c2ecf20Sopenharmony_ci * hence only one of them may be set for any given element 22688c2ecf20Sopenharmony_ci */ 22698c2ecf20Sopenharmony_ci if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) 22708c2ecf20Sopenharmony_ci return ICE_ERR_CFG; 22718c2ecf20Sopenharmony_ci data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; 22728c2ecf20Sopenharmony_ci data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); 22738c2ecf20Sopenharmony_ci break; 22748c2ecf20Sopenharmony_ci case ICE_SHARED_BW: 22758c2ecf20Sopenharmony_ci /* Check for removing shared BW */ 22768c2ecf20Sopenharmony_ci if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) { 22778c2ecf20Sopenharmony_ci /* remove shared profile */ 22788c2ecf20Sopenharmony_ci data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED; 22798c2ecf20Sopenharmony_ci data->srl_id = 0; /* clear SRL field */ 22808c2ecf20Sopenharmony_ci 22818c2ecf20Sopenharmony_ci /* enable back EIR to default profile */ 22828c2ecf20Sopenharmony_ci data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; 22838c2ecf20Sopenharmony_ci data->eir_bw.bw_profile_idx = 22848c2ecf20Sopenharmony_ci cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 22858c2ecf20Sopenharmony_ci break; 22868c2ecf20Sopenharmony_ci } 22878c2ecf20Sopenharmony_ci /* EIR BW and Shared BW profiles are mutually exclusive and 22888c2ecf20Sopenharmony_ci * hence only one of them may be set for any given element 22898c2ecf20Sopenharmony_ci */ 22908c2ecf20Sopenharmony_ci if 
((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) && 22918c2ecf20Sopenharmony_ci (le16_to_cpu(data->eir_bw.bw_profile_idx) != 22928c2ecf20Sopenharmony_ci ICE_SCHED_DFLT_RL_PROF_ID)) 22938c2ecf20Sopenharmony_ci return ICE_ERR_CFG; 22948c2ecf20Sopenharmony_ci /* EIR BW is set to default, disable it */ 22958c2ecf20Sopenharmony_ci data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR; 22968c2ecf20Sopenharmony_ci /* Okay to enable shared BW now */ 22978c2ecf20Sopenharmony_ci data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED; 22988c2ecf20Sopenharmony_ci data->srl_id = cpu_to_le16(rl_prof_id); 22998c2ecf20Sopenharmony_ci break; 23008c2ecf20Sopenharmony_ci default: 23018c2ecf20Sopenharmony_ci /* Unknown rate limit type */ 23028c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 23038c2ecf20Sopenharmony_ci } 23048c2ecf20Sopenharmony_ci 23058c2ecf20Sopenharmony_ci /* Configure element */ 23068c2ecf20Sopenharmony_ci return ice_sched_update_elem(hw, node, &buf); 23078c2ecf20Sopenharmony_ci} 23088c2ecf20Sopenharmony_ci 23098c2ecf20Sopenharmony_ci/** 23108c2ecf20Sopenharmony_ci * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID 23118c2ecf20Sopenharmony_ci * @node: sched node 23128c2ecf20Sopenharmony_ci * @rl_type: rate limit type 23138c2ecf20Sopenharmony_ci * 23148c2ecf20Sopenharmony_ci * If existing profile matches, it returns the corresponding rate 23158c2ecf20Sopenharmony_ci * limit profile ID, otherwise it returns an invalid ID as error. 23168c2ecf20Sopenharmony_ci */ 23178c2ecf20Sopenharmony_cistatic u16 23188c2ecf20Sopenharmony_ciice_sched_get_node_rl_prof_id(struct ice_sched_node *node, 23198c2ecf20Sopenharmony_ci enum ice_rl_type rl_type) 23208c2ecf20Sopenharmony_ci{ 23218c2ecf20Sopenharmony_ci u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID; 23228c2ecf20Sopenharmony_ci struct ice_aqc_txsched_elem *data; 23238c2ecf20Sopenharmony_ci 23248c2ecf20Sopenharmony_ci data = &node->info.data; 23258c2ecf20Sopenharmony_ci switch (rl_type) { 23268c2ecf20Sopenharmony_ci case ICE_MIN_BW: 23278c2ecf20Sopenharmony_ci if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR) 23288c2ecf20Sopenharmony_ci rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx); 23298c2ecf20Sopenharmony_ci break; 23308c2ecf20Sopenharmony_ci case ICE_MAX_BW: 23318c2ecf20Sopenharmony_ci if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR) 23328c2ecf20Sopenharmony_ci rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx); 23338c2ecf20Sopenharmony_ci break; 23348c2ecf20Sopenharmony_ci case ICE_SHARED_BW: 23358c2ecf20Sopenharmony_ci if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) 23368c2ecf20Sopenharmony_ci rl_prof_id = le16_to_cpu(data->srl_id); 23378c2ecf20Sopenharmony_ci break; 23388c2ecf20Sopenharmony_ci default: 23398c2ecf20Sopenharmony_ci break; 23408c2ecf20Sopenharmony_ci } 23418c2ecf20Sopenharmony_ci 23428c2ecf20Sopenharmony_ci return rl_prof_id; 23438c2ecf20Sopenharmony_ci} 23448c2ecf20Sopenharmony_ci 23458c2ecf20Sopenharmony_ci/** 23468c2ecf20Sopenharmony_ci * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer 23478c2ecf20Sopenharmony_ci * @pi: port information structure 23488c2ecf20Sopenharmony_ci * @rl_type: type of rate limit BW - min, max, or shared 23498c2ecf20Sopenharmony_ci * @layer_index: layer index 23508c2ecf20Sopenharmony_ci * 23518c2ecf20Sopenharmony_ci * This function returns requested profile creation layer. 
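 *
 * For CIR and EIR the requested layer is returned only if it advertises a
 * non-zero profile count for that type. For SRL, a neighbouring layer
 * (one up or one down) may be returned when the requested layer has no
 * SRL profiles, so callers must not assume the result equals @layer_index.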
23528c2ecf20Sopenharmony_ci */ 23538c2ecf20Sopenharmony_cistatic u8 23548c2ecf20Sopenharmony_ciice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type, 23558c2ecf20Sopenharmony_ci u8 layer_index) 23568c2ecf20Sopenharmony_ci{ 23578c2ecf20Sopenharmony_ci struct ice_hw *hw = pi->hw; 23588c2ecf20Sopenharmony_ci 23598c2ecf20Sopenharmony_ci if (layer_index >= hw->num_tx_sched_layers) 23608c2ecf20Sopenharmony_ci return ICE_SCHED_INVAL_LAYER_NUM; 23618c2ecf20Sopenharmony_ci switch (rl_type) { 23628c2ecf20Sopenharmony_ci case ICE_MIN_BW: 23638c2ecf20Sopenharmony_ci if (hw->layer_info[layer_index].max_cir_rl_profiles) 23648c2ecf20Sopenharmony_ci return layer_index; 23658c2ecf20Sopenharmony_ci break; 23668c2ecf20Sopenharmony_ci case ICE_MAX_BW: 23678c2ecf20Sopenharmony_ci if (hw->layer_info[layer_index].max_eir_rl_profiles) 23688c2ecf20Sopenharmony_ci return layer_index; 23698c2ecf20Sopenharmony_ci break; 23708c2ecf20Sopenharmony_ci case ICE_SHARED_BW: 23718c2ecf20Sopenharmony_ci /* if current layer doesn't support SRL profile creation 23728c2ecf20Sopenharmony_ci * then try a layer up or down. 23738c2ecf20Sopenharmony_ci */ 23748c2ecf20Sopenharmony_ci if (hw->layer_info[layer_index].max_srl_profiles) 23758c2ecf20Sopenharmony_ci return layer_index; 23768c2ecf20Sopenharmony_ci else if (layer_index < hw->num_tx_sched_layers - 1 && 23778c2ecf20Sopenharmony_ci hw->layer_info[layer_index + 1].max_srl_profiles) 23788c2ecf20Sopenharmony_ci return layer_index + 1; 23798c2ecf20Sopenharmony_ci else if (layer_index > 0 && 23808c2ecf20Sopenharmony_ci hw->layer_info[layer_index - 1].max_srl_profiles) 23818c2ecf20Sopenharmony_ci return layer_index - 1; 23828c2ecf20Sopenharmony_ci break; 23838c2ecf20Sopenharmony_ci default: 23848c2ecf20Sopenharmony_ci break; 23858c2ecf20Sopenharmony_ci } 23868c2ecf20Sopenharmony_ci return ICE_SCHED_INVAL_LAYER_NUM; 23878c2ecf20Sopenharmony_ci} 23888c2ecf20Sopenharmony_ci 23898c2ecf20Sopenharmony_ci/** 23908c2ecf20Sopenharmony_ci * ice_sched_get_srl_node - get shared rate limit node 23918c2ecf20Sopenharmony_ci * @node: tree node 23928c2ecf20Sopenharmony_ci * @srl_layer: shared rate limit layer 23938c2ecf20Sopenharmony_ci * 23948c2ecf20Sopenharmony_ci * This function returns SRL node to be used for shared rate limit purpose. 23958c2ecf20Sopenharmony_ci * The caller needs to hold scheduler lock. 23968c2ecf20Sopenharmony_ci */ 23978c2ecf20Sopenharmony_cistatic struct ice_sched_node * 23988c2ecf20Sopenharmony_ciice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) 23998c2ecf20Sopenharmony_ci{ 24008c2ecf20Sopenharmony_ci if (srl_layer > node->tx_sched_layer) 24018c2ecf20Sopenharmony_ci return node->children[0]; 24028c2ecf20Sopenharmony_ci else if (srl_layer < node->tx_sched_layer) 24038c2ecf20Sopenharmony_ci /* Node can't be created without a parent. It will always 24048c2ecf20Sopenharmony_ci * have a valid parent except root node. 
24058c2ecf20Sopenharmony_ci */ 24068c2ecf20Sopenharmony_ci return node->parent; 24078c2ecf20Sopenharmony_ci else 24088c2ecf20Sopenharmony_ci return node; 24098c2ecf20Sopenharmony_ci} 24108c2ecf20Sopenharmony_ci 24118c2ecf20Sopenharmony_ci/** 24128c2ecf20Sopenharmony_ci * ice_sched_rm_rl_profile - remove RL profile ID 24138c2ecf20Sopenharmony_ci * @pi: port information structure 24148c2ecf20Sopenharmony_ci * @layer_num: layer number where profiles are saved 24158c2ecf20Sopenharmony_ci * @profile_type: profile type like EIR, CIR, or SRL 24168c2ecf20Sopenharmony_ci * @profile_id: profile ID to remove 24178c2ecf20Sopenharmony_ci * 24188c2ecf20Sopenharmony_ci * This function removes rate limit profile from layer 'layer_num' of type 24198c2ecf20Sopenharmony_ci * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold 24208c2ecf20Sopenharmony_ci * scheduler lock. 24218c2ecf20Sopenharmony_ci */ 24228c2ecf20Sopenharmony_cistatic enum ice_status 24238c2ecf20Sopenharmony_ciice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, 24248c2ecf20Sopenharmony_ci u16 profile_id) 24258c2ecf20Sopenharmony_ci{ 24268c2ecf20Sopenharmony_ci struct ice_aqc_rl_profile_info *rl_prof_elem; 24278c2ecf20Sopenharmony_ci enum ice_status status = 0; 24288c2ecf20Sopenharmony_ci 24298c2ecf20Sopenharmony_ci if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) 24308c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 24318c2ecf20Sopenharmony_ci /* Check the existing list for RL profile */ 24328c2ecf20Sopenharmony_ci list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], 24338c2ecf20Sopenharmony_ci list_entry) 24348c2ecf20Sopenharmony_ci if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == 24358c2ecf20Sopenharmony_ci profile_type && 24368c2ecf20Sopenharmony_ci le16_to_cpu(rl_prof_elem->profile.profile_id) == 24378c2ecf20Sopenharmony_ci profile_id) { 24388c2ecf20Sopenharmony_ci if (rl_prof_elem->prof_id_ref) 24398c2ecf20Sopenharmony_ci rl_prof_elem->prof_id_ref--; 24408c2ecf20Sopenharmony_ci 24418c2ecf20Sopenharmony_ci /* Remove old profile ID from database */ 24428c2ecf20Sopenharmony_ci status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem); 24438c2ecf20Sopenharmony_ci if (status && status != ICE_ERR_IN_USE) 24448c2ecf20Sopenharmony_ci ice_debug(pi->hw, ICE_DBG_SCHED, 24458c2ecf20Sopenharmony_ci "Remove rl profile failed\n"); 24468c2ecf20Sopenharmony_ci break; 24478c2ecf20Sopenharmony_ci } 24488c2ecf20Sopenharmony_ci if (status == ICE_ERR_IN_USE) 24498c2ecf20Sopenharmony_ci status = 0; 24508c2ecf20Sopenharmony_ci return status; 24518c2ecf20Sopenharmony_ci} 24528c2ecf20Sopenharmony_ci 24538c2ecf20Sopenharmony_ci/** 24548c2ecf20Sopenharmony_ci * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default 24558c2ecf20Sopenharmony_ci * @pi: port information structure 24568c2ecf20Sopenharmony_ci * @node: pointer to node structure 24578c2ecf20Sopenharmony_ci * @rl_type: rate limit type min, max, or shared 24588c2ecf20Sopenharmony_ci * @layer_num: layer number where RL profiles are saved 24598c2ecf20Sopenharmony_ci * 24608c2ecf20Sopenharmony_ci * This function configures node element's BW rate limit profile ID of 24618c2ecf20Sopenharmony_ci * type CIR, EIR, or SRL to default. This function needs to be called 24628c2ecf20Sopenharmony_ci * with the scheduler lock held. 
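 *
 * Besides programming the default (or no-shared) profile ID, this also
 * releases the node's previous non-default profile from the per-layer
 * profile list via ice_sched_rm_rl_profile().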
24638c2ecf20Sopenharmony_ci */ 24648c2ecf20Sopenharmony_cistatic enum ice_status 24658c2ecf20Sopenharmony_ciice_sched_set_node_bw_dflt(struct ice_port_info *pi, 24668c2ecf20Sopenharmony_ci struct ice_sched_node *node, 24678c2ecf20Sopenharmony_ci enum ice_rl_type rl_type, u8 layer_num) 24688c2ecf20Sopenharmony_ci{ 24698c2ecf20Sopenharmony_ci enum ice_status status; 24708c2ecf20Sopenharmony_ci struct ice_hw *hw; 24718c2ecf20Sopenharmony_ci u8 profile_type; 24728c2ecf20Sopenharmony_ci u16 rl_prof_id; 24738c2ecf20Sopenharmony_ci u16 old_id; 24748c2ecf20Sopenharmony_ci 24758c2ecf20Sopenharmony_ci hw = pi->hw; 24768c2ecf20Sopenharmony_ci switch (rl_type) { 24778c2ecf20Sopenharmony_ci case ICE_MIN_BW: 24788c2ecf20Sopenharmony_ci profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; 24798c2ecf20Sopenharmony_ci rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; 24808c2ecf20Sopenharmony_ci break; 24818c2ecf20Sopenharmony_ci case ICE_MAX_BW: 24828c2ecf20Sopenharmony_ci profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; 24838c2ecf20Sopenharmony_ci rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; 24848c2ecf20Sopenharmony_ci break; 24858c2ecf20Sopenharmony_ci case ICE_SHARED_BW: 24868c2ecf20Sopenharmony_ci profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; 24878c2ecf20Sopenharmony_ci /* No SRL is configured for default case */ 24888c2ecf20Sopenharmony_ci rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; 24898c2ecf20Sopenharmony_ci break; 24908c2ecf20Sopenharmony_ci default: 24918c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 24928c2ecf20Sopenharmony_ci } 24938c2ecf20Sopenharmony_ci /* Save existing RL prof ID for later clean up */ 24948c2ecf20Sopenharmony_ci old_id = ice_sched_get_node_rl_prof_id(node, rl_type); 24958c2ecf20Sopenharmony_ci /* Configure BW scheduling parameters */ 24968c2ecf20Sopenharmony_ci status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); 24978c2ecf20Sopenharmony_ci if (status) 24988c2ecf20Sopenharmony_ci return status; 24998c2ecf20Sopenharmony_ci 25008c2ecf20Sopenharmony_ci /* Remove stale RL profile ID */ 25018c2ecf20Sopenharmony_ci if (old_id == ICE_SCHED_DFLT_RL_PROF_ID || 25028c2ecf20Sopenharmony_ci old_id == ICE_SCHED_INVAL_PROF_ID) 25038c2ecf20Sopenharmony_ci return 0; 25048c2ecf20Sopenharmony_ci 25058c2ecf20Sopenharmony_ci return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id); 25068c2ecf20Sopenharmony_ci} 25078c2ecf20Sopenharmony_ci 25088c2ecf20Sopenharmony_ci/** 25098c2ecf20Sopenharmony_ci * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness 25108c2ecf20Sopenharmony_ci * @pi: port information structure 25118c2ecf20Sopenharmony_ci * @node: pointer to node structure 25128c2ecf20Sopenharmony_ci * @layer_num: layer number where rate limit profiles are saved 25138c2ecf20Sopenharmony_ci * @rl_type: rate limit type min, max, or shared 25148c2ecf20Sopenharmony_ci * @bw: bandwidth value 25158c2ecf20Sopenharmony_ci * 25168c2ecf20Sopenharmony_ci * This function prepares node element's bandwidth to SRL or EIR exclusively. 25178c2ecf20Sopenharmony_ci * EIR BW and Shared BW profiles are mutually exclusive and hence only one of 25188c2ecf20Sopenharmony_ci * them may be set for any given element. This function needs to be called 25198c2ecf20Sopenharmony_ci * with the scheduler lock held. 
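 *
 * Concretely: when a shared (SRL) profile is being installed, the node's
 * EIR is reset to the default profile here (ice_sched_cfg_node_bw_lmt()
 * then disables EIR when it programs the SRL); when a max (EIR) limit is
 * being configured on a node that currently has a shared profile, the
 * shared profile is removed first by setting it back to default.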
25208c2ecf20Sopenharmony_ci */ 25218c2ecf20Sopenharmony_cistatic enum ice_status 25228c2ecf20Sopenharmony_ciice_sched_set_eir_srl_excl(struct ice_port_info *pi, 25238c2ecf20Sopenharmony_ci struct ice_sched_node *node, 25248c2ecf20Sopenharmony_ci u8 layer_num, enum ice_rl_type rl_type, u32 bw) 25258c2ecf20Sopenharmony_ci{ 25268c2ecf20Sopenharmony_ci if (rl_type == ICE_SHARED_BW) { 25278c2ecf20Sopenharmony_ci /* SRL node passed in this case, it may be different node */ 25288c2ecf20Sopenharmony_ci if (bw == ICE_SCHED_DFLT_BW) 25298c2ecf20Sopenharmony_ci /* SRL being removed, ice_sched_cfg_node_bw_lmt() 25308c2ecf20Sopenharmony_ci * enables EIR to default. EIR is not set in this 25318c2ecf20Sopenharmony_ci * case, so no additional action is required. 25328c2ecf20Sopenharmony_ci */ 25338c2ecf20Sopenharmony_ci return 0; 25348c2ecf20Sopenharmony_ci 25358c2ecf20Sopenharmony_ci /* SRL being configured, set EIR to default here. 25368c2ecf20Sopenharmony_ci * ice_sched_cfg_node_bw_lmt() disables EIR when it 25378c2ecf20Sopenharmony_ci * configures SRL 25388c2ecf20Sopenharmony_ci */ 25398c2ecf20Sopenharmony_ci return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW, 25408c2ecf20Sopenharmony_ci layer_num); 25418c2ecf20Sopenharmony_ci } else if (rl_type == ICE_MAX_BW && 25428c2ecf20Sopenharmony_ci node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) { 25438c2ecf20Sopenharmony_ci /* Remove Shared profile. Set default shared BW call 25448c2ecf20Sopenharmony_ci * removes shared profile for a node. 25458c2ecf20Sopenharmony_ci */ 25468c2ecf20Sopenharmony_ci return ice_sched_set_node_bw_dflt(pi, node, 25478c2ecf20Sopenharmony_ci ICE_SHARED_BW, 25488c2ecf20Sopenharmony_ci layer_num); 25498c2ecf20Sopenharmony_ci } 25508c2ecf20Sopenharmony_ci return 0; 25518c2ecf20Sopenharmony_ci} 25528c2ecf20Sopenharmony_ci 25538c2ecf20Sopenharmony_ci/** 25548c2ecf20Sopenharmony_ci * ice_sched_set_node_bw - set node's bandwidth 25558c2ecf20Sopenharmony_ci * @pi: port information structure 25568c2ecf20Sopenharmony_ci * @node: tree node 25578c2ecf20Sopenharmony_ci * @rl_type: rate limit type min, max, or shared 25588c2ecf20Sopenharmony_ci * @bw: bandwidth in Kbps - Kilo bits per sec 25598c2ecf20Sopenharmony_ci * @layer_num: layer number 25608c2ecf20Sopenharmony_ci * 25618c2ecf20Sopenharmony_ci * This function adds new profile corresponding to requested BW, configures 25628c2ecf20Sopenharmony_ci * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile 25638c2ecf20Sopenharmony_ci * ID from local database. The caller needs to hold scheduler lock. 
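 *
 * The sequence is: look up or create an RL profile for @bw with
 * ice_sched_add_rl_profile(), program its profile ID into the node via
 * ice_sched_cfg_node_bw_lmt(), increment the profile's reference count,
 * and finally release the node's previous profile ID unless it was the
 * default, invalid, or the same as the new one.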
25648c2ecf20Sopenharmony_ci */ 25658c2ecf20Sopenharmony_cistatic enum ice_status 25668c2ecf20Sopenharmony_ciice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, 25678c2ecf20Sopenharmony_ci enum ice_rl_type rl_type, u32 bw, u8 layer_num) 25688c2ecf20Sopenharmony_ci{ 25698c2ecf20Sopenharmony_ci struct ice_aqc_rl_profile_info *rl_prof_info; 25708c2ecf20Sopenharmony_ci enum ice_status status = ICE_ERR_PARAM; 25718c2ecf20Sopenharmony_ci struct ice_hw *hw = pi->hw; 25728c2ecf20Sopenharmony_ci u16 old_id, rl_prof_id; 25738c2ecf20Sopenharmony_ci 25748c2ecf20Sopenharmony_ci rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num); 25758c2ecf20Sopenharmony_ci if (!rl_prof_info) 25768c2ecf20Sopenharmony_ci return status; 25778c2ecf20Sopenharmony_ci 25788c2ecf20Sopenharmony_ci rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id); 25798c2ecf20Sopenharmony_ci 25808c2ecf20Sopenharmony_ci /* Save existing RL prof ID for later clean up */ 25818c2ecf20Sopenharmony_ci old_id = ice_sched_get_node_rl_prof_id(node, rl_type); 25828c2ecf20Sopenharmony_ci /* Configure BW scheduling parameters */ 25838c2ecf20Sopenharmony_ci status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); 25848c2ecf20Sopenharmony_ci if (status) 25858c2ecf20Sopenharmony_ci return status; 25868c2ecf20Sopenharmony_ci 25878c2ecf20Sopenharmony_ci /* New changes has been applied */ 25888c2ecf20Sopenharmony_ci /* Increment the profile ID reference count */ 25898c2ecf20Sopenharmony_ci rl_prof_info->prof_id_ref++; 25908c2ecf20Sopenharmony_ci 25918c2ecf20Sopenharmony_ci /* Check for old ID removal */ 25928c2ecf20Sopenharmony_ci if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) || 25938c2ecf20Sopenharmony_ci old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id) 25948c2ecf20Sopenharmony_ci return 0; 25958c2ecf20Sopenharmony_ci 25968c2ecf20Sopenharmony_ci return ice_sched_rm_rl_profile(pi, layer_num, 25978c2ecf20Sopenharmony_ci rl_prof_info->profile.flags & 25988c2ecf20Sopenharmony_ci ICE_AQC_RL_PROFILE_TYPE_M, old_id); 25998c2ecf20Sopenharmony_ci} 26008c2ecf20Sopenharmony_ci 26018c2ecf20Sopenharmony_ci/** 26028c2ecf20Sopenharmony_ci * ice_sched_set_node_bw_lmt - set node's BW limit 26038c2ecf20Sopenharmony_ci * @pi: port information structure 26048c2ecf20Sopenharmony_ci * @node: tree node 26058c2ecf20Sopenharmony_ci * @rl_type: rate limit type min, max, or shared 26068c2ecf20Sopenharmony_ci * @bw: bandwidth in Kbps - Kilo bits per sec 26078c2ecf20Sopenharmony_ci * 26088c2ecf20Sopenharmony_ci * It updates node's BW limit parameters like BW RL profile ID of type CIR, 26098c2ecf20Sopenharmony_ci * EIR, or SRL. The caller needs to hold scheduler lock. 
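 *
 * Note that for shared (SRL) limits the element actually programmed may
 * differ from @node: ice_sched_get_srl_node() may pick the parent or the
 * first child when the SRL-capable layer differs from the node's own
 * layer.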
26108c2ecf20Sopenharmony_ci */ 26118c2ecf20Sopenharmony_cistatic enum ice_status 26128c2ecf20Sopenharmony_ciice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, 26138c2ecf20Sopenharmony_ci enum ice_rl_type rl_type, u32 bw) 26148c2ecf20Sopenharmony_ci{ 26158c2ecf20Sopenharmony_ci struct ice_sched_node *cfg_node = node; 26168c2ecf20Sopenharmony_ci enum ice_status status; 26178c2ecf20Sopenharmony_ci 26188c2ecf20Sopenharmony_ci struct ice_hw *hw; 26198c2ecf20Sopenharmony_ci u8 layer_num; 26208c2ecf20Sopenharmony_ci 26218c2ecf20Sopenharmony_ci if (!pi) 26228c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 26238c2ecf20Sopenharmony_ci hw = pi->hw; 26248c2ecf20Sopenharmony_ci /* Remove unused RL profile IDs from HW and SW DB */ 26258c2ecf20Sopenharmony_ci ice_sched_rm_unused_rl_prof(pi); 26268c2ecf20Sopenharmony_ci layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, 26278c2ecf20Sopenharmony_ci node->tx_sched_layer); 26288c2ecf20Sopenharmony_ci if (layer_num >= hw->num_tx_sched_layers) 26298c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 26308c2ecf20Sopenharmony_ci 26318c2ecf20Sopenharmony_ci if (rl_type == ICE_SHARED_BW) { 26328c2ecf20Sopenharmony_ci /* SRL node may be different */ 26338c2ecf20Sopenharmony_ci cfg_node = ice_sched_get_srl_node(node, layer_num); 26348c2ecf20Sopenharmony_ci if (!cfg_node) 26358c2ecf20Sopenharmony_ci return ICE_ERR_CFG; 26368c2ecf20Sopenharmony_ci } 26378c2ecf20Sopenharmony_ci /* EIR BW and Shared BW profiles are mutually exclusive and 26388c2ecf20Sopenharmony_ci * hence only one of them may be set for any given element 26398c2ecf20Sopenharmony_ci */ 26408c2ecf20Sopenharmony_ci status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type, 26418c2ecf20Sopenharmony_ci bw); 26428c2ecf20Sopenharmony_ci if (status) 26438c2ecf20Sopenharmony_ci return status; 26448c2ecf20Sopenharmony_ci if (bw == ICE_SCHED_DFLT_BW) 26458c2ecf20Sopenharmony_ci return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type, 26468c2ecf20Sopenharmony_ci layer_num); 26478c2ecf20Sopenharmony_ci return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num); 26488c2ecf20Sopenharmony_ci} 26498c2ecf20Sopenharmony_ci 26508c2ecf20Sopenharmony_ci/** 26518c2ecf20Sopenharmony_ci * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default 26528c2ecf20Sopenharmony_ci * @pi: port information structure 26538c2ecf20Sopenharmony_ci * @node: pointer to node structure 26548c2ecf20Sopenharmony_ci * @rl_type: rate limit type min, max, or shared 26558c2ecf20Sopenharmony_ci * 26568c2ecf20Sopenharmony_ci * This function configures node element's BW rate limit profile ID of 26578c2ecf20Sopenharmony_ci * type CIR, EIR, or SRL to default. This function needs to be called 26588c2ecf20Sopenharmony_ci * with the scheduler lock held. 
26598c2ecf20Sopenharmony_ci */ 26608c2ecf20Sopenharmony_cistatic enum ice_status 26618c2ecf20Sopenharmony_ciice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, 26628c2ecf20Sopenharmony_ci struct ice_sched_node *node, 26638c2ecf20Sopenharmony_ci enum ice_rl_type rl_type) 26648c2ecf20Sopenharmony_ci{ 26658c2ecf20Sopenharmony_ci return ice_sched_set_node_bw_lmt(pi, node, rl_type, 26668c2ecf20Sopenharmony_ci ICE_SCHED_DFLT_BW); 26678c2ecf20Sopenharmony_ci} 26688c2ecf20Sopenharmony_ci 26698c2ecf20Sopenharmony_ci/** 26708c2ecf20Sopenharmony_ci * ice_sched_validate_srl_node - Check node for SRL applicability 26718c2ecf20Sopenharmony_ci * @node: sched node to configure 26728c2ecf20Sopenharmony_ci * @sel_layer: selected SRL layer 26738c2ecf20Sopenharmony_ci * 26748c2ecf20Sopenharmony_ci * This function checks if the SRL can be applied to a selected layer node on 26758c2ecf20Sopenharmony_ci * behalf of the requested node (first argument). This function needs to be 26768c2ecf20Sopenharmony_ci * called with scheduler lock held. 26778c2ecf20Sopenharmony_ci */ 26788c2ecf20Sopenharmony_cistatic enum ice_status 26798c2ecf20Sopenharmony_ciice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) 26808c2ecf20Sopenharmony_ci{ 26818c2ecf20Sopenharmony_ci /* SRL profiles are not available on all layers. Check if the 26828c2ecf20Sopenharmony_ci * SRL profile can be applied to a node above or below the 26838c2ecf20Sopenharmony_ci * requested node. SRL configuration is possible only if the 26848c2ecf20Sopenharmony_ci * selected layer's node has single child. 26858c2ecf20Sopenharmony_ci */ 26868c2ecf20Sopenharmony_ci if (sel_layer == node->tx_sched_layer || 26878c2ecf20Sopenharmony_ci ((sel_layer == node->tx_sched_layer + 1) && 26888c2ecf20Sopenharmony_ci node->num_children == 1) || 26898c2ecf20Sopenharmony_ci ((sel_layer == node->tx_sched_layer - 1) && 26908c2ecf20Sopenharmony_ci (node->parent && node->parent->num_children == 1))) 26918c2ecf20Sopenharmony_ci return 0; 26928c2ecf20Sopenharmony_ci 26938c2ecf20Sopenharmony_ci return ICE_ERR_CFG; 26948c2ecf20Sopenharmony_ci} 26958c2ecf20Sopenharmony_ci 26968c2ecf20Sopenharmony_ci/** 26978c2ecf20Sopenharmony_ci * ice_sched_save_q_bw - save queue node's BW information 26988c2ecf20Sopenharmony_ci * @q_ctx: queue context structure 26998c2ecf20Sopenharmony_ci * @rl_type: rate limit type min, max, or shared 27008c2ecf20Sopenharmony_ci * @bw: bandwidth in Kbps - Kilo bits per sec 27018c2ecf20Sopenharmony_ci * 27028c2ecf20Sopenharmony_ci * Save BW information of queue type node for post replay use. 
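 *
 * The saved values are consumed later by ice_sched_replay_q_bw(), which
 * re-applies them to the queue node found by TEID.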
27038c2ecf20Sopenharmony_ci */ 27048c2ecf20Sopenharmony_cistatic enum ice_status 27058c2ecf20Sopenharmony_ciice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) 27068c2ecf20Sopenharmony_ci{ 27078c2ecf20Sopenharmony_ci switch (rl_type) { 27088c2ecf20Sopenharmony_ci case ICE_MIN_BW: 27098c2ecf20Sopenharmony_ci ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw); 27108c2ecf20Sopenharmony_ci break; 27118c2ecf20Sopenharmony_ci case ICE_MAX_BW: 27128c2ecf20Sopenharmony_ci ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw); 27138c2ecf20Sopenharmony_ci break; 27148c2ecf20Sopenharmony_ci case ICE_SHARED_BW: 27158c2ecf20Sopenharmony_ci ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw); 27168c2ecf20Sopenharmony_ci break; 27178c2ecf20Sopenharmony_ci default: 27188c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 27198c2ecf20Sopenharmony_ci } 27208c2ecf20Sopenharmony_ci return 0; 27218c2ecf20Sopenharmony_ci} 27228c2ecf20Sopenharmony_ci 27238c2ecf20Sopenharmony_ci/** 27248c2ecf20Sopenharmony_ci * ice_sched_set_q_bw_lmt - sets queue BW limit 27258c2ecf20Sopenharmony_ci * @pi: port information structure 27268c2ecf20Sopenharmony_ci * @vsi_handle: sw VSI handle 27278c2ecf20Sopenharmony_ci * @tc: traffic class 27288c2ecf20Sopenharmony_ci * @q_handle: software queue handle 27298c2ecf20Sopenharmony_ci * @rl_type: min, max, or shared 27308c2ecf20Sopenharmony_ci * @bw: bandwidth in Kbps 27318c2ecf20Sopenharmony_ci * 27328c2ecf20Sopenharmony_ci * This function sets BW limit of queue scheduling node. 27338c2ecf20Sopenharmony_ci */ 27348c2ecf20Sopenharmony_cistatic enum ice_status 27358c2ecf20Sopenharmony_ciice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 27368c2ecf20Sopenharmony_ci u16 q_handle, enum ice_rl_type rl_type, u32 bw) 27378c2ecf20Sopenharmony_ci{ 27388c2ecf20Sopenharmony_ci enum ice_status status = ICE_ERR_PARAM; 27398c2ecf20Sopenharmony_ci struct ice_sched_node *node; 27408c2ecf20Sopenharmony_ci struct ice_q_ctx *q_ctx; 27418c2ecf20Sopenharmony_ci 27428c2ecf20Sopenharmony_ci if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 27438c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 27448c2ecf20Sopenharmony_ci mutex_lock(&pi->sched_lock); 27458c2ecf20Sopenharmony_ci q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle); 27468c2ecf20Sopenharmony_ci if (!q_ctx) 27478c2ecf20Sopenharmony_ci goto exit_q_bw_lmt; 27488c2ecf20Sopenharmony_ci node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); 27498c2ecf20Sopenharmony_ci if (!node) { 27508c2ecf20Sopenharmony_ci ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n"); 27518c2ecf20Sopenharmony_ci goto exit_q_bw_lmt; 27528c2ecf20Sopenharmony_ci } 27538c2ecf20Sopenharmony_ci 27548c2ecf20Sopenharmony_ci /* Return error if it is not a leaf node */ 27558c2ecf20Sopenharmony_ci if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) 27568c2ecf20Sopenharmony_ci goto exit_q_bw_lmt; 27578c2ecf20Sopenharmony_ci 27588c2ecf20Sopenharmony_ci /* SRL bandwidth layer selection */ 27598c2ecf20Sopenharmony_ci if (rl_type == ICE_SHARED_BW) { 27608c2ecf20Sopenharmony_ci u8 sel_layer; /* selected layer */ 27618c2ecf20Sopenharmony_ci 27628c2ecf20Sopenharmony_ci sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type, 27638c2ecf20Sopenharmony_ci node->tx_sched_layer); 27648c2ecf20Sopenharmony_ci if (sel_layer >= pi->hw->num_tx_sched_layers) { 27658c2ecf20Sopenharmony_ci status = ICE_ERR_PARAM; 27668c2ecf20Sopenharmony_ci goto exit_q_bw_lmt; 27678c2ecf20Sopenharmony_ci } 27688c2ecf20Sopenharmony_ci status = ice_sched_validate_srl_node(node, sel_layer); 
27698c2ecf20Sopenharmony_ci if (status) 27708c2ecf20Sopenharmony_ci goto exit_q_bw_lmt; 27718c2ecf20Sopenharmony_ci } 27728c2ecf20Sopenharmony_ci 27738c2ecf20Sopenharmony_ci if (bw == ICE_SCHED_DFLT_BW) 27748c2ecf20Sopenharmony_ci status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); 27758c2ecf20Sopenharmony_ci else 27768c2ecf20Sopenharmony_ci status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); 27778c2ecf20Sopenharmony_ci 27788c2ecf20Sopenharmony_ci if (!status) 27798c2ecf20Sopenharmony_ci status = ice_sched_save_q_bw(q_ctx, rl_type, bw); 27808c2ecf20Sopenharmony_ci 27818c2ecf20Sopenharmony_ciexit_q_bw_lmt: 27828c2ecf20Sopenharmony_ci mutex_unlock(&pi->sched_lock); 27838c2ecf20Sopenharmony_ci return status; 27848c2ecf20Sopenharmony_ci} 27858c2ecf20Sopenharmony_ci 27868c2ecf20Sopenharmony_ci/** 27878c2ecf20Sopenharmony_ci * ice_cfg_q_bw_lmt - configure queue BW limit 27888c2ecf20Sopenharmony_ci * @pi: port information structure 27898c2ecf20Sopenharmony_ci * @vsi_handle: sw VSI handle 27908c2ecf20Sopenharmony_ci * @tc: traffic class 27918c2ecf20Sopenharmony_ci * @q_handle: software queue handle 27928c2ecf20Sopenharmony_ci * @rl_type: min, max, or shared 27938c2ecf20Sopenharmony_ci * @bw: bandwidth in Kbps 27948c2ecf20Sopenharmony_ci * 27958c2ecf20Sopenharmony_ci * This function configures BW limit of queue scheduling node. 27968c2ecf20Sopenharmony_ci */ 27978c2ecf20Sopenharmony_cienum ice_status 27988c2ecf20Sopenharmony_ciice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 27998c2ecf20Sopenharmony_ci u16 q_handle, enum ice_rl_type rl_type, u32 bw) 28008c2ecf20Sopenharmony_ci{ 28018c2ecf20Sopenharmony_ci return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, 28028c2ecf20Sopenharmony_ci bw); 28038c2ecf20Sopenharmony_ci} 28048c2ecf20Sopenharmony_ci 28058c2ecf20Sopenharmony_ci/** 28068c2ecf20Sopenharmony_ci * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit 28078c2ecf20Sopenharmony_ci * @pi: port information structure 28088c2ecf20Sopenharmony_ci * @vsi_handle: sw VSI handle 28098c2ecf20Sopenharmony_ci * @tc: traffic class 28108c2ecf20Sopenharmony_ci * @q_handle: software queue handle 28118c2ecf20Sopenharmony_ci * @rl_type: min, max, or shared 28128c2ecf20Sopenharmony_ci * 28138c2ecf20Sopenharmony_ci * This function configures BW default limit of queue scheduling node. 28148c2ecf20Sopenharmony_ci */ 28158c2ecf20Sopenharmony_cienum ice_status 28168c2ecf20Sopenharmony_ciice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 28178c2ecf20Sopenharmony_ci u16 q_handle, enum ice_rl_type rl_type) 28188c2ecf20Sopenharmony_ci{ 28198c2ecf20Sopenharmony_ci return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, 28208c2ecf20Sopenharmony_ci ICE_SCHED_DFLT_BW); 28218c2ecf20Sopenharmony_ci} 28228c2ecf20Sopenharmony_ci 28238c2ecf20Sopenharmony_ci/** 28248c2ecf20Sopenharmony_ci * ice_cfg_rl_burst_size - Set burst size value 28258c2ecf20Sopenharmony_ci * @hw: pointer to the HW struct 28268c2ecf20Sopenharmony_ci * @bytes: burst size in bytes 28278c2ecf20Sopenharmony_ci * 28288c2ecf20Sopenharmony_ci * This function configures/set the burst size to requested new value. The new 28298c2ecf20Sopenharmony_ci * burst size value is used for future rate limit calls. It doesn't change the 28308c2ecf20Sopenharmony_ci * existing or previously created RL profiles. 
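 *
 * The programmed value packs a granularity flag and a chunk count: sizes
 * within the 64 byte granularity limit are stored as bytes/64 with the
 * ICE_64_BYTE_GRANULARITY flag, larger sizes are rounded to 1024 bytes
 * and stored as bytes/1024 with the ICE_KBYTE_GRANULARITY flag, capped at
 * ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY.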
28318c2ecf20Sopenharmony_ci */ 28328c2ecf20Sopenharmony_cienum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) 28338c2ecf20Sopenharmony_ci{ 28348c2ecf20Sopenharmony_ci u16 burst_size_to_prog; 28358c2ecf20Sopenharmony_ci 28368c2ecf20Sopenharmony_ci if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || 28378c2ecf20Sopenharmony_ci bytes > ICE_MAX_BURST_SIZE_ALLOWED) 28388c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 28398c2ecf20Sopenharmony_ci if (ice_round_to_num(bytes, 64) <= 28408c2ecf20Sopenharmony_ci ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { 28418c2ecf20Sopenharmony_ci /* 64 byte granularity case */ 28428c2ecf20Sopenharmony_ci /* Disable MSB granularity bit */ 28438c2ecf20Sopenharmony_ci burst_size_to_prog = ICE_64_BYTE_GRANULARITY; 28448c2ecf20Sopenharmony_ci /* round number to nearest 64 byte granularity */ 28458c2ecf20Sopenharmony_ci bytes = ice_round_to_num(bytes, 64); 28468c2ecf20Sopenharmony_ci /* The value is in 64 byte chunks */ 28478c2ecf20Sopenharmony_ci burst_size_to_prog |= (u16)(bytes / 64); 28488c2ecf20Sopenharmony_ci } else { 28498c2ecf20Sopenharmony_ci /* k bytes granularity case */ 28508c2ecf20Sopenharmony_ci /* Enable MSB granularity bit */ 28518c2ecf20Sopenharmony_ci burst_size_to_prog = ICE_KBYTE_GRANULARITY; 28528c2ecf20Sopenharmony_ci /* round number to nearest 1024 granularity */ 28538c2ecf20Sopenharmony_ci bytes = ice_round_to_num(bytes, 1024); 28548c2ecf20Sopenharmony_ci /* check rounding doesn't go beyond allowed */ 28558c2ecf20Sopenharmony_ci if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY) 28568c2ecf20Sopenharmony_ci bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY; 28578c2ecf20Sopenharmony_ci /* The value is in k bytes */ 28588c2ecf20Sopenharmony_ci burst_size_to_prog |= (u16)(bytes / 1024); 28598c2ecf20Sopenharmony_ci } 28608c2ecf20Sopenharmony_ci hw->max_burst_size = burst_size_to_prog; 28618c2ecf20Sopenharmony_ci return 0; 28628c2ecf20Sopenharmony_ci} 28638c2ecf20Sopenharmony_ci 28648c2ecf20Sopenharmony_ci/** 28658c2ecf20Sopenharmony_ci * ice_sched_replay_node_prio - re-configure node priority 28668c2ecf20Sopenharmony_ci * @hw: pointer to the HW struct 28678c2ecf20Sopenharmony_ci * @node: sched node to configure 28688c2ecf20Sopenharmony_ci * @priority: priority value 28698c2ecf20Sopenharmony_ci * 28708c2ecf20Sopenharmony_ci * This function configures node element's priority value. It 28718c2ecf20Sopenharmony_ci * needs to be called with scheduler lock held. 
28728c2ecf20Sopenharmony_ci */ 28738c2ecf20Sopenharmony_cistatic enum ice_status 28748c2ecf20Sopenharmony_ciice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, 28758c2ecf20Sopenharmony_ci u8 priority) 28768c2ecf20Sopenharmony_ci{ 28778c2ecf20Sopenharmony_ci struct ice_aqc_txsched_elem_data buf; 28788c2ecf20Sopenharmony_ci struct ice_aqc_txsched_elem *data; 28798c2ecf20Sopenharmony_ci enum ice_status status; 28808c2ecf20Sopenharmony_ci 28818c2ecf20Sopenharmony_ci buf = node->info; 28828c2ecf20Sopenharmony_ci data = &buf.data; 28838c2ecf20Sopenharmony_ci data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; 28848c2ecf20Sopenharmony_ci data->generic = priority; 28858c2ecf20Sopenharmony_ci 28868c2ecf20Sopenharmony_ci /* Configure element */ 28878c2ecf20Sopenharmony_ci status = ice_sched_update_elem(hw, node, &buf); 28888c2ecf20Sopenharmony_ci return status; 28898c2ecf20Sopenharmony_ci} 28908c2ecf20Sopenharmony_ci 28918c2ecf20Sopenharmony_ci/** 28928c2ecf20Sopenharmony_ci * ice_sched_replay_node_bw - replay node(s) BW 28938c2ecf20Sopenharmony_ci * @hw: pointer to the HW struct 28948c2ecf20Sopenharmony_ci * @node: sched node to configure 28958c2ecf20Sopenharmony_ci * @bw_t_info: BW type information 28968c2ecf20Sopenharmony_ci * 28978c2ecf20Sopenharmony_ci * This function restores node's BW from bw_t_info. The caller needs 28988c2ecf20Sopenharmony_ci * to hold the scheduler lock. 28998c2ecf20Sopenharmony_ci */ 29008c2ecf20Sopenharmony_cistatic enum ice_status 29018c2ecf20Sopenharmony_ciice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, 29028c2ecf20Sopenharmony_ci struct ice_bw_type_info *bw_t_info) 29038c2ecf20Sopenharmony_ci{ 29048c2ecf20Sopenharmony_ci struct ice_port_info *pi = hw->port_info; 29058c2ecf20Sopenharmony_ci enum ice_status status = ICE_ERR_PARAM; 29068c2ecf20Sopenharmony_ci u16 bw_alloc; 29078c2ecf20Sopenharmony_ci 29088c2ecf20Sopenharmony_ci if (!node) 29098c2ecf20Sopenharmony_ci return status; 29108c2ecf20Sopenharmony_ci if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT)) 29118c2ecf20Sopenharmony_ci return 0; 29128c2ecf20Sopenharmony_ci if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) { 29138c2ecf20Sopenharmony_ci status = ice_sched_replay_node_prio(hw, node, 29148c2ecf20Sopenharmony_ci bw_t_info->generic); 29158c2ecf20Sopenharmony_ci if (status) 29168c2ecf20Sopenharmony_ci return status; 29178c2ecf20Sopenharmony_ci } 29188c2ecf20Sopenharmony_ci if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) { 29198c2ecf20Sopenharmony_ci status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, 29208c2ecf20Sopenharmony_ci bw_t_info->cir_bw.bw); 29218c2ecf20Sopenharmony_ci if (status) 29228c2ecf20Sopenharmony_ci return status; 29238c2ecf20Sopenharmony_ci } 29248c2ecf20Sopenharmony_ci if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) { 29258c2ecf20Sopenharmony_ci bw_alloc = bw_t_info->cir_bw.bw_alloc; 29268c2ecf20Sopenharmony_ci status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW, 29278c2ecf20Sopenharmony_ci bw_alloc); 29288c2ecf20Sopenharmony_ci if (status) 29298c2ecf20Sopenharmony_ci return status; 29308c2ecf20Sopenharmony_ci } 29318c2ecf20Sopenharmony_ci if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) { 29328c2ecf20Sopenharmony_ci status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, 29338c2ecf20Sopenharmony_ci bw_t_info->eir_bw.bw); 29348c2ecf20Sopenharmony_ci if (status) 29358c2ecf20Sopenharmony_ci return status; 29368c2ecf20Sopenharmony_ci } 29378c2ecf20Sopenharmony_ci if (test_bit(ICE_BW_TYPE_EIR_WT, 
bw_t_info->bw_t_bitmap)) { 29388c2ecf20Sopenharmony_ci bw_alloc = bw_t_info->eir_bw.bw_alloc; 29398c2ecf20Sopenharmony_ci status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW, 29408c2ecf20Sopenharmony_ci bw_alloc); 29418c2ecf20Sopenharmony_ci if (status) 29428c2ecf20Sopenharmony_ci return status; 29438c2ecf20Sopenharmony_ci } 29448c2ecf20Sopenharmony_ci if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap)) 29458c2ecf20Sopenharmony_ci status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW, 29468c2ecf20Sopenharmony_ci bw_t_info->shared_bw); 29478c2ecf20Sopenharmony_ci return status; 29488c2ecf20Sopenharmony_ci} 29498c2ecf20Sopenharmony_ci 29508c2ecf20Sopenharmony_ci/** 29518c2ecf20Sopenharmony_ci * ice_sched_replay_q_bw - replay queue type node BW 29528c2ecf20Sopenharmony_ci * @pi: port information structure 29538c2ecf20Sopenharmony_ci * @q_ctx: queue context structure 29548c2ecf20Sopenharmony_ci * 29558c2ecf20Sopenharmony_ci * This function replays queue type node bandwidth. This function needs to be 29568c2ecf20Sopenharmony_ci * called with scheduler lock held. 29578c2ecf20Sopenharmony_ci */ 29588c2ecf20Sopenharmony_cienum ice_status 29598c2ecf20Sopenharmony_ciice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) 29608c2ecf20Sopenharmony_ci{ 29618c2ecf20Sopenharmony_ci struct ice_sched_node *q_node; 29628c2ecf20Sopenharmony_ci 29638c2ecf20Sopenharmony_ci /* Following also checks the presence of node in tree */ 29648c2ecf20Sopenharmony_ci q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); 29658c2ecf20Sopenharmony_ci if (!q_node) 29668c2ecf20Sopenharmony_ci return ICE_ERR_PARAM; 29678c2ecf20Sopenharmony_ci return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); 29688c2ecf20Sopenharmony_ci} 2969
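
/* Illustrative usage sketch only (not wired into the driver build): shows
 * how a configuration path might cap a single Tx queue with a max (EIR)
 * limit via the exported helpers above and later restore the default. The
 * TC and queue handle values of 0 and the 100000 Kbps figure are
 * placeholders for this example, not recommended settings.
 */
static enum ice_status __maybe_unused
ice_sched_q_bw_lmt_example(struct ice_port_info *pi, u16 vsi_handle)
{
	enum ice_status status;

	/* Cap queue 0 of TC 0 on this VSI to 100000 Kbps (100 Mbps) */
	status = ice_cfg_q_bw_lmt(pi, vsi_handle, 0, 0, ICE_MAX_BW, 100000);
	if (status)
		return status;

	/* ... later, remove the cap by falling back to the default profile */
	return ice_cfg_q_bw_dflt_lmt(pi, vsi_handle, 0, 0, ICE_MAX_BW);
}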