// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"

#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response. The reported LAN MAC address is also cached in
 * the HW struct (port.mac). ice_discover_dev_caps is expected to be called
 * before this function.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "   extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "   module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "   module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "   module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
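
/* Example (illustrative sketch, not part of the driver flow): a caller
 * typically allocates the response buffer, requests one report mode, and
 * frees the buffer when done, as ice_init_hw() does below:
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	enum ice_status status;
 *
 *	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
 *	if (!pcaps)
 *		return ICE_ERR_NO_MEMORY;
 *	status = ice_aq_get_phy_caps(hw->port_info, false,
 *				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
 *				     NULL);
 *	devm_kfree(ice_hw_to_dev(hw), pcaps);
 */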

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * The cage node type can be used to determine if a cage is present. If the
 * AQC returns an error (ENOENT), no cage is present, and the connection type
 * is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* The cage node type can be used to determine if a cage is present. If
	 * the AQC returns an error (ENOENT), no cage is present, and the
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be, since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it.
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
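
/* Example (illustrative, mirroring the call in ice_init_hw()): refresh the
 * cached link status in pi->phy.link_info without enabling Link Status
 * Events and without taking a caller-side copy:
 *
 *	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
 */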

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
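
/* Example (mirrors the call made from ice_init_hw() below): enable jumbo
 * frame support by programming the maximum frame size the MAC will accept:
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 */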

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *config;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}

/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
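
/* Example (sketch with hypothetical ring_dma/ring_count/rx_buf_len/rxq_index
 * values; field encodings follow the ice_rlan_ctx_info table above): a caller
 * fills the sparse context and lets ice_write_rxq_ctx() pack and program it.
 * The 7-bit shifts reflect the 128-byte units used for the descriptor base
 * address and data buffer size:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */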

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
				    /* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	enum ice_status status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List and Release Resource
	 * (with resource ID set to Global Config Lock) AdminQ commands are
	 * allowed; all others must block until the package download completes
	 * and the Global Config Lock is released.  See also
	 * ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}
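
/* Example (sketch): a direct (no-buffer) command only needs a default
 * descriptor filled with its opcode, as ice_clear_pf_cfg() above shows:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 */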

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 *  1) ICE_SUCCESS -        acquired lock, and can perform download package
 *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                          successfully downloaded the package; the driver does
 *                          not have to download the package and can continue
 *                          loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
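
/* Typical usage pattern (sketch; the NVM resource and the 3000 ms timeout are
 * illustrative): acquire the resource, perform the protected operation, and
 * always release the resource afterwards:
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
 *	if (status)
 *		return status;
 *	... access the shared resource ...
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */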
1641
1642/**
1643 * ice_aq_alloc_free_res - command to allocate/free resources
1644 * @hw: pointer to the HW struct
1645 * @num_entries: number of resource entries in buffer
1646 * @buf: Indirect buffer to hold data parameters and response
1647 * @buf_size: size of buffer for indirect commands
1648 * @opc: pass in the command opcode
1649 * @cd: pointer to command details structure or NULL
1650 *
1651 * Helper function to allocate/free resources using the admin queue commands
1652 */
1653enum ice_status
1654ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1655		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1656		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1657{
1658	struct ice_aqc_alloc_free_res_cmd *cmd;
1659	struct ice_aq_desc desc;
1660
1661	cmd = &desc.params.sw_res_ctrl;
1662
1663	if (!buf)
1664		return ICE_ERR_PARAM;
1665
1666	if (buf_size < (num_entries * sizeof(buf->elem[0])))
1667		return ICE_ERR_PARAM;
1668
1669	ice_fill_dflt_direct_cmd_desc(&desc, opc);
1670
1671	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1672
1673	cmd->num_entries = cpu_to_le16(num_entries);
1674
1675	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1676}
1677
1678/**
1679 * ice_alloc_hw_res - allocate resource
1680 * @hw: pointer to the HW struct
1681 * @type: type of resource
1682 * @num: number of resources to allocate
1683 * @btm: allocate from bottom
1684 * @res: pointer to array that will receive the resources
1685 */
1686enum ice_status
1687ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1688{
1689	struct ice_aqc_alloc_free_res_elem *buf;
1690	enum ice_status status;
1691	u16 buf_len;
1692
1693	buf_len = struct_size(buf, elem, num);
1694	buf = kzalloc(buf_len, GFP_KERNEL);
1695	if (!buf)
1696		return ICE_ERR_NO_MEMORY;
1697
1698	/* Prepare buffer to allocate resource. */
1699	buf->num_elems = cpu_to_le16(num);
1700	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1701				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1702	if (btm)
1703		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1704
1705	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1706				       ice_aqc_opc_alloc_res, NULL);
1707	if (status)
1708		goto ice_alloc_res_exit;
1709
1710	memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1711
1712ice_alloc_res_exit:
1713	kfree(buf);
1714	return status;
1715}
1716
1717/**
1718 * ice_free_hw_res - free allocated HW resource
1719 * @hw: pointer to the HW struct
1720 * @type: type of resource to free
1721 * @num: number of resources
1722 * @res: pointer to array that contains the resources to free
1723 */
1724enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1725{
1726	struct ice_aqc_alloc_free_res_elem *buf;
1727	enum ice_status status;
1728	u16 buf_len;
1729
1730	buf_len = struct_size(buf, elem, num);
1731	buf = kzalloc(buf_len, GFP_KERNEL);
1732	if (!buf)
1733		return ICE_ERR_NO_MEMORY;
1734
1735	/* Prepare buffer to free resource. */
1736	buf->num_elems = cpu_to_le16(num);
1737	buf->res_type = cpu_to_le16(type);
1738	memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1739
1740	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1741				       ice_aqc_opc_free_res, NULL);
1742	if (status)
1743		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1744
1745	kfree(buf);
1746	return status;
1747}
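
/* Usage sketch (illustrative, not compiled): pairing the two helpers above.
 * ICE_EXAMPLE_RES_TYPE is a placeholder for one of the ICE_AQC_RES_TYPE_*
 * values; the example function name is hypothetical.
 */
#if 0
static enum ice_status ice_example_res_cycle(struct ice_hw *hw)
{
	enum ice_status status;
	u16 res_ids[2];

	/* allocate two dedicated entries from the bottom of the pool */
	status = ice_alloc_hw_res(hw, ICE_EXAMPLE_RES_TYPE, 2, true, res_ids);
	if (status)
		return status;

	/* ... use the entries ... */

	return ice_free_hw_res(hw, ICE_EXAMPLE_RES_TYPE, 2, res_ids);
}
#endif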
1748
1749/**
1750 * ice_get_num_per_func - determine number of resources per PF
1751 * @hw: pointer to the HW structure
1752 * @max: value to be evenly split between each PF
1753 *
1754 * Determine the number of valid functions by going through the bitmap returned
1755 * from parsing capabilities and use this to calculate the number of resources
1756 * per PF based on the max value passed in.
1757 */
1758static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1759{
1760	u8 funcs;
1761
1762#define ICE_CAPS_VALID_FUNCS_M	0xFF
1763	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1764			 ICE_CAPS_VALID_FUNCS_M);
1765
1766	if (!funcs)
1767		return 0;
1768
1769	return max / funcs;
1770}
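
/* Worked example (assumed values for illustration): with
 * valid_functions = 0x0F (four PFs) and max = 768, each PF is granted
 * 768 / 4 = 192 of the resource.
 */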
1771
1772/**
1773 * ice_parse_common_caps - parse common device/function capabilities
1774 * @hw: pointer to the HW struct
1775 * @caps: pointer to common capabilities structure
1776 * @elem: the capability element to parse
1777 * @prefix: message prefix for tracing capabilities
1778 *
1779 * Given a capability element, extract relevant details into the common
1780 * capability structure.
1781 *
1782 * Returns: true if the capability matches one of the common capability IDs,
1783 * false otherwise.
1784 */
1785static bool
1786ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1787		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
1788{
1789	u32 logical_id = le32_to_cpu(elem->logical_id);
1790	u32 phys_id = le32_to_cpu(elem->phys_id);
1791	u32 number = le32_to_cpu(elem->number);
1792	u16 cap = le16_to_cpu(elem->cap);
1793	bool found = true;
1794
1795	switch (cap) {
1796	case ICE_AQC_CAPS_VALID_FUNCTIONS:
1797		caps->valid_functions = number;
1798		ice_debug(hw, ICE_DBG_INIT,
1799			  "%s: valid_functions (bitmap) = %d\n", prefix,
1800			  caps->valid_functions);
1801		break;
1802	case ICE_AQC_CAPS_SRIOV:
1803		caps->sr_iov_1_1 = (number == 1);
1804		ice_debug(hw, ICE_DBG_INIT,
1805			  "%s: sr_iov_1_1 = %d\n", prefix,
1806			  caps->sr_iov_1_1);
1807		break;
1808	case ICE_AQC_CAPS_DCB:
1809		caps->dcb = (number == 1);
1810		caps->active_tc_bitmap = logical_id;
1811		caps->maxtc = phys_id;
1812		ice_debug(hw, ICE_DBG_INIT,
1813			  "%s: dcb = %d\n", prefix, caps->dcb);
1814		ice_debug(hw, ICE_DBG_INIT,
1815			  "%s: active_tc_bitmap = %d\n", prefix,
1816			  caps->active_tc_bitmap);
1817		ice_debug(hw, ICE_DBG_INIT,
1818			  "%s: maxtc = %d\n", prefix, caps->maxtc);
1819		break;
1820	case ICE_AQC_CAPS_RSS:
1821		caps->rss_table_size = number;
1822		caps->rss_table_entry_width = logical_id;
1823		ice_debug(hw, ICE_DBG_INIT,
1824			  "%s: rss_table_size = %d\n", prefix,
1825			  caps->rss_table_size);
1826		ice_debug(hw, ICE_DBG_INIT,
1827			  "%s: rss_table_entry_width = %d\n", prefix,
1828			  caps->rss_table_entry_width);
1829		break;
1830	case ICE_AQC_CAPS_RXQS:
1831		caps->num_rxq = number;
1832		caps->rxq_first_id = phys_id;
1833		ice_debug(hw, ICE_DBG_INIT,
1834			  "%s: num_rxq = %d\n", prefix,
1835			  caps->num_rxq);
1836		ice_debug(hw, ICE_DBG_INIT,
1837			  "%s: rxq_first_id = %d\n", prefix,
1838			  caps->rxq_first_id);
1839		break;
1840	case ICE_AQC_CAPS_TXQS:
1841		caps->num_txq = number;
1842		caps->txq_first_id = phys_id;
1843		ice_debug(hw, ICE_DBG_INIT,
1844			  "%s: num_txq = %d\n", prefix,
1845			  caps->num_txq);
1846		ice_debug(hw, ICE_DBG_INIT,
1847			  "%s: txq_first_id = %d\n", prefix,
1848			  caps->txq_first_id);
1849		break;
1850	case ICE_AQC_CAPS_MSIX:
1851		caps->num_msix_vectors = number;
1852		caps->msix_vector_first_id = phys_id;
1853		ice_debug(hw, ICE_DBG_INIT,
1854			  "%s: num_msix_vectors = %d\n", prefix,
1855			  caps->num_msix_vectors);
1856		ice_debug(hw, ICE_DBG_INIT,
1857			  "%s: msix_vector_first_id = %d\n", prefix,
1858			  caps->msix_vector_first_id);
1859		break;
1860	case ICE_AQC_CAPS_PENDING_NVM_VER:
1861		caps->nvm_update_pending_nvm = true;
1862		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
1863		break;
1864	case ICE_AQC_CAPS_PENDING_OROM_VER:
1865		caps->nvm_update_pending_orom = true;
1866		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
1867		break;
1868	case ICE_AQC_CAPS_PENDING_NET_VER:
1869		caps->nvm_update_pending_netlist = true;
1870		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
1871		break;
1872	case ICE_AQC_CAPS_NVM_MGMT:
1873		caps->nvm_unified_update =
1874			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
1875			true : false;
1876		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
1877			  caps->nvm_unified_update);
1878		break;
1879	case ICE_AQC_CAPS_MAX_MTU:
1880		caps->max_mtu = number;
1881		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1882			  prefix, caps->max_mtu);
1883		break;
1884	default:
1885		/* Not one of the recognized common capabilities */
1886		found = false;
1887	}
1888
1889	return found;
1890}
1891
1892/**
1893 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
1894 * @hw: pointer to the HW structure
1895 * @caps: pointer to capabilities structure to fix
1896 *
1897 * Re-calculate the capabilities that are dependent on the number of physical
1898 * ports; i.e. some features are not supported or function differently on
1899 * devices with more than 4 ports.
1900 */
1901static void
1902ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
1903{
1904	/* This assumes device capabilities are always scanned before function
1905	 * capabilities during the initialization flow.
1906	 */
1907	if (hw->dev_caps.num_funcs > 4) {
1908		/* Max 4 TCs per port */
1909		caps->maxtc = 4;
1910		ice_debug(hw, ICE_DBG_INIT,
1911			  "reducing maxtc to %d (based on #ports)\n",
1912			  caps->maxtc);
1913	}
1914}
1915
1916/**
1917 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
1918 * @hw: pointer to the HW struct
1919 * @func_p: pointer to function capabilities structure
1920 * @cap: pointer to the capability element to parse
1921 *
1922 * Extract function capabilities for ICE_AQC_CAPS_VF.
1923 */
1924static void
1925ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1926		       struct ice_aqc_list_caps_elem *cap)
1927{
1928	u32 logical_id = le32_to_cpu(cap->logical_id);
1929	u32 number = le32_to_cpu(cap->number);
1930
1931	func_p->num_allocd_vfs = number;
1932	func_p->vf_base_id = logical_id;
1933	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
1934		  func_p->num_allocd_vfs);
1935	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
1936		  func_p->vf_base_id);
1937}
1938
1939/**
1940 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
1941 * @hw: pointer to the HW struct
1942 * @func_p: pointer to function capabilities structure
1943 * @cap: pointer to the capability element to parse
1944 *
1945 * Extract function capabilities for ICE_AQC_CAPS_VSI.
1946 */
1947static void
1948ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1949			struct ice_aqc_list_caps_elem *cap)
1950{
1951	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
1952	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
1953		  le32_to_cpu(cap->number));
1954	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
1955		  func_p->guar_num_vsi);
1956}
1957
1958/**
1959 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
1960 * @hw: pointer to the HW struct
1961 * @func_p: pointer to function capabilities structure
1962 *
1963 * Extract function capabilities for ICE_AQC_CAPS_FD.
1964 */
1965static void
1966ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
1967{
1968	u32 reg_val, val;
1969
1970	reg_val = rd32(hw, GLQF_FD_SIZE);
1971	val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
1972		GLQF_FD_SIZE_FD_GSIZE_S;
1973	func_p->fd_fltr_guar =
1974		ice_get_num_per_func(hw, val);
1975	val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
1976		GLQF_FD_SIZE_FD_BSIZE_S;
1977	func_p->fd_fltr_best_effort = val;
1978
1979	ice_debug(hw, ICE_DBG_INIT,
1980		  "func caps: fd_fltr_guar = %d\n",
1981		  func_p->fd_fltr_guar);
1982	ice_debug(hw, ICE_DBG_INIT,
1983		  "func caps: fd_fltr_best_effort = %d\n",
1984		  func_p->fd_fltr_best_effort);
1985}
1986
1987/**
1988 * ice_parse_func_caps - Parse function capabilities
1989 * @hw: pointer to the HW struct
1990 * @func_p: pointer to function capabilities structure
1991 * @buf: buffer containing the function capability records
1992 * @cap_count: the number of capabilities
1993 *
1994 * Helper function to parse function (0x000A) capabilities list. For
1995 * capabilities shared between device and function, this relies on
1996 * ice_parse_common_caps.
1997 *
1998 * Loop through the list of provided capabilities and extract the relevant
1999 * data into the function capabilities structure.
2000 */
2001static void
2002ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2003		    void *buf, u32 cap_count)
2004{
2005	struct ice_aqc_list_caps_elem *cap_resp;
2006	u32 i;
2007
2008	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2009
2010	memset(func_p, 0, sizeof(*func_p));
2011
2012	for (i = 0; i < cap_count; i++) {
2013		u16 cap = le16_to_cpu(cap_resp[i].cap);
2014		bool found;
2015
2016		found = ice_parse_common_caps(hw, &func_p->common_cap,
2017					      &cap_resp[i], "func caps");
2018
2019		switch (cap) {
2020		case ICE_AQC_CAPS_VF:
2021			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2022			break;
2023		case ICE_AQC_CAPS_VSI:
2024			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2025			break;
2026		case ICE_AQC_CAPS_FD:
2027			ice_parse_fdir_func_caps(hw, func_p);
2028			break;
2029		default:
2030			/* Don't list common capabilities as unknown */
2031			if (!found)
2032				ice_debug(hw, ICE_DBG_INIT,
2033					  "func caps: unknown capability[%d]: 0x%x\n",
2034					  i, cap);
2035			break;
2036		}
2037	}
2038
2039	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2040}
2041
2042/**
2043 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2044 * @hw: pointer to the HW struct
2045 * @dev_p: pointer to device capabilities structure
2046 * @cap: capability element to parse
2047 *
2048 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2049 */
2050static void
2051ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2052			      struct ice_aqc_list_caps_elem *cap)
2053{
2054	u32 number = le32_to_cpu(cap->number);
2055
2056	dev_p->num_funcs = hweight32(number);
2057	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2058		  dev_p->num_funcs);
2059}
2060
2061/**
2062 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2063 * @hw: pointer to the HW struct
2064 * @dev_p: pointer to device capabilities structure
2065 * @cap: capability element to parse
2066 *
2067 * Parse ICE_AQC_CAPS_VF for device capabilities.
2068 */
2069static void
2070ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2071		      struct ice_aqc_list_caps_elem *cap)
2072{
2073	u32 number = le32_to_cpu(cap->number);
2074
2075	dev_p->num_vfs_exposed = number;
2076	ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2077		  dev_p->num_vfs_exposed);
2078}
2079
2080/**
2081 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2082 * @hw: pointer to the HW struct
2083 * @dev_p: pointer to device capabilities structure
2084 * @cap: capability element to parse
2085 *
2086 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2087 */
2088static void
2089ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2090		       struct ice_aqc_list_caps_elem *cap)
2091{
2092	u32 number = le32_to_cpu(cap->number);
2093
2094	dev_p->num_vsi_allocd_to_host = number;
2095	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2096		  dev_p->num_vsi_allocd_to_host);
2097}
2098
2099/**
2100 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2101 * @hw: pointer to the HW struct
2102 * @dev_p: pointer to device capabilities structure
2103 * @cap: capability element to parse
2104 *
2105 * Parse ICE_AQC_CAPS_FD for device capabilities.
2106 */
2107static void
2108ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2109			struct ice_aqc_list_caps_elem *cap)
2110{
2111	u32 number = le32_to_cpu(cap->number);
2112
2113	dev_p->num_flow_director_fltr = number;
2114	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2115		  dev_p->num_flow_director_fltr);
2116}
2117
2118/**
2119 * ice_parse_dev_caps - Parse device capabilities
2120 * @hw: pointer to the HW struct
2121 * @dev_p: pointer to device capabilities structure
2122 * @buf: buffer containing the device capability records
2123 * @cap_count: the number of capabilities
2124 *
2125 * Helper function to parse the device (0x000B) capabilities list. For
2126 * capabilities shared between device and function, this relies on
2127 * ice_parse_common_caps.
2128 *
2129 * Loop through the list of provided capabilities and extract the relevant
2130 * data into the device capabilities structure.
2131 */
2132static void
2133ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2134		   void *buf, u32 cap_count)
2135{
2136	struct ice_aqc_list_caps_elem *cap_resp;
2137	u32 i;
2138
2139	cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2140
2141	memset(dev_p, 0, sizeof(*dev_p));
2142
2143	for (i = 0; i < cap_count; i++) {
2144		u16 cap = le16_to_cpu(cap_resp[i].cap);
2145		bool found;
2146
2147		found = ice_parse_common_caps(hw, &dev_p->common_cap,
2148					      &cap_resp[i], "dev caps");
2149
2150		switch (cap) {
2151		case ICE_AQC_CAPS_VALID_FUNCTIONS:
2152			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2153			break;
2154		case ICE_AQC_CAPS_VF:
2155			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2156			break;
2157		case ICE_AQC_CAPS_VSI:
2158			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2159			break;
2160		case ICE_AQC_CAPS_FD:
2161			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2162			break;
2163		default:
2164			/* Don't list common capabilities as unknown */
2165			if (!found)
2166				ice_debug(hw, ICE_DBG_INIT,
2167					  "dev caps: unknown capability[%d]: 0x%x\n",
2168					  i, cap);
2169			break;
2170		}
2171	}
2172
2173	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2174}
2175
2176/**
2177 * ice_aq_list_caps - query function/device capabilities
2178 * @hw: pointer to the HW struct
2179 * @buf: a buffer to hold the capabilities
2180 * @buf_size: size of the buffer
2181 * @cap_count: if not NULL, set to the number of capabilities reported
2182 * @opc: capabilities type to discover, device or function
2183 * @cd: pointer to command details structure or NULL
2184 *
2185 * Get the function (0x000A) or device (0x000B) capabilities description from
2186 * firmware and store it in the buffer.
2187 *
2188 * If the cap_count pointer is not NULL, then it is set to the number of
2189 * capabilities firmware will report. Note that if the buffer size is too
2190 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2191 * cap_count will still be updated in this case. It is recommended that the
2192 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2193 * firmware could return) to avoid this.
2194 */
2195enum ice_status
2196ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2197		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2198{
2199	struct ice_aqc_list_caps *cmd;
2200	struct ice_aq_desc desc;
2201	enum ice_status status;
2202
2203	cmd = &desc.params.get_cap;
2204
2205	if (opc != ice_aqc_opc_list_func_caps &&
2206	    opc != ice_aqc_opc_list_dev_caps)
2207		return ICE_ERR_PARAM;
2208
2209	ice_fill_dflt_direct_cmd_desc(&desc, opc);
2210	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2211
2212	if (cap_count)
2213		*cap_count = le32_to_cpu(cmd->count);
2214
2215	return status;
2216}
2217
2218/**
2219 * ice_discover_dev_caps - Read and extract device capabilities
2220 * @hw: pointer to the hardware structure
2221 * @dev_caps: pointer to device capabilities structure
2222 *
2223 * Read the device capabilities and extract them into the dev_caps structure
2224 * for later use.
2225 */
2226enum ice_status
2227ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2228{
2229	enum ice_status status;
2230	u32 cap_count = 0;
2231	void *cbuf;
2232
2233	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2234	if (!cbuf)
2235		return ICE_ERR_NO_MEMORY;
2236
2237	/* Although the driver doesn't know the number of capabilities the
2238	 * device will return, we can simply send a 4KB buffer, the maximum
2239	 * possible size that firmware can return.
2240	 */
2241	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2242
2243	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2244				  ice_aqc_opc_list_dev_caps, NULL);
2245	if (!status)
2246		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2247	kfree(cbuf);
2248
2249	return status;
2250}
2251
2252/**
2253 * ice_discover_func_caps - Read and extract function capabilities
2254 * @hw: pointer to the hardware structure
2255 * @func_caps: pointer to function capabilities structure
2256 *
2257 * Read the function capabilities and extract them into the func_caps structure
2258 * for later use.
2259 */
2260static enum ice_status
2261ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2262{
2263	enum ice_status status;
2264	u32 cap_count = 0;
2265	void *cbuf;
2266
2267	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2268	if (!cbuf)
2269		return ICE_ERR_NO_MEMORY;
2270
2271	/* Although the driver doesn't know the number of capabilities the
2272	 * device will return, we can simply send a 4KB buffer, the maximum
2273	 * possible size that firmware can return.
2274	 */
2275	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2276
2277	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2278				  ice_aqc_opc_list_func_caps, NULL);
2279	if (!status)
2280		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2281	kfree(cbuf);
2282
2283	return status;
2284}
2285
2286/**
2287 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2288 * @hw: pointer to the hardware structure
2289 */
2290void ice_set_safe_mode_caps(struct ice_hw *hw)
2291{
2292	struct ice_hw_func_caps *func_caps = &hw->func_caps;
2293	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2294	struct ice_hw_common_caps cached_caps;
2295	u32 num_funcs;
2296
2297	/* cache some func_caps values that should be restored after memset */
2298	cached_caps = func_caps->common_cap;
2299
2300	/* unset func capabilities */
2301	memset(func_caps, 0, sizeof(*func_caps));
2302
2303#define ICE_RESTORE_FUNC_CAP(name) \
2304	func_caps->common_cap.name = cached_caps.name
2305
2306	/* restore cached values */
2307	ICE_RESTORE_FUNC_CAP(valid_functions);
2308	ICE_RESTORE_FUNC_CAP(txq_first_id);
2309	ICE_RESTORE_FUNC_CAP(rxq_first_id);
2310	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2311	ICE_RESTORE_FUNC_CAP(max_mtu);
2312	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2313	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2314	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2315	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
2316
2317	/* one Tx and one Rx queue in safe mode */
2318	func_caps->common_cap.num_rxq = 1;
2319	func_caps->common_cap.num_txq = 1;
2320
2321	/* two MSIX vectors, one for traffic and one for misc causes */
2322	func_caps->common_cap.num_msix_vectors = 2;
2323	func_caps->guar_num_vsi = 1;
2324
2325	/* cache some dev_caps values that should be restored after memset */
2326	cached_caps = dev_caps->common_cap;
2327	num_funcs = dev_caps->num_funcs;
2328
2329	/* unset dev capabilities */
2330	memset(dev_caps, 0, sizeof(*dev_caps));
2331
2332#define ICE_RESTORE_DEV_CAP(name) \
2333	dev_caps->common_cap.name = cached_caps.name
2334
2335	/* restore cached values */
2336	ICE_RESTORE_DEV_CAP(valid_functions);
2337	ICE_RESTORE_DEV_CAP(txq_first_id);
2338	ICE_RESTORE_DEV_CAP(rxq_first_id);
2339	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2340	ICE_RESTORE_DEV_CAP(max_mtu);
2341	ICE_RESTORE_DEV_CAP(nvm_unified_update);
2342	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2343	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2344	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
2345	dev_caps->num_funcs = num_funcs;
2346
2347	/* one Tx and one Rx queue per function in safe mode */
2348	dev_caps->common_cap.num_rxq = num_funcs;
2349	dev_caps->common_cap.num_txq = num_funcs;
2350
2351	/* two MSIX vectors per function */
2352	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2353}
2354
2355/**
2356 * ice_get_caps - get info about the HW
2357 * @hw: pointer to the hardware structure
2358 */
2359enum ice_status ice_get_caps(struct ice_hw *hw)
2360{
2361	enum ice_status status;
2362
2363	status = ice_discover_dev_caps(hw, &hw->dev_caps);
2364	if (status)
2365		return status;
2366
2367	return ice_discover_func_caps(hw, &hw->func_caps);
2368}
2369
2370/**
2371 * ice_aq_manage_mac_write - manage MAC address write command
2372 * @hw: pointer to the HW struct
2373 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2374 * @flags: flags to control write behavior
2375 * @cd: pointer to command details structure or NULL
2376 *
2377 * This function is used to write MAC address to the NVM (0x0108).
2378 */
2379enum ice_status
2380ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2381			struct ice_sq_cd *cd)
2382{
2383	struct ice_aqc_manage_mac_write *cmd;
2384	struct ice_aq_desc desc;
2385
2386	cmd = &desc.params.mac_write;
2387	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2388
2389	cmd->flags = flags;
2390	ether_addr_copy(cmd->mac_addr, mac_addr);
2391
2392	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2393}
2394
2395/**
2396 * ice_aq_clear_pxe_mode
2397 * @hw: pointer to the HW struct
2398 *
2399 * Tell the firmware that the driver is taking over from PXE (0x0110).
2400 */
2401static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2402{
2403	struct ice_aq_desc desc;
2404
2405	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2406	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2407
2408	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2409}
2410
2411/**
2412 * ice_clear_pxe_mode - clear pxe operations mode
2413 * @hw: pointer to the HW struct
2414 *
2415 * Make sure all PXE mode settings are cleared, including things
2416 * like descriptor fetch/write-back mode.
2417 */
2418void ice_clear_pxe_mode(struct ice_hw *hw)
2419{
2420	if (ice_check_sq_alive(hw, &hw->adminq))
2421		ice_aq_clear_pxe_mode(hw);
2422}
2423
2424/**
2425 * ice_get_link_speed_based_on_phy_type - returns link speed
2426 * @phy_type_low: lower part of phy_type
2427 * @phy_type_high: higher part of phy_type
2428 *
2429 * This helper function will convert an entry in PHY type structure
2430 * [phy_type_low, phy_type_high] to its corresponding link speed.
2431 * Note: In the structure of [phy_type_low, phy_type_high], there should
2432 * be exactly one bit set, as this function converts a single PHY type to
2433 * its corresponding speed.
2434 * If no bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
2435 * If more than one bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
2436 */
2437static u16
2438ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2439{
2440	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2441	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2442
2443	switch (phy_type_low) {
2444	case ICE_PHY_TYPE_LOW_100BASE_TX:
2445	case ICE_PHY_TYPE_LOW_100M_SGMII:
2446		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2447		break;
2448	case ICE_PHY_TYPE_LOW_1000BASE_T:
2449	case ICE_PHY_TYPE_LOW_1000BASE_SX:
2450	case ICE_PHY_TYPE_LOW_1000BASE_LX:
2451	case ICE_PHY_TYPE_LOW_1000BASE_KX:
2452	case ICE_PHY_TYPE_LOW_1G_SGMII:
2453		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2454		break;
2455	case ICE_PHY_TYPE_LOW_2500BASE_T:
2456	case ICE_PHY_TYPE_LOW_2500BASE_X:
2457	case ICE_PHY_TYPE_LOW_2500BASE_KX:
2458		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2459		break;
2460	case ICE_PHY_TYPE_LOW_5GBASE_T:
2461	case ICE_PHY_TYPE_LOW_5GBASE_KR:
2462		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2463		break;
2464	case ICE_PHY_TYPE_LOW_10GBASE_T:
2465	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2466	case ICE_PHY_TYPE_LOW_10GBASE_SR:
2467	case ICE_PHY_TYPE_LOW_10GBASE_LR:
2468	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2469	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2470	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2471		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2472		break;
2473	case ICE_PHY_TYPE_LOW_25GBASE_T:
2474	case ICE_PHY_TYPE_LOW_25GBASE_CR:
2475	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2476	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2477	case ICE_PHY_TYPE_LOW_25GBASE_SR:
2478	case ICE_PHY_TYPE_LOW_25GBASE_LR:
2479	case ICE_PHY_TYPE_LOW_25GBASE_KR:
2480	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2481	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2482	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2483	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2484		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2485		break;
2486	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2487	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2488	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2489	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2490	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2491	case ICE_PHY_TYPE_LOW_40G_XLAUI:
2492		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2493		break;
2494	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2495	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2496	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2497	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2498	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2499	case ICE_PHY_TYPE_LOW_50G_LAUI2:
2500	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2501	case ICE_PHY_TYPE_LOW_50G_AUI2:
2502	case ICE_PHY_TYPE_LOW_50GBASE_CP:
2503	case ICE_PHY_TYPE_LOW_50GBASE_SR:
2504	case ICE_PHY_TYPE_LOW_50GBASE_FR:
2505	case ICE_PHY_TYPE_LOW_50GBASE_LR:
2506	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2507	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2508	case ICE_PHY_TYPE_LOW_50G_AUI1:
2509		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2510		break;
2511	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2512	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2513	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2514	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2515	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2516	case ICE_PHY_TYPE_LOW_100G_CAUI4:
2517	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2518	case ICE_PHY_TYPE_LOW_100G_AUI4:
2519	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2520	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2521	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2522	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2523	case ICE_PHY_TYPE_LOW_100GBASE_DR:
2524		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2525		break;
2526	default:
2527		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2528		break;
2529	}
2530
2531	switch (phy_type_high) {
2532	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2533	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2534	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2535	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2536	case ICE_PHY_TYPE_HIGH_100G_AUI2:
2537		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2538		break;
2539	default:
2540		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2541		break;
2542	}
2543
2544	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2545	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2546		return ICE_AQ_LINK_SPEED_UNKNOWN;
2547	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2548		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2549		return ICE_AQ_LINK_SPEED_UNKNOWN;
2550	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2551		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2552		return speed_phy_type_low;
2553	else
2554		return speed_phy_type_high;
2555}
2556
2557/**
2558 * ice_update_phy_type
2559 * @phy_type_low: pointer to the lower part of phy_type
2560 * @phy_type_high: pointer to the higher part of phy_type
2561 * @link_speeds_bitmap: targeted link speeds bitmap
2562 *
2563 * Note: For the link_speeds_bitmap structure, see
2564 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2565 * link_speeds_bitmap that includes multiple speeds.
2566 *
2567 * Each entry in this [phy_type_low, phy_type_high] structure represents a
2568 * certain link speed. This helper function will turn on bits in the
2569 * [phy_type_low, phy_type_high] structure based on the value of the
2570 * link_speeds_bitmap input parameter.
2571 */
2572void
2573ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2574		    u16 link_speeds_bitmap)
2575{
2576	u64 pt_high;
2577	u64 pt_low;
2578	int index;
2579	u16 speed;
2580
2581	/* We first check with low part of phy_type */
2582	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2583		pt_low = BIT_ULL(index);
2584		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2585
2586		if (link_speeds_bitmap & speed)
2587			*phy_type_low |= BIT_ULL(index);
2588	}
2589
2590	/* We then check with high part of phy_type */
2591	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2592		pt_high = BIT_ULL(index);
2593		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2594
2595		if (link_speeds_bitmap & speed)
2596			*phy_type_high |= BIT_ULL(index);
2597	}
2598}
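
/* Usage sketch (illustrative, not compiled): advertise only 10G and 25G by
 * letting the helper above set every PHY type bit whose speed is in the
 * bitmap. The example function name is hypothetical.
 */
#if 0
static void ice_example_update_phy_type(void)
{
	u64 phy_type_low = 0, phy_type_high = 0;

	ice_update_phy_type(&phy_type_low, &phy_type_high,
			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
}
#endif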
2599
2600/**
2601 * ice_aq_set_phy_cfg
2602 * @hw: pointer to the HW struct
2603 * @pi: port info structure of the interested logical port
2604 * @cfg: structure with PHY configuration data to be set
2605 * @cd: pointer to command details structure or NULL
2606 *
2607 * Set the various PHY configuration parameters supported on the Port.
2608 * One or more of the Set PHY config parameters may be ignored in an MFP
2609 * mode as the PF may not have the privilege to set some of the PHY Config
2610 * parameters. This status will be indicated by the command response (0x0601).
2611 */
2612enum ice_status
2613ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2614		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2615{
2616	struct ice_aq_desc desc;
2617	enum ice_status status;
2618
2619	if (!cfg)
2620		return ICE_ERR_PARAM;
2621
2622	/* Ensure that only valid bits of cfg->caps can be turned on. */
2623	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2624		ice_debug(hw, ICE_DBG_PHY,
2625			  "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2626			  cfg->caps);
2627
2628		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2629	}
2630
2631	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2632	desc.params.set_phy.lport_num = pi->lport;
2633	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2634
2635	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2636	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
2637		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2638	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
2639		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2640	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
2641	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
2642		  cfg->low_power_ctrl_an);
2643	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
2644	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
2645	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
2646		  cfg->link_fec_opt);
2647
2648	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2649	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2650		status = 0;
2651
2652	if (!status)
2653		pi->phy.curr_user_phy_cfg = *cfg;
2654
2655	return status;
2656}
2657
2658/**
2659 * ice_update_link_info - update status of the HW network link
2660 * @pi: port info structure of the interested logical port
2661 */
2662enum ice_status ice_update_link_info(struct ice_port_info *pi)
2663{
2664	struct ice_link_status *li;
2665	enum ice_status status;
2666
2667	if (!pi)
2668		return ICE_ERR_PARAM;
2669
2670	li = &pi->phy.link_info;
2671
2672	status = ice_aq_get_link_info(pi, true, NULL, NULL);
2673	if (status)
2674		return status;
2675
2676	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2677		struct ice_aqc_get_phy_caps_data *pcaps;
2678		struct ice_hw *hw;
2679
2680		hw = pi->hw;
2681		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2682				     GFP_KERNEL);
2683		if (!pcaps)
2684			return ICE_ERR_NO_MEMORY;
2685
2686		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2687					     pcaps, NULL);
2688
2689		devm_kfree(ice_hw_to_dev(hw), pcaps);
2690	}
2691
2692	return status;
2693}
2694
2695/**
2696 * ice_cache_phy_user_req
2697 * @pi: port information structure
2698 * @cache_data: PHY logging data
2699 * @cache_mode: PHY logging mode
2700 *
2701 * Log the user's (FC, FEC, SPEED) request for later use.
2702 */
2703static void
2704ice_cache_phy_user_req(struct ice_port_info *pi,
2705		       struct ice_phy_cache_mode_data cache_data,
2706		       enum ice_phy_cache_mode cache_mode)
2707{
2708	if (!pi)
2709		return;
2710
2711	switch (cache_mode) {
2712	case ICE_FC_MODE:
2713		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2714		break;
2715	case ICE_SPEED_MODE:
2716		pi->phy.curr_user_speed_req =
2717			cache_data.data.curr_user_speed_req;
2718		break;
2719	case ICE_FEC_MODE:
2720		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2721		break;
2722	default:
2723		break;
2724	}
2725}
2726
2727/**
2728 * ice_caps_to_fc_mode
2729 * @caps: PHY capabilities
2730 *
2731 * Convert PHY FC capabilities to ice FC mode
2732 */
2733enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2734{
2735	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2736	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2737		return ICE_FC_FULL;
2738
2739	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2740		return ICE_FC_TX_PAUSE;
2741
2742	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2743		return ICE_FC_RX_PAUSE;
2744
2745	return ICE_FC_NONE;
2746}
2747
2748/**
2749 * ice_caps_to_fec_mode
2750 * @caps: PHY capabilities
2751 * @fec_options: Link FEC options
2752 *
2753 * Convert PHY FEC capabilities to ice FEC mode
2754 */
2755enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2756{
2757	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2758		return ICE_FEC_AUTO;
2759
2760	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2761			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2762			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2763			   ICE_AQC_PHY_FEC_25G_KR_REQ))
2764		return ICE_FEC_BASER;
2765
2766	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2767			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2768			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2769		return ICE_FEC_RS;
2770
2771	return ICE_FEC_NONE;
2772}
2773
2774/**
2775 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
2776 * @pi: port information structure
2777 * @cfg: PHY configuration data to set FC mode
2778 * @req_mode: FC mode to configure
2779 */
2780enum ice_status
2781ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2782	       enum ice_fc_mode req_mode)
2783{
2784	struct ice_phy_cache_mode_data cache_data;
2785	u8 pause_mask = 0x0;
2786
2787	if (!pi || !cfg)
2788		return ICE_ERR_BAD_PTR;
2789
2790	switch (req_mode) {
2791	case ICE_FC_FULL:
2792		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2793		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2794		break;
2795	case ICE_FC_RX_PAUSE:
2796		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2797		break;
2798	case ICE_FC_TX_PAUSE:
2799		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2800		break;
2801	default:
2802		break;
2803	}
2804
2805	/* clear the old pause settings */
2806	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2807		ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2808
2809	/* set the new capabilities */
2810	cfg->caps |= pause_mask;
2811
2812	/* Cache user FC request */
2813	cache_data.data.curr_user_fc_req = req_mode;
2814	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2815
2816	return 0;
2817}
2818
2819/**
2820 * ice_set_fc
2821 * @pi: port information structure
2822 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2823 * @ena_auto_link_update: enable automatic link update
2824 *
2825 * Set the requested flow control mode.
2826 */
2827enum ice_status
2828ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2829{
2830	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2831	struct ice_aqc_get_phy_caps_data *pcaps;
2832	enum ice_status status;
2833	struct ice_hw *hw;
2834
2835	if (!pi || !aq_failures)
2836		return ICE_ERR_BAD_PTR;
2837
2838	*aq_failures = 0;
2839	hw = pi->hw;
2840
2841	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
2842	if (!pcaps)
2843		return ICE_ERR_NO_MEMORY;
2844
2845	/* Get the current PHY config */
2846	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
2847				     pcaps, NULL);
2848	if (status) {
2849		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2850		goto out;
2851	}
2852
2853	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
2854
2855	/* Configure the set PHY data */
2856	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
2857	if (status)
2858		goto out;
2859
2860	/* If the capabilities have changed, then set the new config */
2861	if (cfg.caps != pcaps->caps) {
2862		int retry_count, retry_max = 10;
2863
2864		/* Auto restart link so settings take effect */
2865		if (ena_auto_link_update)
2866			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2867
2868		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
2869		if (status) {
2870			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2871			goto out;
2872		}
2873
2874		/* Update the link info
2875		 * It sometimes takes a really long time for link to
2876		 * come back from the atomic reset. Thus, we wait a
2877		 * little bit.
2878		 */
2879		for (retry_count = 0; retry_count < retry_max; retry_count++) {
2880			status = ice_update_link_info(pi);
2881
2882			if (!status)
2883				break;
2884
2885			mdelay(100);
2886		}
2887
2888		if (status)
2889			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2890	}
2891
2892out:
2893	devm_kfree(ice_hw_to_dev(hw), pcaps);
2894	return status;
2895}
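
/* Usage sketch (illustrative, not compiled): request full flow control on a
 * port with automatic link restart. The example function name is
 * hypothetical.
 */
#if 0
static enum ice_status ice_example_set_fc_full(struct ice_port_info *pi)
{
	u8 aq_failures = 0;

	pi->fc.req_mode = ICE_FC_FULL;
	return ice_set_fc(pi, &aq_failures, true);
}
#endif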
2896
2897/**
2898 * ice_phy_caps_equals_cfg
2899 * @phy_caps: PHY capabilities
2900 * @phy_cfg: PHY configuration
2901 *
2902 * Helper function to determine if the PHY capabilities match the PHY
2903 * configuration
2904 */
2905bool
2906ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2907			struct ice_aqc_set_phy_cfg_data *phy_cfg)
2908{
2909	u8 caps_mask, cfg_mask;
2910
2911	if (!phy_caps || !phy_cfg)
2912		return false;
2913
2914	/* These bits are not common between capabilities and configuration.
2915	 * Do not use them to determine equality.
2916	 */
2917	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2918					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
2919	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2920
2921	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2922	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2923	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2924	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
2925	    phy_caps->eee_cap != phy_cfg->eee_cap ||
2926	    phy_caps->eeer_value != phy_cfg->eeer_value ||
2927	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2928		return false;
2929
2930	return true;
2931}
2932
2933/**
2934 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2935 * @pi: port information structure
2936 * @caps: PHY ability structure to copy data from
2937 * @cfg: PHY configuration structure to copy data to
2938 *
2939 * Helper function to copy AQC PHY get ability data to PHY set configuration
2940 * data structure
2941 */
2942void
2943ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
2944			 struct ice_aqc_get_phy_caps_data *caps,
2945			 struct ice_aqc_set_phy_cfg_data *cfg)
2946{
2947	if (!pi || !caps || !cfg)
2948		return;
2949
2950	memset(cfg, 0, sizeof(*cfg));
2951	cfg->phy_type_low = caps->phy_type_low;
2952	cfg->phy_type_high = caps->phy_type_high;
2953	cfg->caps = caps->caps;
2954	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
2955	cfg->eee_cap = caps->eee_cap;
2956	cfg->eeer_value = caps->eeer_value;
2957	cfg->link_fec_opt = caps->link_fec_options;
2958	cfg->module_compliance_enforcement =
2959		caps->module_compliance_enforcement;
2960
2961	if (ice_fw_supports_link_override(pi->hw)) {
2962		struct ice_link_default_override_tlv tlv;
2963
2964		if (ice_get_link_default_override(&tlv, pi))
2965			return;
2966
2967		if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
2968			cfg->module_compliance_enforcement |=
2969				ICE_LINK_OVERRIDE_STRICT_MODE;
2970	}
2971}
2972
2973/**
2974 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2975 * @pi: port information structure
2976 * @cfg: PHY configuration data to set FEC mode
2977 * @fec: FEC mode to configure
2978 */
2979enum ice_status
2980ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2981		enum ice_fec_mode fec)
2982{
2983	struct ice_aqc_get_phy_caps_data *pcaps;
2984	enum ice_status status;
2985
2986	if (!pi || !cfg)
2987		return ICE_ERR_BAD_PTR;
2988
2989	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2990	if (!pcaps)
2991		return ICE_ERR_NO_MEMORY;
2992
2993	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
2994				     NULL);
2995	if (status)
2996		goto out;
2997
2998	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2999	cfg->link_fec_opt = pcaps->link_fec_options;
3000
3001	switch (fec) {
3002	case ICE_FEC_BASER:
3003		/* Clear the RS bits, AND in the BASE-R ability
3004		 * bits, and OR in the BASE-R request bits.
3005		 */
3006		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3007			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3008		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3009			ICE_AQC_PHY_FEC_25G_KR_REQ;
3010		break;
3011	case ICE_FEC_RS:
3012		/* Clear the BASE-R bits, AND in the RS ability
3013		 * bits, and OR in the RS request bits.
3014		 */
3015		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3016		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3017			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3018		break;
3019	case ICE_FEC_NONE:
3020		/* Clear all FEC option bits. */
3021		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3022		break;
3023	case ICE_FEC_AUTO:
3024		/* AND in the auto-FEC bit and all capability bits. */
3025		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3026		cfg->link_fec_opt |= pcaps->link_fec_options;
3027		break;
3028	default:
3029		status = ICE_ERR_PARAM;
3030		break;
3031	}
3032
3033	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
3034		struct ice_link_default_override_tlv tlv;
3035
3036		if (ice_get_link_default_override(&tlv, pi))
3037			goto out;
3038
3039		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3040		    (tlv.options & ICE_LINK_OVERRIDE_EN))
3041			cfg->link_fec_opt = tlv.fec_options;
3042	}
3043
3044out:
3045	kfree(pcaps);
3046
3047	return status;
3048}
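
/* Usage sketch (illustrative, not compiled): switch a port to RS FEC by
 * seeding the config from the current abilities, applying the FEC mode, and
 * programming it. Error handling is abbreviated and the example function
 * name is hypothetical; pcaps is assumed to hold valid PHY abilities.
 */
#if 0
static enum ice_status
ice_example_set_rs_fec(struct ice_port_info *pi,
		       struct ice_aqc_get_phy_caps_data *pcaps)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	enum ice_status status;

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
	status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
	if (status)
		return status;

	return ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
}
#endif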
3049
3050/**
3051 * ice_get_link_status - get status of the HW network link
3052 * @pi: port information structure
3053 * @link_up: pointer to bool (true/false = linkup/linkdown)
3054 *
3055 * The link_up variable is true if the link is up and false if it is down.
3056 * The value of link_up is invalid if the return status is non-zero. As a
3057 * result of this call, link status reporting becomes enabled.
3058 */
3059enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3060{
3061	struct ice_phy_info *phy_info;
3062	enum ice_status status = 0;
3063
3064	if (!pi || !link_up)
3065		return ICE_ERR_PARAM;
3066
3067	phy_info = &pi->phy;
3068
3069	if (phy_info->get_link_info) {
3070		status = ice_update_link_info(pi);
3071
3072		if (status)
3073			ice_debug(pi->hw, ICE_DBG_LINK,
3074				  "get link status error, status = %d\n",
3075				  status);
3076	}
3077
3078	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3079
3080	return status;
3081}
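
/* Usage sketch (illustrative, not compiled): query and log the link state.
 * The example function name is hypothetical.
 */
#if 0
static void ice_example_log_link(struct ice_port_info *pi)
{
	bool link_up;

	if (!ice_get_link_status(pi, &link_up))
		ice_debug(pi->hw, ICE_DBG_LINK, "link is %s\n",
			  link_up ? "up" : "down");
}
#endif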
3082
3083/**
3084 * ice_aq_set_link_restart_an
3085 * @pi: pointer to the port information structure
3086 * @ena_link: if true: enable link, if false: disable link
3087 * @cd: pointer to command details structure or NULL
3088 *
3089 * Sets up the link and restarts the Auto-Negotiation over the link.
3090 */
3091enum ice_status
3092ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3093			   struct ice_sq_cd *cd)
3094{
3095	struct ice_aqc_restart_an *cmd;
3096	struct ice_aq_desc desc;
3097
3098	cmd = &desc.params.restart_an;
3099
3100	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3101
3102	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3103	cmd->lport_num = pi->lport;
3104	if (ena_link)
3105		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3106	else
3107		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3108
3109	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3110}
3111
3112/**
3113 * ice_aq_set_event_mask
3114 * @hw: pointer to the HW struct
3115 * @port_num: port number of the physical function
3116 * @mask: event mask to be set
3117 * @cd: pointer to command details structure or NULL
3118 *
3119 * Set event mask (0x0613)
3120 */
3121enum ice_status
3122ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3123		      struct ice_sq_cd *cd)
3124{
3125	struct ice_aqc_set_event_mask *cmd;
3126	struct ice_aq_desc desc;
3127
3128	cmd = &desc.params.set_event_mask;
3129
3130	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3131
3132	cmd->lport_num = port_num;
3133
3134	cmd->event_mask = cpu_to_le16(mask);
3135	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3136}
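
/* Usage sketch (illustrative, not compiled): assuming set bits in the event
 * mask suppress the corresponding events, mask off everything except link
 * up/down for a port. ICE_AQ_LINK_EVENT_UPDOWN is assumed to come from
 * ice_adminq_cmd.h; the example function name is hypothetical.
 */
#if 0
static enum ice_status
ice_example_enable_link_events(struct ice_port_info *pi)
{
	u16 mask = ~((u16)ICE_AQ_LINK_EVENT_UPDOWN);

	return ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL);
}
#endif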
3137
3138/**
3139 * ice_aq_set_mac_loopback
3140 * @hw: pointer to the HW struct
3141 * @ena_lpbk: Enable or Disable loopback
3142 * @cd: pointer to command details structure or NULL
3143 *
3144 * Enable/disable loopback on a given port
3145 */
3146enum ice_status
3147ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3148{
3149	struct ice_aqc_set_mac_lb *cmd;
3150	struct ice_aq_desc desc;
3151
3152	cmd = &desc.params.set_mac_lb;
3153
3154	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3155	if (ena_lpbk)
3156		cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3157
3158	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3159}
3160
3161/**
3162 * ice_aq_set_port_id_led
3163 * @pi: pointer to the port information
3164 * @is_orig_mode: is this LED set to original mode (by the net-list)
3165 * @cd: pointer to command details structure or NULL
3166 *
3167 * Set LED value for the given port (0x06e9)
3168 */
3169enum ice_status
3170ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3171		       struct ice_sq_cd *cd)
3172{
3173	struct ice_aqc_set_port_id_led *cmd;
3174	struct ice_hw *hw = pi->hw;
3175	struct ice_aq_desc desc;
3176
3177	cmd = &desc.params.set_port_id_led;
3178
3179	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3180
3181	if (is_orig_mode)
3182		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3183	else
3184		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3185
3186	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3187}
3188
3189/**
3190 * ice_aq_sff_eeprom
3191 * @hw: pointer to the HW struct
3192 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3193 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3194 * @mem_addr: I2C offset; lower 8 bits for address, upper 8 bits zero padding.
3195 * @page: QSFP page
3196 * @set_page: set or ignore the page
3197 * @data: pointer to data buffer to be read/written to the I2C device.
3198 * @length: 1-16 for read, 1 for write.
3199 * @write: false for read, true for write.
3200 * @cd: pointer to command details structure or NULL
3201 *
3202 * Read/Write SFF EEPROM (0x06EE)
3203 */
3204enum ice_status
3205ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3206		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3207		  bool write, struct ice_sq_cd *cd)
3208{
3209	struct ice_aqc_sff_eeprom *cmd;
3210	struct ice_aq_desc desc;
3211	enum ice_status status;
3212
3213	if (!data || (mem_addr & 0xff00))
3214		return ICE_ERR_PARAM;
3215
3216	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3217	cmd = &desc.params.read_write_sff_param;
3218	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
3219	cmd->lport_num = (u8)(lport & 0xff);
3220	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3221	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3222					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3223					((set_page <<
3224					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3225					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3226	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3227	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3228	if (write)
3229		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3230
3231	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3232	return status;
3233}
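
/* Usage sketch (illustrative, not compiled): read the first 16 bytes of a
 * module's EEPROM (page 0 at I2C address 0xA0) on the default logical port.
 * The example function name is hypothetical; data must hold 16 bytes.
 */
#if 0
static enum ice_status ice_example_read_sff_id(struct ice_hw *hw, u8 *data)
{
	return ice_aq_sff_eeprom(hw, 0, 0xA0, 0x000, 0, 0, data, 16, false,
				 NULL);
}
#endif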
3234
3235/**
3236 * __ice_aq_get_set_rss_lut
3237 * @hw: pointer to the hardware structure
3238 * @vsi_id: VSI FW index
3239 * @lut_type: LUT table type
3240 * @lut: pointer to the LUT buffer provided by the caller
3241 * @lut_size: size of the LUT buffer
3242 * @glob_lut_idx: global LUT index
3243 * @set: set true to set the table, false to get the table
3244 *
3245 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3246 */
3247static enum ice_status
3248__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
3249			 u16 lut_size, u8 glob_lut_idx, bool set)
3250{
3251	struct ice_aqc_get_set_rss_lut *cmd_resp;
3252	struct ice_aq_desc desc;
3253	enum ice_status status;
3254	u16 flags = 0;
3255
3256	cmd_resp = &desc.params.get_set_rss_lut;
3257
3258	if (set) {
3259		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3260		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3261	} else {
3262		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3263	}
3264
3265	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3266					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3267					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3268				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3269
3270	switch (lut_type) {
3271	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3272	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3273	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3274		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3275			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3276		break;
3277	default:
3278		status = ICE_ERR_PARAM;
3279		goto ice_aq_get_set_rss_lut_exit;
3280	}
3281
3282	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3283		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3284			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3285
3286		if (!set)
3287			goto ice_aq_get_set_rss_lut_send;
3288	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3289		if (!set)
3290			goto ice_aq_get_set_rss_lut_send;
3291	} else {
3292		goto ice_aq_get_set_rss_lut_send;
3293	}
3294
3295	/* LUT size is only valid for Global and PF table types */
3296	switch (lut_size) {
3297	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3298		break;
3299	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3300		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3301			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3302			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3303		break;
3304	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3305		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3306			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3307				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3308				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3309			break;
3310		}
3311		fallthrough;
3312	default:
3313		status = ICE_ERR_PARAM;
3314		goto ice_aq_get_set_rss_lut_exit;
3315	}
3316
3317ice_aq_get_set_rss_lut_send:
3318	cmd_resp->flags = cpu_to_le16(flags);
3319	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3320
3321ice_aq_get_set_rss_lut_exit:
3322	return status;
3323}
3324
3325/**
3326 * ice_aq_get_rss_lut
3327 * @hw: pointer to the hardware structure
3328 * @vsi_handle: software VSI handle
3329 * @lut_type: LUT table type
3330 * @lut: pointer to the LUT buffer provided by the caller
3331 * @lut_size: size of the LUT buffer
3332 *
3333 * get the RSS lookup table, PF or VSI type
3334 */
3335enum ice_status
3336ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3337		   u8 *lut, u16 lut_size)
3338{
3339	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3340		return ICE_ERR_PARAM;
3341
3342	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3343					lut_type, lut, lut_size, 0, false);
3344}
3345
3346/**
3347 * ice_aq_set_rss_lut
3348 * @hw: pointer to the hardware structure
3349 * @vsi_handle: software VSI handle
3350 * @lut_type: LUT table type
3351 * @lut: pointer to the LUT buffer provided by the caller
3352 * @lut_size: size of the LUT buffer
3353 *
3354 * set the RSS lookup table, PF or VSI type
3355 */
3356enum ice_status
3357ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3358		   u8 *lut, u16 lut_size)
3359{
3360	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3361		return ICE_ERR_PARAM;
3362
3363	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3364					lut_type, lut, lut_size, 0, true);
3365}
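
/* Usage sketch (illustrative, not compiled): read back the 128-entry VSI LUT
 * for a software VSI handle. The example function name is hypothetical.
 */
#if 0
static enum ice_status
ice_example_get_vsi_lut(struct ice_hw *hw, u16 vsi_handle)
{
	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128];

	return ice_aq_get_rss_lut(hw, vsi_handle,
				  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI, lut,
				  sizeof(lut));
}
#endif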
3366
3367/**
3368 * __ice_aq_get_set_rss_key
3369 * @hw: pointer to the HW struct
3370 * @vsi_id: VSI FW index
3371 * @key: pointer to key info struct
3372 * @set: set true to set the key, false to get the key
3373 *
3374 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3375 */
3376static enum ice_status
3377__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3378			 struct ice_aqc_get_set_rss_keys *key,
3379			 bool set)
3380{
3381	struct ice_aqc_get_set_rss_key *cmd_resp;
3382	u16 key_size = sizeof(*key);
3383	struct ice_aq_desc desc;
3384
3385	cmd_resp = &desc.params.get_set_rss_key;
3386
3387	if (set) {
3388		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3389		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3390	} else {
3391		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3392	}
3393
3394	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3395					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3396					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3397				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3398
3399	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3400}
3401
3402/**
3403 * ice_aq_get_rss_key
3404 * @hw: pointer to the HW struct
3405 * @vsi_handle: software VSI handle
3406 * @key: pointer to key info struct
3407 *
3408 * get the RSS key per VSI
3409 */
3410enum ice_status
3411ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3412		   struct ice_aqc_get_set_rss_keys *key)
3413{
3414	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3415		return ICE_ERR_PARAM;
3416
3417	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3418					key, false);
3419}
3420
3421/**
3422 * ice_aq_set_rss_key
3423 * @hw: pointer to the HW struct
3424 * @vsi_handle: software VSI handle
3425 * @keys: pointer to key info struct
3426 *
3427 * set the RSS key per VSI
3428 */
3429enum ice_status
3430ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3431		   struct ice_aqc_get_set_rss_keys *keys)
3432{
3433	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3434		return ICE_ERR_PARAM;
3435
3436	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3437					keys, true);
3438}
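
/* Illustrative sketch (assumption, not in the original file): seed a VSI's
 * RSS key with random bytes and program it. The standard_rss_key field name
 * follows the layout of struct ice_aqc_get_set_rss_keys in ice_adminq_cmd.h;
 * netdev_rss_key_fill() is the generic netdev helper for generating an RSS
 * key.
 */
static enum ice_status
ice_example_set_random_rss_key(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_aqc_get_set_rss_keys keys = { 0 };

	/* fill the standard 40-byte Toeplitz key portion */
	netdev_rss_key_fill(keys.standard_rss_key,
			    sizeof(keys.standard_rss_key));

	return ice_aq_set_rss_key(hw, vsi_handle, &keys);
}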

/**
 * ice_aq_add_lan_txq - add Tx LAN queue groups
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Before calling add Tx LAN queue, initialize the following as part of the
 * Tx queue context: the completion queue ID (if the queue uses a completion
 * queue), the quanta profile, the cache profile, and the packet shaper
 * profile.
 *
 * After the add Tx LAN queue AQ command completes, interrupts should be
 * associated with the specific queues. Associating a Tx queue with a
 * doorbell queue is not part of the add Tx LAN queue flow.
 */
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return ICE_ERR_PARAM;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
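
/* Illustrative sketch (assumption): how a caller sizes the indirect buffer
 * for a single queue group carrying one queue, matching the struct_size()
 * accounting that ice_aq_add_lan_txq() validates above.
 */
static struct ice_aqc_add_tx_qgrp *ice_example_alloc_one_txq_buf(u16 *buf_size)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;

	/* one group containing exactly one per-queue entry in txqs[] */
	*buf_size = struct_size(qg_buf, txqs, 1);
	qg_buf = kzalloc(*buf_size, GFP_KERNEL);
	if (qg_buf)
		qg_buf->num_txqs = 1;
	return qg_buf;
}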

/**
 * ice_aq_dis_lan_txq - disable Tx LAN queues
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on timeout */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the number of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}
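
/* Illustrative sketch (assumption): sizing one disable-queue item for the AQ
 * buffer. Mirrors the padding rule enforced above: each item must end on a
 * 4-byte boundary, so an even count of 16-bit queue IDs needs 2 pad bytes.
 */
static u16 ice_example_dis_txq_item_size(u16 num_qs)
{
	struct ice_aqc_dis_txq_item *item;
	u16 size = struct_size(item, q_id, num_qs);

	/* keep the next item in the buffer 4-byte aligned */
	if ((num_qs % 2) == 0)
		size += 2;
	return size;
}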

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx:  the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info:  a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx:  the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info:  a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx:  the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info:  a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits, so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx:  the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info:  a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits, so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx:  pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info:  a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
	    const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX,
				  "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return 0;
}
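
/* Illustrative sketch (assumption): a hypothetical two-field context and its
 * ice_ctx_ele description, packed with ice_set_ctx(). ICE_CTX_STORE() is the
 * table-building macro declared in ice_common.h; the field widths and bit
 * positions here are invented for the example, and the destination buffer is
 * assumed to be zeroed and large enough for the packed layout.
 */
struct ice_example_ctx {
	u16 q_id;	/* hypothetical 11-bit field at bit 0 */
	u8 q_ena;	/* hypothetical 1-bit field at bit 11 */
};

static const struct ice_ctx_ele ice_example_ctx_info[] = {
				    /* Field	Width	LSB */
	ICE_CTX_STORE(ice_example_ctx, q_id,	11,	0),
	ICE_CTX_STORE(ice_example_ctx, q_ena,	1,	11),
	{ 0 }	/* terminator: width of 0 ends the walk in ice_set_ctx() */
};

static enum ice_status
ice_example_pack_ctx(struct ice_hw *hw, struct ice_example_ctx *ctx, u8 *buf)
{
	return ice_set_ctx(hw, (u8 *)ctx, buf, ice_example_ctx_info);
}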

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq - add a Tx LAN queue for a VSI
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - The scheduling mode is Bytes Per Second (BPS), indicated by
	 *   Bit 0.
	 * - The priority among siblings is 0, indicated by Bits 1-3.
	 * - The arbitration scheme is WFQ, indicated by Bit 4.
	 * - An adjustment value of 0 is used in the PSM credit update flow,
	 *   indicated by Bits 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}
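
/* Illustrative sketch (assumption): enabling a single LAN Tx queue the way
 * ice_ena_vsi_txq() expects -- exactly one queue group holding exactly one
 * queue. The packed Tx queue context is assumed to have been written into
 * txqs[0].txq_ctx (e.g. via ice_set_ctx()) before the call.
 */
static enum ice_status
ice_example_ena_one_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			u16 q_handle, u16 txq_id)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	enum ice_status status;
	u16 buf_size;

	buf_size = struct_size(qg_buf, txqs, 1);
	qg_buf = kzalloc(buf_size, GFP_KERNEL);
	if (!qg_buf)
		return ICE_ERR_NO_MEMORY;

	qg_buf->num_txqs = 1;
	qg_buf->txqs[0].txq_id = cpu_to_le16(txq_id);
	/* txqs[0].txq_ctx would be filled by the caller beforehand */

	status = ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, qg_buf,
				 buf_size, NULL);
	kfree(qg_buf);
	return status;
}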

/**
 * ice_dis_vsi_txq - disable a VSI's Tx LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;

	if (!num_queues) {
		/* if the queues are already disabled but the disable queue
		 * command still has to be sent to complete the VF reset, then
		 * call ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	buf_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(buf_size, GFP_KERNEL);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with the main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
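
/* Illustrative sketch (assumption): how a stats-gathering path might use the
 * helpers above. GLPRT_GORCL() is the "good octets received" counter register
 * used elsewhere in the driver for 40-bit reads; the surrounding calling
 * convention here is hypothetical. Rollover is handled modulo 2^40 inside
 * ice_stat_update40().
 */
static void
ice_example_update_rx_bytes(struct ice_hw *hw, u8 port, bool loaded,
			    u64 *prev_rx_bytes, u64 *cur_rx_bytes)
{
	ice_stat_update40(hw, GLPRT_GORCL(port), loaded,
			  prev_rx_bytes, cur_rx_bytes);
}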

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to hold the element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}
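
/* Illustrative sketch (assumption): querying a scheduler node by TEID and
 * inspecting the returned element type, using only names already present in
 * this file.
 */
static bool ice_example_node_is_leaf(struct ice_hw *hw, u32 node_teid)
{
	struct ice_aqc_txsched_elem_data buf;

	if (ice_sched_query_elem(hw, node_teid, &buf))
		return false;

	return buf.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF;
}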

/**
 * ice_fw_supports_link_override - check if FW supports link override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	/* Currently, only supported for E810 devices */
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}

/**
 * ice_get_link_default_override - get the link default override for a port
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link FEC options */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer to store the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

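/* Illustrative sketch (assumption): pushing a locally built LLDP MIB block
 * to firmware. SET_LOCAL_MIB_TYPE_LOCAL_MIB follows the type values declared
 * for this command in ice_adminq_cmd.h; the buffer is assumed to already
 * hold packed LLDP TLVs.
 */
static enum ice_status
ice_example_set_local_mib(struct ice_hw *hw, u8 *mib_buf, u16 mib_len)
{
	return ice_aq_set_lldp_mib(hw, SET_LOCAL_MIB_TYPE_LOCAL_MIB,
				   mib_buf, mib_len, NULL);
}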