1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4#include "i40e.h"
5#include "i40e_type.h"
6#include "i40e_adminq.h"
7#include "i40e_prototype.h"
8#include <linux/avf/virtchnl.h>
9
10/**
11 * i40e_set_mac_type - Sets MAC type
12 * @hw: pointer to the HW structure
13 *
14 * This function sets the mac type of the adapter based on the
15 * vendor ID and device ID stored in the hw structure.
16 **/
17i40e_status i40e_set_mac_type(struct i40e_hw *hw)
18{
19	i40e_status status = 0;
20
21	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
22		switch (hw->device_id) {
23		case I40E_DEV_ID_SFP_XL710:
24		case I40E_DEV_ID_QEMU:
25		case I40E_DEV_ID_KX_B:
26		case I40E_DEV_ID_KX_C:
27		case I40E_DEV_ID_QSFP_A:
28		case I40E_DEV_ID_QSFP_B:
29		case I40E_DEV_ID_QSFP_C:
30		case I40E_DEV_ID_5G_BASE_T_BC:
31		case I40E_DEV_ID_10G_BASE_T:
32		case I40E_DEV_ID_10G_BASE_T4:
33		case I40E_DEV_ID_10G_BASE_T_BC:
34		case I40E_DEV_ID_10G_B:
35		case I40E_DEV_ID_10G_SFP:
36		case I40E_DEV_ID_20G_KR2:
37		case I40E_DEV_ID_20G_KR2_A:
38		case I40E_DEV_ID_25G_B:
39		case I40E_DEV_ID_25G_SFP28:
40		case I40E_DEV_ID_X710_N3000:
41		case I40E_DEV_ID_XXV710_N3000:
42			hw->mac.type = I40E_MAC_XL710;
43			break;
44		case I40E_DEV_ID_KX_X722:
45		case I40E_DEV_ID_QSFP_X722:
46		case I40E_DEV_ID_SFP_X722:
47		case I40E_DEV_ID_1G_BASE_T_X722:
48		case I40E_DEV_ID_10G_BASE_T_X722:
49		case I40E_DEV_ID_SFP_I_X722:
50			hw->mac.type = I40E_MAC_X722;
51			break;
52		default:
53			hw->mac.type = I40E_MAC_GENERIC;
54			break;
55		}
56	} else {
57		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
58	}
59
60	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
61		  hw->mac.type, status);
62	return status;
63}
64
65/**
66 * i40e_aq_str - convert AQ err code to a string
67 * @hw: pointer to the HW structure
68 * @aq_err: the AQ error code to convert
69 **/
70const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
71{
72	switch (aq_err) {
73	case I40E_AQ_RC_OK:
74		return "OK";
75	case I40E_AQ_RC_EPERM:
76		return "I40E_AQ_RC_EPERM";
77	case I40E_AQ_RC_ENOENT:
78		return "I40E_AQ_RC_ENOENT";
79	case I40E_AQ_RC_ESRCH:
80		return "I40E_AQ_RC_ESRCH";
81	case I40E_AQ_RC_EINTR:
82		return "I40E_AQ_RC_EINTR";
83	case I40E_AQ_RC_EIO:
84		return "I40E_AQ_RC_EIO";
85	case I40E_AQ_RC_ENXIO:
86		return "I40E_AQ_RC_ENXIO";
87	case I40E_AQ_RC_E2BIG:
88		return "I40E_AQ_RC_E2BIG";
89	case I40E_AQ_RC_EAGAIN:
90		return "I40E_AQ_RC_EAGAIN";
91	case I40E_AQ_RC_ENOMEM:
92		return "I40E_AQ_RC_ENOMEM";
93	case I40E_AQ_RC_EACCES:
94		return "I40E_AQ_RC_EACCES";
95	case I40E_AQ_RC_EFAULT:
96		return "I40E_AQ_RC_EFAULT";
97	case I40E_AQ_RC_EBUSY:
98		return "I40E_AQ_RC_EBUSY";
99	case I40E_AQ_RC_EEXIST:
100		return "I40E_AQ_RC_EEXIST";
101	case I40E_AQ_RC_EINVAL:
102		return "I40E_AQ_RC_EINVAL";
103	case I40E_AQ_RC_ENOTTY:
104		return "I40E_AQ_RC_ENOTTY";
105	case I40E_AQ_RC_ENOSPC:
106		return "I40E_AQ_RC_ENOSPC";
107	case I40E_AQ_RC_ENOSYS:
108		return "I40E_AQ_RC_ENOSYS";
109	case I40E_AQ_RC_ERANGE:
110		return "I40E_AQ_RC_ERANGE";
111	case I40E_AQ_RC_EFLUSHED:
112		return "I40E_AQ_RC_EFLUSHED";
113	case I40E_AQ_RC_BAD_ADDR:
114		return "I40E_AQ_RC_BAD_ADDR";
115	case I40E_AQ_RC_EMODE:
116		return "I40E_AQ_RC_EMODE";
117	case I40E_AQ_RC_EFBIG:
118		return "I40E_AQ_RC_EFBIG";
119	}
120
121	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
122	return hw->err_str;
123}
124
125/**
126 * i40e_stat_str - convert status err code to a string
127 * @hw: pointer to the HW structure
128 * @stat_err: the status error code to convert
129 **/
130const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
131{
132	switch (stat_err) {
133	case 0:
134		return "OK";
135	case I40E_ERR_NVM:
136		return "I40E_ERR_NVM";
137	case I40E_ERR_NVM_CHECKSUM:
138		return "I40E_ERR_NVM_CHECKSUM";
139	case I40E_ERR_PHY:
140		return "I40E_ERR_PHY";
141	case I40E_ERR_CONFIG:
142		return "I40E_ERR_CONFIG";
143	case I40E_ERR_PARAM:
144		return "I40E_ERR_PARAM";
145	case I40E_ERR_MAC_TYPE:
146		return "I40E_ERR_MAC_TYPE";
147	case I40E_ERR_UNKNOWN_PHY:
148		return "I40E_ERR_UNKNOWN_PHY";
149	case I40E_ERR_LINK_SETUP:
150		return "I40E_ERR_LINK_SETUP";
151	case I40E_ERR_ADAPTER_STOPPED:
152		return "I40E_ERR_ADAPTER_STOPPED";
153	case I40E_ERR_INVALID_MAC_ADDR:
154		return "I40E_ERR_INVALID_MAC_ADDR";
155	case I40E_ERR_DEVICE_NOT_SUPPORTED:
156		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
157	case I40E_ERR_MASTER_REQUESTS_PENDING:
158		return "I40E_ERR_MASTER_REQUESTS_PENDING";
159	case I40E_ERR_INVALID_LINK_SETTINGS:
160		return "I40E_ERR_INVALID_LINK_SETTINGS";
161	case I40E_ERR_AUTONEG_NOT_COMPLETE:
162		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
163	case I40E_ERR_RESET_FAILED:
164		return "I40E_ERR_RESET_FAILED";
165	case I40E_ERR_SWFW_SYNC:
166		return "I40E_ERR_SWFW_SYNC";
167	case I40E_ERR_NO_AVAILABLE_VSI:
168		return "I40E_ERR_NO_AVAILABLE_VSI";
169	case I40E_ERR_NO_MEMORY:
170		return "I40E_ERR_NO_MEMORY";
171	case I40E_ERR_BAD_PTR:
172		return "I40E_ERR_BAD_PTR";
173	case I40E_ERR_RING_FULL:
174		return "I40E_ERR_RING_FULL";
175	case I40E_ERR_INVALID_PD_ID:
176		return "I40E_ERR_INVALID_PD_ID";
177	case I40E_ERR_INVALID_QP_ID:
178		return "I40E_ERR_INVALID_QP_ID";
179	case I40E_ERR_INVALID_CQ_ID:
180		return "I40E_ERR_INVALID_CQ_ID";
181	case I40E_ERR_INVALID_CEQ_ID:
182		return "I40E_ERR_INVALID_CEQ_ID";
183	case I40E_ERR_INVALID_AEQ_ID:
184		return "I40E_ERR_INVALID_AEQ_ID";
185	case I40E_ERR_INVALID_SIZE:
186		return "I40E_ERR_INVALID_SIZE";
187	case I40E_ERR_INVALID_ARP_INDEX:
188		return "I40E_ERR_INVALID_ARP_INDEX";
189	case I40E_ERR_INVALID_FPM_FUNC_ID:
190		return "I40E_ERR_INVALID_FPM_FUNC_ID";
191	case I40E_ERR_QP_INVALID_MSG_SIZE:
192		return "I40E_ERR_QP_INVALID_MSG_SIZE";
193	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
194		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
195	case I40E_ERR_INVALID_FRAG_COUNT:
196		return "I40E_ERR_INVALID_FRAG_COUNT";
197	case I40E_ERR_QUEUE_EMPTY:
198		return "I40E_ERR_QUEUE_EMPTY";
199	case I40E_ERR_INVALID_ALIGNMENT:
200		return "I40E_ERR_INVALID_ALIGNMENT";
201	case I40E_ERR_FLUSHED_QUEUE:
202		return "I40E_ERR_FLUSHED_QUEUE";
203	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
204		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
205	case I40E_ERR_INVALID_IMM_DATA_SIZE:
206		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
207	case I40E_ERR_TIMEOUT:
208		return "I40E_ERR_TIMEOUT";
209	case I40E_ERR_OPCODE_MISMATCH:
210		return "I40E_ERR_OPCODE_MISMATCH";
211	case I40E_ERR_CQP_COMPL_ERROR:
212		return "I40E_ERR_CQP_COMPL_ERROR";
213	case I40E_ERR_INVALID_VF_ID:
214		return "I40E_ERR_INVALID_VF_ID";
215	case I40E_ERR_INVALID_HMCFN_ID:
216		return "I40E_ERR_INVALID_HMCFN_ID";
217	case I40E_ERR_BACKING_PAGE_ERROR:
218		return "I40E_ERR_BACKING_PAGE_ERROR";
219	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
220		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
221	case I40E_ERR_INVALID_PBLE_INDEX:
222		return "I40E_ERR_INVALID_PBLE_INDEX";
223	case I40E_ERR_INVALID_SD_INDEX:
224		return "I40E_ERR_INVALID_SD_INDEX";
225	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
226		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
227	case I40E_ERR_INVALID_SD_TYPE:
228		return "I40E_ERR_INVALID_SD_TYPE";
229	case I40E_ERR_MEMCPY_FAILED:
230		return "I40E_ERR_MEMCPY_FAILED";
231	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
232		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
233	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
234		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
235	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
236		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
237	case I40E_ERR_SRQ_ENABLED:
238		return "I40E_ERR_SRQ_ENABLED";
239	case I40E_ERR_ADMIN_QUEUE_ERROR:
240		return "I40E_ERR_ADMIN_QUEUE_ERROR";
241	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
242		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
243	case I40E_ERR_BUF_TOO_SHORT:
244		return "I40E_ERR_BUF_TOO_SHORT";
245	case I40E_ERR_ADMIN_QUEUE_FULL:
246		return "I40E_ERR_ADMIN_QUEUE_FULL";
247	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
248		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
249	case I40E_ERR_BAD_IWARP_CQE:
250		return "I40E_ERR_BAD_IWARP_CQE";
251	case I40E_ERR_NVM_BLANK_MODE:
252		return "I40E_ERR_NVM_BLANK_MODE";
253	case I40E_ERR_NOT_IMPLEMENTED:
254		return "I40E_ERR_NOT_IMPLEMENTED";
255	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
256		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
257	case I40E_ERR_DIAG_TEST_FAILED:
258		return "I40E_ERR_DIAG_TEST_FAILED";
259	case I40E_ERR_NOT_READY:
260		return "I40E_ERR_NOT_READY";
261	case I40E_NOT_SUPPORTED:
262		return "I40E_NOT_SUPPORTED";
263	case I40E_ERR_FIRMWARE_API_VERSION:
264		return "I40E_ERR_FIRMWARE_API_VERSION";
265	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
266		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
267	}
268
269	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
270	return hw->err_str;
271}
272
273/**
274 * i40e_debug_aq
275 * @hw: debug mask related to admin queue
276 * @mask: debug mask
277 * @desc: pointer to admin queue descriptor
278 * @buffer: pointer to command buffer
279 * @buf_len: max length of buffer
280 *
281 * Dumps debug log about adminq command with descriptor contents.
282 **/
283void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
284		   void *buffer, u16 buf_len)
285{
286	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
287	u32 effective_mask = hw->debug_mask & mask;
288	char prefix[27];
289	u16 len;
290	u8 *buf = (u8 *)buffer;
291
292	if (!effective_mask || !desc)
293		return;
294
295	len = le16_to_cpu(aq_desc->datalen);
296
297	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
298		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
299		   le16_to_cpu(aq_desc->opcode),
300		   le16_to_cpu(aq_desc->flags),
301		   le16_to_cpu(aq_desc->datalen),
302		   le16_to_cpu(aq_desc->retval));
303	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
304		   "\tcookie (h,l) 0x%08X 0x%08X\n",
305		   le32_to_cpu(aq_desc->cookie_high),
306		   le32_to_cpu(aq_desc->cookie_low));
307	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
308		   "\tparam (0,1)  0x%08X 0x%08X\n",
309		   le32_to_cpu(aq_desc->params.internal.param0),
310		   le32_to_cpu(aq_desc->params.internal.param1));
311	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
312		   "\taddr (h,l)   0x%08X 0x%08X\n",
313		   le32_to_cpu(aq_desc->params.external.addr_high),
314		   le32_to_cpu(aq_desc->params.external.addr_low));
315
316	if (buffer && buf_len != 0 && len != 0 &&
317	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
318		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
319		if (buf_len < len)
320			len = buf_len;
321
322		snprintf(prefix, sizeof(prefix),
323			 "i40e %02x:%02x.%x: \t0x",
324			 hw->bus.bus_id,
325			 hw->bus.device,
326			 hw->bus.func);
327
328		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
329			       16, 1, buf, len, false);
330	}
331}
332
333/**
334 * i40e_check_asq_alive
335 * @hw: pointer to the hw struct
336 *
337 * Returns true if Queue is enabled else false.
338 **/
339bool i40e_check_asq_alive(struct i40e_hw *hw)
340{
341	if (hw->aq.asq.len)
342		return !!(rd32(hw, hw->aq.asq.len) &
343			  I40E_PF_ATQLEN_ATQENABLE_MASK);
344	else
345		return false;
346}
347
348/**
349 * i40e_aq_queue_shutdown
350 * @hw: pointer to the hw struct
351 * @unloading: is the driver unloading itself
352 *
353 * Tell the Firmware that we're shutting down the AdminQ and whether
354 * or not the driver is unloading as well.
355 **/
356i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
357					     bool unloading)
358{
359	struct i40e_aq_desc desc;
360	struct i40e_aqc_queue_shutdown *cmd =
361		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
362	i40e_status status;
363
364	i40e_fill_default_direct_cmd_desc(&desc,
365					  i40e_aqc_opc_queue_shutdown);
366
367	if (unloading)
368		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
369	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
370
371	return status;
372}
373
374/**
375 * i40e_aq_get_set_rss_lut
376 * @hw: pointer to the hardware structure
377 * @vsi_id: vsi fw index
378 * @pf_lut: for PF table set true, for VSI table set false
379 * @lut: pointer to the lut buffer provided by the caller
380 * @lut_size: size of the lut buffer
381 * @set: set true to set the table, false to get the table
382 *
383 * Internal function to get or set RSS look up table
384 **/
385static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
386					   u16 vsi_id, bool pf_lut,
387					   u8 *lut, u16 lut_size,
388					   bool set)
389{
390	i40e_status status;
391	struct i40e_aq_desc desc;
392	struct i40e_aqc_get_set_rss_lut *cmd_resp =
393		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
394
395	if (set)
396		i40e_fill_default_direct_cmd_desc(&desc,
397						  i40e_aqc_opc_set_rss_lut);
398	else
399		i40e_fill_default_direct_cmd_desc(&desc,
400						  i40e_aqc_opc_get_rss_lut);
401
402	/* Indirect command */
403	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
404	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
405
406	cmd_resp->vsi_id =
407			cpu_to_le16((u16)((vsi_id <<
408					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
409					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
410	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
411
412	if (pf_lut)
413		cmd_resp->flags |= cpu_to_le16((u16)
414					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
415					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
416					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
417	else
418		cmd_resp->flags |= cpu_to_le16((u16)
419					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
420					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
421					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
422
423	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
424
425	return status;
426}
427
428/**
429 * i40e_aq_get_rss_lut
430 * @hw: pointer to the hardware structure
431 * @vsi_id: vsi fw index
432 * @pf_lut: for PF table set true, for VSI table set false
433 * @lut: pointer to the lut buffer provided by the caller
434 * @lut_size: size of the lut buffer
435 *
436 * get the RSS lookup table, PF or VSI type
437 **/
438i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
439				bool pf_lut, u8 *lut, u16 lut_size)
440{
441	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
442				       false);
443}
444
445/**
446 * i40e_aq_set_rss_lut
447 * @hw: pointer to the hardware structure
448 * @vsi_id: vsi fw index
449 * @pf_lut: for PF table set true, for VSI table set false
450 * @lut: pointer to the lut buffer provided by the caller
451 * @lut_size: size of the lut buffer
452 *
453 * set the RSS lookup table, PF or VSI type
454 **/
455i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
456				bool pf_lut, u8 *lut, u16 lut_size)
457{
458	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
459}
460
461/**
462 * i40e_aq_get_set_rss_key
463 * @hw: pointer to the hw struct
464 * @vsi_id: vsi fw index
465 * @key: pointer to key info struct
466 * @set: set true to set the key, false to get the key
467 *
468 * get the RSS key per VSI
469 **/
470static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
471				      u16 vsi_id,
472				      struct i40e_aqc_get_set_rss_key_data *key,
473				      bool set)
474{
475	i40e_status status;
476	struct i40e_aq_desc desc;
477	struct i40e_aqc_get_set_rss_key *cmd_resp =
478			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
479	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
480
481	if (set)
482		i40e_fill_default_direct_cmd_desc(&desc,
483						  i40e_aqc_opc_set_rss_key);
484	else
485		i40e_fill_default_direct_cmd_desc(&desc,
486						  i40e_aqc_opc_get_rss_key);
487
488	/* Indirect command */
489	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
490	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
491
492	cmd_resp->vsi_id =
493			cpu_to_le16((u16)((vsi_id <<
494					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
495					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
496	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
497
498	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
499
500	return status;
501}
502
503/**
504 * i40e_aq_get_rss_key
505 * @hw: pointer to the hw struct
506 * @vsi_id: vsi fw index
507 * @key: pointer to key info struct
508 *
509 **/
510i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
511				u16 vsi_id,
512				struct i40e_aqc_get_set_rss_key_data *key)
513{
514	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
515}
516
517/**
518 * i40e_aq_set_rss_key
519 * @hw: pointer to the hw struct
520 * @vsi_id: vsi fw index
521 * @key: pointer to key info struct
522 *
523 * set the RSS key per VSI
524 **/
525i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
526				u16 vsi_id,
527				struct i40e_aqc_get_set_rss_key_data *key)
528{
529	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
530}
531
532/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
533 * hardware to a bit-field that can be used by SW to more easily determine the
534 * packet type.
535 *
536 * Macros are used to shorten the table lines and make this table human
537 * readable.
538 *
539 * We store the PTYPE in the top byte of the bit field - this is just so that
540 * we can check that the table doesn't have a row missing, as the index into
541 * the table should be the PTYPE.
542 *
543 * Typical work flow:
544 *
545 * IF NOT i40e_ptype_lookup[ptype].known
546 * THEN
547 *      Packet is unknown
548 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
549 *      Use the rest of the fields to look at the tunnels, inner protocols, etc
550 * ELSE
551 *      Use the enum i40e_rx_l2_ptype to decode the packet type
552 * ENDIF
553 */
554
555/* macro to make the table lines short */
556#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
557	{	PTYPE, \
558		1, \
559		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
560		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
561		I40E_RX_PTYPE_##OUTER_FRAG, \
562		I40E_RX_PTYPE_TUNNEL_##T, \
563		I40E_RX_PTYPE_TUNNEL_END_##TE, \
564		I40E_RX_PTYPE_##TEF, \
565		I40E_RX_PTYPE_INNER_PROT_##I, \
566		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
567
568#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
569		{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
570
571/* shorter macros makes the table fit but are terse */
572#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
573#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
574#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC
575
576/* Lookup table mapping the HW PTYPE to the bit field for decoding */
577struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
578	/* L2 Packet types */
579	I40E_PTT_UNUSED_ENTRY(0),
580	I40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
581	I40E_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2),
582	I40E_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
583	I40E_PTT_UNUSED_ENTRY(4),
584	I40E_PTT_UNUSED_ENTRY(5),
585	I40E_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
586	I40E_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
587	I40E_PTT_UNUSED_ENTRY(8),
588	I40E_PTT_UNUSED_ENTRY(9),
589	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
590	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
591	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
592	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
593	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
594	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
595	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
596	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
597	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
598	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
599	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
600	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
601
602	/* Non Tunneled IPv4 */
603	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
604	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
605	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  PAY4),
606	I40E_PTT_UNUSED_ENTRY(25),
607	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4),
608	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
609	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
610
611	/* IPv4 --> IPv4 */
612	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
613	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
614	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
615	I40E_PTT_UNUSED_ENTRY(32),
616	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
617	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
618	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
619
620	/* IPv4 --> IPv6 */
621	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
622	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
623	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
624	I40E_PTT_UNUSED_ENTRY(39),
625	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
626	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
627	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
628
629	/* IPv4 --> GRE/NAT */
630	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
631
632	/* IPv4 --> GRE/NAT --> IPv4 */
633	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
634	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
635	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
636	I40E_PTT_UNUSED_ENTRY(47),
637	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
638	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
639	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
640
641	/* IPv4 --> GRE/NAT --> IPv6 */
642	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
643	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
644	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
645	I40E_PTT_UNUSED_ENTRY(54),
646	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
647	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
648	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
649
650	/* IPv4 --> GRE/NAT --> MAC */
651	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
652
653	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
654	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
655	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
656	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
657	I40E_PTT_UNUSED_ENTRY(62),
658	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
659	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
660	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
661
662	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
663	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
664	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
665	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
666	I40E_PTT_UNUSED_ENTRY(69),
667	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
668	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
669	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
670
671	/* IPv4 --> GRE/NAT --> MAC/VLAN */
672	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
673
674	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
675	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
676	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
677	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
678	I40E_PTT_UNUSED_ENTRY(77),
679	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
680	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
681	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
682
683	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
684	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
685	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
686	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
687	I40E_PTT_UNUSED_ENTRY(84),
688	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
689	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
690	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
691
692	/* Non Tunneled IPv6 */
693	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
694	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
695	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY4),
696	I40E_PTT_UNUSED_ENTRY(91),
697	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
698	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
699	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
700
701	/* IPv6 --> IPv4 */
702	I40E_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
703	I40E_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
704	I40E_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
705	I40E_PTT_UNUSED_ENTRY(98),
706	I40E_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
707	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
708	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
709
710	/* IPv6 --> IPv6 */
711	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
712	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
713	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
714	I40E_PTT_UNUSED_ENTRY(105),
715	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
716	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
717	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
718
719	/* IPv6 --> GRE/NAT */
720	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
721
722	/* IPv6 --> GRE/NAT -> IPv4 */
723	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
724	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
725	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
726	I40E_PTT_UNUSED_ENTRY(113),
727	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
728	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
729	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
730
731	/* IPv6 --> GRE/NAT -> IPv6 */
732	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
733	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
734	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
735	I40E_PTT_UNUSED_ENTRY(120),
736	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
737	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
738	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
739
740	/* IPv6 --> GRE/NAT -> MAC */
741	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
742
743	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
744	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
745	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
746	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
747	I40E_PTT_UNUSED_ENTRY(128),
748	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
749	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
750	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
751
752	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
753	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
754	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
755	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
756	I40E_PTT_UNUSED_ENTRY(135),
757	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
758	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
759	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
760
761	/* IPv6 --> GRE/NAT -> MAC/VLAN */
762	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
763
764	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
765	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
766	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
767	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
768	I40E_PTT_UNUSED_ENTRY(143),
769	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
770	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
771	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
772
773	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
774	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
775	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
776	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
777	I40E_PTT_UNUSED_ENTRY(150),
778	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
779	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
780	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
781
782	/* unused entries */
783	I40E_PTT_UNUSED_ENTRY(154),
784	I40E_PTT_UNUSED_ENTRY(155),
785	I40E_PTT_UNUSED_ENTRY(156),
786	I40E_PTT_UNUSED_ENTRY(157),
787	I40E_PTT_UNUSED_ENTRY(158),
788	I40E_PTT_UNUSED_ENTRY(159),
789
790	I40E_PTT_UNUSED_ENTRY(160),
791	I40E_PTT_UNUSED_ENTRY(161),
792	I40E_PTT_UNUSED_ENTRY(162),
793	I40E_PTT_UNUSED_ENTRY(163),
794	I40E_PTT_UNUSED_ENTRY(164),
795	I40E_PTT_UNUSED_ENTRY(165),
796	I40E_PTT_UNUSED_ENTRY(166),
797	I40E_PTT_UNUSED_ENTRY(167),
798	I40E_PTT_UNUSED_ENTRY(168),
799	I40E_PTT_UNUSED_ENTRY(169),
800
801	I40E_PTT_UNUSED_ENTRY(170),
802	I40E_PTT_UNUSED_ENTRY(171),
803	I40E_PTT_UNUSED_ENTRY(172),
804	I40E_PTT_UNUSED_ENTRY(173),
805	I40E_PTT_UNUSED_ENTRY(174),
806	I40E_PTT_UNUSED_ENTRY(175),
807	I40E_PTT_UNUSED_ENTRY(176),
808	I40E_PTT_UNUSED_ENTRY(177),
809	I40E_PTT_UNUSED_ENTRY(178),
810	I40E_PTT_UNUSED_ENTRY(179),
811
812	I40E_PTT_UNUSED_ENTRY(180),
813	I40E_PTT_UNUSED_ENTRY(181),
814	I40E_PTT_UNUSED_ENTRY(182),
815	I40E_PTT_UNUSED_ENTRY(183),
816	I40E_PTT_UNUSED_ENTRY(184),
817	I40E_PTT_UNUSED_ENTRY(185),
818	I40E_PTT_UNUSED_ENTRY(186),
819	I40E_PTT_UNUSED_ENTRY(187),
820	I40E_PTT_UNUSED_ENTRY(188),
821	I40E_PTT_UNUSED_ENTRY(189),
822
823	I40E_PTT_UNUSED_ENTRY(190),
824	I40E_PTT_UNUSED_ENTRY(191),
825	I40E_PTT_UNUSED_ENTRY(192),
826	I40E_PTT_UNUSED_ENTRY(193),
827	I40E_PTT_UNUSED_ENTRY(194),
828	I40E_PTT_UNUSED_ENTRY(195),
829	I40E_PTT_UNUSED_ENTRY(196),
830	I40E_PTT_UNUSED_ENTRY(197),
831	I40E_PTT_UNUSED_ENTRY(198),
832	I40E_PTT_UNUSED_ENTRY(199),
833
834	I40E_PTT_UNUSED_ENTRY(200),
835	I40E_PTT_UNUSED_ENTRY(201),
836	I40E_PTT_UNUSED_ENTRY(202),
837	I40E_PTT_UNUSED_ENTRY(203),
838	I40E_PTT_UNUSED_ENTRY(204),
839	I40E_PTT_UNUSED_ENTRY(205),
840	I40E_PTT_UNUSED_ENTRY(206),
841	I40E_PTT_UNUSED_ENTRY(207),
842	I40E_PTT_UNUSED_ENTRY(208),
843	I40E_PTT_UNUSED_ENTRY(209),
844
845	I40E_PTT_UNUSED_ENTRY(210),
846	I40E_PTT_UNUSED_ENTRY(211),
847	I40E_PTT_UNUSED_ENTRY(212),
848	I40E_PTT_UNUSED_ENTRY(213),
849	I40E_PTT_UNUSED_ENTRY(214),
850	I40E_PTT_UNUSED_ENTRY(215),
851	I40E_PTT_UNUSED_ENTRY(216),
852	I40E_PTT_UNUSED_ENTRY(217),
853	I40E_PTT_UNUSED_ENTRY(218),
854	I40E_PTT_UNUSED_ENTRY(219),
855
856	I40E_PTT_UNUSED_ENTRY(220),
857	I40E_PTT_UNUSED_ENTRY(221),
858	I40E_PTT_UNUSED_ENTRY(222),
859	I40E_PTT_UNUSED_ENTRY(223),
860	I40E_PTT_UNUSED_ENTRY(224),
861	I40E_PTT_UNUSED_ENTRY(225),
862	I40E_PTT_UNUSED_ENTRY(226),
863	I40E_PTT_UNUSED_ENTRY(227),
864	I40E_PTT_UNUSED_ENTRY(228),
865	I40E_PTT_UNUSED_ENTRY(229),
866
867	I40E_PTT_UNUSED_ENTRY(230),
868	I40E_PTT_UNUSED_ENTRY(231),
869	I40E_PTT_UNUSED_ENTRY(232),
870	I40E_PTT_UNUSED_ENTRY(233),
871	I40E_PTT_UNUSED_ENTRY(234),
872	I40E_PTT_UNUSED_ENTRY(235),
873	I40E_PTT_UNUSED_ENTRY(236),
874	I40E_PTT_UNUSED_ENTRY(237),
875	I40E_PTT_UNUSED_ENTRY(238),
876	I40E_PTT_UNUSED_ENTRY(239),
877
878	I40E_PTT_UNUSED_ENTRY(240),
879	I40E_PTT_UNUSED_ENTRY(241),
880	I40E_PTT_UNUSED_ENTRY(242),
881	I40E_PTT_UNUSED_ENTRY(243),
882	I40E_PTT_UNUSED_ENTRY(244),
883	I40E_PTT_UNUSED_ENTRY(245),
884	I40E_PTT_UNUSED_ENTRY(246),
885	I40E_PTT_UNUSED_ENTRY(247),
886	I40E_PTT_UNUSED_ENTRY(248),
887	I40E_PTT_UNUSED_ENTRY(249),
888
889	I40E_PTT_UNUSED_ENTRY(250),
890	I40E_PTT_UNUSED_ENTRY(251),
891	I40E_PTT_UNUSED_ENTRY(252),
892	I40E_PTT_UNUSED_ENTRY(253),
893	I40E_PTT_UNUSED_ENTRY(254),
894	I40E_PTT_UNUSED_ENTRY(255)
895};
896
897/**
898 * i40e_init_shared_code - Initialize the shared code
899 * @hw: pointer to hardware structure
900 *
901 * This assigns the MAC type and PHY code and inits the NVM.
902 * Does not touch the hardware. This function must be called prior to any
903 * other function in the shared code. The i40e_hw structure should be
904 * memset to 0 prior to calling this function.  The following fields in
905 * hw structure should be filled in prior to calling this function:
906 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
907 * subsystem_vendor_id, and revision_id
908 **/
909i40e_status i40e_init_shared_code(struct i40e_hw *hw)
910{
911	i40e_status status = 0;
912	u32 port, ari, func_rid;
913
914	i40e_set_mac_type(hw);
915
916	switch (hw->mac.type) {
917	case I40E_MAC_XL710:
918	case I40E_MAC_X722:
919		break;
920	default:
921		return I40E_ERR_DEVICE_NOT_SUPPORTED;
922	}
923
924	hw->phy.get_link_info = true;
925
926	/* Determine port number and PF number*/
927	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
928					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
929	hw->port = (u8)port;
930	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
931						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
932	func_rid = rd32(hw, I40E_PF_FUNC_RID);
933	if (ari)
934		hw->pf_id = (u8)(func_rid & 0xff);
935	else
936		hw->pf_id = (u8)(func_rid & 0x7);
937
938	status = i40e_init_nvm(hw);
939	return status;
940}
941
942/**
943 * i40e_aq_mac_address_read - Retrieve the MAC addresses
944 * @hw: pointer to the hw struct
945 * @flags: a return indicator of what addresses were added to the addr store
946 * @addrs: the requestor's mac addr store
947 * @cmd_details: pointer to command details structure or NULL
948 **/
949static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
950				   u16 *flags,
951				   struct i40e_aqc_mac_address_read_data *addrs,
952				   struct i40e_asq_cmd_details *cmd_details)
953{
954	struct i40e_aq_desc desc;
955	struct i40e_aqc_mac_address_read *cmd_data =
956		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
957	i40e_status status;
958
959	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
960	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
961
962	status = i40e_asq_send_command(hw, &desc, addrs,
963				       sizeof(*addrs), cmd_details);
964	*flags = le16_to_cpu(cmd_data->command_flags);
965
966	return status;
967}
968
969/**
970 * i40e_aq_mac_address_write - Change the MAC addresses
971 * @hw: pointer to the hw struct
972 * @flags: indicates which MAC to be written
973 * @mac_addr: address to write
974 * @cmd_details: pointer to command details structure or NULL
975 **/
976i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
977				    u16 flags, u8 *mac_addr,
978				    struct i40e_asq_cmd_details *cmd_details)
979{
980	struct i40e_aq_desc desc;
981	struct i40e_aqc_mac_address_write *cmd_data =
982		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
983	i40e_status status;
984
985	i40e_fill_default_direct_cmd_desc(&desc,
986					  i40e_aqc_opc_mac_address_write);
987	cmd_data->command_flags = cpu_to_le16(flags);
988	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
989	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
990					((u32)mac_addr[3] << 16) |
991					((u32)mac_addr[4] << 8) |
992					mac_addr[5]);
993
994	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
995
996	return status;
997}
998
999/**
1000 * i40e_get_mac_addr - get MAC address
1001 * @hw: pointer to the HW structure
1002 * @mac_addr: pointer to MAC address
1003 *
1004 * Reads the adapter's MAC address from register
1005 **/
1006i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
1007{
1008	struct i40e_aqc_mac_address_read_data addrs;
1009	i40e_status status;
1010	u16 flags = 0;
1011
1012	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
1013
1014	if (flags & I40E_AQC_LAN_ADDR_VALID)
1015		ether_addr_copy(mac_addr, addrs.pf_lan_mac);
1016
1017	return status;
1018}
1019
1020/**
1021 * i40e_get_port_mac_addr - get Port MAC address
1022 * @hw: pointer to the HW structure
1023 * @mac_addr: pointer to Port MAC address
1024 *
1025 * Reads the adapter's Port MAC address
1026 **/
1027i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
1028{
1029	struct i40e_aqc_mac_address_read_data addrs;
1030	i40e_status status;
1031	u16 flags = 0;
1032
1033	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
1034	if (status)
1035		return status;
1036
1037	if (flags & I40E_AQC_PORT_ADDR_VALID)
1038		ether_addr_copy(mac_addr, addrs.port_mac);
1039	else
1040		status = I40E_ERR_INVALID_MAC_ADDR;
1041
1042	return status;
1043}
1044
1045/**
1046 * i40e_pre_tx_queue_cfg - pre tx queue configure
1047 * @hw: pointer to the HW structure
1048 * @queue: target PF queue index
1049 * @enable: state change request
1050 *
1051 * Handles hw requirement to indicate intention to enable
1052 * or disable target queue.
1053 **/
1054void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
1055{
1056	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
1057	u32 reg_block = 0;
1058	u32 reg_val;
1059
1060	if (abs_queue_idx >= 128) {
1061		reg_block = abs_queue_idx / 128;
1062		abs_queue_idx %= 128;
1063	}
1064
1065	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1066	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1067	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1068
1069	if (enable)
1070		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
1071	else
1072		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1073
1074	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
1075}
1076
1077/**
1078 *  i40e_read_pba_string - Reads part number string from EEPROM
1079 *  @hw: pointer to hardware structure
1080 *  @pba_num: stores the part number string from the EEPROM
1081 *  @pba_num_size: part number string buffer length
1082 *
1083 *  Reads the part number string from the EEPROM.
1084 **/
1085i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
1086				 u32 pba_num_size)
1087{
1088	i40e_status status = 0;
1089	u16 pba_word = 0;
1090	u16 pba_size = 0;
1091	u16 pba_ptr = 0;
1092	u16 i = 0;
1093
1094	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
1095	if (status || (pba_word != 0xFAFA)) {
1096		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
1097		return status;
1098	}
1099
1100	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
1101	if (status) {
1102		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
1103		return status;
1104	}
1105
1106	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
1107	if (status) {
1108		hw_dbg(hw, "Failed to read PBA Block size.\n");
1109		return status;
1110	}
1111
1112	/* Subtract one to get PBA word count (PBA Size word is included in
1113	 * total size)
1114	 */
1115	pba_size--;
1116	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
1117		hw_dbg(hw, "Buffer too small for PBA data.\n");
1118		return I40E_ERR_PARAM;
1119	}
1120
1121	for (i = 0; i < pba_size; i++) {
1122		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
1123		if (status) {
1124			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
1125			return status;
1126		}
1127
1128		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
1129		pba_num[(i * 2) + 1] = pba_word & 0xFF;
1130	}
1131	pba_num[(pba_size * 2)] = '\0';
1132
1133	return status;
1134}
1135
1136/**
1137 * i40e_get_media_type - Gets media type
1138 * @hw: pointer to the hardware structure
1139 **/
1140static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
1141{
1142	enum i40e_media_type media;
1143
1144	switch (hw->phy.link_info.phy_type) {
1145	case I40E_PHY_TYPE_10GBASE_SR:
1146	case I40E_PHY_TYPE_10GBASE_LR:
1147	case I40E_PHY_TYPE_1000BASE_SX:
1148	case I40E_PHY_TYPE_1000BASE_LX:
1149	case I40E_PHY_TYPE_40GBASE_SR4:
1150	case I40E_PHY_TYPE_40GBASE_LR4:
1151	case I40E_PHY_TYPE_25GBASE_LR:
1152	case I40E_PHY_TYPE_25GBASE_SR:
1153		media = I40E_MEDIA_TYPE_FIBER;
1154		break;
1155	case I40E_PHY_TYPE_100BASE_TX:
1156	case I40E_PHY_TYPE_1000BASE_T:
1157	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
1158	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
1159	case I40E_PHY_TYPE_10GBASE_T:
1160		media = I40E_MEDIA_TYPE_BASET;
1161		break;
1162	case I40E_PHY_TYPE_10GBASE_CR1_CU:
1163	case I40E_PHY_TYPE_40GBASE_CR4_CU:
1164	case I40E_PHY_TYPE_10GBASE_CR1:
1165	case I40E_PHY_TYPE_40GBASE_CR4:
1166	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
1167	case I40E_PHY_TYPE_40GBASE_AOC:
1168	case I40E_PHY_TYPE_10GBASE_AOC:
1169	case I40E_PHY_TYPE_25GBASE_CR:
1170	case I40E_PHY_TYPE_25GBASE_AOC:
1171	case I40E_PHY_TYPE_25GBASE_ACC:
1172		media = I40E_MEDIA_TYPE_DA;
1173		break;
1174	case I40E_PHY_TYPE_1000BASE_KX:
1175	case I40E_PHY_TYPE_10GBASE_KX4:
1176	case I40E_PHY_TYPE_10GBASE_KR:
1177	case I40E_PHY_TYPE_40GBASE_KR4:
1178	case I40E_PHY_TYPE_20GBASE_KR2:
1179	case I40E_PHY_TYPE_25GBASE_KR:
1180		media = I40E_MEDIA_TYPE_BACKPLANE;
1181		break;
1182	case I40E_PHY_TYPE_SGMII:
1183	case I40E_PHY_TYPE_XAUI:
1184	case I40E_PHY_TYPE_XFI:
1185	case I40E_PHY_TYPE_XLAUI:
1186	case I40E_PHY_TYPE_XLPPI:
1187	default:
1188		media = I40E_MEDIA_TYPE_UNKNOWN;
1189		break;
1190	}
1191
1192	return media;
1193}
1194
1195/**
1196 * i40e_poll_globr - Poll for Global Reset completion
1197 * @hw: pointer to the hardware structure
1198 * @retry_limit: how many times to retry before failure
1199 **/
1200static i40e_status i40e_poll_globr(struct i40e_hw *hw,
1201				   u32 retry_limit)
1202{
1203	u32 cnt, reg = 0;
1204
1205	for (cnt = 0; cnt < retry_limit; cnt++) {
1206		reg = rd32(hw, I40E_GLGEN_RSTAT);
1207		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1208			return 0;
1209		msleep(100);
1210	}
1211
1212	hw_dbg(hw, "Global reset failed.\n");
1213	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
1214
1215	return I40E_ERR_RESET_FAILED;
1216}
1217
1218#define I40E_PF_RESET_WAIT_COUNT_A0	200
1219#define I40E_PF_RESET_WAIT_COUNT	200
1220/**
1221 * i40e_pf_reset - Reset the PF
1222 * @hw: pointer to the hardware structure
1223 *
1224 * Assuming someone else has triggered a global reset,
1225 * assure the global reset is complete and then reset the PF
1226 **/
1227i40e_status i40e_pf_reset(struct i40e_hw *hw)
1228{
1229	u32 cnt = 0;
1230	u32 cnt1 = 0;
1231	u32 reg = 0;
1232	u32 grst_del;
1233
1234	/* Poll for Global Reset steady state in case of recent GRST.
1235	 * The grst delay value is in 100ms units, and we'll wait a
1236	 * couple counts longer to be sure we don't just miss the end.
1237	 */
1238	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
1239		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
1240		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
1241
1242	/* It can take upto 15 secs for GRST steady state.
1243	 * Bump it to 16 secs max to be safe.
1244	 */
1245	grst_del = grst_del * 20;
1246
1247	for (cnt = 0; cnt < grst_del; cnt++) {
1248		reg = rd32(hw, I40E_GLGEN_RSTAT);
1249		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
1250			break;
1251		msleep(100);
1252	}
1253	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1254		hw_dbg(hw, "Global reset polling failed to complete.\n");
1255		return I40E_ERR_RESET_FAILED;
1256	}
1257
1258	/* Now Wait for the FW to be ready */
1259	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
1260		reg = rd32(hw, I40E_GLNVM_ULD);
1261		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1262			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
1263		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1264			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
1265			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
1266			break;
1267		}
1268		usleep_range(10000, 20000);
1269	}
1270	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
1271		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
1272		hw_dbg(hw, "wait for FW Reset complete timedout\n");
1273		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
1274		return I40E_ERR_RESET_FAILED;
1275	}
1276
1277	/* If there was a Global Reset in progress when we got here,
1278	 * we don't need to do the PF Reset
1279	 */
1280	if (!cnt) {
1281		u32 reg2 = 0;
1282		if (hw->revision_id == 0)
1283			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
1284		else
1285			cnt = I40E_PF_RESET_WAIT_COUNT;
1286		reg = rd32(hw, I40E_PFGEN_CTRL);
1287		wr32(hw, I40E_PFGEN_CTRL,
1288		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
1289		for (; cnt; cnt--) {
1290			reg = rd32(hw, I40E_PFGEN_CTRL);
1291			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
1292				break;
1293			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
1294			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
1295				break;
1296			usleep_range(1000, 2000);
1297		}
1298		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
1299			if (i40e_poll_globr(hw, grst_del))
1300				return I40E_ERR_RESET_FAILED;
1301		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
1302			hw_dbg(hw, "PF reset polling failed to complete.\n");
1303			return I40E_ERR_RESET_FAILED;
1304		}
1305	}
1306
1307	i40e_clear_pxe_mode(hw);
1308
1309	return 0;
1310}
1311
1312/**
1313 * i40e_clear_hw - clear out any left over hw state
1314 * @hw: pointer to the hw struct
1315 *
1316 * Clear queues and interrupts, typically called at init time,
1317 * but after the capabilities have been found so we know how many
1318 * queues and msix vectors have been allocated.
1319 **/
1320void i40e_clear_hw(struct i40e_hw *hw)
1321{
1322	u32 num_queues, base_queue;
1323	u32 num_pf_int;
1324	u32 num_vf_int;
1325	u32 num_vfs;
1326	u32 i, j;
1327	u32 val;
1328	u32 eol = 0x7ff;
1329
1330	/* get number of interrupts, queues, and VFs */
1331	val = rd32(hw, I40E_GLPCI_CNF2);
1332	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1333		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1334	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
1335		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
1336
1337	val = rd32(hw, I40E_PFLAN_QALLOC);
1338	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1339		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1340	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
1341	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
1342	if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
1343		num_queues = (j - base_queue) + 1;
1344	else
1345		num_queues = 0;
1346
1347	val = rd32(hw, I40E_PF_VT_PFALLOC);
1348	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
1349	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
1350	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
1351	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
1352	if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
1353		num_vfs = (j - i) + 1;
1354	else
1355		num_vfs = 0;
1356
1357	/* stop all the interrupts */
1358	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
1359	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1360	for (i = 0; i < num_pf_int - 2; i++)
1361		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
1362
1363	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
1364	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1365	wr32(hw, I40E_PFINT_LNKLST0, val);
1366	for (i = 0; i < num_pf_int - 2; i++)
1367		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
1368	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
1369	for (i = 0; i < num_vfs; i++)
1370		wr32(hw, I40E_VPINT_LNKLST0(i), val);
1371	for (i = 0; i < num_vf_int - 2; i++)
1372		wr32(hw, I40E_VPINT_LNKLSTN(i), val);
1373
1374	/* warn the HW of the coming Tx disables */
1375	for (i = 0; i < num_queues; i++) {
1376		u32 abs_queue_idx = base_queue + i;
1377		u32 reg_block = 0;
1378
1379		if (abs_queue_idx >= 128) {
1380			reg_block = abs_queue_idx / 128;
1381			abs_queue_idx %= 128;
1382		}
1383
1384		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
1385		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
1386		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
1387		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
1388
1389		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
1390	}
1391	udelay(400);
1392
1393	/* stop all the queues */
1394	for (i = 0; i < num_queues; i++) {
1395		wr32(hw, I40E_QINT_TQCTL(i), 0);
1396		wr32(hw, I40E_QTX_ENA(i), 0);
1397		wr32(hw, I40E_QINT_RQCTL(i), 0);
1398		wr32(hw, I40E_QRX_ENA(i), 0);
1399	}
1400
1401	/* short wait for all queue disables to settle */
1402	udelay(50);
1403}
1404
1405/**
1406 * i40e_clear_pxe_mode - clear pxe operations mode
1407 * @hw: pointer to the hw struct
1408 *
1409 * Make sure all PXE mode settings are cleared, including things
1410 * like descriptor fetch/write-back mode.
1411 **/
1412void i40e_clear_pxe_mode(struct i40e_hw *hw)
1413{
1414	u32 reg;
1415
1416	if (i40e_check_asq_alive(hw))
1417		i40e_aq_clear_pxe_mode(hw, NULL);
1418
1419	/* Clear single descriptor fetch/write-back mode */
1420	reg = rd32(hw, I40E_GLLAN_RCTL_0);
1421
1422	if (hw->revision_id == 0) {
1423		/* As a work around clear PXE_MODE instead of setting it */
1424		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
1425	} else {
1426		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
1427	}
1428}
1429
1430/**
1431 * i40e_led_is_mine - helper to find matching led
1432 * @hw: pointer to the hw struct
1433 * @idx: index into GPIO registers
1434 *
1435 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
1436 */
1437static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
1438{
1439	u32 gpio_val = 0;
1440	u32 port;
1441
1442	if (!I40E_IS_X710TL_DEVICE(hw->device_id) &&
1443	    !hw->func_caps.led[idx])
1444		return 0;
1445	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
1446	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
1447		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
1448
1449	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
1450	 * if it is not our port then ignore
1451	 */
1452	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
1453	    (port != hw->port))
1454		return 0;
1455
1456	return gpio_val;
1457}
1458
1459#define I40E_FW_LED BIT(4)
1460#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
1461			     I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
1462
1463#define I40E_LED0 22
1464
1465#define I40E_PIN_FUNC_SDP 0x0
1466#define I40E_PIN_FUNC_LED 0x1
1467
1468/**
1469 * i40e_led_get - return current on/off mode
1470 * @hw: pointer to the hw struct
1471 *
1472 * The value returned is the 'mode' field as defined in the
1473 * GPIO register definitions: 0x0 = off, 0xf = on, and other
1474 * values are variations of possible behaviors relating to
1475 * blink, link, and wire.
1476 **/
1477u32 i40e_led_get(struct i40e_hw *hw)
1478{
1479	u32 mode = 0;
1480	int i;
1481
1482	/* as per the documentation GPIO 22-29 are the LED
1483	 * GPIO pins named LED0..LED7
1484	 */
1485	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1486		u32 gpio_val = i40e_led_is_mine(hw, i);
1487
1488		if (!gpio_val)
1489			continue;
1490
1491		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
1492			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
1493		break;
1494	}
1495
1496	return mode;
1497}
1498
1499/**
1500 * i40e_led_set - set new on/off mode
1501 * @hw: pointer to the hw struct
1502 * @mode: 0=off, 0xf=on (else see manual for mode details)
1503 * @blink: true if the LED should blink when on, false if steady
1504 *
1505 * if this function is used to turn on the blink it should
1506 * be used to disable the blink when restoring the original state.
1507 **/
1508void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
1509{
1510	int i;
1511
1512	if (mode & ~I40E_LED_MODE_VALID) {
1513		hw_dbg(hw, "invalid mode passed in %X\n", mode);
1514		return;
1515	}
1516
1517	/* as per the documentation GPIO 22-29 are the LED
1518	 * GPIO pins named LED0..LED7
1519	 */
1520	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
1521		u32 gpio_val = i40e_led_is_mine(hw, i);
1522
1523		if (!gpio_val)
1524			continue;
1525
1526		if (I40E_IS_X710TL_DEVICE(hw->device_id)) {
1527			u32 pin_func = 0;
1528
1529			if (mode & I40E_FW_LED)
1530				pin_func = I40E_PIN_FUNC_SDP;
1531			else
1532				pin_func = I40E_PIN_FUNC_LED;
1533
1534			gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
1535			gpio_val |= ((pin_func <<
1536				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
1537				     I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
1538		}
1539		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
1540		/* this & is a bit of paranoia, but serves as a range check */
1541		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
1542			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
1543
1544		if (blink)
1545			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1546		else
1547			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
1548
1549		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
1550		break;
1551	}
1552}
1553
1554/* Admin command wrappers */
1555
1556/**
1557 * i40e_aq_get_phy_capabilities
1558 * @hw: pointer to the hw struct
1559 * @abilities: structure for PHY capabilities to be filled
1560 * @qualified_modules: report Qualified Modules
1561 * @report_init: report init capabilities (active are default)
1562 * @cmd_details: pointer to command details structure or NULL
1563 *
1564 * Returns the various PHY abilities supported on the Port.
1565 **/
1566i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
1567			bool qualified_modules, bool report_init,
1568			struct i40e_aq_get_phy_abilities_resp *abilities,
1569			struct i40e_asq_cmd_details *cmd_details)
1570{
1571	struct i40e_aq_desc desc;
1572	i40e_status status;
1573	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
1574	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
1575
1576	if (!abilities)
1577		return I40E_ERR_PARAM;
1578
1579	do {
1580		i40e_fill_default_direct_cmd_desc(&desc,
1581					       i40e_aqc_opc_get_phy_abilities);
1582
1583		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1584		if (abilities_size > I40E_AQ_LARGE_BUF)
1585			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
1586
1587		if (qualified_modules)
1588			desc.params.external.param0 |=
1589			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
1590
1591		if (report_init)
1592			desc.params.external.param0 |=
1593			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
1594
1595		status = i40e_asq_send_command(hw, &desc, abilities,
1596					       abilities_size, cmd_details);
1597
1598		switch (hw->aq.asq_last_status) {
1599		case I40E_AQ_RC_EIO:
1600			status = I40E_ERR_UNKNOWN_PHY;
1601			break;
1602		case I40E_AQ_RC_EAGAIN:
1603			usleep_range(1000, 2000);
1604			total_delay++;
1605			status = I40E_ERR_TIMEOUT;
1606			break;
1607		/* also covers I40E_AQ_RC_OK */
1608		default:
1609			break;
1610		}
1611
1612	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
1613		(total_delay < max_delay));
1614
1615	if (status)
1616		return status;
1617
1618	if (report_init) {
1619		if (hw->mac.type ==  I40E_MAC_XL710 &&
1620		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
1621		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
1622			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
1623		} else {
1624			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
1625			hw->phy.phy_types |=
1626					((u64)abilities->phy_type_ext << 32);
1627		}
1628	}
1629
1630	return status;
1631}
1632
1633/**
1634 * i40e_aq_set_phy_config
1635 * @hw: pointer to the hw struct
1636 * @config: structure with PHY configuration to be set
1637 * @cmd_details: pointer to command details structure or NULL
1638 *
1639 * Set the various PHY configuration parameters
1640 * supported on the Port.One or more of the Set PHY config parameters may be
1641 * ignored in an MFP mode as the PF may not have the privilege to set some
1642 * of the PHY Config parameters. This status will be indicated by the
1643 * command response.
1644 **/
1645enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
1646				struct i40e_aq_set_phy_config *config,
1647				struct i40e_asq_cmd_details *cmd_details)
1648{
1649	struct i40e_aq_desc desc;
1650	struct i40e_aq_set_phy_config *cmd =
1651			(struct i40e_aq_set_phy_config *)&desc.params.raw;
1652	enum i40e_status_code status;
1653
1654	if (!config)
1655		return I40E_ERR_PARAM;
1656
1657	i40e_fill_default_direct_cmd_desc(&desc,
1658					  i40e_aqc_opc_set_phy_config);
1659
1660	*cmd = *config;
1661
1662	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1663
1664	return status;
1665}
1666
1667static noinline_for_stack enum i40e_status_code
1668i40e_set_fc_status(struct i40e_hw *hw,
1669		   struct i40e_aq_get_phy_abilities_resp *abilities,
1670		   bool atomic_restart)
1671{
1672	struct i40e_aq_set_phy_config config;
1673	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
1674	u8 pause_mask = 0x0;
1675
1676	switch (fc_mode) {
1677	case I40E_FC_FULL:
1678		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1679		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1680		break;
1681	case I40E_FC_RX_PAUSE:
1682		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
1683		break;
1684	case I40E_FC_TX_PAUSE:
1685		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
1686		break;
1687	default:
1688		break;
1689	}
1690
1691	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
1692	/* clear the old pause settings */
1693	config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
1694			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
1695	/* set the new abilities */
1696	config.abilities |= pause_mask;
1697	/* If the abilities have changed, then set the new config */
1698	if (config.abilities == abilities->abilities)
1699		return 0;
1700
1701	/* Auto restart link so settings take effect */
1702	if (atomic_restart)
1703		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1704	/* Copy over all the old settings */
1705	config.phy_type = abilities->phy_type;
1706	config.phy_type_ext = abilities->phy_type_ext;
1707	config.link_speed = abilities->link_speed;
1708	config.eee_capability = abilities->eee_capability;
1709	config.eeer = abilities->eeer_val;
1710	config.low_power_ctrl = abilities->d3_lpan;
1711	config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
1712			    I40E_AQ_PHY_FEC_CONFIG_MASK;
1713
1714	return i40e_aq_set_phy_config(hw, &config, NULL);
1715}
1716
1717/**
1718 * i40e_set_fc
1719 * @hw: pointer to the hw struct
1720 * @aq_failures: buffer to return AdminQ failure information
1721 * @atomic_restart: whether to enable atomic link restart
1722 *
1723 * Set the requested flow control mode using set_phy_config.
1724 **/
1725enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
1726				  bool atomic_restart)
1727{
1728	struct i40e_aq_get_phy_abilities_resp abilities;
1729	enum i40e_status_code status;
1730
1731	*aq_failures = 0x0;
1732
1733	/* Get the current phy config */
1734	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
1735					      NULL);
1736	if (status) {
1737		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
1738		return status;
1739	}
1740
1741	status = i40e_set_fc_status(hw, &abilities, atomic_restart);
1742	if (status)
1743		*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
1744
1745	/* Update the link info */
1746	status = i40e_update_link_info(hw);
1747	if (status) {
1748		/* Wait a little bit (on 40G cards it sometimes takes a really
1749		 * long time for link to come back from the atomic reset)
1750		 * and try once more
1751		 */
1752		msleep(1000);
1753		status = i40e_update_link_info(hw);
1754	}
1755	if (status)
1756		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
1757
1758	return status;
1759}
1760
1761/**
1762 * i40e_aq_clear_pxe_mode
1763 * @hw: pointer to the hw struct
1764 * @cmd_details: pointer to command details structure or NULL
1765 *
1766 * Tell the firmware that the driver is taking over from PXE
1767 **/
1768i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
1769				struct i40e_asq_cmd_details *cmd_details)
1770{
1771	i40e_status status;
1772	struct i40e_aq_desc desc;
1773	struct i40e_aqc_clear_pxe *cmd =
1774		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
1775
1776	i40e_fill_default_direct_cmd_desc(&desc,
1777					  i40e_aqc_opc_clear_pxe_mode);
1778
1779	cmd->rx_cnt = 0x2;
1780
1781	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1782
1783	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
1784
1785	return status;
1786}
1787
1788/**
1789 * i40e_aq_set_link_restart_an
1790 * @hw: pointer to the hw struct
1791 * @enable_link: if true: enable link, if false: disable link
1792 * @cmd_details: pointer to command details structure or NULL
1793 *
1794 * Sets up the link and restarts the Auto-Negotiation over the link.
1795 **/
1796i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
1797					bool enable_link,
1798					struct i40e_asq_cmd_details *cmd_details)
1799{
1800	struct i40e_aq_desc desc;
1801	struct i40e_aqc_set_link_restart_an *cmd =
1802		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
1803	i40e_status status;
1804
1805	i40e_fill_default_direct_cmd_desc(&desc,
1806					  i40e_aqc_opc_set_link_restart_an);
1807
1808	cmd->command = I40E_AQ_PHY_RESTART_AN;
1809	if (enable_link)
1810		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
1811	else
1812		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
1813
1814	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1815
1816	return status;
1817}
1818
1819/**
1820 * i40e_aq_get_link_info
1821 * @hw: pointer to the hw struct
1822 * @enable_lse: enable/disable LinkStatusEvent reporting
1823 * @link: pointer to link status structure - optional
1824 * @cmd_details: pointer to command details structure or NULL
1825 *
1826 * Returns the link status of the adapter.
1827 **/
1828i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
1829				bool enable_lse, struct i40e_link_status *link,
1830				struct i40e_asq_cmd_details *cmd_details)
1831{
1832	struct i40e_aq_desc desc;
1833	struct i40e_aqc_get_link_status *resp =
1834		(struct i40e_aqc_get_link_status *)&desc.params.raw;
1835	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
1836	i40e_status status;
1837	bool tx_pause, rx_pause;
1838	u16 command_flags;
1839
1840	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
1841
1842	if (enable_lse)
1843		command_flags = I40E_AQ_LSE_ENABLE;
1844	else
1845		command_flags = I40E_AQ_LSE_DISABLE;
1846	resp->command_flags = cpu_to_le16(command_flags);
1847
1848	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1849
1850	if (status)
1851		goto aq_get_link_info_exit;
1852
1853	/* save off old link status information */
1854	hw->phy.link_info_old = *hw_link_info;
1855
1856	/* update link status */
1857	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
1858	hw->phy.media_type = i40e_get_media_type(hw);
1859	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
1860	hw_link_info->link_info = resp->link_info;
1861	hw_link_info->an_info = resp->an_info;
1862	hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
1863						 I40E_AQ_CONFIG_FEC_RS_ENA);
1864	hw_link_info->ext_info = resp->ext_info;
1865	hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
1866	hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
1867	hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
1868
1869	/* update fc info */
1870	tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
1871	rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
1872	if (tx_pause & rx_pause)
1873		hw->fc.current_mode = I40E_FC_FULL;
1874	else if (tx_pause)
1875		hw->fc.current_mode = I40E_FC_TX_PAUSE;
1876	else if (rx_pause)
1877		hw->fc.current_mode = I40E_FC_RX_PAUSE;
1878	else
1879		hw->fc.current_mode = I40E_FC_NONE;
1880
1881	if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
1882		hw_link_info->crc_enable = true;
1883	else
1884		hw_link_info->crc_enable = false;
1885
1886	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED))
1887		hw_link_info->lse_enable = true;
1888	else
1889		hw_link_info->lse_enable = false;
1890
1891	if ((hw->mac.type == I40E_MAC_XL710) &&
1892	    (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
1893	     hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1894		hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1895
1896	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
1897	    hw->mac.type != I40E_MAC_X722) {
1898		__le32 tmp;
1899
1900		memcpy(&tmp, resp->link_type, sizeof(tmp));
1901		hw->phy.phy_types = le32_to_cpu(tmp);
1902		hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
1903	}
1904
1905	/* save link status information */
1906	if (link)
1907		*link = *hw_link_info;
1908
1909	/* flag cleared so helper functions don't call AQ again */
1910	hw->phy.get_link_info = false;
1911
1912aq_get_link_info_exit:
1913	return status;
1914}
1915
1916/**
1917 * i40e_aq_set_phy_int_mask
1918 * @hw: pointer to the hw struct
1919 * @mask: interrupt mask to be set
1920 * @cmd_details: pointer to command details structure or NULL
1921 *
1922 * Set link interrupt mask.
1923 **/
1924i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
1925				     u16 mask,
1926				     struct i40e_asq_cmd_details *cmd_details)
1927{
1928	struct i40e_aq_desc desc;
1929	struct i40e_aqc_set_phy_int_mask *cmd =
1930		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
1931	i40e_status status;
1932
1933	i40e_fill_default_direct_cmd_desc(&desc,
1934					  i40e_aqc_opc_set_phy_int_mask);
1935
1936	cmd->event_mask = cpu_to_le16(mask);
1937
1938	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1939
1940	return status;
1941}
1942
1943/**
1944 * i40e_aq_set_phy_debug
1945 * @hw: pointer to the hw struct
1946 * @cmd_flags: debug command flags
1947 * @cmd_details: pointer to command details structure or NULL
1948 *
1949 * Reset the external PHY.
1950 **/
1951i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1952				  struct i40e_asq_cmd_details *cmd_details)
1953{
1954	struct i40e_aq_desc desc;
1955	struct i40e_aqc_set_phy_debug *cmd =
1956		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
1957	i40e_status status;
1958
1959	i40e_fill_default_direct_cmd_desc(&desc,
1960					  i40e_aqc_opc_set_phy_debug);
1961
1962	cmd->command_flags = cmd_flags;
1963
1964	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1965
1966	return status;
1967}
1968
1969/**
1970 * i40e_is_aq_api_ver_ge
1971 * @aq: pointer to AdminQ info containing HW API version to compare
1972 * @maj: API major value
1973 * @min: API minor value
1974 *
1975 * Assert whether current HW API version is greater/equal than provided.
1976 **/
1977static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
1978				  u16 min)
1979{
1980	return (aq->api_maj_ver > maj ||
1981		(aq->api_maj_ver == maj && aq->api_min_ver >= min));
1982}
1983
1984/**
1985 * i40e_aq_add_vsi
1986 * @hw: pointer to the hw struct
1987 * @vsi_ctx: pointer to a vsi context struct
1988 * @cmd_details: pointer to command details structure or NULL
1989 *
1990 * Add a VSI context to the hardware.
1991**/
1992i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
1993				struct i40e_vsi_context *vsi_ctx,
1994				struct i40e_asq_cmd_details *cmd_details)
1995{
1996	struct i40e_aq_desc desc;
1997	struct i40e_aqc_add_get_update_vsi *cmd =
1998		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
1999	struct i40e_aqc_add_get_update_vsi_completion *resp =
2000		(struct i40e_aqc_add_get_update_vsi_completion *)
2001		&desc.params.raw;
2002	i40e_status status;
2003
2004	i40e_fill_default_direct_cmd_desc(&desc,
2005					  i40e_aqc_opc_add_vsi);
2006
2007	cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
2008	cmd->connection_type = vsi_ctx->connection_type;
2009	cmd->vf_id = vsi_ctx->vf_num;
2010	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
2011
2012	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2013
2014	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2015				    sizeof(vsi_ctx->info), cmd_details);
2016
2017	if (status)
2018		goto aq_add_vsi_exit;
2019
2020	vsi_ctx->seid = le16_to_cpu(resp->seid);
2021	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2022	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2023	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2024
2025aq_add_vsi_exit:
2026	return status;
2027}
2028
2029/**
2030 * i40e_aq_set_default_vsi
2031 * @hw: pointer to the hw struct
2032 * @seid: vsi number
2033 * @cmd_details: pointer to command details structure or NULL
2034 **/
2035i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
2036				    u16 seid,
2037				    struct i40e_asq_cmd_details *cmd_details)
2038{
2039	struct i40e_aq_desc desc;
2040	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2041		(struct i40e_aqc_set_vsi_promiscuous_modes *)
2042		&desc.params.raw;
2043	i40e_status status;
2044
2045	i40e_fill_default_direct_cmd_desc(&desc,
2046					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2047
2048	cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2049	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2050	cmd->seid = cpu_to_le16(seid);
2051
2052	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2053
2054	return status;
2055}
2056
2057/**
2058 * i40e_aq_clear_default_vsi
2059 * @hw: pointer to the hw struct
2060 * @seid: vsi number
2061 * @cmd_details: pointer to command details structure or NULL
2062 **/
2063i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
2064				      u16 seid,
2065				      struct i40e_asq_cmd_details *cmd_details)
2066{
2067	struct i40e_aq_desc desc;
2068	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2069		(struct i40e_aqc_set_vsi_promiscuous_modes *)
2070		&desc.params.raw;
2071	i40e_status status;
2072
2073	i40e_fill_default_direct_cmd_desc(&desc,
2074					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2075
2076	cmd->promiscuous_flags = cpu_to_le16(0);
2077	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2078	cmd->seid = cpu_to_le16(seid);
2079
2080	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2081
2082	return status;
2083}
2084
2085/**
2086 * i40e_aq_set_vsi_unicast_promiscuous
2087 * @hw: pointer to the hw struct
2088 * @seid: vsi number
2089 * @set: set unicast promiscuous enable/disable
2090 * @cmd_details: pointer to command details structure or NULL
2091 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
2092 **/
2093i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
2094				u16 seid, bool set,
2095				struct i40e_asq_cmd_details *cmd_details,
2096				bool rx_only_promisc)
2097{
2098	struct i40e_aq_desc desc;
2099	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2100		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2101	i40e_status status;
2102	u16 flags = 0;
2103
2104	i40e_fill_default_direct_cmd_desc(&desc,
2105					i40e_aqc_opc_set_vsi_promiscuous_modes);
2106
2107	if (set) {
2108		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2109		if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2110			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
2111	}
2112
2113	cmd->promiscuous_flags = cpu_to_le16(flags);
2114
2115	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2116	if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2117		cmd->valid_flags |=
2118			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
2119
2120	cmd->seid = cpu_to_le16(seid);
2121	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2122
2123	return status;
2124}
2125
2126/**
2127 * i40e_aq_set_vsi_multicast_promiscuous
2128 * @hw: pointer to the hw struct
2129 * @seid: vsi number
2130 * @set: set multicast promiscuous enable/disable
2131 * @cmd_details: pointer to command details structure or NULL
2132 **/
2133i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
2134				u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
2135{
2136	struct i40e_aq_desc desc;
2137	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2138		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2139	i40e_status status;
2140	u16 flags = 0;
2141
2142	i40e_fill_default_direct_cmd_desc(&desc,
2143					i40e_aqc_opc_set_vsi_promiscuous_modes);
2144
2145	if (set)
2146		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2147
2148	cmd->promiscuous_flags = cpu_to_le16(flags);
2149
2150	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2151
2152	cmd->seid = cpu_to_le16(seid);
2153	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2154
2155	return status;
2156}
2157
2158/**
2159 * i40e_aq_set_vsi_mc_promisc_on_vlan
2160 * @hw: pointer to the hw struct
2161 * @seid: vsi number
2162 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2163 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
2164 * @cmd_details: pointer to command details structure or NULL
2165 **/
2166enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
2167							 u16 seid, bool enable,
2168							 u16 vid,
2169				struct i40e_asq_cmd_details *cmd_details)
2170{
2171	struct i40e_aq_desc desc;
2172	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2173		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2174	enum i40e_status_code status;
2175	u16 flags = 0;
2176
2177	i40e_fill_default_direct_cmd_desc(&desc,
2178					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2179
2180	if (enable)
2181		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
2182
2183	cmd->promiscuous_flags = cpu_to_le16(flags);
2184	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
2185	cmd->seid = cpu_to_le16(seid);
2186	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2187
2188	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2189
2190	return status;
2191}
2192
2193/**
2194 * i40e_aq_set_vsi_uc_promisc_on_vlan
2195 * @hw: pointer to the hw struct
2196 * @seid: vsi number
2197 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2198 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
2199 * @cmd_details: pointer to command details structure or NULL
2200 **/
2201enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
2202							 u16 seid, bool enable,
2203							 u16 vid,
2204				struct i40e_asq_cmd_details *cmd_details)
2205{
2206	struct i40e_aq_desc desc;
2207	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2208		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2209	enum i40e_status_code status;
2210	u16 flags = 0;
2211
2212	i40e_fill_default_direct_cmd_desc(&desc,
2213					  i40e_aqc_opc_set_vsi_promiscuous_modes);
2214
2215	if (enable) {
2216		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
2217		if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2218			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
2219	}
2220
2221	cmd->promiscuous_flags = cpu_to_le16(flags);
2222	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
2223	if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
2224		cmd->valid_flags |=
2225			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
2226	cmd->seid = cpu_to_le16(seid);
2227	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2228
2229	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2230
2231	return status;
2232}
2233
2234/**
2235 * i40e_aq_set_vsi_bc_promisc_on_vlan
2236 * @hw: pointer to the hw struct
2237 * @seid: vsi number
2238 * @enable: set broadcast promiscuous enable/disable for a given VLAN
2239 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
2240 * @cmd_details: pointer to command details structure or NULL
2241 **/
2242i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
2243				u16 seid, bool enable, u16 vid,
2244				struct i40e_asq_cmd_details *cmd_details)
2245{
2246	struct i40e_aq_desc desc;
2247	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2248		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2249	i40e_status status;
2250	u16 flags = 0;
2251
2252	i40e_fill_default_direct_cmd_desc(&desc,
2253					i40e_aqc_opc_set_vsi_promiscuous_modes);
2254
2255	if (enable)
2256		flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
2257
2258	cmd->promiscuous_flags = cpu_to_le16(flags);
2259	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2260	cmd->seid = cpu_to_le16(seid);
2261	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
2262
2263	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2264
2265	return status;
2266}
2267
2268/**
2269 * i40e_aq_set_vsi_broadcast
2270 * @hw: pointer to the hw struct
2271 * @seid: vsi number
2272 * @set_filter: true to set filter, false to clear filter
2273 * @cmd_details: pointer to command details structure or NULL
2274 *
2275 * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
2276 **/
2277i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
2278				u16 seid, bool set_filter,
2279				struct i40e_asq_cmd_details *cmd_details)
2280{
2281	struct i40e_aq_desc desc;
2282	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2283		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2284	i40e_status status;
2285
2286	i40e_fill_default_direct_cmd_desc(&desc,
2287					i40e_aqc_opc_set_vsi_promiscuous_modes);
2288
2289	if (set_filter)
2290		cmd->promiscuous_flags
2291			    |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2292	else
2293		cmd->promiscuous_flags
2294			    &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2295
2296	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
2297	cmd->seid = cpu_to_le16(seid);
2298	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2299
2300	return status;
2301}
2302
2303/**
2304 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
2305 * @hw: pointer to the hw struct
2306 * @seid: vsi number
2307 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
2308 * @cmd_details: pointer to command details structure or NULL
2309 **/
2310i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
2311				       u16 seid, bool enable,
2312				       struct i40e_asq_cmd_details *cmd_details)
2313{
2314	struct i40e_aq_desc desc;
2315	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2316		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
2317	i40e_status status;
2318	u16 flags = 0;
2319
2320	i40e_fill_default_direct_cmd_desc(&desc,
2321					i40e_aqc_opc_set_vsi_promiscuous_modes);
2322	if (enable)
2323		flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
2324
2325	cmd->promiscuous_flags = cpu_to_le16(flags);
2326	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
2327	cmd->seid = cpu_to_le16(seid);
2328
2329	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2330
2331	return status;
2332}
2333
2334/**
2335 * i40e_get_vsi_params - get VSI configuration info
2336 * @hw: pointer to the hw struct
2337 * @vsi_ctx: pointer to a vsi context struct
2338 * @cmd_details: pointer to command details structure or NULL
2339 **/
2340i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
2341				struct i40e_vsi_context *vsi_ctx,
2342				struct i40e_asq_cmd_details *cmd_details)
2343{
2344	struct i40e_aq_desc desc;
2345	struct i40e_aqc_add_get_update_vsi *cmd =
2346		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2347	struct i40e_aqc_add_get_update_vsi_completion *resp =
2348		(struct i40e_aqc_add_get_update_vsi_completion *)
2349		&desc.params.raw;
2350	i40e_status status;
2351
2352	i40e_fill_default_direct_cmd_desc(&desc,
2353					  i40e_aqc_opc_get_vsi_parameters);
2354
2355	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2356
2357	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2358
2359	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2360				    sizeof(vsi_ctx->info), NULL);
2361
2362	if (status)
2363		goto aq_get_vsi_params_exit;
2364
2365	vsi_ctx->seid = le16_to_cpu(resp->seid);
2366	vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
2367	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2368	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2369
2370aq_get_vsi_params_exit:
2371	return status;
2372}
2373
2374/**
2375 * i40e_aq_update_vsi_params
2376 * @hw: pointer to the hw struct
2377 * @vsi_ctx: pointer to a vsi context struct
2378 * @cmd_details: pointer to command details structure or NULL
2379 *
2380 * Update a VSI context.
2381 **/
2382i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
2383				struct i40e_vsi_context *vsi_ctx,
2384				struct i40e_asq_cmd_details *cmd_details)
2385{
2386	struct i40e_aq_desc desc;
2387	struct i40e_aqc_add_get_update_vsi *cmd =
2388		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
2389	struct i40e_aqc_add_get_update_vsi_completion *resp =
2390		(struct i40e_aqc_add_get_update_vsi_completion *)
2391		&desc.params.raw;
2392	i40e_status status;
2393
2394	i40e_fill_default_direct_cmd_desc(&desc,
2395					  i40e_aqc_opc_update_vsi_parameters);
2396	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
2397
2398	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2399
2400	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
2401				    sizeof(vsi_ctx->info), cmd_details);
2402
2403	vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
2404	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
2405
2406	return status;
2407}
2408
2409/**
2410 * i40e_aq_get_switch_config
2411 * @hw: pointer to the hardware structure
2412 * @buf: pointer to the result buffer
2413 * @buf_size: length of input buffer
2414 * @start_seid: seid to start for the report, 0 == beginning
2415 * @cmd_details: pointer to command details structure or NULL
2416 *
2417 * Fill the buf with switch configuration returned from AdminQ command
2418 **/
2419i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
2420				struct i40e_aqc_get_switch_config_resp *buf,
2421				u16 buf_size, u16 *start_seid,
2422				struct i40e_asq_cmd_details *cmd_details)
2423{
2424	struct i40e_aq_desc desc;
2425	struct i40e_aqc_switch_seid *scfg =
2426		(struct i40e_aqc_switch_seid *)&desc.params.raw;
2427	i40e_status status;
2428
2429	i40e_fill_default_direct_cmd_desc(&desc,
2430					  i40e_aqc_opc_get_switch_config);
2431	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
2432	if (buf_size > I40E_AQ_LARGE_BUF)
2433		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2434	scfg->seid = cpu_to_le16(*start_seid);
2435
2436	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
2437	*start_seid = le16_to_cpu(scfg->seid);
2438
2439	return status;
2440}
2441
2442/**
2443 * i40e_aq_set_switch_config
2444 * @hw: pointer to the hardware structure
2445 * @flags: bit flag values to set
2446 * @mode: cloud filter mode
2447 * @valid_flags: which bit flags to set
2448 * @mode: cloud filter mode
2449 * @cmd_details: pointer to command details structure or NULL
2450 *
2451 * Set switch configuration bits
2452 **/
2453enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
2454						u16 flags,
2455						u16 valid_flags, u8 mode,
2456				struct i40e_asq_cmd_details *cmd_details)
2457{
2458	struct i40e_aq_desc desc;
2459	struct i40e_aqc_set_switch_config *scfg =
2460		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
2461	enum i40e_status_code status;
2462
2463	i40e_fill_default_direct_cmd_desc(&desc,
2464					  i40e_aqc_opc_set_switch_config);
2465	scfg->flags = cpu_to_le16(flags);
2466	scfg->valid_flags = cpu_to_le16(valid_flags);
2467	scfg->mode = mode;
2468	if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
2469		scfg->switch_tag = cpu_to_le16(hw->switch_tag);
2470		scfg->first_tag = cpu_to_le16(hw->first_tag);
2471		scfg->second_tag = cpu_to_le16(hw->second_tag);
2472	}
2473	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2474
2475	return status;
2476}
2477
2478/**
2479 * i40e_aq_get_firmware_version
2480 * @hw: pointer to the hw struct
2481 * @fw_major_version: firmware major version
2482 * @fw_minor_version: firmware minor version
2483 * @fw_build: firmware build number
2484 * @api_major_version: major queue version
2485 * @api_minor_version: minor queue version
2486 * @cmd_details: pointer to command details structure or NULL
2487 *
2488 * Get the firmware version from the admin queue commands
2489 **/
2490i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
2491				u16 *fw_major_version, u16 *fw_minor_version,
2492				u32 *fw_build,
2493				u16 *api_major_version, u16 *api_minor_version,
2494				struct i40e_asq_cmd_details *cmd_details)
2495{
2496	struct i40e_aq_desc desc;
2497	struct i40e_aqc_get_version *resp =
2498		(struct i40e_aqc_get_version *)&desc.params.raw;
2499	i40e_status status;
2500
2501	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
2502
2503	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2504
2505	if (!status) {
2506		if (fw_major_version)
2507			*fw_major_version = le16_to_cpu(resp->fw_major);
2508		if (fw_minor_version)
2509			*fw_minor_version = le16_to_cpu(resp->fw_minor);
2510		if (fw_build)
2511			*fw_build = le32_to_cpu(resp->fw_build);
2512		if (api_major_version)
2513			*api_major_version = le16_to_cpu(resp->api_major);
2514		if (api_minor_version)
2515			*api_minor_version = le16_to_cpu(resp->api_minor);
2516	}
2517
2518	return status;
2519}
2520
2521/**
2522 * i40e_aq_send_driver_version
2523 * @hw: pointer to the hw struct
2524 * @dv: driver's major, minor version
2525 * @cmd_details: pointer to command details structure or NULL
2526 *
2527 * Send the driver version to the firmware
2528 **/
2529i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
2530				struct i40e_driver_version *dv,
2531				struct i40e_asq_cmd_details *cmd_details)
2532{
2533	struct i40e_aq_desc desc;
2534	struct i40e_aqc_driver_version *cmd =
2535		(struct i40e_aqc_driver_version *)&desc.params.raw;
2536	i40e_status status;
2537	u16 len;
2538
2539	if (dv == NULL)
2540		return I40E_ERR_PARAM;
2541
2542	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
2543
2544	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
2545	cmd->driver_major_ver = dv->major_version;
2546	cmd->driver_minor_ver = dv->minor_version;
2547	cmd->driver_build_ver = dv->build_version;
2548	cmd->driver_subbuild_ver = dv->subbuild_version;
2549
2550	len = 0;
2551	while (len < sizeof(dv->driver_string) &&
2552	       (dv->driver_string[len] < 0x80) &&
2553	       dv->driver_string[len])
2554		len++;
2555	status = i40e_asq_send_command(hw, &desc, dv->driver_string,
2556				       len, cmd_details);
2557
2558	return status;
2559}
2560
2561/**
2562 * i40e_get_link_status - get status of the HW network link
2563 * @hw: pointer to the hw struct
2564 * @link_up: pointer to bool (true/false = linkup/linkdown)
2565 *
2566 * Variable link_up true if link is up, false if link is down.
2567 * The variable link_up is invalid if returned value of status != 0
2568 *
2569 * Side effect: LinkStatusEvent reporting becomes enabled
2570 **/
2571i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
2572{
2573	i40e_status status = 0;
2574
2575	if (hw->phy.get_link_info) {
2576		status = i40e_update_link_info(hw);
2577
2578		if (status)
2579			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
2580				   status);
2581	}
2582
2583	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
2584
2585	return status;
2586}
2587
2588/**
2589 * i40e_updatelink_status - update status of the HW network link
2590 * @hw: pointer to the hw struct
2591 **/
2592noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
2593{
2594	struct i40e_aq_get_phy_abilities_resp abilities;
2595	i40e_status status = 0;
2596
2597	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
2598	if (status)
2599		return status;
2600
2601	/* extra checking needed to ensure link info to user is timely */
2602	if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
2603	    ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
2604	     !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
2605		status = i40e_aq_get_phy_capabilities(hw, false, false,
2606						      &abilities, NULL);
2607		if (status)
2608			return status;
2609
2610		if (abilities.fec_cfg_curr_mod_ext_info &
2611		    I40E_AQ_ENABLE_FEC_AUTO)
2612			hw->phy.link_info.req_fec_info =
2613				(I40E_AQ_REQUEST_FEC_KR |
2614				 I40E_AQ_REQUEST_FEC_RS);
2615		else
2616			hw->phy.link_info.req_fec_info =
2617				abilities.fec_cfg_curr_mod_ext_info &
2618				(I40E_AQ_REQUEST_FEC_KR |
2619				 I40E_AQ_REQUEST_FEC_RS);
2620
2621		memcpy(hw->phy.link_info.module_type, &abilities.module_type,
2622		       sizeof(hw->phy.link_info.module_type));
2623	}
2624
2625	return status;
2626}
2627
2628/**
2629 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
2630 * @hw: pointer to the hw struct
2631 * @uplink_seid: the MAC or other gizmo SEID
2632 * @downlink_seid: the VSI SEID
2633 * @enabled_tc: bitmap of TCs to be enabled
2634 * @default_port: true for default port VSI, false for control port
2635 * @veb_seid: pointer to where to put the resulting VEB SEID
2636 * @enable_stats: true to turn on VEB stats
2637 * @cmd_details: pointer to command details structure or NULL
2638 *
2639 * This asks the FW to add a VEB between the uplink and downlink
2640 * elements.  If the uplink SEID is 0, this will be a floating VEB.
2641 **/
2642i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
2643				u16 downlink_seid, u8 enabled_tc,
2644				bool default_port, u16 *veb_seid,
2645				bool enable_stats,
2646				struct i40e_asq_cmd_details *cmd_details)
2647{
2648	struct i40e_aq_desc desc;
2649	struct i40e_aqc_add_veb *cmd =
2650		(struct i40e_aqc_add_veb *)&desc.params.raw;
2651	struct i40e_aqc_add_veb_completion *resp =
2652		(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
2653	i40e_status status;
2654	u16 veb_flags = 0;
2655
2656	/* SEIDs need to either both be set or both be 0 for floating VEB */
2657	if (!!uplink_seid != !!downlink_seid)
2658		return I40E_ERR_PARAM;
2659
2660	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
2661
2662	cmd->uplink_seid = cpu_to_le16(uplink_seid);
2663	cmd->downlink_seid = cpu_to_le16(downlink_seid);
2664	cmd->enable_tcs = enabled_tc;
2665	if (!uplink_seid)
2666		veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
2667	if (default_port)
2668		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
2669	else
2670		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
2671
2672	/* reverse logic here: set the bitflag to disable the stats */
2673	if (!enable_stats)
2674		veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
2675
2676	cmd->veb_flags = cpu_to_le16(veb_flags);
2677
2678	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2679
2680	if (!status && veb_seid)
2681		*veb_seid = le16_to_cpu(resp->veb_seid);
2682
2683	return status;
2684}
2685
2686/**
2687 * i40e_aq_get_veb_parameters - Retrieve VEB parameters
2688 * @hw: pointer to the hw struct
2689 * @veb_seid: the SEID of the VEB to query
2690 * @switch_id: the uplink switch id
2691 * @floating: set to true if the VEB is floating
2692 * @statistic_index: index of the stats counter block for this VEB
2693 * @vebs_used: number of VEB's used by function
2694 * @vebs_free: total VEB's not reserved by any function
2695 * @cmd_details: pointer to command details structure or NULL
2696 *
2697 * This retrieves the parameters for a particular VEB, specified by
2698 * uplink_seid, and returns them to the caller.
2699 **/
2700i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
2701				u16 veb_seid, u16 *switch_id,
2702				bool *floating, u16 *statistic_index,
2703				u16 *vebs_used, u16 *vebs_free,
2704				struct i40e_asq_cmd_details *cmd_details)
2705{
2706	struct i40e_aq_desc desc;
2707	struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
2708		(struct i40e_aqc_get_veb_parameters_completion *)
2709		&desc.params.raw;
2710	i40e_status status;
2711
2712	if (veb_seid == 0)
2713		return I40E_ERR_PARAM;
2714
2715	i40e_fill_default_direct_cmd_desc(&desc,
2716					  i40e_aqc_opc_get_veb_parameters);
2717	cmd_resp->seid = cpu_to_le16(veb_seid);
2718
2719	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2720	if (status)
2721		goto get_veb_exit;
2722
2723	if (switch_id)
2724		*switch_id = le16_to_cpu(cmd_resp->switch_id);
2725	if (statistic_index)
2726		*statistic_index = le16_to_cpu(cmd_resp->statistic_index);
2727	if (vebs_used)
2728		*vebs_used = le16_to_cpu(cmd_resp->vebs_used);
2729	if (vebs_free)
2730		*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
2731	if (floating) {
2732		u16 flags = le16_to_cpu(cmd_resp->veb_flags);
2733
2734		if (flags & I40E_AQC_ADD_VEB_FLOATING)
2735			*floating = true;
2736		else
2737			*floating = false;
2738	}
2739
2740get_veb_exit:
2741	return status;
2742}
2743
2744/**
2745 * i40e_aq_add_macvlan
2746 * @hw: pointer to the hw struct
2747 * @seid: VSI for the mac address
2748 * @mv_list: list of macvlans to be added
2749 * @count: length of the list
2750 * @cmd_details: pointer to command details structure or NULL
2751 *
2752 * Add MAC/VLAN addresses to the HW filtering
2753 **/
2754i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
2755			struct i40e_aqc_add_macvlan_element_data *mv_list,
2756			u16 count, struct i40e_asq_cmd_details *cmd_details)
2757{
2758	struct i40e_aq_desc desc;
2759	struct i40e_aqc_macvlan *cmd =
2760		(struct i40e_aqc_macvlan *)&desc.params.raw;
2761	i40e_status status;
2762	u16 buf_size;
2763	int i;
2764
2765	if (count == 0 || !mv_list || !hw)
2766		return I40E_ERR_PARAM;
2767
2768	buf_size = count * sizeof(*mv_list);
2769
2770	/* prep the rest of the request */
2771	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
2772	cmd->num_addresses = cpu_to_le16(count);
2773	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2774	cmd->seid[1] = 0;
2775	cmd->seid[2] = 0;
2776
2777	for (i = 0; i < count; i++)
2778		if (is_multicast_ether_addr(mv_list[i].mac_addr))
2779			mv_list[i].flags |=
2780			       cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
2781
2782	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2783	if (buf_size > I40E_AQ_LARGE_BUF)
2784		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2785
2786	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2787				       cmd_details);
2788
2789	return status;
2790}
2791
2792/**
2793 * i40e_aq_remove_macvlan
2794 * @hw: pointer to the hw struct
2795 * @seid: VSI for the mac address
2796 * @mv_list: list of macvlans to be removed
2797 * @count: length of the list
2798 * @cmd_details: pointer to command details structure or NULL
2799 *
2800 * Remove MAC/VLAN addresses from the HW filtering
2801 **/
2802i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2803			struct i40e_aqc_remove_macvlan_element_data *mv_list,
2804			u16 count, struct i40e_asq_cmd_details *cmd_details)
2805{
2806	struct i40e_aq_desc desc;
2807	struct i40e_aqc_macvlan *cmd =
2808		(struct i40e_aqc_macvlan *)&desc.params.raw;
2809	i40e_status status;
2810	u16 buf_size;
2811
2812	if (count == 0 || !mv_list || !hw)
2813		return I40E_ERR_PARAM;
2814
2815	buf_size = count * sizeof(*mv_list);
2816
2817	/* prep the rest of the request */
2818	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
2819	cmd->num_addresses = cpu_to_le16(count);
2820	cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
2821	cmd->seid[1] = 0;
2822	cmd->seid[2] = 0;
2823
2824	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
2825	if (buf_size > I40E_AQ_LARGE_BUF)
2826		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2827
2828	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
2829				       cmd_details);
2830
2831	return status;
2832}
2833
2834/**
2835 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
2836 * @hw: pointer to the hw struct
2837 * @opcode: AQ opcode for add or delete mirror rule
2838 * @sw_seid: Switch SEID (to which rule refers)
2839 * @rule_type: Rule Type (ingress/egress/VLAN)
2840 * @id: Destination VSI SEID or Rule ID
2841 * @count: length of the list
2842 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2843 * @cmd_details: pointer to command details structure or NULL
2844 * @rule_id: Rule ID returned from FW
2845 * @rules_used: Number of rules used in internal switch
2846 * @rules_free: Number of rules free in internal switch
2847 *
2848 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2849 * VEBs/VEPA elements only
2850 **/
2851static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
2852				u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
2853				u16 count, __le16 *mr_list,
2854				struct i40e_asq_cmd_details *cmd_details,
2855				u16 *rule_id, u16 *rules_used, u16 *rules_free)
2856{
2857	struct i40e_aq_desc desc;
2858	struct i40e_aqc_add_delete_mirror_rule *cmd =
2859		(struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
2860	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
2861	(struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
2862	i40e_status status;
2863	u16 buf_size;
2864
2865	buf_size = count * sizeof(*mr_list);
2866
2867	/* prep the rest of the request */
2868	i40e_fill_default_direct_cmd_desc(&desc, opcode);
2869	cmd->seid = cpu_to_le16(sw_seid);
2870	cmd->rule_type = cpu_to_le16(rule_type &
2871				     I40E_AQC_MIRROR_RULE_TYPE_MASK);
2872	cmd->num_entries = cpu_to_le16(count);
2873	/* Dest VSI for add, rule_id for delete */
2874	cmd->destination = cpu_to_le16(id);
2875	if (mr_list) {
2876		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2877						I40E_AQ_FLAG_RD));
2878		if (buf_size > I40E_AQ_LARGE_BUF)
2879			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2880	}
2881
2882	status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
2883				       cmd_details);
2884	if (!status ||
2885	    hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
2886		if (rule_id)
2887			*rule_id = le16_to_cpu(resp->rule_id);
2888		if (rules_used)
2889			*rules_used = le16_to_cpu(resp->mirror_rules_used);
2890		if (rules_free)
2891			*rules_free = le16_to_cpu(resp->mirror_rules_free);
2892	}
2893	return status;
2894}
2895
2896/**
2897 * i40e_aq_add_mirrorrule - add a mirror rule
2898 * @hw: pointer to the hw struct
2899 * @sw_seid: Switch SEID (to which rule refers)
2900 * @rule_type: Rule Type (ingress/egress/VLAN)
2901 * @dest_vsi: SEID of VSI to which packets will be mirrored
2902 * @count: length of the list
2903 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2904 * @cmd_details: pointer to command details structure or NULL
2905 * @rule_id: Rule ID returned from FW
2906 * @rules_used: Number of rules used in internal switch
2907 * @rules_free: Number of rules free in internal switch
2908 *
2909 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2910 **/
2911i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2912			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
2913			struct i40e_asq_cmd_details *cmd_details,
2914			u16 *rule_id, u16 *rules_used, u16 *rules_free)
2915{
2916	if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
2917	    rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
2918		if (count == 0 || !mr_list)
2919			return I40E_ERR_PARAM;
2920	}
2921
2922	return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
2923				  rule_type, dest_vsi, count, mr_list,
2924				  cmd_details, rule_id, rules_used, rules_free);
2925}
2926
2927/**
2928 * i40e_aq_delete_mirrorrule - delete a mirror rule
2929 * @hw: pointer to the hw struct
2930 * @sw_seid: Switch SEID (to which rule refers)
2931 * @rule_type: Rule Type (ingress/egress/VLAN)
2932 * @count: length of the list
2933 * @rule_id: Rule ID that is returned in the receive desc as part of
2934 *		add_mirrorrule.
2935 * @mr_list: list of mirrored VLAN IDs to be removed
2936 * @cmd_details: pointer to command details structure or NULL
2937 * @rules_used: Number of rules used in internal switch
2938 * @rules_free: Number of rules free in internal switch
2939 *
2940 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2941 **/
2942i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2943			u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
2944			struct i40e_asq_cmd_details *cmd_details,
2945			u16 *rules_used, u16 *rules_free)
2946{
2947	/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
2948	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
2949		/* count and mr_list shall be valid for rule_type INGRESS VLAN
2950		 * mirroring. For other rule_type, count and rule_type should
2951		 * not matter.
2952		 */
2953		if (count == 0 || !mr_list)
2954			return I40E_ERR_PARAM;
2955	}
2956
2957	return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
2958				  rule_type, rule_id, count, mr_list,
2959				  cmd_details, NULL, rules_used, rules_free);
2960}
2961
2962/**
2963 * i40e_aq_send_msg_to_vf
2964 * @hw: pointer to the hardware structure
2965 * @vfid: VF id to send msg
2966 * @v_opcode: opcodes for VF-PF communication
2967 * @v_retval: return error code
2968 * @msg: pointer to the msg buffer
2969 * @msglen: msg length
2970 * @cmd_details: pointer to command details
2971 *
2972 * send msg to vf
2973 **/
2974i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
2975				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
2976				struct i40e_asq_cmd_details *cmd_details)
2977{
2978	struct i40e_aq_desc desc;
2979	struct i40e_aqc_pf_vf_message *cmd =
2980		(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
2981	i40e_status status;
2982
2983	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
2984	cmd->id = cpu_to_le32(vfid);
2985	desc.cookie_high = cpu_to_le32(v_opcode);
2986	desc.cookie_low = cpu_to_le32(v_retval);
2987	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
2988	if (msglen) {
2989		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
2990						I40E_AQ_FLAG_RD));
2991		if (msglen > I40E_AQ_LARGE_BUF)
2992			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
2993		desc.datalen = cpu_to_le16(msglen);
2994	}
2995	status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
2996
2997	return status;
2998}
2999
3000/**
3001 * i40e_aq_debug_read_register
3002 * @hw: pointer to the hw struct
3003 * @reg_addr: register address
3004 * @reg_val: register value
3005 * @cmd_details: pointer to command details structure or NULL
3006 *
3007 * Read the register using the admin queue commands
3008 **/
3009i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
3010				u32 reg_addr, u64 *reg_val,
3011				struct i40e_asq_cmd_details *cmd_details)
3012{
3013	struct i40e_aq_desc desc;
3014	struct i40e_aqc_debug_reg_read_write *cmd_resp =
3015		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
3016	i40e_status status;
3017
3018	if (reg_val == NULL)
3019		return I40E_ERR_PARAM;
3020
3021	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
3022
3023	cmd_resp->address = cpu_to_le32(reg_addr);
3024
3025	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3026
3027	if (!status) {
3028		*reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
3029			   (u64)le32_to_cpu(cmd_resp->value_low);
3030	}
3031
3032	return status;
3033}
3034
3035/**
3036 * i40e_aq_debug_write_register
3037 * @hw: pointer to the hw struct
3038 * @reg_addr: register address
3039 * @reg_val: register value
3040 * @cmd_details: pointer to command details structure or NULL
3041 *
3042 * Write to a register using the admin queue commands
3043 **/
3044i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
3045					u32 reg_addr, u64 reg_val,
3046					struct i40e_asq_cmd_details *cmd_details)
3047{
3048	struct i40e_aq_desc desc;
3049	struct i40e_aqc_debug_reg_read_write *cmd =
3050		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
3051	i40e_status status;
3052
3053	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
3054
3055	cmd->address = cpu_to_le32(reg_addr);
3056	cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
3057	cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
3058
3059	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3060
3061	return status;
3062}
3063
3064/**
3065 * i40e_aq_request_resource
3066 * @hw: pointer to the hw struct
3067 * @resource: resource id
3068 * @access: access type
3069 * @sdp_number: resource number
3070 * @timeout: the maximum time in ms that the driver may hold the resource
3071 * @cmd_details: pointer to command details structure or NULL
3072 *
3073 * requests common resource using the admin queue commands
3074 **/
3075i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
3076				enum i40e_aq_resources_ids resource,
3077				enum i40e_aq_resource_access_type access,
3078				u8 sdp_number, u64 *timeout,
3079				struct i40e_asq_cmd_details *cmd_details)
3080{
3081	struct i40e_aq_desc desc;
3082	struct i40e_aqc_request_resource *cmd_resp =
3083		(struct i40e_aqc_request_resource *)&desc.params.raw;
3084	i40e_status status;
3085
3086	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
3087
3088	cmd_resp->resource_id = cpu_to_le16(resource);
3089	cmd_resp->access_type = cpu_to_le16(access);
3090	cmd_resp->resource_number = cpu_to_le32(sdp_number);
3091
3092	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3093	/* The completion specifies the maximum time in ms that the driver
3094	 * may hold the resource in the Timeout field.
3095	 * If the resource is held by someone else, the command completes with
3096	 * busy return value and the timeout field indicates the maximum time
3097	 * the current owner of the resource has to free it.
3098	 */
3099	if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
3100		*timeout = le32_to_cpu(cmd_resp->timeout);
3101
3102	return status;
3103}
3104
3105/**
3106 * i40e_aq_release_resource
3107 * @hw: pointer to the hw struct
3108 * @resource: resource id
3109 * @sdp_number: resource number
3110 * @cmd_details: pointer to command details structure or NULL
3111 *
3112 * release common resource using the admin queue commands
3113 **/
3114i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
3115				enum i40e_aq_resources_ids resource,
3116				u8 sdp_number,
3117				struct i40e_asq_cmd_details *cmd_details)
3118{
3119	struct i40e_aq_desc desc;
3120	struct i40e_aqc_request_resource *cmd =
3121		(struct i40e_aqc_request_resource *)&desc.params.raw;
3122	i40e_status status;
3123
3124	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
3125
3126	cmd->resource_id = cpu_to_le16(resource);
3127	cmd->resource_number = cpu_to_le32(sdp_number);
3128
3129	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3130
3131	return status;
3132}
3133
3134/**
3135 * i40e_aq_read_nvm
3136 * @hw: pointer to the hw struct
3137 * @module_pointer: module pointer location in words from the NVM beginning
3138 * @offset: byte offset from the module beginning
3139 * @length: length of the section to be read (in bytes from the offset)
3140 * @data: command buffer (size [bytes] = length)
3141 * @last_command: tells if this is the last command in a series
3142 * @cmd_details: pointer to command details structure or NULL
3143 *
3144 * Read the NVM using the admin queue commands
3145 **/
3146i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
3147				u32 offset, u16 length, void *data,
3148				bool last_command,
3149				struct i40e_asq_cmd_details *cmd_details)
3150{
3151	struct i40e_aq_desc desc;
3152	struct i40e_aqc_nvm_update *cmd =
3153		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3154	i40e_status status;
3155
3156	/* In offset the highest byte must be zeroed. */
3157	if (offset & 0xFF000000) {
3158		status = I40E_ERR_PARAM;
3159		goto i40e_aq_read_nvm_exit;
3160	}
3161
3162	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
3163
3164	/* If this is the last command in a series, set the proper flag. */
3165	if (last_command)
3166		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3167	cmd->module_pointer = module_pointer;
3168	cmd->offset = cpu_to_le32(offset);
3169	cmd->length = cpu_to_le16(length);
3170
3171	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3172	if (length > I40E_AQ_LARGE_BUF)
3173		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3174
3175	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3176
3177i40e_aq_read_nvm_exit:
3178	return status;
3179}
3180
3181/**
3182 * i40e_aq_erase_nvm
3183 * @hw: pointer to the hw struct
3184 * @module_pointer: module pointer location in words from the NVM beginning
3185 * @offset: offset in the module (expressed in 4 KB from module's beginning)
3186 * @length: length of the section to be erased (expressed in 4 KB)
3187 * @last_command: tells if this is the last command in a series
3188 * @cmd_details: pointer to command details structure or NULL
3189 *
3190 * Erase the NVM sector using the admin queue commands
3191 **/
3192i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
3193			      u32 offset, u16 length, bool last_command,
3194			      struct i40e_asq_cmd_details *cmd_details)
3195{
3196	struct i40e_aq_desc desc;
3197	struct i40e_aqc_nvm_update *cmd =
3198		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3199	i40e_status status;
3200
3201	/* In offset the highest byte must be zeroed. */
3202	if (offset & 0xFF000000) {
3203		status = I40E_ERR_PARAM;
3204		goto i40e_aq_erase_nvm_exit;
3205	}
3206
3207	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
3208
3209	/* If this is the last command in a series, set the proper flag. */
3210	if (last_command)
3211		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3212	cmd->module_pointer = module_pointer;
3213	cmd->offset = cpu_to_le32(offset);
3214	cmd->length = cpu_to_le16(length);
3215
3216	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3217
3218i40e_aq_erase_nvm_exit:
3219	return status;
3220}
3221
3222/**
3223 * i40e_parse_discover_capabilities
3224 * @hw: pointer to the hw struct
3225 * @buff: pointer to a buffer containing device/function capability records
3226 * @cap_count: number of capability records in the list
3227 * @list_type_opc: type of capabilities list to parse
3228 *
3229 * Parse the device/function capabilities list.
3230 **/
3231static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
3232				     u32 cap_count,
3233				     enum i40e_admin_queue_opc list_type_opc)
3234{
3235	struct i40e_aqc_list_capabilities_element_resp *cap;
3236	u32 valid_functions, num_functions;
3237	u32 number, logical_id, phys_id;
3238	struct i40e_hw_capabilities *p;
3239	u16 id, ocp_cfg_word0;
3240	i40e_status status;
3241	u8 major_rev;
3242	u32 i = 0;
3243
3244	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
3245
3246	if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
3247		p = &hw->dev_caps;
3248	else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
3249		p = &hw->func_caps;
3250	else
3251		return;
3252
3253	for (i = 0; i < cap_count; i++, cap++) {
3254		id = le16_to_cpu(cap->id);
3255		number = le32_to_cpu(cap->number);
3256		logical_id = le32_to_cpu(cap->logical_id);
3257		phys_id = le32_to_cpu(cap->phys_id);
3258		major_rev = cap->major_rev;
3259
3260		switch (id) {
3261		case I40E_AQ_CAP_ID_SWITCH_MODE:
3262			p->switch_mode = number;
3263			break;
3264		case I40E_AQ_CAP_ID_MNG_MODE:
3265			p->management_mode = number;
3266			if (major_rev > 1) {
3267				p->mng_protocols_over_mctp = logical_id;
3268				i40e_debug(hw, I40E_DEBUG_INIT,
3269					   "HW Capability: Protocols over MCTP = %d\n",
3270					   p->mng_protocols_over_mctp);
3271			} else {
3272				p->mng_protocols_over_mctp = 0;
3273			}
3274			break;
3275		case I40E_AQ_CAP_ID_NPAR_ACTIVE:
3276			p->npar_enable = number;
3277			break;
3278		case I40E_AQ_CAP_ID_OS2BMC_CAP:
3279			p->os2bmc = number;
3280			break;
3281		case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
3282			p->valid_functions = number;
3283			break;
3284		case I40E_AQ_CAP_ID_SRIOV:
3285			if (number == 1)
3286				p->sr_iov_1_1 = true;
3287			break;
3288		case I40E_AQ_CAP_ID_VF:
3289			p->num_vfs = number;
3290			p->vf_base_id = logical_id;
3291			break;
3292		case I40E_AQ_CAP_ID_VMDQ:
3293			if (number == 1)
3294				p->vmdq = true;
3295			break;
3296		case I40E_AQ_CAP_ID_8021QBG:
3297			if (number == 1)
3298				p->evb_802_1_qbg = true;
3299			break;
3300		case I40E_AQ_CAP_ID_8021QBR:
3301			if (number == 1)
3302				p->evb_802_1_qbh = true;
3303			break;
3304		case I40E_AQ_CAP_ID_VSI:
3305			p->num_vsis = number;
3306			break;
3307		case I40E_AQ_CAP_ID_DCB:
3308			if (number == 1) {
3309				p->dcb = true;
3310				p->enabled_tcmap = logical_id;
3311				p->maxtc = phys_id;
3312			}
3313			break;
3314		case I40E_AQ_CAP_ID_FCOE:
3315			if (number == 1)
3316				p->fcoe = true;
3317			break;
3318		case I40E_AQ_CAP_ID_ISCSI:
3319			if (number == 1)
3320				p->iscsi = true;
3321			break;
3322		case I40E_AQ_CAP_ID_RSS:
3323			p->rss = true;
3324			p->rss_table_size = number;
3325			p->rss_table_entry_width = logical_id;
3326			break;
3327		case I40E_AQ_CAP_ID_RXQ:
3328			p->num_rx_qp = number;
3329			p->base_queue = phys_id;
3330			break;
3331		case I40E_AQ_CAP_ID_TXQ:
3332			p->num_tx_qp = number;
3333			p->base_queue = phys_id;
3334			break;
3335		case I40E_AQ_CAP_ID_MSIX:
3336			p->num_msix_vectors = number;
3337			i40e_debug(hw, I40E_DEBUG_INIT,
3338				   "HW Capability: MSIX vector count = %d\n",
3339				   p->num_msix_vectors);
3340			break;
3341		case I40E_AQ_CAP_ID_VF_MSIX:
3342			p->num_msix_vectors_vf = number;
3343			break;
3344		case I40E_AQ_CAP_ID_FLEX10:
3345			if (major_rev == 1) {
3346				if (number == 1) {
3347					p->flex10_enable = true;
3348					p->flex10_capable = true;
3349				}
3350			} else {
3351				/* Capability revision >= 2 */
3352				if (number & 1)
3353					p->flex10_enable = true;
3354				if (number & 2)
3355					p->flex10_capable = true;
3356			}
3357			p->flex10_mode = logical_id;
3358			p->flex10_status = phys_id;
3359			break;
3360		case I40E_AQ_CAP_ID_CEM:
3361			if (number == 1)
3362				p->mgmt_cem = true;
3363			break;
3364		case I40E_AQ_CAP_ID_IWARP:
3365			if (number == 1)
3366				p->iwarp = true;
3367			break;
3368		case I40E_AQ_CAP_ID_LED:
3369			if (phys_id < I40E_HW_CAP_MAX_GPIO)
3370				p->led[phys_id] = true;
3371			break;
3372		case I40E_AQ_CAP_ID_SDP:
3373			if (phys_id < I40E_HW_CAP_MAX_GPIO)
3374				p->sdp[phys_id] = true;
3375			break;
3376		case I40E_AQ_CAP_ID_MDIO:
3377			if (number == 1) {
3378				p->mdio_port_num = phys_id;
3379				p->mdio_port_mode = logical_id;
3380			}
3381			break;
3382		case I40E_AQ_CAP_ID_1588:
3383			if (number == 1)
3384				p->ieee_1588 = true;
3385			break;
3386		case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
3387			p->fd = true;
3388			p->fd_filters_guaranteed = number;
3389			p->fd_filters_best_effort = logical_id;
3390			break;
3391		case I40E_AQ_CAP_ID_WSR_PROT:
3392			p->wr_csr_prot = (u64)number;
3393			p->wr_csr_prot |= (u64)logical_id << 32;
3394			break;
3395		case I40E_AQ_CAP_ID_NVM_MGMT:
3396			if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
3397				p->sec_rev_disabled = true;
3398			if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
3399				p->update_disabled = true;
3400			break;
3401		default:
3402			break;
3403		}
3404	}
3405
3406	if (p->fcoe)
3407		i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
3408
3409	/* Software override ensuring FCoE is disabled if npar or mfp
3410	 * mode because it is not supported in these modes.
3411	 */
3412	if (p->npar_enable || p->flex10_enable)
3413		p->fcoe = false;
3414
3415	/* count the enabled ports (aka the "not disabled" ports) */
3416	hw->num_ports = 0;
3417	for (i = 0; i < 4; i++) {
3418		u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
3419		u64 port_cfg = 0;
3420
3421		/* use AQ read to get the physical register offset instead
3422		 * of the port relative offset
3423		 */
3424		i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
3425		if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
3426			hw->num_ports++;
3427	}
3428
3429	/* OCP cards case: if a mezz is removed the Ethernet port is at
3430	 * disabled state in PRTGEN_CNF register. Additional NVM read is
3431	 * needed in order to check if we are dealing with OCP card.
3432	 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting
3433	 * physical ports results in wrong partition id calculation and thus
3434	 * not supporting WoL.
3435	 */
3436	if (hw->mac.type == I40E_MAC_X722) {
3437		if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) {
3438			status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
3439						  2 * I40E_SR_OCP_CFG_WORD0,
3440						  sizeof(ocp_cfg_word0),
3441						  &ocp_cfg_word0, true, NULL);
3442			if (!status &&
3443			    (ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
3444				hw->num_ports = 4;
3445			i40e_release_nvm(hw);
3446		}
3447	}
3448
3449	valid_functions = p->valid_functions;
3450	num_functions = 0;
3451	while (valid_functions) {
3452		if (valid_functions & 1)
3453			num_functions++;
3454		valid_functions >>= 1;
3455	}
3456
3457	/* partition id is 1-based, and functions are evenly spread
3458	 * across the ports as partitions
3459	 */
3460	if (hw->num_ports != 0) {
3461		hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
3462		hw->num_partitions = num_functions / hw->num_ports;
3463	}
3464
3465	/* additional HW specific goodies that might
3466	 * someday be HW version specific
3467	 */
3468	p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
3469}
3470
3471/**
3472 * i40e_aq_discover_capabilities
3473 * @hw: pointer to the hw struct
3474 * @buff: a virtual buffer to hold the capabilities
3475 * @buff_size: Size of the virtual buffer
3476 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
3477 * @list_type_opc: capabilities type to discover - pass in the command opcode
3478 * @cmd_details: pointer to command details structure or NULL
3479 *
3480 * Get the device capabilities descriptions from the firmware
3481 **/
3482i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
3483				void *buff, u16 buff_size, u16 *data_size,
3484				enum i40e_admin_queue_opc list_type_opc,
3485				struct i40e_asq_cmd_details *cmd_details)
3486{
3487	struct i40e_aqc_list_capabilites *cmd;
3488	struct i40e_aq_desc desc;
3489	i40e_status status = 0;
3490
3491	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
3492
3493	if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
3494		list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
3495		status = I40E_ERR_PARAM;
3496		goto exit;
3497	}
3498
3499	i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
3500
3501	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3502	if (buff_size > I40E_AQ_LARGE_BUF)
3503		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3504
3505	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3506	*data_size = le16_to_cpu(desc.datalen);
3507
3508	if (status)
3509		goto exit;
3510
3511	i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
3512					 list_type_opc);
3513
3514exit:
3515	return status;
3516}
3517
3518/**
3519 * i40e_aq_update_nvm
3520 * @hw: pointer to the hw struct
3521 * @module_pointer: module pointer location in words from the NVM beginning
3522 * @offset: byte offset from the module beginning
3523 * @length: length of the section to be written (in bytes from the offset)
3524 * @data: command buffer (size [bytes] = length)
3525 * @last_command: tells if this is the last command in a series
3526 * @preservation_flags: Preservation mode flags
3527 * @cmd_details: pointer to command details structure or NULL
3528 *
3529 * Update the NVM using the admin queue commands
3530 **/
3531i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
3532			       u32 offset, u16 length, void *data,
3533				bool last_command, u8 preservation_flags,
3534			       struct i40e_asq_cmd_details *cmd_details)
3535{
3536	struct i40e_aq_desc desc;
3537	struct i40e_aqc_nvm_update *cmd =
3538		(struct i40e_aqc_nvm_update *)&desc.params.raw;
3539	i40e_status status;
3540
3541	/* In offset the highest byte must be zeroed. */
3542	if (offset & 0xFF000000) {
3543		status = I40E_ERR_PARAM;
3544		goto i40e_aq_update_nvm_exit;
3545	}
3546
3547	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3548
3549	/* If this is the last command in a series, set the proper flag. */
3550	if (last_command)
3551		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
3552	if (hw->mac.type == I40E_MAC_X722) {
3553		if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
3554			cmd->command_flags |=
3555				(I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
3556				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3557		else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
3558			cmd->command_flags |=
3559				(I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
3560				 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
3561	}
3562	cmd->module_pointer = module_pointer;
3563	cmd->offset = cpu_to_le32(offset);
3564	cmd->length = cpu_to_le16(length);
3565
3566	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
3567	if (length > I40E_AQ_LARGE_BUF)
3568		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3569
3570	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
3571
3572i40e_aq_update_nvm_exit:
3573	return status;
3574}
3575
3576/**
3577 * i40e_aq_rearrange_nvm
3578 * @hw: pointer to the hw struct
3579 * @rearrange_nvm: defines direction of rearrangement
3580 * @cmd_details: pointer to command details structure or NULL
3581 *
3582 * Rearrange NVM structure, available only for transition FW
3583 **/
3584i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
3585				  u8 rearrange_nvm,
3586				  struct i40e_asq_cmd_details *cmd_details)
3587{
3588	struct i40e_aqc_nvm_update *cmd;
3589	i40e_status status;
3590	struct i40e_aq_desc desc;
3591
3592	cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
3593
3594	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
3595
3596	rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
3597			 I40E_AQ_NVM_REARRANGE_TO_STRUCT);
3598
3599	if (!rearrange_nvm) {
3600		status = I40E_ERR_PARAM;
3601		goto i40e_aq_rearrange_nvm_exit;
3602	}
3603
3604	cmd->command_flags |= rearrange_nvm;
3605	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3606
3607i40e_aq_rearrange_nvm_exit:
3608	return status;
3609}
3610
3611/**
3612 * i40e_aq_get_lldp_mib
3613 * @hw: pointer to the hw struct
3614 * @bridge_type: type of bridge requested
3615 * @mib_type: Local, Remote or both Local and Remote MIBs
3616 * @buff: pointer to a user supplied buffer to store the MIB block
3617 * @buff_size: size of the buffer (in bytes)
3618 * @local_len : length of the returned Local LLDP MIB
3619 * @remote_len: length of the returned Remote LLDP MIB
3620 * @cmd_details: pointer to command details structure or NULL
3621 *
3622 * Requests the complete LLDP MIB (entire packet).
3623 **/
3624i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
3625				u8 mib_type, void *buff, u16 buff_size,
3626				u16 *local_len, u16 *remote_len,
3627				struct i40e_asq_cmd_details *cmd_details)
3628{
3629	struct i40e_aq_desc desc;
3630	struct i40e_aqc_lldp_get_mib *cmd =
3631		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3632	struct i40e_aqc_lldp_get_mib *resp =
3633		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
3634	i40e_status status;
3635
3636	if (buff_size == 0 || !buff)
3637		return I40E_ERR_PARAM;
3638
3639	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
3640	/* Indirect Command */
3641	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3642
3643	cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
3644	cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
3645		       I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
3646
3647	desc.datalen = cpu_to_le16(buff_size);
3648
3649	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3650	if (buff_size > I40E_AQ_LARGE_BUF)
3651		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
3652
3653	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
3654	if (!status) {
3655		if (local_len != NULL)
3656			*local_len = le16_to_cpu(resp->local_len);
3657		if (remote_len != NULL)
3658			*remote_len = le16_to_cpu(resp->remote_len);
3659	}
3660
3661	return status;
3662}
3663
3664/**
3665 * i40e_aq_cfg_lldp_mib_change_event
3666 * @hw: pointer to the hw struct
3667 * @enable_update: Enable or Disable event posting
3668 * @cmd_details: pointer to command details structure or NULL
3669 *
3670 * Enable or Disable posting of an event on ARQ when LLDP MIB
3671 * associated with the interface changes
3672 **/
3673i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
3674				bool enable_update,
3675				struct i40e_asq_cmd_details *cmd_details)
3676{
3677	struct i40e_aq_desc desc;
3678	struct i40e_aqc_lldp_update_mib *cmd =
3679		(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
3680	i40e_status status;
3681
3682	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
3683
3684	if (!enable_update)
3685		cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
3686
3687	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3688
3689	return status;
3690}
3691
3692/**
3693 * i40e_aq_restore_lldp
3694 * @hw: pointer to the hw struct
3695 * @setting: pointer to factory setting variable or NULL
3696 * @restore: True if factory settings should be restored
3697 * @cmd_details: pointer to command details structure or NULL
3698 *
3699 * Restore LLDP Agent factory settings if @restore set to True. In other case
3700 * only returns factory setting in AQ response.
3701 **/
3702enum i40e_status_code
3703i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
3704		     struct i40e_asq_cmd_details *cmd_details)
3705{
3706	struct i40e_aq_desc desc;
3707	struct i40e_aqc_lldp_restore *cmd =
3708		(struct i40e_aqc_lldp_restore *)&desc.params.raw;
3709	i40e_status status;
3710
3711	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
3712		i40e_debug(hw, I40E_DEBUG_ALL,
3713			   "Restore LLDP not supported by current FW version.\n");
3714		return I40E_ERR_DEVICE_NOT_SUPPORTED;
3715	}
3716
3717	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
3718
3719	if (restore)
3720		cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
3721
3722	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3723
3724	if (setting)
3725		*setting = cmd->command & 1;
3726
3727	return status;
3728}
3729
3730/**
3731 * i40e_aq_stop_lldp
3732 * @hw: pointer to the hw struct
3733 * @shutdown_agent: True if LLDP Agent needs to be Shutdown
3734 * @persist: True if stop of LLDP should be persistent across power cycles
3735 * @cmd_details: pointer to command details structure or NULL
3736 *
3737 * Stop or Shutdown the embedded LLDP Agent
3738 **/
3739i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3740				bool persist,
3741				struct i40e_asq_cmd_details *cmd_details)
3742{
3743	struct i40e_aq_desc desc;
3744	struct i40e_aqc_lldp_stop *cmd =
3745		(struct i40e_aqc_lldp_stop *)&desc.params.raw;
3746	i40e_status status;
3747
3748	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
3749
3750	if (shutdown_agent)
3751		cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
3752
3753	if (persist) {
3754		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3755			cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
3756		else
3757			i40e_debug(hw, I40E_DEBUG_ALL,
3758				   "Persistent Stop LLDP not supported by current FW version.\n");
3759	}
3760
3761	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3762
3763	return status;
3764}
3765
3766/**
3767 * i40e_aq_start_lldp
3768 * @hw: pointer to the hw struct
3769 * @persist: True if start of LLDP should be persistent across power cycles
3770 * @cmd_details: pointer to command details structure or NULL
3771 *
3772 * Start the embedded LLDP Agent on all ports.
3773 **/
3774i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
3775			       struct i40e_asq_cmd_details *cmd_details)
3776{
3777	struct i40e_aq_desc desc;
3778	struct i40e_aqc_lldp_start *cmd =
3779		(struct i40e_aqc_lldp_start *)&desc.params.raw;
3780	i40e_status status;
3781
3782	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
3783
3784	cmd->command = I40E_AQ_LLDP_AGENT_START;
3785
3786	if (persist) {
3787		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
3788			cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
3789		else
3790			i40e_debug(hw, I40E_DEBUG_ALL,
3791				   "Persistent Start LLDP not supported by current FW version.\n");
3792	}
3793
3794	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3795
3796	return status;
3797}
3798
3799/**
3800 * i40e_aq_set_dcb_parameters
3801 * @hw: pointer to the hw struct
3802 * @cmd_details: pointer to command details structure or NULL
3803 * @dcb_enable: True if DCB configuration needs to be applied
3804 *
3805 **/
3806enum i40e_status_code
3807i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
3808			   struct i40e_asq_cmd_details *cmd_details)
3809{
3810	struct i40e_aq_desc desc;
3811	struct i40e_aqc_set_dcb_parameters *cmd =
3812		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
3813	i40e_status status;
3814
3815	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
3816		return I40E_ERR_DEVICE_NOT_SUPPORTED;
3817
3818	i40e_fill_default_direct_cmd_desc(&desc,
3819					  i40e_aqc_opc_set_dcb_parameters);
3820
3821	if (dcb_enable) {
3822		cmd->valid_flags = I40E_DCB_VALID;
3823		cmd->command = I40E_AQ_DCB_SET_AGENT;
3824	}
3825	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3826
3827	return status;
3828}
3829
3830/**
3831 * i40e_aq_get_cee_dcb_config
3832 * @hw: pointer to the hw struct
3833 * @buff: response buffer that stores CEE operational configuration
3834 * @buff_size: size of the buffer passed
3835 * @cmd_details: pointer to command details structure or NULL
3836 *
3837 * Get CEE DCBX mode operational configuration from firmware
3838 **/
3839i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3840				       void *buff, u16 buff_size,
3841				       struct i40e_asq_cmd_details *cmd_details)
3842{
3843	struct i40e_aq_desc desc;
3844	i40e_status status;
3845
3846	if (buff_size == 0 || !buff)
3847		return I40E_ERR_PARAM;
3848
3849	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
3850
3851	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
3852	status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
3853				       cmd_details);
3854
3855	return status;
3856}
3857
3858/**
3859 * i40e_aq_add_udp_tunnel
3860 * @hw: pointer to the hw struct
3861 * @udp_port: the UDP port to add in Host byte order
3862 * @protocol_index: protocol index type
3863 * @filter_index: pointer to filter index
3864 * @cmd_details: pointer to command details structure or NULL
3865 *
3866 * Note: Firmware expects the udp_port value to be in Little Endian format,
3867 * and this function will call cpu_to_le16 to convert from Host byte order to
3868 * Little Endian order.
3869 **/
3870i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
3871				u16 udp_port, u8 protocol_index,
3872				u8 *filter_index,
3873				struct i40e_asq_cmd_details *cmd_details)
3874{
3875	struct i40e_aq_desc desc;
3876	struct i40e_aqc_add_udp_tunnel *cmd =
3877		(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
3878	struct i40e_aqc_del_udp_tunnel_completion *resp =
3879		(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
3880	i40e_status status;
3881
3882	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
3883
3884	cmd->udp_port = cpu_to_le16(udp_port);
3885	cmd->protocol_type = protocol_index;
3886
3887	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3888
3889	if (!status && filter_index)
3890		*filter_index = resp->index;
3891
3892	return status;
3893}
3894
3895/**
3896 * i40e_aq_del_udp_tunnel
3897 * @hw: pointer to the hw struct
3898 * @index: filter index
3899 * @cmd_details: pointer to command details structure or NULL
3900 **/
3901i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
3902				struct i40e_asq_cmd_details *cmd_details)
3903{
3904	struct i40e_aq_desc desc;
3905	struct i40e_aqc_remove_udp_tunnel *cmd =
3906		(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
3907	i40e_status status;
3908
3909	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
3910
3911	cmd->index = index;
3912
3913	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3914
3915	return status;
3916}
3917
3918/**
3919 * i40e_aq_delete_element - Delete switch element
3920 * @hw: pointer to the hw struct
3921 * @seid: the SEID to delete from the switch
3922 * @cmd_details: pointer to command details structure or NULL
3923 *
3924 * This deletes a switch element from the switch.
3925 **/
3926i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
3927				struct i40e_asq_cmd_details *cmd_details)
3928{
3929	struct i40e_aq_desc desc;
3930	struct i40e_aqc_switch_seid *cmd =
3931		(struct i40e_aqc_switch_seid *)&desc.params.raw;
3932	i40e_status status;
3933
3934	if (seid == 0)
3935		return I40E_ERR_PARAM;
3936
3937	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
3938
3939	cmd->seid = cpu_to_le16(seid);
3940
3941	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3942
3943	return status;
3944}
3945
3946/**
3947 * i40e_aq_dcb_updated - DCB Updated Command
3948 * @hw: pointer to the hw struct
3949 * @cmd_details: pointer to command details structure or NULL
3950 *
3951 * EMP will return when the shared RPB settings have been
3952 * recomputed and modified. The retval field in the descriptor
3953 * will be set to 0 when RPB is modified.
3954 **/
3955i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
3956				struct i40e_asq_cmd_details *cmd_details)
3957{
3958	struct i40e_aq_desc desc;
3959	i40e_status status;
3960
3961	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
3962
3963	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
3964
3965	return status;
3966}
3967
3968/**
3969 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
3970 * @hw: pointer to the hw struct
3971 * @seid: seid for the physical port/switching component/vsi
3972 * @buff: Indirect buffer to hold data parameters and response
3973 * @buff_size: Indirect buffer size
3974 * @opcode: Tx scheduler AQ command opcode
3975 * @cmd_details: pointer to command details structure or NULL
3976 *
3977 * Generic command handler for Tx scheduler AQ commands
3978 **/
3979static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
3980				void *buff, u16 buff_size,
3981				 enum i40e_admin_queue_opc opcode,
3982				struct i40e_asq_cmd_details *cmd_details)
3983{
3984	struct i40e_aq_desc desc;
3985	struct i40e_aqc_tx_sched_ind *cmd =
3986		(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
3987	i40e_status status;
3988	bool cmd_param_flag = false;
3989
3990	switch (opcode) {
3991	case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
3992	case i40e_aqc_opc_configure_vsi_tc_bw:
3993	case i40e_aqc_opc_enable_switching_comp_ets:
3994	case i40e_aqc_opc_modify_switching_comp_ets:
3995	case i40e_aqc_opc_disable_switching_comp_ets:
3996	case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
3997	case i40e_aqc_opc_configure_switching_comp_bw_config:
3998		cmd_param_flag = true;
3999		break;
4000	case i40e_aqc_opc_query_vsi_bw_config:
4001	case i40e_aqc_opc_query_vsi_ets_sla_config:
4002	case i40e_aqc_opc_query_switching_comp_ets_config:
4003	case i40e_aqc_opc_query_port_ets_config:
4004	case i40e_aqc_opc_query_switching_comp_bw_config:
4005		cmd_param_flag = false;
4006		break;
4007	default:
4008		return I40E_ERR_PARAM;
4009	}
4010
4011	i40e_fill_default_direct_cmd_desc(&desc, opcode);
4012
4013	/* Indirect command */
4014	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4015	if (cmd_param_flag)
4016		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4017	if (buff_size > I40E_AQ_LARGE_BUF)
4018		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4019
4020	desc.datalen = cpu_to_le16(buff_size);
4021
4022	cmd->vsi_seid = cpu_to_le16(seid);
4023
4024	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4025
4026	return status;
4027}
4028
4029/**
4030 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
4031 * @hw: pointer to the hw struct
4032 * @seid: VSI seid
4033 * @credit: BW limit credits (0 = disabled)
4034 * @max_credit: Max BW limit credits
4035 * @cmd_details: pointer to command details structure or NULL
4036 **/
4037i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
4038				u16 seid, u16 credit, u8 max_credit,
4039				struct i40e_asq_cmd_details *cmd_details)
4040{
4041	struct i40e_aq_desc desc;
4042	struct i40e_aqc_configure_vsi_bw_limit *cmd =
4043		(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
4044	i40e_status status;
4045
4046	i40e_fill_default_direct_cmd_desc(&desc,
4047					  i40e_aqc_opc_configure_vsi_bw_limit);
4048
4049	cmd->vsi_seid = cpu_to_le16(seid);
4050	cmd->credit = cpu_to_le16(credit);
4051	cmd->max_credit = max_credit;
4052
4053	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4054
4055	return status;
4056}
4057
4058/**
4059 * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
4060 * @hw: pointer to the hw struct
4061 * @seid: VSI seid
4062 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
4063 * @cmd_details: pointer to command details structure or NULL
4064 **/
4065i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
4066			u16 seid,
4067			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
4068			struct i40e_asq_cmd_details *cmd_details)
4069{
4070	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4071				    i40e_aqc_opc_configure_vsi_tc_bw,
4072				    cmd_details);
4073}
4074
4075/**
4076 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
4077 * @hw: pointer to the hw struct
4078 * @seid: seid of the switching component connected to Physical Port
4079 * @ets_data: Buffer holding ETS parameters
4080 * @opcode: Tx scheduler AQ command opcode
4081 * @cmd_details: pointer to command details structure or NULL
4082 **/
4083i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
4084		u16 seid,
4085		struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
4086		enum i40e_admin_queue_opc opcode,
4087		struct i40e_asq_cmd_details *cmd_details)
4088{
4089	return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
4090				    sizeof(*ets_data), opcode, cmd_details);
4091}
4092
4093/**
4094 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
4095 * @hw: pointer to the hw struct
4096 * @seid: seid of the switching component
4097 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
4098 * @cmd_details: pointer to command details structure or NULL
4099 **/
4100i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
4101	u16 seid,
4102	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
4103	struct i40e_asq_cmd_details *cmd_details)
4104{
4105	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4106			    i40e_aqc_opc_configure_switching_comp_bw_config,
4107			    cmd_details);
4108}
4109
4110/**
4111 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
4112 * @hw: pointer to the hw struct
4113 * @seid: seid of the VSI
4114 * @bw_data: Buffer to hold VSI BW configuration
4115 * @cmd_details: pointer to command details structure or NULL
4116 **/
4117i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
4118			u16 seid,
4119			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
4120			struct i40e_asq_cmd_details *cmd_details)
4121{
4122	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4123				    i40e_aqc_opc_query_vsi_bw_config,
4124				    cmd_details);
4125}
4126
4127/**
4128 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
4129 * @hw: pointer to the hw struct
4130 * @seid: seid of the VSI
4131 * @bw_data: Buffer to hold VSI BW configuration per TC
4132 * @cmd_details: pointer to command details structure or NULL
4133 **/
4134i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
4135			u16 seid,
4136			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
4137			struct i40e_asq_cmd_details *cmd_details)
4138{
4139	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4140				    i40e_aqc_opc_query_vsi_ets_sla_config,
4141				    cmd_details);
4142}
4143
4144/**
4145 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
4146 * @hw: pointer to the hw struct
4147 * @seid: seid of the switching component
4148 * @bw_data: Buffer to hold switching component's per TC BW config
4149 * @cmd_details: pointer to command details structure or NULL
4150 **/
4151i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
4152		u16 seid,
4153		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
4154		struct i40e_asq_cmd_details *cmd_details)
4155{
4156	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4157				   i40e_aqc_opc_query_switching_comp_ets_config,
4158				   cmd_details);
4159}
4160
4161/**
4162 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
4163 * @hw: pointer to the hw struct
4164 * @seid: seid of the VSI or switching component connected to Physical Port
4165 * @bw_data: Buffer to hold current ETS configuration for the Physical Port
4166 * @cmd_details: pointer to command details structure or NULL
4167 **/
4168i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
4169			u16 seid,
4170			struct i40e_aqc_query_port_ets_config_resp *bw_data,
4171			struct i40e_asq_cmd_details *cmd_details)
4172{
4173	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4174				    i40e_aqc_opc_query_port_ets_config,
4175				    cmd_details);
4176}
4177
4178/**
4179 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
4180 * @hw: pointer to the hw struct
4181 * @seid: seid of the switching component
4182 * @bw_data: Buffer to hold switching component's BW configuration
4183 * @cmd_details: pointer to command details structure or NULL
4184 **/
4185i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
4186		u16 seid,
4187		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
4188		struct i40e_asq_cmd_details *cmd_details)
4189{
4190	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
4191				    i40e_aqc_opc_query_switching_comp_bw_config,
4192				    cmd_details);
4193}
4194
4195/**
4196 * i40e_validate_filter_settings
4197 * @hw: pointer to the hardware structure
4198 * @settings: Filter control settings
4199 *
4200 * Check and validate the filter control settings passed.
4201 * The function checks for the valid filter/context sizes being
4202 * passed for FCoE and PE.
4203 *
4204 * Returns 0 if the values passed are valid and within
4205 * range else returns an error.
4206 **/
4207static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
4208				struct i40e_filter_control_settings *settings)
4209{
4210	u32 fcoe_cntx_size, fcoe_filt_size;
4211	u32 pe_cntx_size, pe_filt_size;
4212	u32 fcoe_fmax;
4213	u32 val;
4214
4215	/* Validate FCoE settings passed */
4216	switch (settings->fcoe_filt_num) {
4217	case I40E_HASH_FILTER_SIZE_1K:
4218	case I40E_HASH_FILTER_SIZE_2K:
4219	case I40E_HASH_FILTER_SIZE_4K:
4220	case I40E_HASH_FILTER_SIZE_8K:
4221	case I40E_HASH_FILTER_SIZE_16K:
4222	case I40E_HASH_FILTER_SIZE_32K:
4223		fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4224		fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
4225		break;
4226	default:
4227		return I40E_ERR_PARAM;
4228	}
4229
4230	switch (settings->fcoe_cntx_num) {
4231	case I40E_DMA_CNTX_SIZE_512:
4232	case I40E_DMA_CNTX_SIZE_1K:
4233	case I40E_DMA_CNTX_SIZE_2K:
4234	case I40E_DMA_CNTX_SIZE_4K:
4235		fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4236		fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
4237		break;
4238	default:
4239		return I40E_ERR_PARAM;
4240	}
4241
4242	/* Validate PE settings passed */
4243	switch (settings->pe_filt_num) {
4244	case I40E_HASH_FILTER_SIZE_1K:
4245	case I40E_HASH_FILTER_SIZE_2K:
4246	case I40E_HASH_FILTER_SIZE_4K:
4247	case I40E_HASH_FILTER_SIZE_8K:
4248	case I40E_HASH_FILTER_SIZE_16K:
4249	case I40E_HASH_FILTER_SIZE_32K:
4250	case I40E_HASH_FILTER_SIZE_64K:
4251	case I40E_HASH_FILTER_SIZE_128K:
4252	case I40E_HASH_FILTER_SIZE_256K:
4253	case I40E_HASH_FILTER_SIZE_512K:
4254	case I40E_HASH_FILTER_SIZE_1M:
4255		pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
4256		pe_filt_size <<= (u32)settings->pe_filt_num;
4257		break;
4258	default:
4259		return I40E_ERR_PARAM;
4260	}
4261
4262	switch (settings->pe_cntx_num) {
4263	case I40E_DMA_CNTX_SIZE_512:
4264	case I40E_DMA_CNTX_SIZE_1K:
4265	case I40E_DMA_CNTX_SIZE_2K:
4266	case I40E_DMA_CNTX_SIZE_4K:
4267	case I40E_DMA_CNTX_SIZE_8K:
4268	case I40E_DMA_CNTX_SIZE_16K:
4269	case I40E_DMA_CNTX_SIZE_32K:
4270	case I40E_DMA_CNTX_SIZE_64K:
4271	case I40E_DMA_CNTX_SIZE_128K:
4272	case I40E_DMA_CNTX_SIZE_256K:
4273		pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
4274		pe_cntx_size <<= (u32)settings->pe_cntx_num;
4275		break;
4276	default:
4277		return I40E_ERR_PARAM;
4278	}
4279
4280	/* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
4281	val = rd32(hw, I40E_GLHMC_FCOEFMAX);
4282	fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
4283		     >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
4284	if (fcoe_filt_size + fcoe_cntx_size >  fcoe_fmax)
4285		return I40E_ERR_INVALID_SIZE;
4286
4287	return 0;
4288}
4289
4290/**
4291 * i40e_set_filter_control
4292 * @hw: pointer to the hardware structure
4293 * @settings: Filter control settings
4294 *
4295 * Set the Queue Filters for PE/FCoE and enable filters required
4296 * for a single PF. It is expected that these settings are programmed
4297 * at the driver initialization time.
4298 **/
4299i40e_status i40e_set_filter_control(struct i40e_hw *hw,
4300				struct i40e_filter_control_settings *settings)
4301{
4302	i40e_status ret = 0;
4303	u32 hash_lut_size = 0;
4304	u32 val;
4305
4306	if (!settings)
4307		return I40E_ERR_PARAM;
4308
4309	/* Validate the input settings */
4310	ret = i40e_validate_filter_settings(hw, settings);
4311	if (ret)
4312		return ret;
4313
4314	/* Read the PF Queue Filter control register */
4315	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
4316
4317	/* Program required PE hash buckets for the PF */
4318	val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
4319	val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
4320		I40E_PFQF_CTL_0_PEHSIZE_MASK;
4321	/* Program required PE contexts for the PF */
4322	val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
4323	val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
4324		I40E_PFQF_CTL_0_PEDSIZE_MASK;
4325
4326	/* Program required FCoE hash buckets for the PF */
4327	val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4328	val |= ((u32)settings->fcoe_filt_num <<
4329			I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
4330		I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
4331	/* Program required FCoE DDP contexts for the PF */
4332	val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4333	val |= ((u32)settings->fcoe_cntx_num <<
4334			I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
4335		I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
4336
4337	/* Program Hash LUT size for the PF */
4338	val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4339	if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
4340		hash_lut_size = 1;
4341	val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
4342		I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
4343
4344	/* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
4345	if (settings->enable_fdir)
4346		val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
4347	if (settings->enable_ethtype)
4348		val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
4349	if (settings->enable_macvlan)
4350		val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
4351
4352	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
4353
4354	return 0;
4355}
4356
4357/**
4358 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
4359 * @hw: pointer to the hw struct
4360 * @mac_addr: MAC address to use in the filter
4361 * @ethtype: Ethertype to use in the filter
4362 * @flags: Flags that needs to be applied to the filter
4363 * @vsi_seid: seid of the control VSI
4364 * @queue: VSI queue number to send the packet to
4365 * @is_add: Add control packet filter if True else remove
4366 * @stats: Structure to hold information on control filter counts
4367 * @cmd_details: pointer to command details structure or NULL
4368 *
4369 * This command will Add or Remove control packet filter for a control VSI.
4370 * In return it will update the total number of perfect filter count in
4371 * the stats member.
4372 **/
4373i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4374				u8 *mac_addr, u16 ethtype, u16 flags,
4375				u16 vsi_seid, u16 queue, bool is_add,
4376				struct i40e_control_filter_stats *stats,
4377				struct i40e_asq_cmd_details *cmd_details)
4378{
4379	struct i40e_aq_desc desc;
4380	struct i40e_aqc_add_remove_control_packet_filter *cmd =
4381		(struct i40e_aqc_add_remove_control_packet_filter *)
4382		&desc.params.raw;
4383	struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
4384		(struct i40e_aqc_add_remove_control_packet_filter_completion *)
4385		&desc.params.raw;
4386	i40e_status status;
4387
4388	if (vsi_seid == 0)
4389		return I40E_ERR_PARAM;
4390
4391	if (is_add) {
4392		i40e_fill_default_direct_cmd_desc(&desc,
4393				i40e_aqc_opc_add_control_packet_filter);
4394		cmd->queue = cpu_to_le16(queue);
4395	} else {
4396		i40e_fill_default_direct_cmd_desc(&desc,
4397				i40e_aqc_opc_remove_control_packet_filter);
4398	}
4399
4400	if (mac_addr)
4401		ether_addr_copy(cmd->mac, mac_addr);
4402
4403	cmd->etype = cpu_to_le16(ethtype);
4404	cmd->flags = cpu_to_le16(flags);
4405	cmd->seid = cpu_to_le16(vsi_seid);
4406
4407	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4408
4409	if (!status && stats) {
4410		stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
4411		stats->etype_used = le16_to_cpu(resp->etype_used);
4412		stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
4413		stats->etype_free = le16_to_cpu(resp->etype_free);
4414	}
4415
4416	return status;
4417}
4418
4419/**
4420 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control
4421 * @hw: pointer to the hw struct
4422 * @seid: VSI seid to add ethertype filter from
4423 **/
4424void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4425						    u16 seid)
4426{
4427#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4428	u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4429		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4430		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
4431	u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
4432	i40e_status status;
4433
4434	status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
4435						       seid, 0, true, NULL,
4436						       NULL);
4437	if (status)
4438		hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
4439}
4440
4441/**
4442 * i40e_aq_alternate_read
4443 * @hw: pointer to the hardware structure
4444 * @reg_addr0: address of first dword to be read
4445 * @reg_val0: pointer for data read from 'reg_addr0'
4446 * @reg_addr1: address of second dword to be read
4447 * @reg_val1: pointer for data read from 'reg_addr1'
4448 *
4449 * Read one or two dwords from alternate structure. Fields are indicated
4450 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
4451 * is not passed then only register at 'reg_addr0' is read.
4452 *
4453 **/
4454static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
4455					  u32 reg_addr0, u32 *reg_val0,
4456					  u32 reg_addr1, u32 *reg_val1)
4457{
4458	struct i40e_aq_desc desc;
4459	struct i40e_aqc_alternate_write *cmd_resp =
4460		(struct i40e_aqc_alternate_write *)&desc.params.raw;
4461	i40e_status status;
4462
4463	if (!reg_val0)
4464		return I40E_ERR_PARAM;
4465
4466	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
4467	cmd_resp->address0 = cpu_to_le32(reg_addr0);
4468	cmd_resp->address1 = cpu_to_le32(reg_addr1);
4469
4470	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
4471
4472	if (!status) {
4473		*reg_val0 = le32_to_cpu(cmd_resp->data0);
4474
4475		if (reg_val1)
4476			*reg_val1 = le32_to_cpu(cmd_resp->data1);
4477	}
4478
4479	return status;
4480}
4481
4482/**
4483 * i40e_aq_resume_port_tx
4484 * @hw: pointer to the hardware structure
4485 * @cmd_details: pointer to command details structure or NULL
4486 *
4487 * Resume port's Tx traffic
4488 **/
4489i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
4490				   struct i40e_asq_cmd_details *cmd_details)
4491{
4492	struct i40e_aq_desc desc;
4493	i40e_status status;
4494
4495	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
4496
4497	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
4498
4499	return status;
4500}
4501
4502/**
4503 * i40e_set_pci_config_data - store PCI bus info
4504 * @hw: pointer to hardware structure
4505 * @link_status: the link status word from PCI config space
4506 *
4507 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
4508 **/
4509void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4510{
4511	hw->bus.type = i40e_bus_type_pci_express;
4512
4513	switch (link_status & PCI_EXP_LNKSTA_NLW) {
4514	case PCI_EXP_LNKSTA_NLW_X1:
4515		hw->bus.width = i40e_bus_width_pcie_x1;
4516		break;
4517	case PCI_EXP_LNKSTA_NLW_X2:
4518		hw->bus.width = i40e_bus_width_pcie_x2;
4519		break;
4520	case PCI_EXP_LNKSTA_NLW_X4:
4521		hw->bus.width = i40e_bus_width_pcie_x4;
4522		break;
4523	case PCI_EXP_LNKSTA_NLW_X8:
4524		hw->bus.width = i40e_bus_width_pcie_x8;
4525		break;
4526	default:
4527		hw->bus.width = i40e_bus_width_unknown;
4528		break;
4529	}
4530
4531	switch (link_status & PCI_EXP_LNKSTA_CLS) {
4532	case PCI_EXP_LNKSTA_CLS_2_5GB:
4533		hw->bus.speed = i40e_bus_speed_2500;
4534		break;
4535	case PCI_EXP_LNKSTA_CLS_5_0GB:
4536		hw->bus.speed = i40e_bus_speed_5000;
4537		break;
4538	case PCI_EXP_LNKSTA_CLS_8_0GB:
4539		hw->bus.speed = i40e_bus_speed_8000;
4540		break;
4541	default:
4542		hw->bus.speed = i40e_bus_speed_unknown;
4543		break;
4544	}
4545}
4546
4547/**
4548 * i40e_aq_debug_dump
4549 * @hw: pointer to the hardware structure
4550 * @cluster_id: specific cluster to dump
4551 * @table_id: table id within cluster
4552 * @start_index: index of line in the block to read
4553 * @buff_size: dump buffer size
4554 * @buff: dump buffer
4555 * @ret_buff_size: actual buffer size returned
4556 * @ret_next_table: next block to read
4557 * @ret_next_index: next index to read
4558 * @cmd_details: pointer to command details structure or NULL
4559 *
4560 * Dump internal FW/HW data for debug purposes.
4561 *
4562 **/
4563i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
4564			       u8 table_id, u32 start_index, u16 buff_size,
4565			       void *buff, u16 *ret_buff_size,
4566			       u8 *ret_next_table, u32 *ret_next_index,
4567			       struct i40e_asq_cmd_details *cmd_details)
4568{
4569	struct i40e_aq_desc desc;
4570	struct i40e_aqc_debug_dump_internals *cmd =
4571		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4572	struct i40e_aqc_debug_dump_internals *resp =
4573		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
4574	i40e_status status;
4575
4576	if (buff_size == 0 || !buff)
4577		return I40E_ERR_PARAM;
4578
4579	i40e_fill_default_direct_cmd_desc(&desc,
4580					  i40e_aqc_opc_debug_dump_internals);
4581	/* Indirect Command */
4582	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4583	if (buff_size > I40E_AQ_LARGE_BUF)
4584		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4585
4586	cmd->cluster_id = cluster_id;
4587	cmd->table_id = table_id;
4588	cmd->idx = cpu_to_le32(start_index);
4589
4590	desc.datalen = cpu_to_le16(buff_size);
4591
4592	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
4593	if (!status) {
4594		if (ret_buff_size)
4595			*ret_buff_size = le16_to_cpu(desc.datalen);
4596		if (ret_next_table)
4597			*ret_next_table = resp->table_id;
4598		if (ret_next_index)
4599			*ret_next_index = le32_to_cpu(resp->idx);
4600	}
4601
4602	return status;
4603}
4604
4605/**
4606 * i40e_read_bw_from_alt_ram
4607 * @hw: pointer to the hardware structure
4608 * @max_bw: pointer for max_bw read
4609 * @min_bw: pointer for min_bw read
4610 * @min_valid: pointer for bool that is true if min_bw is a valid value
4611 * @max_valid: pointer for bool that is true if max_bw is a valid value
4612 *
4613 * Read bw from the alternate ram for the given pf
4614 **/
4615i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
4616				      u32 *max_bw, u32 *min_bw,
4617				      bool *min_valid, bool *max_valid)
4618{
4619	i40e_status status;
4620	u32 max_bw_addr, min_bw_addr;
4621
4622	/* Calculate the address of the min/max bw registers */
4623	max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4624		      I40E_ALT_STRUCT_MAX_BW_OFFSET +
4625		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4626	min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
4627		      I40E_ALT_STRUCT_MIN_BW_OFFSET +
4628		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
4629
4630	/* Read the bandwidths from alt ram */
4631	status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
4632					min_bw_addr, min_bw);
4633
4634	if (*min_bw & I40E_ALT_BW_VALID_MASK)
4635		*min_valid = true;
4636	else
4637		*min_valid = false;
4638
4639	if (*max_bw & I40E_ALT_BW_VALID_MASK)
4640		*max_valid = true;
4641	else
4642		*max_valid = false;
4643
4644	return status;
4645}
4646
4647/**
4648 * i40e_aq_configure_partition_bw
4649 * @hw: pointer to the hardware structure
4650 * @bw_data: Buffer holding valid pfs and bw limits
4651 * @cmd_details: pointer to command details
4652 *
4653 * Configure partitions guaranteed/max bw
4654 **/
4655i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4656			struct i40e_aqc_configure_partition_bw_data *bw_data,
4657			struct i40e_asq_cmd_details *cmd_details)
4658{
4659	i40e_status status;
4660	struct i40e_aq_desc desc;
4661	u16 bwd_size = sizeof(*bw_data);
4662
4663	i40e_fill_default_direct_cmd_desc(&desc,
4664					  i40e_aqc_opc_configure_partition_bw);
4665
4666	/* Indirect command */
4667	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
4668	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
4669
4670	if (bwd_size > I40E_AQ_LARGE_BUF)
4671		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
4672
4673	desc.datalen = cpu_to_le16(bwd_size);
4674
4675	status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
4676				       cmd_details);
4677
4678	return status;
4679}
4680
4681/**
4682 * i40e_read_phy_register_clause22
4683 * @hw: pointer to the HW structure
4684 * @reg: register address in the page
4685 * @phy_addr: PHY address on MDIO interface
4686 * @value: PHY register value
4687 *
4688 * Reads specified PHY register value
4689 **/
4690i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
4691					    u16 reg, u8 phy_addr, u16 *value)
4692{
4693	i40e_status status = I40E_ERR_TIMEOUT;
4694	u8 port_num = (u8)hw->func_caps.mdio_port_num;
4695	u32 command = 0;
4696	u16 retry = 1000;
4697
4698	command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4699		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4700		  (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
4701		  (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4702		  (I40E_GLGEN_MSCA_MDICMD_MASK);
4703	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4704	do {
4705		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4706		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4707			status = 0;
4708			break;
4709		}
4710		udelay(10);
4711		retry--;
4712	} while (retry);
4713
4714	if (status) {
4715		i40e_debug(hw, I40E_DEBUG_PHY,
4716			   "PHY: Can't write command to external PHY.\n");
4717	} else {
4718		command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4719		*value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4720			 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4721	}
4722
4723	return status;
4724}
4725
4726/**
4727 * i40e_write_phy_register_clause22
4728 * @hw: pointer to the HW structure
4729 * @reg: register address in the page
4730 * @phy_addr: PHY address on MDIO interface
4731 * @value: PHY register value
4732 *
4733 * Writes specified PHY register value
4734 **/
4735i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
4736					     u16 reg, u8 phy_addr, u16 value)
4737{
4738	i40e_status status = I40E_ERR_TIMEOUT;
4739	u8 port_num = (u8)hw->func_caps.mdio_port_num;
4740	u32 command  = 0;
4741	u16 retry = 1000;
4742
4743	command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4744	wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4745
4746	command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4747		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4748		  (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
4749		  (I40E_MDIO_CLAUSE22_STCODE_MASK) |
4750		  (I40E_GLGEN_MSCA_MDICMD_MASK);
4751
4752	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4753	do {
4754		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4755		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4756			status = 0;
4757			break;
4758		}
4759		udelay(10);
4760		retry--;
4761	} while (retry);
4762
4763	return status;
4764}
4765
4766/**
4767 * i40e_read_phy_register_clause45
4768 * @hw: pointer to the HW structure
4769 * @page: registers page number
4770 * @reg: register address in the page
4771 * @phy_addr: PHY address on MDIO interface
4772 * @value: PHY register value
4773 *
4774 * Reads specified PHY register value
4775 **/
4776i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
4777				u8 page, u16 reg, u8 phy_addr, u16 *value)
4778{
4779	i40e_status status = I40E_ERR_TIMEOUT;
4780	u32 command = 0;
4781	u16 retry = 1000;
4782	u8 port_num = hw->func_caps.mdio_port_num;
4783
4784	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4785		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4786		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4787		  (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4788		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4789		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4790		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4791	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4792	do {
4793		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4794		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4795			status = 0;
4796			break;
4797		}
4798		usleep_range(10, 20);
4799		retry--;
4800	} while (retry);
4801
4802	if (status) {
4803		i40e_debug(hw, I40E_DEBUG_PHY,
4804			   "PHY: Can't write command to external PHY.\n");
4805		goto phy_read_end;
4806	}
4807
4808	command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4809		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4810		  (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
4811		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4812		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4813		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4814	status = I40E_ERR_TIMEOUT;
4815	retry = 1000;
4816	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4817	do {
4818		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4819		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4820			status = 0;
4821			break;
4822		}
4823		usleep_range(10, 20);
4824		retry--;
4825	} while (retry);
4826
4827	if (!status) {
4828		command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
4829		*value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
4830			 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
4831	} else {
4832		i40e_debug(hw, I40E_DEBUG_PHY,
4833			   "PHY: Can't read register value from external PHY.\n");
4834	}
4835
4836phy_read_end:
4837	return status;
4838}
4839
4840/**
4841 * i40e_write_phy_register_clause45
4842 * @hw: pointer to the HW structure
4843 * @page: registers page number
4844 * @reg: register address in the page
4845 * @phy_addr: PHY address on MDIO interface
4846 * @value: PHY register value
4847 *
4848 * Writes value to specified PHY register
4849 **/
4850i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
4851				u8 page, u16 reg, u8 phy_addr, u16 value)
4852{
4853	i40e_status status = I40E_ERR_TIMEOUT;
4854	u32 command = 0;
4855	u16 retry = 1000;
4856	u8 port_num = hw->func_caps.mdio_port_num;
4857
4858	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
4859		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4860		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4861		  (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
4862		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4863		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4864		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4865	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4866	do {
4867		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4868		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4869			status = 0;
4870			break;
4871		}
4872		usleep_range(10, 20);
4873		retry--;
4874	} while (retry);
4875	if (status) {
4876		i40e_debug(hw, I40E_DEBUG_PHY,
4877			   "PHY: Can't write command to external PHY.\n");
4878		goto phy_write_end;
4879	}
4880
4881	command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
4882	wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
4883
4884	command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
4885		  (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
4886		  (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
4887		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
4888		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
4889		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
4890	status = I40E_ERR_TIMEOUT;
4891	retry = 1000;
4892	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
4893	do {
4894		command = rd32(hw, I40E_GLGEN_MSCA(port_num));
4895		if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
4896			status = 0;
4897			break;
4898		}
4899		usleep_range(10, 20);
4900		retry--;
4901	} while (retry);
4902
4903phy_write_end:
4904	return status;
4905}
4906
4907/**
4908 * i40e_write_phy_register
4909 * @hw: pointer to the HW structure
4910 * @page: registers page number
4911 * @reg: register address in the page
4912 * @phy_addr: PHY address on MDIO interface
4913 * @value: PHY register value
4914 *
4915 * Writes value to specified PHY register
4916 **/
4917i40e_status i40e_write_phy_register(struct i40e_hw *hw,
4918				    u8 page, u16 reg, u8 phy_addr, u16 value)
4919{
4920	i40e_status status;
4921
4922	switch (hw->device_id) {
4923	case I40E_DEV_ID_1G_BASE_T_X722:
4924		status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
4925							  value);
4926		break;
4927	case I40E_DEV_ID_5G_BASE_T_BC:
4928	case I40E_DEV_ID_10G_BASE_T:
4929	case I40E_DEV_ID_10G_BASE_T4:
4930	case I40E_DEV_ID_10G_BASE_T_BC:
4931	case I40E_DEV_ID_10G_BASE_T_X722:
4932	case I40E_DEV_ID_25G_B:
4933	case I40E_DEV_ID_25G_SFP28:
4934		status = i40e_write_phy_register_clause45(hw, page, reg,
4935							  phy_addr, value);
4936		break;
4937	default:
4938		status = I40E_ERR_UNKNOWN_PHY;
4939		break;
4940	}
4941
4942	return status;
4943}
4944
4945/**
4946 * i40e_read_phy_register
4947 * @hw: pointer to the HW structure
4948 * @page: registers page number
4949 * @reg: register address in the page
4950 * @phy_addr: PHY address on MDIO interface
4951 * @value: PHY register value
4952 *
4953 * Reads specified PHY register value
4954 **/
4955i40e_status i40e_read_phy_register(struct i40e_hw *hw,
4956				   u8 page, u16 reg, u8 phy_addr, u16 *value)
4957{
4958	i40e_status status;
4959
4960	switch (hw->device_id) {
4961	case I40E_DEV_ID_1G_BASE_T_X722:
4962		status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
4963							 value);
4964		break;
4965	case I40E_DEV_ID_5G_BASE_T_BC:
4966	case I40E_DEV_ID_10G_BASE_T:
4967	case I40E_DEV_ID_10G_BASE_T4:
4968	case I40E_DEV_ID_10G_BASE_T_BC:
4969	case I40E_DEV_ID_10G_BASE_T_X722:
4970	case I40E_DEV_ID_25G_B:
4971	case I40E_DEV_ID_25G_SFP28:
4972		status = i40e_read_phy_register_clause45(hw, page, reg,
4973							 phy_addr, value);
4974		break;
4975	default:
4976		status = I40E_ERR_UNKNOWN_PHY;
4977		break;
4978	}
4979
4980	return status;
4981}
4982
4983/**
4984 * i40e_get_phy_address
4985 * @hw: pointer to the HW structure
4986 * @dev_num: PHY port num that address we want
4987 *
4988 * Gets PHY address for current port
4989 **/
4990u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
4991{
4992	u8 port_num = hw->func_caps.mdio_port_num;
4993	u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
4994
4995	return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
4996}
4997
4998/**
4999 * i40e_blink_phy_led
5000 * @hw: pointer to the HW structure
5001 * @time: time how long led will blinks in secs
5002 * @interval: gap between LED on and off in msecs
5003 *
5004 * Blinks PHY link LED
5005 **/
5006i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
5007				    u32 time, u32 interval)
5008{
5009	i40e_status status = 0;
5010	u32 i;
5011	u16 led_ctl;
5012	u16 gpio_led_port;
5013	u16 led_reg;
5014	u16 led_addr = I40E_PHY_LED_PROV_REG_1;
5015	u8 phy_addr = 0;
5016	u8 port_num;
5017
5018	i = rd32(hw, I40E_PFGEN_PORTNUM);
5019	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5020	phy_addr = i40e_get_phy_address(hw, port_num);
5021
5022	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5023	     led_addr++) {
5024		status = i40e_read_phy_register_clause45(hw,
5025							 I40E_PHY_COM_REG_PAGE,
5026							 led_addr, phy_addr,
5027							 &led_reg);
5028		if (status)
5029			goto phy_blinking_end;
5030		led_ctl = led_reg;
5031		if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5032			led_reg = 0;
5033			status = i40e_write_phy_register_clause45(hw,
5034							 I40E_PHY_COM_REG_PAGE,
5035							 led_addr, phy_addr,
5036							 led_reg);
5037			if (status)
5038				goto phy_blinking_end;
5039			break;
5040		}
5041	}
5042
5043	if (time > 0 && interval > 0) {
5044		for (i = 0; i < time * 1000; i += interval) {
5045			status = i40e_read_phy_register_clause45(hw,
5046						I40E_PHY_COM_REG_PAGE,
5047						led_addr, phy_addr, &led_reg);
5048			if (status)
5049				goto restore_config;
5050			if (led_reg & I40E_PHY_LED_MANUAL_ON)
5051				led_reg = 0;
5052			else
5053				led_reg = I40E_PHY_LED_MANUAL_ON;
5054			status = i40e_write_phy_register_clause45(hw,
5055						I40E_PHY_COM_REG_PAGE,
5056						led_addr, phy_addr, led_reg);
5057			if (status)
5058				goto restore_config;
5059			msleep(interval);
5060		}
5061	}
5062
5063restore_config:
5064	status = i40e_write_phy_register_clause45(hw,
5065						  I40E_PHY_COM_REG_PAGE,
5066						  led_addr, phy_addr, led_ctl);
5067
5068phy_blinking_end:
5069	return status;
5070}
5071
5072/**
5073 * i40e_led_get_reg - read LED register
5074 * @hw: pointer to the HW structure
5075 * @led_addr: LED register address
5076 * @reg_val: read register value
5077 **/
5078static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
5079					      u32 *reg_val)
5080{
5081	enum i40e_status_code status;
5082	u8 phy_addr = 0;
5083	u8 port_num;
5084	u32 i;
5085
5086	*reg_val = 0;
5087	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5088		status =
5089		       i40e_aq_get_phy_register(hw,
5090						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5091						I40E_PHY_COM_REG_PAGE, true,
5092						I40E_PHY_LED_PROV_REG_1,
5093						reg_val, NULL);
5094	} else {
5095		i = rd32(hw, I40E_PFGEN_PORTNUM);
5096		port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5097		phy_addr = i40e_get_phy_address(hw, port_num);
5098		status = i40e_read_phy_register_clause45(hw,
5099							 I40E_PHY_COM_REG_PAGE,
5100							 led_addr, phy_addr,
5101							 (u16 *)reg_val);
5102	}
5103	return status;
5104}
5105
5106/**
5107 * i40e_led_set_reg - write LED register
5108 * @hw: pointer to the HW structure
5109 * @led_addr: LED register address
5110 * @reg_val: register value to write
5111 **/
5112static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
5113					      u32 reg_val)
5114{
5115	enum i40e_status_code status;
5116	u8 phy_addr = 0;
5117	u8 port_num;
5118	u32 i;
5119
5120	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5121		status =
5122		       i40e_aq_set_phy_register(hw,
5123						I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5124						I40E_PHY_COM_REG_PAGE, true,
5125						I40E_PHY_LED_PROV_REG_1,
5126						reg_val, NULL);
5127	} else {
5128		i = rd32(hw, I40E_PFGEN_PORTNUM);
5129		port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5130		phy_addr = i40e_get_phy_address(hw, port_num);
5131		status = i40e_write_phy_register_clause45(hw,
5132							  I40E_PHY_COM_REG_PAGE,
5133							  led_addr, phy_addr,
5134							  (u16)reg_val);
5135	}
5136
5137	return status;
5138}
5139
5140/**
5141 * i40e_led_get_phy - return current on/off mode
5142 * @hw: pointer to the hw struct
5143 * @led_addr: address of led register to use
5144 * @val: original value of register to use
5145 *
5146 **/
5147i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5148			     u16 *val)
5149{
5150	i40e_status status = 0;
5151	u16 gpio_led_port;
5152	u8 phy_addr = 0;
5153	u16 reg_val;
5154	u16 temp_addr;
5155	u8 port_num;
5156	u32 i;
5157	u32 reg_val_aq;
5158
5159	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
5160		status =
5161		      i40e_aq_get_phy_register(hw,
5162					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
5163					       I40E_PHY_COM_REG_PAGE, true,
5164					       I40E_PHY_LED_PROV_REG_1,
5165					       &reg_val_aq, NULL);
5166		if (status == I40E_SUCCESS)
5167			*val = (u16)reg_val_aq;
5168		return status;
5169	}
5170	temp_addr = I40E_PHY_LED_PROV_REG_1;
5171	i = rd32(hw, I40E_PFGEN_PORTNUM);
5172	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
5173	phy_addr = i40e_get_phy_address(hw, port_num);
5174
5175	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
5176	     temp_addr++) {
5177		status = i40e_read_phy_register_clause45(hw,
5178							 I40E_PHY_COM_REG_PAGE,
5179							 temp_addr, phy_addr,
5180							 &reg_val);
5181		if (status)
5182			return status;
5183		*val = reg_val;
5184		if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
5185			*led_addr = temp_addr;
5186			break;
5187		}
5188	}
5189	return status;
5190}
5191
5192/**
5193 * i40e_led_set_phy
5194 * @hw: pointer to the HW structure
5195 * @on: true or false
5196 * @led_addr: address of led register to use
5197 * @mode: original val plus bit for set or ignore
5198 *
5199 * Set led's on or off when controlled by the PHY
5200 *
5201 **/
5202i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
5203			     u16 led_addr, u32 mode)
5204{
5205	i40e_status status = 0;
5206	u32 led_ctl = 0;
5207	u32 led_reg = 0;
5208
5209	status = i40e_led_get_reg(hw, led_addr, &led_reg);
5210	if (status)
5211		return status;
5212	led_ctl = led_reg;
5213	if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
5214		led_reg = 0;
5215		status = i40e_led_set_reg(hw, led_addr, led_reg);
5216		if (status)
5217			return status;
5218	}
5219	status = i40e_led_get_reg(hw, led_addr, &led_reg);
5220	if (status)
5221		goto restore_config;
5222	if (on)
5223		led_reg = I40E_PHY_LED_MANUAL_ON;
5224	else
5225		led_reg = 0;
5226
5227	status = i40e_led_set_reg(hw, led_addr, led_reg);
5228	if (status)
5229		goto restore_config;
5230	if (mode & I40E_PHY_LED_MODE_ORIG) {
5231		led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
5232		status = i40e_led_set_reg(hw, led_addr, led_ctl);
5233	}
5234	return status;
5235
5236restore_config:
5237	status = i40e_led_set_reg(hw, led_addr, led_ctl);
5238	return status;
5239}
5240
5241/**
5242 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
5243 * @hw: pointer to the hw struct
5244 * @reg_addr: register address
5245 * @reg_val: ptr to register value
5246 * @cmd_details: pointer to command details structure or NULL
5247 *
5248 * Use the firmware to read the Rx control register,
5249 * especially useful if the Rx unit is under heavy pressure
5250 **/
5251i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
5252				u32 reg_addr, u32 *reg_val,
5253				struct i40e_asq_cmd_details *cmd_details)
5254{
5255	struct i40e_aq_desc desc;
5256	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
5257		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5258	i40e_status status;
5259
5260	if (!reg_val)
5261		return I40E_ERR_PARAM;
5262
5263	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
5264
5265	cmd_resp->address = cpu_to_le32(reg_addr);
5266
5267	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5268
5269	if (status == 0)
5270		*reg_val = le32_to_cpu(cmd_resp->value);
5271
5272	return status;
5273}
5274
5275/**
5276 * i40e_read_rx_ctl - read from an Rx control register
5277 * @hw: pointer to the hw struct
5278 * @reg_addr: register address
5279 **/
5280u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
5281{
5282	i40e_status status = 0;
5283	bool use_register;
5284	int retry = 5;
5285	u32 val = 0;
5286
5287	use_register = (((hw->aq.api_maj_ver == 1) &&
5288			(hw->aq.api_min_ver < 5)) ||
5289			(hw->mac.type == I40E_MAC_X722));
5290	if (!use_register) {
5291do_retry:
5292		status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
5293		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5294			usleep_range(1000, 2000);
5295			retry--;
5296			goto do_retry;
5297		}
5298	}
5299
5300	/* if the AQ access failed, try the old-fashioned way */
5301	if (status || use_register)
5302		val = rd32(hw, reg_addr);
5303
5304	return val;
5305}
5306
5307/**
5308 * i40e_aq_rx_ctl_write_register
5309 * @hw: pointer to the hw struct
5310 * @reg_addr: register address
5311 * @reg_val: register value
5312 * @cmd_details: pointer to command details structure or NULL
5313 *
5314 * Use the firmware to write to an Rx control register,
5315 * especially useful if the Rx unit is under heavy pressure
5316 **/
5317i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
5318				u32 reg_addr, u32 reg_val,
5319				struct i40e_asq_cmd_details *cmd_details)
5320{
5321	struct i40e_aq_desc desc;
5322	struct i40e_aqc_rx_ctl_reg_read_write *cmd =
5323		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
5324	i40e_status status;
5325
5326	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
5327
5328	cmd->address = cpu_to_le32(reg_addr);
5329	cmd->value = cpu_to_le32(reg_val);
5330
5331	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5332
5333	return status;
5334}
5335
5336/**
5337 * i40e_write_rx_ctl - write to an Rx control register
5338 * @hw: pointer to the hw struct
5339 * @reg_addr: register address
5340 * @reg_val: register value
5341 **/
5342void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
5343{
5344	i40e_status status = 0;
5345	bool use_register;
5346	int retry = 5;
5347
5348	use_register = (((hw->aq.api_maj_ver == 1) &&
5349			(hw->aq.api_min_ver < 5)) ||
5350			(hw->mac.type == I40E_MAC_X722));
5351	if (!use_register) {
5352do_retry:
5353		status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
5354						       reg_val, NULL);
5355		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
5356			usleep_range(1000, 2000);
5357			retry--;
5358			goto do_retry;
5359		}
5360	}
5361
5362	/* if the AQ access failed, try the old-fashioned way */
5363	if (status || use_register)
5364		wr32(hw, reg_addr, reg_val);
5365}
5366
5367/**
5368 * i40e_mdio_if_number_selection - MDIO I/F number selection
5369 * @hw: pointer to the hw struct
5370 * @set_mdio: use MDIO I/F number specified by mdio_num
5371 * @mdio_num: MDIO I/F number
5372 * @cmd: pointer to PHY Register command structure
5373 **/
5374static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
5375					  u8 mdio_num,
5376					  struct i40e_aqc_phy_register_access *cmd)
5377{
5378	if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
5379		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
5380			cmd->cmd_flags |=
5381				I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
5382				((mdio_num <<
5383				I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
5384				I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
5385		else
5386			i40e_debug(hw, I40E_DEBUG_PHY,
5387				   "MDIO I/F number selection not supported by current FW version.\n");
5388	}
5389}
5390
5391/**
5392 * i40e_aq_set_phy_register_ext
5393 * @hw: pointer to the hw struct
5394 * @phy_select: select which phy should be accessed
5395 * @dev_addr: PHY device address
5396 * @page_change: flag to indicate if phy page should be updated
5397 * @set_mdio: use MDIO I/F number specified by mdio_num
5398 * @mdio_num: MDIO I/F number
5399 * @reg_addr: PHY register address
5400 * @reg_val: new register value
5401 * @cmd_details: pointer to command details structure or NULL
5402 *
5403 * Write the external PHY register.
5404 * NOTE: In common cases MDIO I/F number should not be changed, thats why you
5405 * may use simple wrapper i40e_aq_set_phy_register.
5406 **/
5407enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
5408			     u8 phy_select, u8 dev_addr, bool page_change,
5409			     bool set_mdio, u8 mdio_num,
5410			     u32 reg_addr, u32 reg_val,
5411			     struct i40e_asq_cmd_details *cmd_details)
5412{
5413	struct i40e_aq_desc desc;
5414	struct i40e_aqc_phy_register_access *cmd =
5415		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
5416	i40e_status status;
5417
5418	i40e_fill_default_direct_cmd_desc(&desc,
5419					  i40e_aqc_opc_set_phy_register);
5420
5421	cmd->phy_interface = phy_select;
5422	cmd->dev_address = dev_addr;
5423	cmd->reg_address = cpu_to_le32(reg_addr);
5424	cmd->reg_value = cpu_to_le32(reg_val);
5425
5426	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5427
5428	if (!page_change)
5429		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5430
5431	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5432
5433	return status;
5434}
5435
5436/**
5437 * i40e_aq_get_phy_register_ext
5438 * @hw: pointer to the hw struct
5439 * @phy_select: select which phy should be accessed
5440 * @dev_addr: PHY device address
5441 * @page_change: flag to indicate if phy page should be updated
5442 * @set_mdio: use MDIO I/F number specified by mdio_num
5443 * @mdio_num: MDIO I/F number
5444 * @reg_addr: PHY register address
5445 * @reg_val: read register value
5446 * @cmd_details: pointer to command details structure or NULL
5447 *
5448 * Read the external PHY register.
5449 * NOTE: In common cases MDIO I/F number should not be changed, thats why you
5450 * may use simple wrapper i40e_aq_get_phy_register.
5451 **/
5452enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
5453			     u8 phy_select, u8 dev_addr, bool page_change,
5454			     bool set_mdio, u8 mdio_num,
5455			     u32 reg_addr, u32 *reg_val,
5456			     struct i40e_asq_cmd_details *cmd_details)
5457{
5458	struct i40e_aq_desc desc;
5459	struct i40e_aqc_phy_register_access *cmd =
5460		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
5461	i40e_status status;
5462
5463	i40e_fill_default_direct_cmd_desc(&desc,
5464					  i40e_aqc_opc_get_phy_register);
5465
5466	cmd->phy_interface = phy_select;
5467	cmd->dev_address = dev_addr;
5468	cmd->reg_address = cpu_to_le32(reg_addr);
5469
5470	i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
5471
5472	if (!page_change)
5473		cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
5474
5475	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
5476	if (!status)
5477		*reg_val = le32_to_cpu(cmd->reg_value);
5478
5479	return status;
5480}
5481
5482/**
5483 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
5484 * @hw: pointer to the hw struct
5485 * @buff: command buffer (size in bytes = buff_size)
5486 * @buff_size: buffer size in bytes
5487 * @track_id: package tracking id
5488 * @error_offset: returns error offset
5489 * @error_info: returns error information
5490 * @cmd_details: pointer to command details structure or NULL
5491 **/
5492enum
5493i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5494				   u16 buff_size, u32 track_id,
5495				   u32 *error_offset, u32 *error_info,
5496				   struct i40e_asq_cmd_details *cmd_details)
5497{
5498	struct i40e_aq_desc desc;
5499	struct i40e_aqc_write_personalization_profile *cmd =
5500		(struct i40e_aqc_write_personalization_profile *)
5501		&desc.params.raw;
5502	struct i40e_aqc_write_ddp_resp *resp;
5503	i40e_status status;
5504
5505	i40e_fill_default_direct_cmd_desc(&desc,
5506					  i40e_aqc_opc_write_personalization_profile);
5507
5508	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
5509	if (buff_size > I40E_AQ_LARGE_BUF)
5510		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5511
5512	desc.datalen = cpu_to_le16(buff_size);
5513
5514	cmd->profile_track_id = cpu_to_le32(track_id);
5515
5516	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5517	if (!status) {
5518		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
5519		if (error_offset)
5520			*error_offset = le32_to_cpu(resp->error_offset);
5521		if (error_info)
5522			*error_info = le32_to_cpu(resp->error_info);
5523	}
5524
5525	return status;
5526}
5527
5528/**
5529 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
5530 * @hw: pointer to the hw struct
5531 * @buff: command buffer (size in bytes = buff_size)
5532 * @buff_size: buffer size in bytes
5533 * @flags: AdminQ command flags
5534 * @cmd_details: pointer to command details structure or NULL
5535 **/
5536enum
5537i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
5538				      u16 buff_size, u8 flags,
5539				      struct i40e_asq_cmd_details *cmd_details)
5540{
5541	struct i40e_aq_desc desc;
5542	struct i40e_aqc_get_applied_profiles *cmd =
5543		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
5544	i40e_status status;
5545
5546	i40e_fill_default_direct_cmd_desc(&desc,
5547					  i40e_aqc_opc_get_personalization_profile_list);
5548
5549	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
5550	if (buff_size > I40E_AQ_LARGE_BUF)
5551		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5552	desc.datalen = cpu_to_le16(buff_size);
5553
5554	cmd->flags = flags;
5555
5556	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
5557
5558	return status;
5559}
5560
5561/**
5562 * i40e_find_segment_in_package
5563 * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
5564 * @pkg_hdr: pointer to the package header to be searched
5565 *
5566 * This function searches a package file for a particular segment type. On
5567 * success it returns a pointer to the segment header, otherwise it will
5568 * return NULL.
5569 **/
5570struct i40e_generic_seg_header *
5571i40e_find_segment_in_package(u32 segment_type,
5572			     struct i40e_package_header *pkg_hdr)
5573{
5574	struct i40e_generic_seg_header *segment;
5575	u32 i;
5576
5577	/* Search all package segments for the requested segment type */
5578	for (i = 0; i < pkg_hdr->segment_count; i++) {
5579		segment =
5580			(struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
5581			 pkg_hdr->segment_offset[i]);
5582
5583		if (segment->type == segment_type)
5584			return segment;
5585	}
5586
5587	return NULL;
5588}
5589
5590/* Get section table in profile */
5591#define I40E_SECTION_TABLE(profile, sec_tbl)				\
5592	do {								\
5593		struct i40e_profile_segment *p = (profile);		\
5594		u32 count;						\
5595		u32 *nvm;						\
5596		count = p->device_table_count;				\
5597		nvm = (u32 *)&p->device_table[count];			\
5598		sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
5599	} while (0)
5600
5601/* Get section header in profile */
5602#define I40E_SECTION_HEADER(profile, offset)				\
5603	(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
5604
5605/**
5606 * i40e_find_section_in_profile
5607 * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
5608 * @profile: pointer to the i40e segment header to be searched
5609 *
5610 * This function searches i40e segment for a particular section type. On
5611 * success it returns a pointer to the section header, otherwise it will
5612 * return NULL.
5613 **/
5614struct i40e_profile_section_header *
5615i40e_find_section_in_profile(u32 section_type,
5616			     struct i40e_profile_segment *profile)
5617{
5618	struct i40e_profile_section_header *sec;
5619	struct i40e_section_table *sec_tbl;
5620	u32 sec_off;
5621	u32 i;
5622
5623	if (profile->header.type != SEGMENT_TYPE_I40E)
5624		return NULL;
5625
5626	I40E_SECTION_TABLE(profile, sec_tbl);
5627
5628	for (i = 0; i < sec_tbl->section_count; i++) {
5629		sec_off = sec_tbl->section_offset[i];
5630		sec = I40E_SECTION_HEADER(profile, sec_off);
5631		if (sec->section.type == section_type)
5632			return sec;
5633	}
5634
5635	return NULL;
5636}
5637
5638/**
5639 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
5640 * @hw: pointer to the hw struct
5641 * @aq: command buffer containing all data to execute AQ
5642 **/
5643static enum
5644i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
5645					  struct i40e_profile_aq_section *aq)
5646{
5647	i40e_status status;
5648	struct i40e_aq_desc desc;
5649	u8 *msg = NULL;
5650	u16 msglen;
5651
5652	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
5653	desc.flags |= cpu_to_le16(aq->flags);
5654	memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
5655
5656	msglen = aq->datalen;
5657	if (msglen) {
5658		desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
5659						I40E_AQ_FLAG_RD));
5660		if (msglen > I40E_AQ_LARGE_BUF)
5661			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
5662		desc.datalen = cpu_to_le16(msglen);
5663		msg = &aq->data[0];
5664	}
5665
5666	status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
5667
5668	if (status) {
5669		i40e_debug(hw, I40E_DEBUG_PACKAGE,
5670			   "unable to exec DDP AQ opcode %u, error %d\n",
5671			   aq->opcode, status);
5672		return status;
5673	}
5674
5675	/* copy returned desc to aq_buf */
5676	memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
5677
5678	return 0;
5679}
5680
5681/**
5682 * i40e_validate_profile
5683 * @hw: pointer to the hardware structure
5684 * @profile: pointer to the profile segment of the package to be validated
5685 * @track_id: package tracking id
5686 * @rollback: flag if the profile is for rollback.
5687 *
5688 * Validates supported devices and profile's sections.
5689 */
5690static enum i40e_status_code
5691i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5692		      u32 track_id, bool rollback)
5693{
5694	struct i40e_profile_section_header *sec = NULL;
5695	i40e_status status = 0;
5696	struct i40e_section_table *sec_tbl;
5697	u32 vendor_dev_id;
5698	u32 dev_cnt;
5699	u32 sec_off;
5700	u32 i;
5701
5702	if (track_id == I40E_DDP_TRACKID_INVALID) {
5703		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
5704		return I40E_NOT_SUPPORTED;
5705	}
5706
5707	dev_cnt = profile->device_table_count;
5708	for (i = 0; i < dev_cnt; i++) {
5709		vendor_dev_id = profile->device_table[i].vendor_dev_id;
5710		if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
5711		    hw->device_id == (vendor_dev_id & 0xFFFF))
5712			break;
5713	}
5714	if (dev_cnt && i == dev_cnt) {
5715		i40e_debug(hw, I40E_DEBUG_PACKAGE,
5716			   "Device doesn't support DDP\n");
5717		return I40E_ERR_DEVICE_NOT_SUPPORTED;
5718	}
5719
5720	I40E_SECTION_TABLE(profile, sec_tbl);
5721
5722	/* Validate sections types */
5723	for (i = 0; i < sec_tbl->section_count; i++) {
5724		sec_off = sec_tbl->section_offset[i];
5725		sec = I40E_SECTION_HEADER(profile, sec_off);
5726		if (rollback) {
5727			if (sec->section.type == SECTION_TYPE_MMIO ||
5728			    sec->section.type == SECTION_TYPE_AQ ||
5729			    sec->section.type == SECTION_TYPE_RB_AQ) {
5730				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5731					   "Not a roll-back package\n");
5732				return I40E_NOT_SUPPORTED;
5733			}
5734		} else {
5735			if (sec->section.type == SECTION_TYPE_RB_AQ ||
5736			    sec->section.type == SECTION_TYPE_RB_MMIO) {
5737				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5738					   "Not an original package\n");
5739				return I40E_NOT_SUPPORTED;
5740			}
5741		}
5742	}
5743
5744	return status;
5745}
5746
5747/**
5748 * i40e_write_profile
5749 * @hw: pointer to the hardware structure
5750 * @profile: pointer to the profile segment of the package to be downloaded
5751 * @track_id: package tracking id
5752 *
5753 * Handles the download of a complete package.
5754 */
5755enum i40e_status_code
5756i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5757		   u32 track_id)
5758{
5759	i40e_status status = 0;
5760	struct i40e_section_table *sec_tbl;
5761	struct i40e_profile_section_header *sec = NULL;
5762	struct i40e_profile_aq_section *ddp_aq;
5763	u32 section_size = 0;
5764	u32 offset = 0, info = 0;
5765	u32 sec_off;
5766	u32 i;
5767
5768	status = i40e_validate_profile(hw, profile, track_id, false);
5769	if (status)
5770		return status;
5771
5772	I40E_SECTION_TABLE(profile, sec_tbl);
5773
5774	for (i = 0; i < sec_tbl->section_count; i++) {
5775		sec_off = sec_tbl->section_offset[i];
5776		sec = I40E_SECTION_HEADER(profile, sec_off);
5777		/* Process generic admin command */
5778		if (sec->section.type == SECTION_TYPE_AQ) {
5779			ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
5780			status = i40e_ddp_exec_aq_section(hw, ddp_aq);
5781			if (status) {
5782				i40e_debug(hw, I40E_DEBUG_PACKAGE,
5783					   "Failed to execute aq: section %d, opcode %u\n",
5784					   i, ddp_aq->opcode);
5785				break;
5786			}
5787			sec->section.type = SECTION_TYPE_RB_AQ;
5788		}
5789
5790		/* Skip any non-mmio sections */
5791		if (sec->section.type != SECTION_TYPE_MMIO)
5792			continue;
5793
5794		section_size = sec->section.size +
5795			sizeof(struct i40e_profile_section_header);
5796
5797		/* Write MMIO section */
5798		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5799					   track_id, &offset, &info, NULL);
5800		if (status) {
5801			i40e_debug(hw, I40E_DEBUG_PACKAGE,
5802				   "Failed to write profile: section %d, offset %d, info %d\n",
5803				   i, offset, info);
5804			break;
5805		}
5806	}
5807	return status;
5808}
5809
5810/**
5811 * i40e_rollback_profile
5812 * @hw: pointer to the hardware structure
5813 * @profile: pointer to the profile segment of the package to be removed
5814 * @track_id: package tracking id
5815 *
5816 * Rolls back previously loaded package.
5817 */
5818enum i40e_status_code
5819i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
5820		      u32 track_id)
5821{
5822	struct i40e_profile_section_header *sec = NULL;
5823	i40e_status status = 0;
5824	struct i40e_section_table *sec_tbl;
5825	u32 offset = 0, info = 0;
5826	u32 section_size = 0;
5827	u32 sec_off;
5828	int i;
5829
5830	status = i40e_validate_profile(hw, profile, track_id, true);
5831	if (status)
5832		return status;
5833
5834	I40E_SECTION_TABLE(profile, sec_tbl);
5835
5836	/* For rollback write sections in reverse */
5837	for (i = sec_tbl->section_count - 1; i >= 0; i--) {
5838		sec_off = sec_tbl->section_offset[i];
5839		sec = I40E_SECTION_HEADER(profile, sec_off);
5840
5841		/* Skip any non-rollback sections */
5842		if (sec->section.type != SECTION_TYPE_RB_MMIO)
5843			continue;
5844
5845		section_size = sec->section.size +
5846			sizeof(struct i40e_profile_section_header);
5847
5848		/* Write roll-back MMIO section */
5849		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
5850					   track_id, &offset, &info, NULL);
5851		if (status) {
5852			i40e_debug(hw, I40E_DEBUG_PACKAGE,
5853				   "Failed to write profile: section %d, offset %d, info %d\n",
5854				   i, offset, info);
5855			break;
5856		}
5857	}
5858	return status;
5859}
5860
5861/**
5862 * i40e_add_pinfo_to_list
5863 * @hw: pointer to the hardware structure
5864 * @profile: pointer to the profile segment of the package
5865 * @profile_info_sec: buffer for information section
5866 * @track_id: package tracking id
5867 *
5868 * Register a profile to the list of loaded profiles.
5869 */
5870enum i40e_status_code
5871i40e_add_pinfo_to_list(struct i40e_hw *hw,
5872		       struct i40e_profile_segment *profile,
5873		       u8 *profile_info_sec, u32 track_id)
5874{
5875	i40e_status status = 0;
5876	struct i40e_profile_section_header *sec = NULL;
5877	struct i40e_profile_info *pinfo;
5878	u32 offset = 0, info = 0;
5879
5880	sec = (struct i40e_profile_section_header *)profile_info_sec;
5881	sec->tbl_size = 1;
5882	sec->data_end = sizeof(struct i40e_profile_section_header) +
5883			sizeof(struct i40e_profile_info);
5884	sec->section.type = SECTION_TYPE_INFO;
5885	sec->section.offset = sizeof(struct i40e_profile_section_header);
5886	sec->section.size = sizeof(struct i40e_profile_info);
5887	pinfo = (struct i40e_profile_info *)(profile_info_sec +
5888					     sec->section.offset);
5889	pinfo->track_id = track_id;
5890	pinfo->version = profile->version;
5891	pinfo->op = I40E_DDP_ADD_TRACKID;
5892	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
5893
5894	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
5895				   track_id, &offset, &info, NULL);
5896
5897	return status;
5898}
5899
5900/**
5901 * i40e_aq_add_cloud_filters
5902 * @hw: pointer to the hardware structure
5903 * @seid: VSI seid to add cloud filters from
5904 * @filters: Buffer which contains the filters to be added
5905 * @filter_count: number of filters contained in the buffer
5906 *
5907 * Set the cloud filters for a given VSI.  The contents of the
5908 * i40e_aqc_cloud_filters_element_data are filled in by the caller
5909 * of the function.
5910 *
5911 **/
5912enum i40e_status_code
5913i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
5914			  struct i40e_aqc_cloud_filters_element_data *filters,
5915			  u8 filter_count)
5916{
5917	struct i40e_aq_desc desc;
5918	struct i40e_aqc_add_remove_cloud_filters *cmd =
5919	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5920	enum i40e_status_code status;
5921	u16 buff_len;
5922
5923	i40e_fill_default_direct_cmd_desc(&desc,
5924					  i40e_aqc_opc_add_cloud_filters);
5925
5926	buff_len = filter_count * sizeof(*filters);
5927	desc.datalen = cpu_to_le16(buff_len);
5928	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5929	cmd->num_filters = filter_count;
5930	cmd->seid = cpu_to_le16(seid);
5931
5932	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5933
5934	return status;
5935}
5936
5937/**
5938 * i40e_aq_add_cloud_filters_bb
5939 * @hw: pointer to the hardware structure
5940 * @seid: VSI seid to add cloud filters from
5941 * @filters: Buffer which contains the filters in big buffer to be added
5942 * @filter_count: number of filters contained in the buffer
5943 *
5944 * Set the big buffer cloud filters for a given VSI.  The contents of the
5945 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
5946 * function.
5947 *
5948 **/
5949enum i40e_status_code
5950i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
5951			     struct i40e_aqc_cloud_filters_element_bb *filters,
5952			     u8 filter_count)
5953{
5954	struct i40e_aq_desc desc;
5955	struct i40e_aqc_add_remove_cloud_filters *cmd =
5956	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
5957	i40e_status status;
5958	u16 buff_len;
5959	int i;
5960
5961	i40e_fill_default_direct_cmd_desc(&desc,
5962					  i40e_aqc_opc_add_cloud_filters);
5963
5964	buff_len = filter_count * sizeof(*filters);
5965	desc.datalen = cpu_to_le16(buff_len);
5966	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
5967	cmd->num_filters = filter_count;
5968	cmd->seid = cpu_to_le16(seid);
5969	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
5970
5971	for (i = 0; i < filter_count; i++) {
5972		u16 tnl_type;
5973		u32 ti;
5974
5975		tnl_type = (le16_to_cpu(filters[i].element.flags) &
5976			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
5977			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
5978
5979		/* Due to hardware eccentricities, the VNI for Geneve is shifted
5980		 * one more byte further than normally used for Tenant ID in
5981		 * other tunnel types.
5982		 */
5983		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
5984			ti = le32_to_cpu(filters[i].element.tenant_id);
5985			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
5986		}
5987	}
5988
5989	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
5990
5991	return status;
5992}
5993
5994/**
5995 * i40e_aq_rem_cloud_filters
5996 * @hw: pointer to the hardware structure
5997 * @seid: VSI seid to remove cloud filters from
5998 * @filters: Buffer which contains the filters to be removed
5999 * @filter_count: number of filters contained in the buffer
6000 *
6001 * Remove the cloud filters for a given VSI.  The contents of the
6002 * i40e_aqc_cloud_filters_element_data are filled in by the caller
6003 * of the function.
6004 *
6005 **/
6006enum i40e_status_code
6007i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
6008			  struct i40e_aqc_cloud_filters_element_data *filters,
6009			  u8 filter_count)
6010{
6011	struct i40e_aq_desc desc;
6012	struct i40e_aqc_add_remove_cloud_filters *cmd =
6013	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6014	enum i40e_status_code status;
6015	u16 buff_len;
6016
6017	i40e_fill_default_direct_cmd_desc(&desc,
6018					  i40e_aqc_opc_remove_cloud_filters);
6019
6020	buff_len = filter_count * sizeof(*filters);
6021	desc.datalen = cpu_to_le16(buff_len);
6022	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6023	cmd->num_filters = filter_count;
6024	cmd->seid = cpu_to_le16(seid);
6025
6026	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6027
6028	return status;
6029}
6030
6031/**
6032 * i40e_aq_rem_cloud_filters_bb
6033 * @hw: pointer to the hardware structure
6034 * @seid: VSI seid to remove cloud filters from
6035 * @filters: Buffer which contains the filters in big buffer to be removed
6036 * @filter_count: number of filters contained in the buffer
6037 *
6038 * Remove the big buffer cloud filters for a given VSI.  The contents of the
6039 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
6040 * function.
6041 *
6042 **/
6043enum i40e_status_code
6044i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
6045			     struct i40e_aqc_cloud_filters_element_bb *filters,
6046			     u8 filter_count)
6047{
6048	struct i40e_aq_desc desc;
6049	struct i40e_aqc_add_remove_cloud_filters *cmd =
6050	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
6051	i40e_status status;
6052	u16 buff_len;
6053	int i;
6054
6055	i40e_fill_default_direct_cmd_desc(&desc,
6056					  i40e_aqc_opc_remove_cloud_filters);
6057
6058	buff_len = filter_count * sizeof(*filters);
6059	desc.datalen = cpu_to_le16(buff_len);
6060	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
6061	cmd->num_filters = filter_count;
6062	cmd->seid = cpu_to_le16(seid);
6063	cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
6064
6065	for (i = 0; i < filter_count; i++) {
6066		u16 tnl_type;
6067		u32 ti;
6068
6069		tnl_type = (le16_to_cpu(filters[i].element.flags) &
6070			   I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
6071			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
6072
6073		/* Due to hardware eccentricities, the VNI for Geneve is shifted
6074		 * one more byte further than normally used for Tenant ID in
6075		 * other tunnel types.
6076		 */
6077		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
6078			ti = le32_to_cpu(filters[i].element.tenant_id);
6079			filters[i].element.tenant_id = cpu_to_le32(ti << 8);
6080		}
6081	}
6082
6083	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
6084
6085	return status;
6086}
6087