// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <linux/vmalloc.h>

#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"

static int ice_active_port_option = -1;

/* context for devlink info version reporting */
struct ice_info_ctx {
	char buf[128];
	struct ice_orom_info pending_orom;
	struct ice_nvm_info pending_nvm;
	struct ice_netlist_info pending_netlist;
	struct ice_hw_dev_caps dev_caps;
};

/* The following functions are used to format specific strings for various
 * devlink info versions. The ctx parameter is used to provide the storage
 * buffer, as well as any ancillary information calculated when the info
 * request was made.
 *
 * If a version does not exist, for example when attempting to get the
 * inactive version of flash when there is no pending update, the function
 * should leave the buffer in the ctx structure empty.
 */

static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	u8 dsn[8];

	/* Copy the DSN into an array in Big Endian format */
	put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);

	snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
}
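
/* Illustrative example: %8phD prints the eight DSN bytes as dash-separated
 * hex, so a (hypothetical) DSN of 0x0123456789abcdef would be reported as
 * the serial number "01-23-45-67-89-ab-cd-ef".
 */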

static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;
	int status;

	status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
	if (status)
		/* We failed to locate the PBA, so just skip this entry */
		dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n",
			status);
}

static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
		 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch);
}

static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
		 hw->api_min_ver, hw->api_patch);
}

static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
}

static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_orom_info *orom = &pf->hw.flash.orom;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
		 orom->major, orom->build, orom->patch);
}

static void
ice_info_pending_orom_ver(struct ice_pf __always_unused *pf,
			  struct ice_info_ctx *ctx)
{
	struct ice_orom_info *orom = &ctx->pending_orom;

	if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
		snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
			 orom->major, orom->build, orom->patch);
}

static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

	snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
}

static void
ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf,
			 struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &ctx->pending_nvm;

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
		snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x",
			 nvm->major, nvm->minor);
}

static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &pf->hw.flash.nvm;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

static void
ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_nvm_info *nvm = &ctx->pending_nvm;

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
		snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
}

static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_hw *hw = &pf->hw;

	snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
}

static void
ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;

	snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u",
		 pkg->major, pkg->minor, pkg->update, pkg->draft);
}

static void
ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
}

static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

	/* The netlist version fields are BCD formatted */
	snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
		 netlist->major, netlist->minor,
		 netlist->type >> 16, netlist->type & 0xFFFF,
		 netlist->rev, netlist->cust_ver);
}

static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &pf->hw.flash.netlist;

	snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}

static void
ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf,
			     struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &ctx->pending_netlist;

	/* The netlist version fields are BCD formatted */
	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
		snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
			 netlist->major, netlist->minor,
			 netlist->type >> 16, netlist->type & 0xFFFF,
			 netlist->rev, netlist->cust_ver);
}

static void
ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
			       struct ice_info_ctx *ctx)
{
	struct ice_netlist_info *netlist = &ctx->pending_netlist;

	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
		snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }

/* The combined() macro inserts both the running entry as well as a stored
 * entry. The running entry will always report the version from the active
 * handler. The stored entry will first try the pending handler, and fall
 * back to the active handler if the pending function does not report a
 * version. The pending handler should check the status of a pending update
 * for the relevant flash component. It should only fill in the buffer in
 * the case where a valid pending version is available. This ensures that
 * the related stored and running versions remain in sync, and that stored
 * versions are correctly reported as expected.
 */
#define combined(key, active, pending) \
	running(key, active), \
	stored(key, pending, active)
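
/* For example, combined("fw.netlist", ice_info_netlist_ver,
 * ice_info_pending_netlist_ver) expands to the two table entries:
 *
 *   running("fw.netlist", ice_info_netlist_ver),
 *   stored("fw.netlist", ice_info_pending_netlist_ver, ice_info_netlist_ver)
 */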

enum ice_version_type {
	ICE_VERSION_FIXED,
	ICE_VERSION_RUNNING,
	ICE_VERSION_STORED,
};

static const struct ice_devlink_version {
	enum ice_version_type type;
	const char *key;
	void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
	void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
} ice_devlink_versions[] = {
	fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
	running("fw.mgmt.api", ice_info_fw_api),
	running("fw.mgmt.build", ice_info_fw_build),
	combined(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver, ice_info_pending_orom_ver),
	combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver),
	combined(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack, ice_info_pending_eetrack),
	running("fw.app.name", ice_info_ddp_pkg_name),
	running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
	running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
	combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
	combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
};
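
/* A sketch of the resulting "devlink dev info" report built from this table
 * (all values and the PCI address hypothetical):
 *
 *   $ devlink dev info pci/0000:3b:00.0
 *   pci/0000:3b:00.0:
 *     serial_number 01-23-45-67-89-ab-cd-ef
 *     versions:
 *         fixed:
 *           board.id K12345-001
 *         running:
 *           fw.mgmt 5.4.5
 *           fw.mgmt.api 1.7.9
 *         stored:
 *           fw.undi 1.2581.0
 */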

/**
 * ice_devlink_info_get - .info_get devlink handler
 * @devlink: devlink instance structure
 * @req: the devlink info request
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .info_get operation. Reports information about the
 * device.
 *
 * Return: zero on success or an error code on failure.
 */
static int ice_devlink_info_get(struct devlink *devlink,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_info_ctx *ctx;
	size_t i;
	int err;

	err = ice_wait_for_reset(pf, 10 * HZ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting");
		return err;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* discover capabilities first */
	err = ice_discover_dev_caps(hw, &ctx->dev_caps);
	if (err) {
		dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
		goto out_free_ctx;
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
		err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
				err, ice_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending Option ROM */
			ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
		err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
		if (err) {
			dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
				err, ice_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending NVM */
			ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
		}
	}

	if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
		err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
		if (err) {
			dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
				err, ice_aq_str(hw->adminq.sq_last_status));

			/* disable display of pending netlist */
			ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
		}
	}

	ice_info_get_dsn(pf, ctx);

	err = devlink_info_serial_number_put(req, ctx->buf);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
		goto out_free_ctx;
	}

	for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
		enum ice_version_type type = ice_devlink_versions[i].type;
		const char *key = ice_devlink_versions[i].key;

		memset(ctx->buf, 0, sizeof(ctx->buf));

		ice_devlink_versions[i].getter(pf, ctx);

		/* If the default getter doesn't report a version, use the
		 * fallback function. This is primarily useful in the case of
		 * "stored" versions that want to report the same value as the
		 * running version in the normal case of no pending update.
		 */
		if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback)
			ice_devlink_versions[i].fallback(pf, ctx);

		/* Do not report missing versions */
		if (ctx->buf[0] == '\0')
			continue;

		switch (type) {
		case ICE_VERSION_FIXED:
			err = devlink_info_version_fixed_put(req, key, ctx->buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_RUNNING:
			err = devlink_info_version_running_put(req, key, ctx->buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
				goto out_free_ctx;
			}
			break;
		case ICE_VERSION_STORED:
			err = devlink_info_version_stored_put(req, key, ctx->buf);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
				goto out_free_ctx;
			}
			break;
		}
	}

out_free_ctx:
	kfree(ctx);
	return err;
}

/**
 * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
 * @pf: pointer to the pf instance
 * @extack: netlink extended ACK structure
 *
 * Allow the user to activate new Embedded Management Processor firmware by
 * issuing a device-specific EMP reset. Called in response to
 * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
 *
 * Note that teardown and rebuild of the driver state happens automatically as
 * part of an interrupt and watchdog task. This is because all physical
 * functions on the device must be able to reset when an EMP reset occurs from
 * any source.
 */
static int
ice_devlink_reload_empr_start(struct ice_pf *pf,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	u8 pending;
	int err;

	err = ice_get_pending_updates(pf, &pending, extack);
	if (err)
		return err;

	/* pending is a bitmask of which flash banks have a pending update,
	 * including the main NVM bank, the Option ROM bank, and the netlist
	 * bank. If any of these bits are set, then there is a pending update
	 * waiting to be activated.
	 */
	if (!pending) {
		NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
		return -ECANCELED;
	}

	if (pf->fw_emp_reset_disabled) {
		NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
		return -ECANCELED;
	}

	dev_dbg(dev, "Issuing device EMP reset to activate firmware\n");

	err = ice_aq_nvm_update_empr(hw);
	if (err) {
		dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
		return err;
	}

	return 0;
}
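
/* Illustrative usage from userspace (PCI address hypothetical): after a
 * "devlink dev flash" staged new firmware, activate it with
 *
 *   $ devlink dev reload pci/0000:3b:00.0 action fw_activate
 *
 * which reaches this handler via DEVLINK_CMD_RELOAD.
 */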

/**
 * ice_devlink_reload_down - prepare for reload
 * @devlink: pointer to the devlink instance to reload
 * @netns_change: if true, the network namespace is changing
 * @action: the action to perform
 * @limit: limits on what reload should do, such as not resetting
 * @extack: netlink extended ACK structure
 */
static int
ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
			enum devlink_reload_action action,
			enum devlink_reload_limit limit,
			struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		if (ice_is_eswitch_mode_switchdev(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Go to legacy mode before doing reinit");
			return -EOPNOTSUPP;
		}
		if (ice_is_adq_active(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Turn off ADQ before doing reinit");
			return -EOPNOTSUPP;
		}
		if (ice_has_vfs(pf)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Remove all VFs before doing reinit");
			return -EOPNOTSUPP;
		}
		ice_unload(pf);
		return 0;
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		return ice_devlink_reload_empr_start(pf, extack);
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}

/**
 * ice_devlink_reload_empr_finish - Wait for EMP reset to finish
 * @pf: pointer to the pf instance
 * @extack: netlink extended ACK structure
 *
 * Wait for driver to finish rebuilding after EMP reset is completed. This
 * includes time to wait for both the actual device reset as well as the time
 * for the driver's rebuild to complete.
 */
static int
ice_devlink_reload_empr_finish(struct ice_pf *pf,
			       struct netlink_ext_ack *extack)
{
	int err;

	err = ice_wait_for_reset(pf, 60 * HZ);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
		return err;
	}

	return 0;
}

/**
 * ice_devlink_port_opt_speed_str - convert speed to a string
 * @speed: speed value
 */
static const char *ice_devlink_port_opt_speed_str(u8 speed)
{
	switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
	case ICE_AQC_PORT_OPT_MAX_LANE_100M:
		return "0.1";
	case ICE_AQC_PORT_OPT_MAX_LANE_1G:
		return "1";
	case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
		return "2.5";
	case ICE_AQC_PORT_OPT_MAX_LANE_5G:
		return "5";
	case ICE_AQC_PORT_OPT_MAX_LANE_10G:
		return "10";
	case ICE_AQC_PORT_OPT_MAX_LANE_25G:
		return "25";
	case ICE_AQC_PORT_OPT_MAX_LANE_50G:
		return "50";
	case ICE_AQC_PORT_OPT_MAX_LANE_100G:
		return "100";
	}

	return "-";
}

#define ICE_PORT_OPT_DESC_LEN	50
/**
 * ice_devlink_port_options_print - Print available port split options
 * @pf: the PF to print split port options
 *
 * Prints a table with available port split options and max port speeds
 */
static void ice_devlink_port_options_print(struct ice_pf *pf)
{
	u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
	struct ice_aqc_get_port_options_elem *options, *opt;
	struct device *dev = ice_pf_to_dev(pf);
	bool active_valid, pending_valid;
	char desc[ICE_PORT_OPT_DESC_LEN];
	const char *str;
	int status;

	options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
			  sizeof(*options), GFP_KERNEL);
	if (!options)
		return;

	for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
		opt = options + i * ICE_AQC_PORT_OPT_MAX;
		options_count = ICE_AQC_PORT_OPT_MAX;
		active_valid = false;

		status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
						 i, true, &active_idx,
						 &active_valid, &pending_idx,
						 &pending_valid);
		if (status) {
			dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
				i, status);
			goto err;
		}
	}

	dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
	dev_dbg(dev, "Status  Split      Quad 0          Quad 1\n");
	dev_dbg(dev, "        count  L0  L1  L2  L3  L4  L5  L6  L7\n");

	for (i = 0; i < options_count; i++) {
		cnt = 0;

		if (i == ice_active_port_option)
			str = "Active";
		else if ((i == pending_idx) && pending_valid)
			str = "Pending";
		else
			str = "";

		cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
				"%-8s", str);

		cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
				"%-6u", options[i].pmd);

		for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
			speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
			str = ice_devlink_port_opt_speed_str(speed);
			cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
					"%3s ", str);
		}

		dev_dbg(dev, "%s\n", desc);
	}

err:
	kfree(options);
}

/**
 * ice_devlink_aq_set_port_option - Send set port option admin queue command
 * @pf: the PF to set port option for
 * @option_idx: selected port option
 * @extack: extended netdev ack structure
 *
 * Sends set port option admin queue command with selected port option and
 * calls NVM write activate.
 */
static int
ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
			       struct netlink_ext_ack *extack)
{
	struct device *dev = ice_pf_to_dev(pf);
	int status;

	status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
	if (status) {
		dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
			status, pf->hw.adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
		return -EIO;
	}

	status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
	if (status) {
		dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
			status, pf->hw.adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
		return -EIO;
	}

	status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
	if (status) {
		dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
			status, pf->hw.adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
		ice_release_nvm(&pf->hw);
		return -EIO;
	}

	ice_release_nvm(&pf->hw);

	NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
	return 0;
}

/**
 * ice_devlink_port_split - .port_split devlink handler
 * @devlink: devlink instance structure
 * @port: devlink port structure
 * @count: number of ports to split to
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .port_split operation.
 *
 * Unfortunately, the devlink expression of available options is limited
 * to just a number, so search for an FW port option which supports
 * the specified number. As there could be multiple FW port options with
 * the same port split count, allow switching between them. When the same
 * port split count request is issued again, switch to the next FW port
 * option with the same port split count.
 *
 * Return: zero on success or an error code on failure.
 */
static int
ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
		       unsigned int count, struct netlink_ext_ack *extack)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
	u8 i, j, active_idx, pending_idx, new_option;
	struct ice_pf *pf = devlink_priv(devlink);
	u8 option_count = ICE_AQC_PORT_OPT_MAX;
	struct device *dev = ice_pf_to_dev(pf);
	bool active_valid, pending_valid;
	int status;

	status = ice_aq_get_port_options(&pf->hw, options, &option_count,
					 0, true, &active_idx, &active_valid,
					 &pending_idx, &pending_valid);
	if (status) {
		dev_dbg(dev, "Couldn't read port split options, err = %d\n",
			status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
		return -EIO;
	}

	new_option = ICE_AQC_PORT_OPT_MAX;
	active_idx = pending_valid ? pending_idx : active_idx;
	for (i = 1; i <= option_count; i++) {
		/* In order to allow switching between FW port options with
		 * the same port split count, search for a new option starting
		 * from the active/pending option (with array wrap around).
		 */
		j = (active_idx + i) % option_count;

		if (count == options[j].pmd) {
			new_option = j;
			break;
		}
	}

	if (new_option == active_idx) {
		dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
			count);
		NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
		ice_devlink_port_options_print(pf);
		return -EINVAL;
	}

	if (new_option == ICE_AQC_PORT_OPT_MAX) {
		dev_dbg(dev, "request to split: count: %u not found\n", count);
		NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
		ice_devlink_port_options_print(pf);
		return -EINVAL;
	}

	status = ice_devlink_aq_set_port_option(pf, new_option, extack);
	if (status)
		return status;

	ice_devlink_port_options_print(pf);

	return 0;
}
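
/* Illustrative usage (PCI address and port index hypothetical):
 *
 *   $ devlink port split pci/0000:3b:00.0/0 count 4
 *   $ devlink port unsplit pci/0000:3b:00.0/0
 *
 * Either command stages a new FW port option; a reboot is required to
 * finish the split, as reported through extack above.
 */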

/**
 * ice_devlink_port_unsplit - .port_unsplit devlink handler
 * @devlink: devlink instance structure
 * @port: devlink port structure
 * @extack: extended netdev ack structure
 *
 * Callback for the devlink .port_unsplit operation.
 * Calls ice_devlink_port_split with split count set to 1.
 * Note that a FW port option with split count 1 might not exist.
 *
 * Return: zero on success or an error code on failure.
 */
static int
ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
			 struct netlink_ext_ack *extack)
{
	return ice_devlink_port_split(devlink, port, 1, extack);
}

/**
 * ice_tear_down_devlink_rate_tree - removes the devlink-rate exported tree
 * @pf: pf struct
 *
 * This function tears down the tree exported during VF creation.
 */
void ice_tear_down_devlink_rate_tree(struct ice_pf *pf)
{
	struct devlink *devlink;
	struct ice_vf *vf;
	unsigned int bkt;

	devlink = priv_to_devlink(pf);

	devl_lock(devlink);
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		if (vf->devlink_port.devlink_rate)
			devl_rate_leaf_destroy(&vf->devlink_port);
	}
	mutex_unlock(&pf->vfs.table_lock);

	devl_rate_nodes_destroy(devlink);
	devl_unlock(devlink);
}

/**
 * ice_enable_custom_tx - try to enable custom Tx feature
 * @pf: pf struct
 *
 * This function tries to enable the custom Tx feature; it is not possible
 * to enable it if DCB or ADQ is active.
 */
static bool ice_enable_custom_tx(struct ice_pf *pf)
{
	struct ice_port_info *pi = ice_get_main_vsi(pf)->port_info;
	struct device *dev = ice_pf_to_dev(pf);

	if (pi->is_custom_tx_enabled)
		/* already enabled, return true */
		return true;

	if (ice_is_adq_active(pf)) {
		dev_err(dev, "ADQ active, can't modify Tx scheduler tree\n");
		return false;
	}

	if (ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB active, can't modify Tx scheduler tree\n");
		return false;
	}

	pi->is_custom_tx_enabled = true;

	return true;
}

/**
 * ice_traverse_tx_tree - traverse Tx scheduler tree
 * @devlink: devlink struct
 * @node: current node, used for recursion
 * @tc_node: tc_node struct, that is treated as a root
 * @pf: pf struct
 *
 * This function traverses the Tx scheduler tree and exports its entire
 * structure to devlink-rate.
 */
static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node *node,
				 struct ice_sched_node *tc_node, struct ice_pf *pf)
{
	struct devlink_rate *rate_node = NULL;
	struct ice_vf *vf;
	int i;

	if (node->parent == tc_node) {
		/* create root node */
		rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
	} else if (node->vsi_handle &&
		   pf->vsi[node->vsi_handle]->vf) {
		vf = pf->vsi[node->vsi_handle]->vf;
		if (!vf->devlink_port.devlink_rate)
			/* leaf nodes don't have children,
			 * so we don't set rate_node
			 */
			devl_rate_leaf_create(&vf->devlink_port, node,
					      node->parent->rate_node);
	} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
		   node->parent->rate_node) {
		rate_node = devl_rate_node_create(devlink, node, node->name,
						  node->parent->rate_node);
	}

	if (rate_node && !IS_ERR(rate_node))
		node->rate_node = rate_node;

	for (i = 0; i < node->num_children; i++)
		ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
}

/**
 * ice_devlink_rate_init_tx_topology - export Tx scheduler tree to devlink rate
 * @devlink: devlink struct
 * @vsi: main vsi struct
 *
 * This function finds a root node, then calls ice_traverse_tx_tree(), which
 * traverses the tree and exports its contents to devlink rate.
 */
int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi)
{
	struct ice_port_info *pi = vsi->port_info;
	struct ice_sched_node *tc_node;
	struct ice_pf *pf = vsi->back;
	int i;

	tc_node = pi->root->children[0];
	mutex_lock(&pi->sched_lock);
	devl_lock(devlink);
	for (i = 0; i < tc_node->num_children; i++)
		ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
	devl_unlock(devlink);
	mutex_unlock(&pi->sched_lock);

	return 0;
}

/**
 * ice_set_object_tx_share - sets node scheduling parameter
 * @pi: port information structure
 * @node: Tx scheduler node instance
 * @bw: bandwidth in bytes per second
 * @extack: extended netdev ack structure
 *
 * This function sets the ICE_MIN_BW scheduling BW limit.
 */
static int ice_set_object_tx_share(struct ice_port_info *pi, struct ice_sched_node *node,
				   u64 bw, struct netlink_ext_ack *extack)
{
	int status;

	mutex_lock(&pi->sched_lock);
	/* convert bytes per second to kilobits per second */
	node->tx_share = div_u64(bw, 125);
	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, node->tx_share);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_share");

	return status;
}
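
/* Worked example of the conversion above: a tx_share of 1,000,000 bytes/s
 * is 8,000,000 bits/s; dividing the byte rate by 125 yields the same value
 * in kilobits per second, 1000000 / 125 = 8000 kbps.
 */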

/**
 * ice_set_object_tx_max - sets node scheduling parameter
 * @pi: port information structure
 * @node: Tx scheduler node instance
 * @bw: bandwidth in bytes per second
 * @extack: extended netdev ack structure
 *
 * This function sets the ICE_MAX_BW scheduling BW limit.
 */
static int ice_set_object_tx_max(struct ice_port_info *pi, struct ice_sched_node *node,
				 u64 bw, struct netlink_ext_ack *extack)
{
	int status;

	mutex_lock(&pi->sched_lock);
	/* convert bytes per second to kilobits per second */
	node->tx_max = div_u64(bw, 125);
	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, node->tx_max);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_max");

	return status;
}

/**
 * ice_set_object_tx_priority - sets node scheduling parameter
 * @pi: port information structure
 * @node: Tx scheduler node instance
 * @priority: value representing priority for strict priority arbitration
 * @extack: extended netdev ack structure
 *
 * This function sets the priority of a node among its siblings.
 */
static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched_node *node,
				      u32 priority, struct netlink_ext_ack *extack)
{
	int status;

	if (priority >= 8) {
		NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
		return -EINVAL;
	}

	mutex_lock(&pi->sched_lock);
	node->tx_priority = priority;
	status = ice_sched_set_node_priority(pi, node, node->tx_priority);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_priority");

	return status;
}

/**
 * ice_set_object_tx_weight - sets node scheduling parameter
 * @pi: port information structure
 * @node: Tx scheduler node instance
 * @weight: value representing relative weight for WFQ arbitration
 * @extack: extended netdev ack structure
 *
 * This function sets the node weight for the WFQ algorithm.
 */
static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_node *node,
				    u32 weight, struct netlink_ext_ack *extack)
{
	int status;

	if (weight > 200 || weight < 1) {
		NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200");
		return -EINVAL;
	}

	mutex_lock(&pi->sched_lock);
	node->tx_weight = weight;
	status = ice_sched_set_node_weight(pi, node, node->tx_weight);
	mutex_unlock(&pi->sched_lock);

	if (status)
		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_weight");

	return status;
}

/**
 * ice_get_pi_from_dev_rate - get port info from devlink_rate
 * @rate_node: devlink rate node instance
 *
 * This function returns the port_info struct corresponding to the given
 * devlink_rate node.
 */
static struct ice_port_info *ice_get_pi_from_dev_rate(struct devlink_rate *rate_node)
{
	struct ice_pf *pf = devlink_priv(rate_node->devlink);

	return ice_get_main_vsi(pf)->port_info;
}

static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
				     struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node;
	struct ice_port_info *pi;

	pi = ice_get_pi_from_dev_rate(rate_node);

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	/* preallocate memory for ice_sched_node */
	node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	*priv = node;

	return 0;
}

static int ice_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
				     struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node, *tc_node;
	struct ice_port_info *pi;

	pi = ice_get_pi_from_dev_rate(rate_node);
	tc_node = pi->root->children[0];
	node = priv;

	if (!rate_node->parent || !node || tc_node == node || !extack)
		return 0;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	/* refuse to delete a node that still has children */
	if (node->num_children)
		return -EINVAL;

	mutex_lock(&pi->sched_lock);
	ice_free_sched_node(pi, node);
	mutex_unlock(&pi->sched_lock);

	return 0;
}

static int ice_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
					    u64 tx_max, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_leaf),
				     node, tx_max, extack);
}

static int ice_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
					      u64 tx_share, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_leaf), node,
				       tx_share, extack);
}

static int ice_devlink_rate_leaf_tx_priority_set(struct devlink_rate *rate_leaf, void *priv,
						 u32 tx_priority, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_leaf), node,
					  tx_priority, extack);
}

static int ice_devlink_rate_leaf_tx_weight_set(struct devlink_rate *rate_leaf, void *priv,
					       u32 tx_weight, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_leaf), node,
					tx_weight, extack);
}

static int ice_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
					    u64 tx_max, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_node),
				     node, tx_max, extack);
}

static int ice_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
					      u64 tx_share, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_node),
				       node, tx_share, extack);
}

static int ice_devlink_rate_node_tx_priority_set(struct devlink_rate *rate_node, void *priv,
						 u32 tx_priority, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_node),
					  node, tx_priority, extack);
}

static int ice_devlink_rate_node_tx_weight_set(struct devlink_rate *rate_node, void *priv,
					       u32 tx_weight, struct netlink_ext_ack *extack)
{
	struct ice_sched_node *node = priv;

	if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
		return -EBUSY;

	if (!node)
		return 0;

	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_node),
					node, tx_weight, extack);
}

static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
				  struct devlink_rate *parent,
				  void *priv, void *parent_priv,
				  struct netlink_ext_ack *extack)
{
	struct ice_port_info *pi = ice_get_pi_from_dev_rate(devlink_rate);
	struct ice_sched_node *tc_node, *node, *parent_node;
	u16 num_nodes_added;
	u32 first_node_teid;
	u32 node_teid;
	int status;

	tc_node = pi->root->children[0];
	node = priv;

	if (!extack)
		return 0;

	if (!ice_enable_custom_tx(devlink_priv(devlink_rate->devlink)))
		return -EBUSY;

	if (!parent) {
		if (!node || tc_node == node || node->num_children)
			return -EINVAL;

		mutex_lock(&pi->sched_lock);
		ice_free_sched_node(pi, node);
		mutex_unlock(&pi->sched_lock);

		return 0;
	}

	parent_node = parent_priv;

	/* if the node doesn't exist, create it */
	if (!node->parent) {
		mutex_lock(&pi->sched_lock);
		status = ice_sched_add_elems(pi, tc_node, parent_node,
					     parent_node->tx_sched_layer + 1,
					     1, &num_nodes_added, &first_node_teid,
					     &node);
		mutex_unlock(&pi->sched_lock);

		if (status) {
			NL_SET_ERR_MSG_MOD(extack, "Can't add a new node");
			return status;
		}

		if (devlink_rate->tx_share)
			ice_set_object_tx_share(pi, node, devlink_rate->tx_share, extack);
		if (devlink_rate->tx_max)
			ice_set_object_tx_max(pi, node, devlink_rate->tx_max, extack);
		if (devlink_rate->tx_priority)
			ice_set_object_tx_priority(pi, node, devlink_rate->tx_priority, extack);
		if (devlink_rate->tx_weight)
			ice_set_object_tx_weight(pi, node, devlink_rate->tx_weight, extack);
	} else {
		node_teid = le32_to_cpu(node->info.node_teid);
		mutex_lock(&pi->sched_lock);
		status = ice_sched_move_nodes(pi, parent_node, 1, &node_teid);
		mutex_unlock(&pi->sched_lock);

		if (status)
			NL_SET_ERR_MSG_MOD(extack, "Can't move existing node to a new parent");
	}

	return status;
}
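
/* Illustrative devlink-rate usage driving the callbacks above (node name and
 * PCI address hypothetical):
 *
 *   $ devlink port function rate add pci/0000:3b:00.0/group1 tx_share 100Mbit
 *   $ devlink port function rate set pci/0000:3b:00.0/pf0vf0 parent group1
 */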

/**
 * ice_devlink_reload_up - do reload up after reinit
 * @devlink: pointer to the devlink instance reloading
 * @action: the action requested
 * @limit: limits imposed by userspace, such as not resetting
 * @actions_performed: on return, indicates which actions were actually performed
 * @extack: netlink extended ACK structure
 */
static int
ice_devlink_reload_up(struct devlink *devlink,
		      enum devlink_reload_action action,
		      enum devlink_reload_limit limit,
		      u32 *actions_performed,
		      struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
		*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
		return ice_load(pf);
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
		*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
		return ice_devlink_reload_empr_finish(pf, extack);
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}

static const struct devlink_ops ice_devlink_ops = {
	.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_down = ice_devlink_reload_down,
	.reload_up = ice_devlink_reload_up,
	.eswitch_mode_get = ice_eswitch_mode_get,
	.eswitch_mode_set = ice_eswitch_mode_set,
	.info_get = ice_devlink_info_get,
	.flash_update = ice_devlink_flash_update,

	.rate_node_new = ice_devlink_rate_node_new,
	.rate_node_del = ice_devlink_rate_node_del,

	.rate_leaf_tx_max_set = ice_devlink_rate_leaf_tx_max_set,
	.rate_leaf_tx_share_set = ice_devlink_rate_leaf_tx_share_set,
	.rate_leaf_tx_priority_set = ice_devlink_rate_leaf_tx_priority_set,
	.rate_leaf_tx_weight_set = ice_devlink_rate_leaf_tx_weight_set,

	.rate_node_tx_max_set = ice_devlink_rate_node_tx_max_set,
	.rate_node_tx_share_set = ice_devlink_rate_node_tx_share_set,
	.rate_node_tx_priority_set = ice_devlink_rate_node_tx_priority_set,
	.rate_node_tx_weight_set = ice_devlink_rate_node_tx_weight_set,

	.rate_leaf_parent_set = ice_devlink_set_parent,
	.rate_node_parent_set = ice_devlink_set_parent,
};

static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);

	ctx->val.vbool = !!(pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2);

	return 0;
}

static int
ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);
	bool roce_ena = ctx->val.vbool;
	int ret;

	if (!roce_ena) {
		ice_unplug_aux_dev(pf);
		pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
		return 0;
	}

	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
	ret = ice_plug_aux_dev(pf);
	if (ret)
		pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;

	return ret;
}

static int
ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EOPNOTSUPP;

	if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
		NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int
ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);

	ctx->val.vbool = !!(pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP);

	return 0;
}

static int
ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct ice_pf *pf = devlink_priv(devlink);
	bool iw_ena = ctx->val.vbool;
	int ret;

	if (!iw_ena) {
		ice_unplug_aux_dev(pf);
		pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
		return 0;
	}

	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
	ret = ice_plug_aux_dev(pf);
	if (ret)
		pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;

	return ret;
}

static int
ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
			       union devlink_param_value val,
			       struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
		return -EOPNOTSUPP;

	if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
		NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct devlink_param ice_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      ice_devlink_enable_roce_get,
			      ice_devlink_enable_roce_set,
			      ice_devlink_enable_roce_validate),
	DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      ice_devlink_enable_iw_get,
			      ice_devlink_enable_iw_set,
			      ice_devlink_enable_iw_validate),
};
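
/* Illustrative runtime usage of these parameters (PCI address hypothetical):
 *
 *   $ devlink dev param set pci/0000:3b:00.0 name enable_roce value true cmode runtime
 *   $ devlink dev param show pci/0000:3b:00.0 name enable_iwarp
 */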

static void ice_devlink_free(void *devlink_ptr)
{
	devlink_free((struct devlink *)devlink_ptr);
}

/**
 * ice_allocate_pf - Allocate devlink and return PF structure pointer
 * @dev: the device to allocate for
 *
 * Allocate a devlink instance for this device and return the private area as
 * the PF structure. The devlink memory is kept track of through devres by
 * adding an action to remove it when unwinding.
 */
struct ice_pf *ice_allocate_pf(struct device *dev)
{
	struct devlink *devlink;

	devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev);
	if (!devlink)
		return NULL;

	/* Add an action to tear down the devlink when unwinding the driver */
	if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
		return NULL;

	return devlink_priv(devlink);
}

/**
 * ice_devlink_register - Register devlink interface for this PF
 * @pf: the PF to register the devlink for.
 *
 * Register the devlink instance associated with this physical function.
 */
void ice_devlink_register(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	devlink_register(devlink);
}

/**
 * ice_devlink_unregister - Unregister devlink resources for this PF.
 * @pf: the PF structure to cleanup
 *
 * Releases resources used by devlink and cleans up associated memory.
 */
void ice_devlink_unregister(struct ice_pf *pf)
{
	devlink_unregister(priv_to_devlink(pf));
}

/**
 * ice_devlink_set_switch_id - Set unique switch ID based on PCI DSN
 * @pf: the PF to create a devlink port for
 * @ppid: struct with switch id information
 */
static void
ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
{
	struct pci_dev *pdev = pf->pdev;
	u64 id;

	id = pci_get_dsn(pdev);

	ppid->id_len = sizeof(id);
	put_unaligned_be64(id, &ppid->id);
}

int ice_devlink_register_params(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	return devlink_params_register(devlink, ice_devlink_params,
				       ARRAY_SIZE(ice_devlink_params));
}

void ice_devlink_unregister_params(struct ice_pf *pf)
{
	devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params,
				  ARRAY_SIZE(ice_devlink_params));
}

/**
 * ice_devlink_set_port_split_options - Set port split options
 * @pf: the PF to set port split options for
 * @attrs: devlink attributes
 *
 * Sets devlink port split options based on available FW port options
 */
static void
ice_devlink_set_port_split_options(struct ice_pf *pf,
				   struct devlink_port_attrs *attrs)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
	u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
	bool active_valid, pending_valid;
	int status;

	status = ice_aq_get_port_options(&pf->hw, options, &option_count,
					 0, true, &active_idx, &active_valid,
					 &pending_idx, &pending_valid);
	if (status) {
		dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
			status);
		return;
	}

	/* find the biggest available port split count */
	for (i = 0; i < option_count; i++)
		attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);

	attrs->splittable = attrs->lanes ? 1 : 0;
	ice_active_port_option = active_idx;
}

static const struct devlink_port_ops ice_devlink_port_ops = {
	.port_split = ice_devlink_port_split,
	.port_unsplit = ice_devlink_port_unsplit,
};

/**
 * ice_devlink_create_pf_port - Create a devlink port for this PF
 * @pf: the PF to create a devlink port for
 *
 * Create and register a devlink_port for this PF.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_devlink_create_pf_port(struct ice_pf *pf)
{
	struct devlink_port_attrs attrs = {};
	struct devlink_port *devlink_port;
	struct devlink *devlink;
	struct ice_vsi *vsi;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	devlink_port = &pf->devlink_port;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EIO;

	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
	attrs.phys.port_number = pf->hw.bus.func;

	/* As the FW supports port split options only for the whole device,
	 * set port split options only for the first PF.
	 */
	if (pf->hw.pf_id == 0)
		ice_devlink_set_port_split_options(pf, &attrs);

	ice_devlink_set_switch_id(pf, &attrs.switch_id);

	devlink_port_attrs_set(devlink_port, &attrs);
	devlink = priv_to_devlink(pf);

	err = devlink_port_register_with_ops(devlink, devlink_port, vsi->idx,
					     &ice_devlink_port_ops);
	if (err) {
		dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
			pf->hw.pf_id, err);
		return err;
	}

	return 0;
}

/**
 * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
 * @pf: the PF to cleanup
 *
 * Unregisters the devlink_port structure associated with this PF.
 */
void ice_devlink_destroy_pf_port(struct ice_pf *pf)
{
	devlink_port_unregister(&pf->devlink_port);
}

/**
 * ice_devlink_create_vf_port - Create a devlink port for this VF
 * @vf: the VF to create a port for
 *
 * Create and register a devlink_port for this VF.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_devlink_create_vf_port(struct ice_vf *vf)
{
	struct devlink_port_attrs attrs = {};
	struct devlink_port *devlink_port;
	struct devlink *devlink;
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_pf *pf;
	int err;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	devlink_port = &vf->devlink_port;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi)
		return -EINVAL;

	attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
	attrs.pci_vf.pf = pf->hw.bus.func;
	attrs.pci_vf.vf = vf->vf_id;

	ice_devlink_set_switch_id(pf, &attrs.switch_id);

	devlink_port_attrs_set(devlink_port, &attrs);
	devlink = priv_to_devlink(pf);

	err = devlink_port_register(devlink, devlink_port, vsi->idx);
	if (err) {
		dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
			vf->vf_id, err);
		return err;
	}

	return 0;
}

/**
 * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
 * @vf: the VF to cleanup
 *
 * Unregisters the devlink_port structure associated with this VF.
 */
void ice_devlink_destroy_vf_port(struct ice_vf *vf)
{
	devl_rate_leaf_destroy(&vf->devlink_port);
	devlink_port_unregister(&vf->devlink_port);
}
#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)

static const struct devlink_region_ops ice_nvm_region_ops;
static const struct devlink_region_ops ice_sram_region_ops;

/**
 * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region to snapshot
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to a DEVLINK_CMD_REGION_NEW for either
 * the nvm-flash or shadow-ram region.
 *
 * It captures a snapshot of the NVM or Shadow RAM flash contents. This
 * snapshot can then later be viewed via the DEVLINK_CMD_REGION_READ netlink
 * interface.
 *
 * Return: zero on success and updates the data pointer, or a non-zero error
 * code on failure.
 */
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
				    const struct devlink_region_ops *ops,
				    struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool read_shadow_ram;
	u8 *nvm_data, *tmp, i;
	u32 nvm_size, left;
	s8 num_blks;
	int status;

	if (ops == &ice_nvm_region_ops) {
		read_shadow_ram = false;
		nvm_size = hw->flash.flash_size;
	} else if (ops == &ice_sram_region_ops) {
		read_shadow_ram = true;
		nvm_size = hw->flash.sr_words * 2u;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function");
		return -EOPNOTSUPP;
	}

	nvm_data = vzalloc(nvm_size);
	if (!nvm_data)
		return -ENOMEM;

	num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE);
	tmp = nvm_data;
	left = nvm_size;

	/* Some systems take longer to read the NVM than others, which causes
	 * the FW to reclaim the NVM lock before the entire NVM has been read.
	 * Fix this by breaking the reads of the NVM into smaller chunks that
	 * will probably not take as long. This has some overhead since we are
	 * increasing the number of AQ commands, but it should always work.
	 */
	for (i = 0; i < num_blks; i++) {
		u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left);

		status = ice_acquire_nvm(hw, ICE_RES_READ);
		if (status) {
			dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
				status, hw->adminq.sq_last_status);
			NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
			vfree(nvm_data);
			return -EIO;
		}

		status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE,
					   &read_sz, tmp, read_shadow_ram);
		if (status) {
			dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
				read_sz, status, hw->adminq.sq_last_status);
			NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
			ice_release_nvm(hw);
			vfree(nvm_data);
			return -EIO;
		}
		ice_release_nvm(hw);

		tmp += read_sz;
		left -= read_sz;
	}

	*data = nvm_data;

	return 0;
}
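
/* Illustrative region usage from userspace (PCI address hypothetical):
 *
 *   $ devlink region new pci/0000:3b:00.0/nvm-flash snapshot 1
 *   $ devlink region dump pci/0000:3b:00.0/nvm-flash snapshot 1
 */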

/**
 * ice_devlink_nvm_read - Read a portion of NVM flash contents
 * @devlink: the devlink instance
 * @ops: the devlink region to snapshot
 * @extack: extended ACK response structure
 * @offset: the offset to start at
 * @size: the amount to read
 * @data: the data buffer to read into
 *
 * This function is called in response to DEVLINK_CMD_REGION_READ to directly
 * read a section of the NVM contents.
 *
 * It reads from either the nvm-flash or shadow-ram region contents.
 *
 * Return: zero on success, or a non-zero error code on failure.
 */
static int ice_devlink_nvm_read(struct devlink *devlink,
				const struct devlink_region_ops *ops,
				struct netlink_ext_ack *extack,
				u64 offset, u32 size, u8 *data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	bool read_shadow_ram;
	u64 nvm_size;
	int status;

	if (ops == &ice_nvm_region_ops) {
		read_shadow_ram = false;
		nvm_size = hw->flash.flash_size;
	} else if (ops == &ice_sram_region_ops) {
		read_shadow_ram = true;
		nvm_size = hw->flash.sr_words * 2u;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unexpected region in read function");
		return -EOPNOTSUPP;
	}

	if (offset + size >= nvm_size) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
		return -ERANGE;
	}

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status) {
		dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
		return -EIO;
	}

	status = ice_read_flat_nvm(hw, (u32)offset, &size, data,
				   read_shadow_ram);
	if (status) {
		dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
			size, status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
		ice_release_nvm(hw);
		return -EIO;
	}
	ice_release_nvm(hw);

	return 0;
}

/**
 * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
 * @devlink: the devlink instance
 * @ops: the devlink region being snapshotted
 * @extack: extended ACK response structure
 * @data: on exit points to snapshot data buffer
 *
 * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
 * the device-caps devlink region. It captures a snapshot of the device
 * capabilities reported by firmware.
 *
 * Return: zero on success and updates the data pointer, or a non-zero error
 * code on failure.
 */
static int
ice_devlink_devcaps_snapshot(struct devlink *devlink,
			     const struct devlink_region_ops *ops,
			     struct netlink_ext_ack *extack, u8 **data)
{
	struct ice_pf *pf = devlink_priv(devlink);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	void *devcaps;
	int status;

	devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
	if (!devcaps)
		return -ENOMEM;

	status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (status) {
		dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
		vfree(devcaps);
		return status;
	}

	*data = (u8 *)devcaps;

	return 0;
}

static const struct devlink_region_ops ice_nvm_region_ops = {
	.name = "nvm-flash",
	.destructor = vfree,
	.snapshot = ice_devlink_nvm_snapshot,
	.read = ice_devlink_nvm_read,
};

static const struct devlink_region_ops ice_sram_region_ops = {
	.name = "shadow-ram",
	.destructor = vfree,
	.snapshot = ice_devlink_nvm_snapshot,
	.read = ice_devlink_nvm_read,
};

static const struct devlink_region_ops ice_devcaps_region_ops = {
	.name = "device-caps",
	.destructor = vfree,
	.snapshot = ice_devlink_devcaps_snapshot,
};

/**
 * ice_devlink_init_regions - Initialize devlink regions
 * @pf: the PF device structure
 *
 * Create devlink regions used to enable access to dump the contents of the
 * flash memory on the device.
 */
void ice_devlink_init_regions(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct device *dev = ice_pf_to_dev(pf);
	u64 nvm_size, sram_size;

	nvm_size = pf->hw.flash.flash_size;
	pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
					       nvm_size);
	if (IS_ERR(pf->nvm_region)) {
		dev_err(dev, "failed to create NVM devlink region, err %ld\n",
			PTR_ERR(pf->nvm_region));
		pf->nvm_region = NULL;
	}

	sram_size = pf->hw.flash.sr_words * 2u;
	pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops,
						1, sram_size);
	if (IS_ERR(pf->sram_region)) {
		dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
			PTR_ERR(pf->sram_region));
		pf->sram_region = NULL;
	}

	pf->devcaps_region = devlink_region_create(devlink,
						   &ice_devcaps_region_ops, 10,
						   ICE_AQ_MAX_BUF_LEN);
	if (IS_ERR(pf->devcaps_region)) {
		dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
			PTR_ERR(pf->devcaps_region));
		pf->devcaps_region = NULL;
	}
}

/**
 * ice_devlink_destroy_regions - Destroy devlink regions
 * @pf: the PF device structure
 *
 * Remove previously created regions for this PF.
 */
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
	if (pf->nvm_region)
		devlink_region_destroy(pf->nvm_region);

	if (pf->sram_region)
		devlink_region_destroy(pf->sram_region);

	if (pf->devcaps_region)
		devlink_region_destroy(pf->devcaps_region);
}