// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_vf_lib_private.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"

/**
 * ice_free_vf_entries - Free all VF entries from the hash table
 * @pf: pointer to the PF structure
 *
 * Iterate over the VF hash table, removing and releasing all VF entries.
 * Called during VF teardown or as cleanup during failed VF initialization.
 */
static void ice_free_vf_entries(struct ice_pf *pf)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct hlist_node *tmp;
	struct ice_vf *vf;
	unsigned int bkt;

	/* Remove all VFs from the hash table and release their main
	 * reference. Once all references to the VF are dropped, ice_put_vf()
	 * will call ice_release_vf which will remove the VF memory.
	 */
	lockdep_assert_held(&vfs->table_lock);

	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
		hash_del_rcu(&vf->entry);
		ice_put_vf(vf);
	}
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable the VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that the VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable the VF's MSIX and queue mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi))
		return;

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->vfs.num_msix_per - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	if (!pf)
		return -EINVAL;

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!ice_has_vfs(pf))
		return;

	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	mutex_lock(&vfs->table_lock);

	ice_eswitch_release(pf);

	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_dis_vf_qs(vf);

		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(vf);
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			ice_free_vf_res(vf);
		}

		if (!pci_vfs_assigned(pf->pdev)) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}

		/* clear malicious info since the VF is getting released */
		list_del(&vf->mbx_info.list_entry);

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	vfs->num_qps_per = 0;
	ice_free_vf_entries(pf);

	mutex_unlock(&vfs->table_lock);

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns a pointer to the allocated VSI struct on success, or NULL on
 * failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
	struct ice_vsi_cfg_params params = {};
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	params.type = ICE_VSI_VF;
	params.pi = ice_vf_get_port_info(vf);
	params.vf = vf;
	params.flags = ICE_VSI_FLAG_INIT;

	vsi = ice_vsi_setup(pf, &params);

	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		ice_vf_invalidate_vsi(vf);
		return NULL;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	return vsi;
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this
 * VF. This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver, so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment
 * by 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
}
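
/* For illustration (hypothetical values): with pf->sriov_base_vector = 100
 * and pf->vfs.num_msix_per = 5, VF 2's first PF-space vector is
 * 100 + 2 * 5 = 110. That vector is the VF's OICR; its queue vectors start
 * at 111 (see ice_calc_vf_reg_idx()).
 */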

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values, while other registers need 0-based values that are relative
 * to the PF.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->vfs.num_msix_per) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		 & VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map each interrupt vector to its owning function */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}
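
/* Sketch of the indexing above, with hypothetical numbers: if
 * msix_vector_first_id = 1 and vf_base_id = 16, then a VF with vf_id 2 and
 * first_vector_idx 110 (num_msix_per = 5) programs VPINT_ALLOC and
 * VPINT_ALLOC_PCI with the device-based range 111..115, while
 * GLINT_VECT2FUNC is written at the PF-based indices 110..114 using
 * device-based VF number 2 + 16 = 18.
 */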

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	if (WARN_ON(!vsi))
		return;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}
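
/* Worked example of the VFNUMQ encoding (hypothetical values): a VF whose
 * first PF-space queue is 64 with 16 queue pairs writes VFFIRSTQ = 64 and
 * VFNUMQ = 16 - 1 = 15, i.e. the field holds (queue count - 1).
 */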

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated with the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
		q_vector->v_idx + 1;
}
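
/* Continuing the hypothetical example above: with sriov_base_vector = 100
 * and num_msix_per = 5, VF 2's q_vector 0 maps to register index
 * 100 + 5 * 2 + 0 + 1 = 111, leaving index 110 for the VF's OICR.
 */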

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = ice_get_max_used_msix_vector(pf);
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}
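
/* Numeric sketch (hypothetical values): with total_vectors = 512 and
 * num_msix_needed = 85 (e.g. 17 vectors * 5 VFs), the SR-IOV block is
 * carved from the top of the vector space, so
 * sriov_base_vector = 512 - 85 = 427. This only succeeds if the PF's own
 * vector usage stays at or below index 427.
 */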

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 * @num_vfs: the number of SR-IOV VFs being configured
 *
 * First, determine the HW interrupts available from the common pool. If we
 * allocate fewer VFs, each VF gets more vectors and can enable more queues.
 * Note that this does not grab any vectors from the SW pool already
 * allocated. Also note that all vector counts include one for each VF's
 * miscellaneous interrupt vector (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so they can be
 * used by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
	int vectors_used = ice_get_max_used_msix_vector(pf);
	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (!num_vfs)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		vectors_used;
	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			num_vfs);
		return -ENOSPC;
	}

	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
	if (!avail_qs)
		num_txq = 0;
	else if (num_txq > avail_qs)
		num_txq = rounddown_pow_of_two(avail_qs);

	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
	if (!avail_qs)
		num_rxq = 0;
	else if (num_rxq > avail_qs)
		num_rxq = rounddown_pow_of_two(avail_qs);

	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, num_vfs);
		return -ENOSPC;
	}

	err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
	if (err) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
			num_vfs, err);
		return err;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
	pf->vfs.num_msix_per = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);

	return 0;
}
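
/* Worked example of the vector tiers above (hypothetical numbers): with
 * 300 MSI-X vectors left for SR-IOV and 20 VFs requested,
 * msix_avail_per_vf = 300 / 20 = 15, which falls below the medium tier
 * (17 vectors), so each VF is sized to the small tier: 5 vectors and up to
 * 4 queue pairs.
 */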

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
 * the VF VSI's broadcast filter. It is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int err;

	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	err = ice_vf_init_host_cfg(vf, vsi);
	if (err)
		goto release_vsi;

	return 0;

release_vsi:
	ice_vf_vsi_release(vf);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int bkt, it_cnt;
	struct ice_vf *vf;
	int retval;

	lockdep_assert_held(&pf->vfs.table_lock);

	it_cnt = 0;
	ice_for_each_vf(pf, bkt, vf) {
		vf->vf_ops->clear_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
		it_cnt++;
	}

	ice_flush(hw);
	return 0;

teardown:
	ice_for_each_vf(pf, bkt, vf) {
		if (it_cnt == 0)
			break;

		ice_dis_vf_mappings(vf);
		ice_vf_vsi_release(vf);
		it_cnt--;
	}

	return retval;
}

/**
 * ice_sriov_free_vf - Free VF memory after all references are dropped
 * @vf: pointer to VF to free
 *
 * Called by ice_put_vf through ice_release_vf once the last reference to a VF
 * structure has been dropped.
 */
static void ice_sriov_free_vf(struct ice_vf *vf)
{
	mutex_destroy(&vf->cfg_lock);

	kfree_rcu(vf, rcu);
}

/**
 * ice_sriov_clear_reset_state - clears VF Reset status register
 * @vf: the VF to configure
 */
static void ice_sriov_clear_reset_state(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;

	/* Clear the reset status register so that the VF immediately sees
	 * that the device is resetting, even if hardware hasn't yet gotten
	 * around to clearing VFGEN_RSTAT for us.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS);
}

/**
 * ice_sriov_clear_mbx_register - clears SR-IOV VF's mailbox registers
 * @vf: the VF to configure
 */
static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
	wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
}

/**
 * ice_sriov_trigger_reset_register - trigger VF reset for SR-IOV VF
 * @vf: pointer to VF structure
 * @is_vflr: true if reset occurred due to VFLR
 *
 * Trigger and cleanup after a VF reset for a SR-IOV VF.
 */
static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* In the case of a VFLR, HW has already reset the VF and we just need
	 * to clean up. Otherwise we must first trigger the reset using the
	 * VFRTRIG register.
	 */
	if (!is_vflr) {
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}

	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = vf_abs_id / 32;
	bit_idx = vf_abs_id % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_sriov_poll_reset_status - poll SR-IOV VF reset status
 * @vf: pointer to VF structure
 *
 * Returns true when reset is successful, else returns false
 */
static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	unsigned int i;
	u32 reg;

	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M)
			return true;

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}
	return false;
}
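
/* Note on the polling budget above: 10 iterations with a 10-20 us sleep
 * between reads bounds the wait at roughly 100-200 us before the reset is
 * reported as failed.
 */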

/**
 * ice_sriov_clear_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_sriov_create_vsi - Create a new VSI for a VF
 * @vf: VF to create the VSI for
 *
 * This is called by ice_vf_recreate_vsi to create the new VSI after the old
 * VSI has been released.
 */
static int ice_sriov_create_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi;

	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	return 0;
}

/**
 * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_ena_vf_mappings(vf);
	wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

static const struct ice_vf_ops ice_sriov_vf_ops = {
	.reset_type = ICE_VF_RESET,
	.free = ice_sriov_free_vf,
	.clear_reset_state = ice_sriov_clear_reset_state,
	.clear_mbx_register = ice_sriov_clear_mbx_register,
	.trigger_reset_register = ice_sriov_trigger_reset_register,
	.poll_reset_status = ice_sriov_poll_reset_status,
	.clear_reset_trigger = ice_sriov_clear_reset_trigger,
	.irq_close = NULL,
	.create_vsi = ice_sriov_create_vsi,
	.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};

/**
 * ice_create_vf_entries - Allocate and insert VF entries
 * @pf: pointer to the PF structure
 * @num_vfs: the number of VFs to allocate
 *
 * Allocate new VF entries and insert them into the hash table. Set some
 * basic default fields for initializing the new VFs.
 *
 * After this function exits, the hash table will have num_vfs entries
 * inserted.
 *
 * Returns 0 on success or an integer error code on failure.
 */
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_vf *vf;
	u16 vf_id;
	int err;

	lockdep_assert_held(&vfs->table_lock);

	for (vf_id = 0; vf_id < num_vfs; vf_id++) {
		vf = kzalloc(sizeof(*vf), GFP_KERNEL);
		if (!vf) {
			err = -ENOMEM;
			goto err_free_entries;
		}
		kref_init(&vf->refcnt);

		vf->pf = pf;
		vf->vf_id = vf_id;

		/* set SR-IOV specific vf ops for VFs created during the
		 * SR-IOV flow
		 */
		vf->vf_ops = &ice_sriov_vf_ops;

		ice_initialize_vf_entry(vf);

		vf->vf_sw_id = pf->first_sw;

		hash_add_rcu(vfs->table, &vf->entry, vf_id);
	}

	return 0;

err_free_entries:
	ice_free_vf_entries(pf);
	return err;
}

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret)
		goto err_unroll_intr;

	mutex_lock(&pf->vfs.table_lock);

	ret = ice_set_per_vf_res(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer VFs\n",
			num_vfs, ret);
		goto err_unroll_sriov;
	}

	ret = ice_create_vf_entries(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
			num_vfs);
		goto err_unroll_sriov;
	}

	ret = ice_start_vfs(pf);
	if (ret) {
		dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
		ret = -EAGAIN;
		goto err_unroll_vf_entries;
	}

	clear_bit(ICE_VF_DIS, pf->state);

	ret = ice_eswitch_configure(pf);
	if (ret) {
		dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
		goto err_unroll_sriov;
	}

	/* rearm global interrupts */
	if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	mutex_unlock(&pf->vfs.table_lock);

	return 0;

err_unroll_vf_entries:
	ice_free_vf_entries(pf);
err_unroll_sriov:
	mutex_unlock(&pf->vfs.table_lock);
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (!num_vfs) {
		ice_free_vfs(pf);
		return 0;
	}

	if (num_vfs > pf->vfs.num_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->vfs.num_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return negative on
 * failure.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			ice_free_vfs(pf);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err)
		return err;

	return num_vfs;
}

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * Called from the VFLR IRQ handler to free up VF resources and state
 * variables.
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out which VFs have
		 * been reset
		 */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_get_vf_from_pfq - get the VF that owns the PF space queue passed in
 * @pf: PF used to index all VFs
 * @pfq: queue index relative to the PF's function space
 *
 * If no VF is found that owns the pfq then return NULL, otherwise return a
 * pointer to the VF that owns the pfq.
 *
 * If this function returns non-NULL, it acquires a reference count of the VF
 * structure. The caller is responsible for calling ice_put_vf() to drop this
 * reference.
 */
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		struct ice_vsi *vsi;
		u16 rxq_idx;

		vsi = ice_get_vf_vsi(vf);
		if (!vsi)
			continue;

		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq) {
				struct ice_vf *found;

				if (kref_get_unless_zero(&vf->refcnt))
					found = vf;
				else
					found = NULL;
				rcu_read_unlock();
				return found;
			}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
 * @pf: PF used for conversion
 * @globalq: global queue index used to convert to PF space queue index
 */
static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
{
	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
}
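
/* For illustration (hypothetical values): if rxq_first_id = 128, device
 * global Rx queue 140 corresponds to PF space queue 140 - 128 = 12.
 */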

/**
 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
 * @pf: PF that the LAN overflow event happened on
 * @event: structure holding the event information for the LAN overflow event
 *
 * Determine if the LAN overflow event was caused by a VF queue. If it was not
 * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger
 * a reset on the offending VF.
 */
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 gldcb_rtctq, queue;
	struct ice_vf *vf;

	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);

	/* event returns device global Rx queue number */
	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
		GLDCB_RTCTQ_RXQNUM_S;

	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
	if (!vf)
		return;

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
	ice_put_vf(vf);
}

/**
 * ice_set_vf_spoofchk - enable or disable VF spoof checking
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ena: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 */
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		netdev_err(netdev, "VSI %d for VF %d is null\n",
			   vf->lan_vsi_idx, vf->vf_id);
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf_vsi->type != ICE_VSI_VF) {
		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
		ret = -ENODEV;
		goto out_put_vf;
	}

	if (ena == vf->spoofchk) {
		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
	if (ret)
		dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d, error %d\n",
			ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
	else
		vf->spoofchk = ena;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_cfg - get VF configuration
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * Return the VF configuration
 */
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	ivi->vf = vf_id;
	ether_addr_copy(ivi->mac, vf->hw_lan_addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = ice_vf_get_port_vlan_id(vf);
	ivi->qos = ice_vf_get_port_vlan_prio(vf);
	if (ice_vf_is_port_vlan_ena(vf))
		ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_mac - program VF MAC address
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * Program the VF MAC address
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (is_multicast_ether_addr(mac)) {
		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	/* nothing left to do, unicast MAC already set */
	if (ether_addr_equal(vf->dev_lan_addr, mac) &&
	    ether_addr_equal(vf->hw_lan_addr, mac)) {
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	mutex_lock(&vf->cfg_lock);

	/* VF is notified of its new MAC via the PF's response to the
	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
	 */
	ether_addr_copy(vf->dev_lan_addr, mac);
	ether_addr_copy(vf->hw_lan_addr, mac);
	if (is_zero_ether_addr(mac)) {
		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
		vf->pf_set_mac = false;
		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
			    vf->vf_id);
	} else {
		/* PF will add MAC rule for the VF */
		vf->pf_set_mac = true;
		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
			    mac, vf_id);
	}

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_trust - enable or disable a VF as trusted
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	if (ice_is_eswitch_mode_switchdev(pf)) {
		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
		/* drop the reference taken by ice_get_vf_by_id() */
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	/* Check if already trusted */
	if (trusted == vf->trusted) {
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->trusted = trusted;
	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_link_state - set a VF's link state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_vc_notify_vf_link_state(vf);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
 * @pf: PF associated with VFs
 */
static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int rate = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		rate += vf->min_tx_rate;
	rcu_read_unlock();

	return rate;
}

/**
 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
 * @vf: VF trying to configure min_tx_rate
 * @min_tx_rate: min Tx rate in Mbps
 *
 * Check if the min_tx_rate being passed in will cause oversubscription of the
 * total min_tx_rate based on the current link speed and all other VFs'
 * configured min_tx_rate.
 *
 * Return true if the passed min_tx_rate would cause oversubscription, else
 * return false
 */
static bool
ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int all_vfs_min_tx_rate;
	int link_speed_mbps;

	if (WARN_ON(!vsi))
		return false;

	link_speed_mbps = ice_get_link_speed_mbps(vsi);
	all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);

	/* this VF's previous rate is being overwritten */
	all_vfs_min_tx_rate -= vf->min_tx_rate;

	if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
		dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
			min_tx_rate, vf->vf_id,
			all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
			link_speed_mbps);
		return true;
	}

	return false;
}
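
/* Worked example (hypothetical numbers): on a 25000 Mbps link where all
 * VFs' min_tx_rate sums to 20000 Mbps and this VF currently has 2000 Mbps,
 * requesting 8000 Mbps gives 20000 - 2000 + 8000 = 26000 Mbps, an
 * oversubscription of 1000 Mbps, so the request is rejected.
 */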

/**
 * ice_set_vf_bw - set min/max VF bandwidth
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate in Mbps
 * @max_tx_rate: Maximum Tx rate in Mbps
 */
int
ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
	      int max_tx_rate)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (min_tx_rate && ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
		ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->min_tx_rate = min_tx_rate;
	}

	if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
		ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->max_tx_rate = max_tx_rate;
	}

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes   = stats->rx_bytes;
	vf_stats->tx_bytes   = stats->tx_bytes;
	vf_stats->broadcast  = stats->rx_broadcast;
	vf_stats->multicast  = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
 * @hw: hardware structure used to check the VLAN mode
 * @vlan_proto: VLAN TPID being checked
 *
 * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
 * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
 * Mode (SVM), then only ETH_P_8021Q is supported.
 */
static bool
ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
{
	bool is_supported = false;

	switch (vlan_proto) {
	case ETH_P_8021Q:
		is_supported = true;
		break;
	case ETH_P_8021AD:
		if (ice_is_dvm_ena(hw))
			is_supported = true;
		break;
	}

	return is_supported;
}

/**
 * ice_set_vf_port_vlan - program VF port VLAN ID and/or QoS
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * Program the VF Port VLAN ID and/or QoS
 */
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 local_vlan_proto = ntohs(vlan_proto);
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	if (vlan_id >= VLAN_N_VID || qos > 7) {
		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
			vf_id, vlan_id, qos);
		return -EINVAL;
	}

	if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
		dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
			local_vlan_proto);
		return -EPROTONOSUPPORT;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	if (ice_vf_get_port_vlan_prio(vf) == qos &&
	    ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
	    ice_vf_get_port_vlan_id(vf) == vlan_id) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
			vlan_id, qos, local_vlan_proto);
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
	if (ice_vf_is_port_vlan_ena(vf))
		dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
			 vlan_id, qos, local_vlan_proto, vf_id);
	else
		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
			  ? "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs' malicious driver detect events
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs' MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->vfs.last_printed_mdd_jiffies = jiffies;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
							vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
							vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
				 vf->dev_lan_addr);
		}
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
 * @pdev: pointer to a pci_dev structure
 *
 * Called when recovering from a PF FLR to restore interrupt capability to
 * the VFs.
 */
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	int pos;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vfdev;

		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
				     &vf_id);
		vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
		while (vfdev) {
			if (vfdev->is_virtfn && vfdev->physfn == pdev)
				pci_restore_msi_state(vfdev);
			vfdev = pci_get_device(pdev->vendor, vf_id,
					       vfdev);
		}
	}
}