// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx, pool;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
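	/*
	 * Note: __ALIGN_MASK(1, ~vmdq->mask) evaluates to the number of
	 * hardware queues per VMDq pool (e.g. 4 for the 4-queues-per-pool
	 * mask), so this seeds reg_idx at the first queue of the pool at
	 * vmdq->offset.  The same macro is used below to round reg_idx up
	 * to the start of the next pool.
	 */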
	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices, move to the next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices, move to the next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			adapter->rx_ring[i]->netdev = adapter->netdev;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices, move to the next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assign them 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices, move to the next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assign them 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->reg_idx = i;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

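/* Return one XDP Tx ring per possible CPU when an XDP program is
 * attached, otherwise zero.
 */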
static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	return adapter->xdp_prog ? nr_cpu_ids : 0;
}

#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues - Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

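/**
 * ixgbe_set_dcb_queues - Allocate queues for a DCB-enabled device
 * @adapter: board private structure to initialize
 *
 * Distribute the available Tx/Rx queues evenly across the enabled traffic
 * classes, capping the per-TC RSS count by the limits of the current DCB
 * mode, and record the per-TC queue ranges with the stack.
 **/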
static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS-like
	 * behavior.  To account for this we need to add the FCoE indices
	 * to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	/* To support macvlan offload we have to use num_tc to
	 * restrict the queues that can be used by the device.
	 * By doing this we can avoid reporting a false number of
	 * queues.
	 */
	if (vmdq_i > 1)
		netdev_set_num_tc(adapter->netdev, 1);

	/* populate TC0 for use by pool 0 */
	netdev_set_tc_queue(adapter->netdev, 0,
			    adapter->num_rx_queues_per_pool, 0);

	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic;
	 * however, it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw->mac.max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit.
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors.
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is the minimum
	 * of max_q_vectors and the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}

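/**
 * ixgbe_add_ring - add a ring to the head of a ring container
 * @ring: ring to add
 * @head: ring container this ring belongs to
 *
 * Pushes @ring onto the front of the container's singly linked list,
 * bumps the ring count, and initializes next_update, which the adaptive
 * ITR logic uses to rate-limit its updates.
 **/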
static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
	head->next_update = jiffies + 1;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	int node = dev_to_node(&adapter->pdev->dev);
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int cpu = -1;
	int ring_count;
	u8 tcs = adapter->hw_tcs;

	ring_count = txr_count + rxr_count + xdp_count;

	/* customize CPU for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

		if (rss_i > 1 && adapter->atr_sample_rate) {
			cpu = cpumask_local_spread(v_idx, node);
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
				GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->tx_ring[txr_idx], ring);

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);

		/* assign ring to adapter */
		WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;

			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition, if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
		else
			WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
	}

	ixgbe_for_each_ring(ring, q_vector->rx)
		WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);

	adapter->q_vector[v_idx] = NULL;
	__netif_napi_del(&q_vector->napi);

	/*
	 * After a call to __netif_napi_del() the napi may still be in use and
	 * ixgbe_get_stats64() might access the rings on this vector, so we
	 * must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err, i;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (adapter->rx_ring[i])
			adapter->rx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i])
			adapter->tx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		if (adapter->xdp_ring[i])
			adapter->xdp_ring[i]->ring_idx = i;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition, if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

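/**
 * ixgbe_reset_interrupt_capability - release MSI-X/MSI resources
 * @adapter: board private structure
 *
 * Disables whichever of MSI-X or MSI is currently enabled, clears the
 * corresponding flag, and frees the MSI-X entry table if one was allocated.
 **/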
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (adapter->hw_tcs > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->hw_tcs = 0;
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

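/**
 * ixgbe_tx_ctxtdesc - write an advanced Tx context descriptor
 * @tx_ring: ring to place the descriptor on
 * @vlan_macip_lens: VLAN tag and MAC/IP header length fields
 * @fceof_saidx: FCoE EOF / IPsec SA index field
 * @type_tucmd: descriptor type and TUCMD command bits
 * @mss_l4len_idx: MSS, L4 length and descriptor index fields
 *
 * Fills the descriptor at next_to_use, advances next_to_use with
 * wraparound, and marks the descriptor as an advanced context
 * descriptor (DEXT + CTXT).
 **/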
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}