// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <net/xdp_sock_drv.h>
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM in case there is no space left in the PF
 * queue bitmap
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	unsigned int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}
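
/* Illustrative example (hypothetical values): with q_count == 4 and PF bits
 * 0-2 already taken, bitmap_find_next_zero_area() returns offset 3, bits 3-6
 * get set in pf_map, and vsi_map[vsi_map_offset + 0..3] ends up holding the
 * absolute PF queue IDs 3, 4, 5 and 6.
 */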

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM in case there is no space left in the PF
 * queue bitmap
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	unsigned int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
	for (index = 0; index < i; index++) {
		/* unwind exactly the entries written above; the vsi_map
		 * lookup must include vsi_map_offset to match the set path
		 */
		clear_bit(qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset],
			  qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}
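
/* Illustrative contrast with the contiguous path (hypothetical values): if
 * only PF bits 1, 4 and 7 are free, the scatter walk claims exactly those
 * bits, so vsi_map[vsi_map_offset + 0..2] holds 1, 4 and 7. On a failure
 * mid-walk, err_scatter unwinds only the bits claimed so far.
 */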

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}
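
/* Assuming the usual small ICE_Q_WAIT_MAX_RETRY (5 in contemporary ice
 * headers), the worst-case wait here is on the order of a couple hundred
 * microseconds: a handful of QRX_CTRL polls with a 20-40 us sleep between
 * them.
 */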

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector and set default value for ITR setting associated
 * with this q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
				GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	if (vsi->type == ICE_VSI_VF)
		goto out;
	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;
	ice_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	devm_kfree(dev, q_vector);
	vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
	      GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
	      GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
	      GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
		return;

	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
		  GLINT_CTL_ITR_GRAN_200_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
		  GLINT_CTL_ITR_GRAN_100_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
		  GLINT_CTL_ITR_GRAN_50_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
		  GLINT_CTL_ITR_GRAN_25_M);
	wr32(hw, GLINT_CTL, regval);
}
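
/* GLINT_CTL carries one granularity field per ITR rate (200/100/50/25).
 * Writing ICE_ITR_GRAN_US (2 usecs) into each field means every ITR value
 * the driver later programs is interpreted in 2-usec units; see the
 * ITR_REG_ALIGN() >> ICE_ITR_GRAN_S conversion in ice_cfg_itr() below.
 */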

/**
 * ice_calc_q_handle - calculate the queue handle
 * @vsi: VSI that ring belongs to
 * @ring: ring to get the absolute queue index
 * @tc: traffic class number
 */
static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
{
	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");

	/* The idea here is to subtract the queue offset of the TC that the
	 * ring belongs to from the ring's absolute queue index, which yields
	 * the queue's index within that TC.
	 */
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
}
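
/* Worked example (hypothetical values): if TC 1 starts at qoffset 8 and the
 * ring's absolute q_index is 10, the returned handle is 2, i.e. the third
 * queue of TC 1.
 */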

/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
int ice_setup_rx_ctx(struct ice_ring *ring)
{
	struct device *dev = ice_pf_to_dev(ring->vsi->back);
	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
	u16 num_bufs = ICE_DESC_UNUSED(ring);
	struct ice_vsi *vsi = ring->vsi;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	struct ice_hw *hw;
	u16 pf_q;
	int err;

	hw = &vsi->back->hw;

	/* Rx queue number in the global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;

	if (ring->vsi->type == ICE_VSI_PF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
			/* coverity[check_return] */
			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
					 ring->q_index);

		ring->xsk_pool = ice_xsk_pool(ring);
		if (ring->xsk_pool) {
			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

			ring->rx_buf_len =
				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			/* For AF_XDP ZC, we disallow packets that span
			 * multiple buffers, which lets us skip that handling
			 * in the fast path.
			 */
			chain_len = 1;
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_XSK_BUFF_POOL,
							 NULL);
			if (err)
				return err;
			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);

			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
				/* coverity[check_return] */
				xdp_rxq_info_reg(&ring->xdp_rxq,
						 ring->netdev,
						 ring->q_index);

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}
	/* Receive Queue Base Address.
	 * Indicates the starting address of the descriptor queue defined in
	 * 128 Byte units.
	 */
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
				     BIT_ULL(ICE_RLAN_CTX_DBUF_S));

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = 1;

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
	rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers.
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
			       chain_len * ring->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context, which
	 * allows this driver to select a specific receive descriptor format.
	 * Increase the context priority to pick up the profile ID; the
	 * default is 0x01, and setting it to 0x03 ensures the profile is
	 * programmed even if the previous context is of the same priority.
	 */
	if (vsi->type != ICE_VSI_VF)
		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3);
	else
		ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3);

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* configure Rx buffer alignment */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		ice_clear_ring_build_skb_ena(ring);
	else
		ice_set_ring_build_skb_ena(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);

	if (ring->xsk_pool) {
		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");

			return 0;
		}

		err = ice_alloc_rx_bufs_zc(ring, num_bufs);
		if (err)
			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
				 ring->q_index, pf_q);
		return 0;
	}

	ice_alloc_rx_bufs(ring, num_bufs);

	return 0;
}
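
/* Worked example (hypothetical values): with rx_buf_len == 2048, dbuf is
 * DIV_ROUND_UP(2048, 128) == 16 (128-byte units), and with the default
 * chain_len of ICE_MAX_CHAINED_RX_BUFS the rxmax clamp works out to
 * min(vsi->max_frame, 5 * 2048), matching the "5 x DBUF" rule noted above.
 */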

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
 *
 * Return 0 on success and -ENOMEM in case there is no space left in the PF
 * queue bitmap
 */
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}
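
/* A minimal caller sketch (assumed field values, mirroring how the Tx-side
 * config is built in ice_lib.c; treat the literals as illustrative only):
 *
 *	struct ice_qs_cfg tx_qs_cfg = {
 *		.qs_mutex = &pf->avail_q_mutex,
 *		.pf_map = pf->avail_txqs,
 *		.pf_map_size = pf->max_pf_txqs,
 *		.q_count = vsi->alloc_txq,
 *		.scatter_count = ICE_MAX_SCATTER_TXQS,
 *		.vsi_map = vsi->txq_map,
 *		.vsi_map_offset = 0,
 *		.mapping_mode = ICE_VSI_MAP_CONTIG,
 *	};
 *
 *	if (__ice_vsi_get_qs(&tx_qs_cfg))
 *		// no PF queues left for this VSI
 */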

/**
 * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx ring
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 * @wait: wait or don't wait for configuration to finish in hardware
 *
 * Return 0 on success and negative on error.
 */
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	if (!wait)
		return 0;

	ice_flush(hw);
	return ice_pf_rxq_wait(pf, pf_q, ena);
}
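
/* The QENA_REQ bit is the software request and QENA_STAT is the hardware
 * acknowledgment; a caller that passes wait == false can later confirm the
 * transition with ice_vsi_wait_one_rx_ring() below, which polls QENA_STAT
 * via ice_pf_rxq_wait().
 */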

/**
 * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
 * @vsi: the VSI being configured
 * @ena: true/false to verify Rx ring has been enabled/disabled respectively
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 *
 * This routine will wait for the given Rx queue of the VSI to reach the
 * enabled or disabled state. Returns -ETIMEDOUT in case of failing to reach
 * the requested state after multiple retries; else will return 0 in case of
 * success.
 */
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;

	return ice_pf_rxq_wait(pf, pf_q, ena);
}

/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	u16 v_idx;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
		return -EEXIST;
	}

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	u16 tx_rings_rem, rx_rings_rem;
	int v_id;

	/* start the remaining-ring counters at the VSI's queue counts */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		u8 tx_rings_per_v, rx_rings_per_v;
		u16 q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.ring;
			q_vector->rx.ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}
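
/* Worked example (hypothetical counts): 8 Tx rings spread over 3 vectors.
 * Vector 0 takes DIV_ROUND_UP(8, 3) == 3 rings (0-2), vector 1 takes
 * DIV_ROUND_UP(5, 2) == 3 rings (3-5), and vector 2 takes the remaining
 * 2 rings (6-7), so the load differs by at most one ring per vector.
 */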

/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);
}

/**
 * ice_vsi_cfg_txq - Configure single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @qg_buf: queue group buffer
 */
int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
		struct ice_aqc_add_tx_qgrp *qg_buf)
{
	u8 buf_len = struct_size(qg_buf, txqs, 1);
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u16 pf_q;
	u8 tc;

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred to as the
	 * transmit comm scheduler queue doorbell.
	 */
	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	/* Add unique software queue handle of the Tx queue per
	 * TC into the VSI Tx ring
	 */
	ring->q_handle = ice_calc_q_handle(vsi, ring, tc);

	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
				 1, qg_buf, buf_len, NULL);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
			ice_stat_str(status));
		return -ENODEV;
	}

	/* Add Tx Queue TEID into the VSI Tx ring from the
	 * response. This will complete configuring and
	 * enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}

/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx) {
		struct ice_ring_container *rc = &q_vector->rx;

		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}

	if (q_vector->num_ring_tx) {
		struct ice_ring_container *rc = &q_vector->tx;

		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}
}
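
/* GLINT_ITR takes the interval in the 2-usec granularity programmed by
 * ice_cfg_itr_gran(), hence the >> ICE_ITR_GRAN_S above. For instance
 * (hypothetical setting), an itr_setting of 50 usecs is register-aligned
 * and then written as 25 hardware units.
 */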

/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
	if (ice_is_xdp_ena_vsi(vsi)) {
		u32 xdp_txq = txq + vsi->num_xdp_txq;

		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), val);
	}
	ice_flush(hw);
}

/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
}

/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}

/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @ring: Tx ring to be stopped
 * @txq_meta: Meta data of Tx ring to be stopped
 */
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		     u16 rel_vmvf_num, struct ice_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u32 val;

	/* clear cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);

	/* software is expected to wait for 100 ns */
	ndelay(100);

	/* trigger a software interrupt for the vector
	 * associated to the queue to schedule NAPI handler
	 */
	q_vector = ring->q_vector;
	if (q_vector)
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* if the disable queue command was exercised during an
	 * active reset flow, ICE_ERR_RESET_ONGOING is returned.
	 * This is not an error as the reset operation disables
	 * queues at the hardware level anyway.
	 */
	if (status == ICE_ERR_RESET_ONGOING) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
			ice_stat_str(status));
		return -ENODEV;
	}

	return 0;
}

/**
 * ice_fill_txq_meta - Prepare the Tx queue's meta data
 * @vsi: VSI that ring belongs to
 * @ring: ring that txq_meta will be based on
 * @txq_meta: a helper struct that wraps Tx queue's information
 *
 * Set up a helper struct that will contain all the necessary fields
 * needed for stopping the Tx queue
 */
void
ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	u8 tc;

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	txq_meta->vsi_idx = vsi->idx;
	txq_meta->tc = tc;
}
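
/* A minimal caller sketch (illustrative only, mirroring how the ring-stop
 * helpers in ice_lib.c pair these two functions):
 *
 *	struct ice_txq_meta txq_meta = { };
 *
 *	ice_fill_txq_meta(vsi, ring, &txq_meta);
 *	err = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num, ring,
 *				   &txq_meta);
 */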