/kernel/linux/linux-5.10/drivers/net/ethernet/intel/ice/
ice_xsk.c:
   18:  * @q_idx: ring index in array
   20: static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
   22:     memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
   23:            sizeof(vsi->rx_rings[q_idx]->rx_stats));
   24:     memset(&vsi->tx_rings[q_idx]->stats, 0,
   25:            sizeof(vsi->tx_rings[q_idx]->stats));
   27:     memset(&vsi->xdp_rings[q_idx]->stats, 0,
   28:            sizeof(vsi->xdp_rings[q_idx]->stats));
   34:  * @q_idx: ring index in array
   36: static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
  149: ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
  206: ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
  [all ...]
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/ice/ |
ice_xsk.c:
   23:  * @q_idx: ring index in array
   25: static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
   38:     memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
   39:            sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
   40:     memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
   41:            sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
   43:     memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
   44:            sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
   50:  * @q_idx: ring index in array
   52: static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
  160: ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
  219: ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
  [all ...]
ice_lib.h:
   57: int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
   59: int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
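Both kernel versions follow the same pattern in ice_qp_reset_stats(): every stats block owned by queue pair q_idx is zeroed with memset(), only the location of the stats (on the ring in 5.10, in a separate per-VSI stats array in 6.6) differs. A minimal user-space sketch of that pattern, with hypothetical vsi/ring types standing in for the kernel structures:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the kernel's ring/VSI structures. */
struct ring_stats { unsigned long packets, bytes; };
struct ring      { struct ring_stats stats; };
struct vsi       { struct ring rx_rings[4], tx_rings[4], xdp_rings[4]; };

/* Mirrors the shape of ice_qp_reset_stats(): zero every stats block
 * owned by queue pair q_idx. */
static void qp_reset_stats(struct vsi *vsi, unsigned q_idx)
{
	memset(&vsi->rx_rings[q_idx].stats, 0, sizeof(vsi->rx_rings[q_idx].stats));
	memset(&vsi->tx_rings[q_idx].stats, 0, sizeof(vsi->tx_rings[q_idx].stats));
	memset(&vsi->xdp_rings[q_idx].stats, 0, sizeof(vsi->xdp_rings[q_idx].stats));
}

int main(void)
{
	struct vsi vsi = { .rx_rings[1].stats = { 10, 1500 } };

	qp_reset_stats(&vsi, 1);
	printf("rx[1] packets after reset: %lu\n", vsi.rx_rings[1].stats.packets);
	return 0;
}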
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/ |
vnic_main.c:
  165: hfi1_vnic_update_tx_counters(struct hfi1_vnic_vport_info *vinfo,
           u8 q_idx, struct sk_buff *skb, int err)
  169:     struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
  194: hfi1_vnic_update_rx_counters(struct hfi1_vnic_vport_info *vinfo,
           u8 q_idx, struct sk_buff *skb, int err)
  198:     struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
  246: hfi1_vnic_maybe_stop_tx(struct hfi1_vnic_vport_info *vinfo, u8 q_idx)
  249:     netif_stop_subqueue(vinfo->netdev, q_idx);
  250:     if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx))
  253:     netif_start_subqueue(vinfo->netdev, q_idx);
  260:     u8 pad_len, q_idx = skb->queue_mapping;  (in hfi1_netdev_start_xmit)
  267:     v_dbg("xmit: queue %d skb len %d\n", q_idx, sk ...
  381:     u8 q_idx;  (in hfi1_vnic_bypass_rcv)
  [all ...]
vnic_sdma.c:
  168: int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
  172:     struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
  266:     if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))
  267:         netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);  (in hfi1_vnic_sdma_wakeup)
  270: hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo, u8 q_idx)
  273:     struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
  291:     vnic_sdma->q_idx = i;  (in hfi1_vnic_sdma_init)
vnic.h:
   82:  * @q_idx - vnic Tx queue index
   91:     u8 q_idx;  (struct member)
  155:         u8 q_idx);
  164: int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
ipoib_tx.c:
   69:     netif_stop_subqueue(txq->priv->netdev, txq->q_idx);  (in hfi1_ipoib_stop_txq)
   76:     netif_wake_subqueue(txq->priv->netdev, txq->q_idx);  (in hfi1_ipoib_wake_txq)
  137:     le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,  (in hfi1_ipoib_free_tx)
  220:     dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx);  (in hfi1_ipoib_add_tx)
  748:     txq->q_idx = i;  (in hfi1_ipoib_txreq_init)
  816:     txq->q_idx,  (in hfi1_ipoib_drain_tx_list)
/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/ |
vnic_main.c:
  123: hfi1_vnic_update_tx_counters(struct hfi1_vnic_vport_info *vinfo,
           u8 q_idx, struct sk_buff *skb, int err)
  127:     struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
  152: hfi1_vnic_update_rx_counters(struct hfi1_vnic_vport_info *vinfo,
           u8 q_idx, struct sk_buff *skb, int err)
  156:     struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
  204: hfi1_vnic_maybe_stop_tx(struct hfi1_vnic_vport_info *vinfo, u8 q_idx)
  207:     netif_stop_subqueue(vinfo->netdev, q_idx);
  208:     if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx))
  211:     netif_start_subqueue(vinfo->netdev, q_idx);
  218:     u8 pad_len, q_idx = skb->queue_mapping;  (in hfi1_netdev_start_xmit)
  225:     v_dbg("xmit: queue %d skb len %d\n", q_idx, sk ...
  339:     u8 q_idx;  (in hfi1_vnic_bypass_rcv)
  [all ...]
vnic_sdma.c:
  126: int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
  130:     struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
  224:     if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))
  225:         netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);  (in hfi1_vnic_sdma_wakeup)
  228: hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo, u8 q_idx)
  231:     struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
  249:     vnic_sdma->q_idx = i;  (in hfi1_vnic_sdma_init)
vnic.h:
   40:  * @q_idx - vnic Tx queue index
   49:     u8 q_idx;  (struct member)
  113:         u8 q_idx);
  122: int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
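The recurring pattern in these hfi1 entries is the stop-recheck-restart dance in hfi1_vnic_maybe_stop_tx(): the subqueue is stopped first, descriptor availability is rechecked, and the queue is restarted if space appeared in the window. Stopping before the check closes the race with a completion that frees space between the two steps. A mock user-space sketch of that control flow; the stub functions are hypothetical stand-ins for the netdev subqueue API and the driver's availability check:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for netif_{stop,start}_subqueue() and
 * hfi1_vnic_sdma_write_avail(). */
static bool stopped[8];
static int  free_descs[8] = { 0, 3 };   /* q0 full, q1 has room */

static void stop_subqueue(unsigned q_idx)  { stopped[q_idx] = true; }
static void start_subqueue(unsigned q_idx) { stopped[q_idx] = false; }
static bool write_avail(unsigned q_idx)    { return free_descs[q_idx] > 0; }

/* Mirrors hfi1_vnic_maybe_stop_tx(): stop first, then recheck, so a
 * completion landing between the check and the stop cannot leave the
 * queue stopped forever. */
static void maybe_stop_tx(unsigned q_idx)
{
	stop_subqueue(q_idx);
	if (!write_avail(q_idx))
		return;
	start_subqueue(q_idx);
}

int main(void)
{
	maybe_stop_tx(0);
	maybe_stop_tx(1);
	printf("q0 stopped: %d, q1 stopped: %d\n", stopped[0], stopped[1]);
	return 0;
}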
/kernel/linux/linux-6.6/drivers/accel/habanalabs/common/ |
hw_queue.c:
  409:     u32 q_idx;  (in init_signal_cs)
  412:     q_idx = job->hw_queue_id;
  413:     prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
  420:     "generate signal CB, sob_id: %d, sob val: %u, q_idx: %d, seq: %llu\n",
  421:     cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx,
  430:     rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1,
  469:     u32 q_idx;  (in init_wait_cs)
  471:     q_idx = job->hw_queue_id;
  472:     prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
  486:     cs->encaps_sig_hdl->q_idx,
  902: sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
  961: sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
  [all ...]
command_submission.c:
  137:     "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",  (in hl_sob_reset_error)
  138:     hw_sob->q_idx, hw_sob->sob_id);
 1778:  * @q_idx: stream queue index
 1785: int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
 1793:     prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
 1815:     dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
 1816:             q_idx);
 1856:     dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
 1857:             prop->curr_sob_offset, q_idx);
 1923: cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev, struct hl_ctx *ctx,
           struct hl_cs *cs, enum hl_queue_type q_type, u32 q_idx,
           u32 encaps_signal_offset)
 1988: cs_ioctl_reserve_signals(struct hl_fpriv *hpriv, u32 q_idx, u32 count,
           u32 *handle_id, u32 *sob_addr, u32 *signals_count)
 2117:     u32 q_idx, sob_addr;  (in cs_ioctl_unreserve_signals)
 2197:     u32 q_idx, collective_engine_id = 0;  (in cs_ioctl_signal_wait)
  [all ...]
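The habanalabs matches center on hl_cs_signal_sob_wraparound_handler(): each stream queue owns a small pool of sync objects (SOBs) whose hardware counters saturate, and when the next signal reservation would overflow the current SOB, the driver switches the stream to the next SOB in the pool. A deliberately simplified, self-contained sketch of that wraparound policy; the pool size, counter limit, and names are hypothetical, and the real handler additionally refuses to switch while the next SOB still has outstanding references:

#include <stdio.h>

#define SOB_POOL_SIZE 2     /* hypothetical: SOBs reserved per stream */
#define SOB_MAX_VAL   15    /* hypothetical hardware counter limit    */

struct sob { unsigned val; };

struct stream {
	struct sob pool[SOB_POOL_SIZE];
	unsigned   curr;        /* index of the SOB currently in use */
};

/* Reserve `count` signals on the stream's current SOB; switch to the
 * next SOB in the pool when the counter would overflow. */
static unsigned reserve_signals(struct stream *s, unsigned count)
{
	struct sob *sob = &s->pool[s->curr];

	if (sob->val + count > SOB_MAX_VAL) {
		s->curr = (s->curr + 1) % SOB_POOL_SIZE;
		sob = &s->pool[s->curr];
		sob->val = 0;
		printf("switched to SOB %u\n", s->curr);
	}
	sob->val += count;
	return s->curr;
}

int main(void)
{
	struct stream s = { 0 };

	for (int i = 0; i < 6; i++)
		reserve_signals(&s, 4);    /* forces one wraparound switch */
	printf("current SOB: %u, val: %u\n", s.curr, s.pool[s.curr].val);
	return 0;
}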
/kernel/linux/linux-6.6/drivers/net/ethernet/microsoft/mana/ |
mana_bpf.c:
   35: mana_xdp_xmit_fm(struct net_device *ndev, struct xdp_frame *frame,
           u16 q_idx)
   44:     skb_set_queue_mapping(skb, q_idx);
   57:     u16 q_idx;  (in mana_xdp_xmit)
   62:     q_idx = smp_processor_id() % ndev->real_num_tx_queues;
   65:     if (mana_xdp_xmit_fm(ndev, frames[i], q_idx))
   71:     tx_stats = &apc->tx_qp[q_idx].txq.stats;
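Both mana here and the hyperv netvsc XDP path below pick the TX queue the same way: the current CPU id modulo the number of real TX queues. This keeps each CPU on a stable queue and spreads CPUs across queues without locking. A runnable sketch of that mapping, with a placeholder queue count and CPU source:

#include <stdio.h>

/* Map a CPU id onto a TX queue index the way mana_xdp_xmit() and
 * netvsc_ndoxdp_xmit() do: cpu % real_num_tx_queues. */
static unsigned short pick_tx_queue(unsigned cpu, unsigned real_num_tx_queues)
{
	return cpu % real_num_tx_queues;
}

int main(void)
{
	const unsigned num_txq = 4;   /* placeholder queue count */

	for (unsigned cpu = 0; cpu < 8; cpu++)
		printf("cpu %u -> q_idx %u\n", cpu, pick_tx_queue(cpu, num_txq));
	return 0;
}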
/kernel/linux/linux-6.6/drivers/net/hyperv/ |
netvsc_bpf.c:
  229: netvsc_ndoxdp_xmit_fm(struct net_device *ndev,
           struct xdp_frame *frame, u16 q_idx)
  240:     skb_record_rx_queue(skb, q_idx);
  256:     u16 q_idx;  (in netvsc_ndoxdp_xmit)
  276:     q_idx = smp_processor_id() % ndev->real_num_tx_queues;
  279:     if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx))
  285:     tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats;
netvsc.c:
  318: int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
  320:     struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
  757:     u16 q_idx = 0;  (in netvsc_send_tx_complete)
  778:     q_idx = packet->q_idx;
  780:     tx_stats = &net_device->chan_table[q_idx].tx_stats;
  792:     atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
  798:     struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 1058:     &net_device->chan_table[packet->q_idx];  (in netvsc_send_pkt)
 1062:     struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
 1357: enq_receive_complete(struct net_device *ndev, struct netvsc_device *nvdev,
           u16 q_idx, u64 tid, u32 status)
 1398:     u16 q_idx = channel->offermsg.offer.sub_channel_index;  (in netvsc_receive)
  [all ...]
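netvsc_send_tx_complete() shows the completion side of per-queue indexing: the completed packet carries its q_idx, which selects both the per-channel stats block to update and the in-flight counter to decrement. A minimal sketch of that bookkeeping; the types and channel count are hypothetical simplifications of the netvsc structures:

#include <stdio.h>

#define NUM_CHANNELS 4   /* hypothetical channel count */

struct tx_stats { unsigned long packets, bytes; };

struct channel {
	struct tx_stats tx_stats;
	int             queue_sends;   /* in-flight sends on this queue */
};

struct packet { unsigned short q_idx; unsigned len; };

static struct channel chan_table[NUM_CHANNELS];

/* Mirrors the shape of netvsc_send_tx_complete(): the packet's q_idx
 * picks both the stats block and the in-flight counter. */
static void send_tx_complete(const struct packet *pkt)
{
	struct channel *chan = &chan_table[pkt->q_idx];

	chan->tx_stats.packets++;
	chan->tx_stats.bytes += pkt->len;
	chan->queue_sends--;
}

int main(void)
{
	struct packet pkt = { .q_idx = 2, .len = 1500 };

	chan_table[2].queue_sends = 1;    /* pretend one send is in flight */
	send_tx_complete(&pkt);
	printf("q2: %lu pkts, %d in flight\n",
	       chan_table[2].tx_stats.packets, chan_table[2].queue_sends);
	return 0;
}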
/kernel/linux/linux-5.10/drivers/misc/habanalabs/common/ |
hw_queue.c:
  407:     u32 q_idx;  (in init_signal_wait_cs)
  412:     q_idx = job->hw_queue_id;
  413:     hw_queue = &hdev->kernel_queues[q_idx];
  422:     "generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
  423:     cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
  444:     dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
  445:             hw_queue->curr_sob_offset, q_idx);
  459:     "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n",
  461:     hw_queue->base_mon_id, q_idx);
  467:             q_idx);
  715: sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
  738: sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
  [all ...]
/kernel/linux/linux-5.10/drivers/net/hyperv/ |
netvsc.c:
  283: int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
  285:     struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
  703:     u16 q_idx = 0;  (in netvsc_send_tx_complete)
  715:     q_idx = packet->q_idx;
  717:     tx_stats = &net_device->chan_table[q_idx].tx_stats;
  728:     atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
  734:     struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
  875:     &net_device->chan_table[packet->q_idx];  (in netvsc_send_pkt)
  879:     struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
 1142: enq_receive_complete(struct net_device *ndev, struct netvsc_device *nvdev,
           u16 q_idx, u64 tid, u32 status)
 1183:     u16 q_idx = channel->offermsg.offer.sub_channel_index;  (in netvsc_receive)
  [all ...]
/kernel/linux/linux-5.10/drivers/net/ethernet/cavium/thunder/ |
nicvf_queues.h:
  354: void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
  355: void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
  356: void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
  357: int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
/kernel/linux/linux-6.6/drivers/net/ethernet/cavium/thunder/ |
nicvf_queues.h:
  354: void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
  355: void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
  356: void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
  357: int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
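These nicvf prototypes (identical in 5.10 and 6.6) expose per-queue interrupt control keyed by an (int_type, q_idx) pair. A hedged sketch of how such an API is typically driven, with stub implementations and a hypothetical type enum in place of the real register writes:

#include <stdbool.h>
#include <stdio.h>

enum int_type { INT_CQ, INT_SQ, INT_RBDR };   /* hypothetical type ids */
#define NUM_QUEUES 4

static bool intr_enabled[3][NUM_QUEUES];

/* Stubs with the same shape as nicvf_enable_intr()/nicvf_disable_intr();
 * the real functions poke the NIC's interrupt-enable registers. */
static void enable_intr(enum int_type t, int q_idx)  { intr_enabled[t][q_idx] = true; }
static void disable_intr(enum int_type t, int q_idx) { intr_enabled[t][q_idx] = false; }

int main(void)
{
	/* Typical bring-up: enable completion-queue interrupts per queue. */
	for (int q = 0; q < NUM_QUEUES; q++)
		enable_intr(INT_CQ, q);

	disable_intr(INT_CQ, 2);   /* e.g. a queue entering polled mode */
	for (int q = 0; q < NUM_QUEUES; q++)
		printf("CQ%d intr: %d\n", q, intr_enabled[INT_CQ][q]);
	return 0;
}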
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/fm10k/ |
fm10k_pf.c:
  502:     u16 vsi, queue, pc, q_idx;  (in fm10k_configure_dglort_map_pf)
  519:     q_idx = dglort->queue_b;
  523:     for (queue = 0; queue < queue_count; queue++, q_idx++) {
  524:         if (q_idx >= FM10K_MAX_QUEUES)
  527:         fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort);
  528:         fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort);
  538:     q_idx = pc + dglort->queue_b;
  540:     if (q_idx >= FM10K_MAX_QUEUES)
  543:     txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx));
  546:     fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqct ...
  [all ...]
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/fm10k/ |
fm10k_pf.c:
  502:     u16 vsi, queue, pc, q_idx;  (in fm10k_configure_dglort_map_pf)
  519:     q_idx = dglort->queue_b;
  523:     for (queue = 0; queue < queue_count; queue++, q_idx++) {
  524:         if (q_idx >= FM10K_MAX_QUEUES)
  527:         fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort);
  528:         fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort);
  538:     q_idx = pc + dglort->queue_b;
  540:     if (q_idx >= FM10K_MAX_QUEUES)
  543:     txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx));
  546:     fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqct ...
  [all ...]
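The fm10k loop (unchanged between 5.10 and 6.6) walks a contiguous queue range starting at queue_b and writes a GLORT value into each queue's TX/RX mapping registers, bailing out as soon as q_idx would run past FM10K_MAX_QUEUES. A sketch of that bounded walk over a mock register file, with a stand-in queue limit:

#include <stdio.h>

#define MAX_QUEUES 8   /* stands in for FM10K_MAX_QUEUES */

static unsigned tx_sglort[MAX_QUEUES], rx_sglort[MAX_QUEUES];

/* Mirrors the first loop of fm10k_configure_dglort_map_pf(): map
 * queue_count queues starting at queue_b to a glort, clamped to the
 * device's queue limit. */
static void map_queue_range(unsigned queue_b, unsigned queue_count, unsigned glort)
{
	unsigned q_idx = queue_b;

	for (unsigned queue = 0; queue < queue_count; queue++, q_idx++) {
		if (q_idx >= MAX_QUEUES)
			break;
		tx_sglort[q_idx] = glort;
		rx_sglort[q_idx] = glort;
	}
}

int main(void)
{
	map_queue_range(6, 4, 0x1234);   /* asks for 4 queues, only 2 fit */
	for (unsigned q = 0; q < MAX_QUEUES; q++)
		printf("q%u: tx=0x%x rx=0x%x\n", q, tx_sglort[q], rx_sglort[q]);
	return 0;
}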
/kernel/linux/linux-5.10/net/sched/ |
sch_api.c:
 1742:     int ret = 0, q_idx = *q_idx_p;  (in tc_dump_qdisc_root)
 1750:     if (q_idx < s_q_idx) {
 1751:         q_idx++;
 1758:     q_idx++;
 1771:     if (q_idx < s_q_idx) {
 1772:         q_idx++;
 1780:     q_idx++;
 1784:     *q_idx_p = q_idx;
 1794:     int idx, q_idx;  (in tc_dump_qdisc)
 1802:     s_q_idx = q_idx ...
  [all ...]
/kernel/linux/linux-6.6/net/sched/ |
sch_api.c:
 1773:     int ret = 0, q_idx = *q_idx_p;  (in tc_dump_qdisc_root)
 1781:     if (q_idx < s_q_idx) {
 1782:         q_idx++;
 1789:     q_idx++;
 1802:     if (q_idx < s_q_idx) {
 1803:         q_idx++;
 1811:     q_idx++;
 1815:     *q_idx_p = q_idx;
 1825:     int idx, q_idx;  (in tc_dump_qdisc)
 1833:     s_q_idx = q_idx ...
  [all ...]
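The sch_api matches are the classic netlink dump-resume pattern: q_idx counts qdiscs as they are emitted, entries below the saved s_q_idx from the previous dump call are skipped, and the final q_idx is stored back through q_idx_p so the next call resumes where this one stopped. A runnable sketch of that pattern over a plain array, with a hypothetical per-call budget standing in for the netlink skb filling up:

#include <stdio.h>

#define BATCH 3   /* hypothetical per-call message budget */

/* Emit up to BATCH items, skipping everything already dumped, mirroring
 * the skip/resume logic of tc_dump_qdisc_root(). */
static int dump_batch(const char *items[], int n_items, int *q_idx_p)
{
	int s_q_idx = *q_idx_p;   /* resume point saved by the last call */
	int q_idx = 0, emitted = 0;

	for (int i = 0; i < n_items; i++) {
		if (q_idx < s_q_idx) {    /* already dumped in an earlier call */
			q_idx++;
			continue;
		}
		if (emitted == BATCH)     /* "skb full": stop, save progress */
			break;
		printf("dump: %s\n", items[i]);
		emitted++;
		q_idx++;
	}
	*q_idx_p = q_idx;
	return emitted;
}

int main(void)
{
	const char *qdiscs[] = { "pfifo", "fq_codel", "htb", "tbf", "noqueue" };
	int q_idx = 0;

	while (dump_batch(qdiscs, 5, &q_idx) > 0)
		printf("-- next dump call resumes at %d --\n", q_idx);
	return 0;
}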
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/iavf/ |
iavf_main.c:
  904:     int q_idx;  (in iavf_napi_enable_all)
  908:     for (q_idx = 0; q_idx < q_vectors; q_idx++) {
  911:         q_vector = &adapter->q_vectors[q_idx];
  923:     int q_idx;  (in iavf_napi_disable_all)
  927:     for (q_idx = 0; q_idx < q_vectors; q_idx++) {
  928:         q_vector = &adapter->q_vectors[q_idx];
 1346:     int q_idx = 0, num_q_vectors;  (in iavf_alloc_q_vectors)
 1379:     int q_idx, num_q_vectors;  (in iavf_free_q_vectors)
  [all ...]
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/iavf/ |
iavf_main.c:
 1203:     int q_idx;  (in iavf_napi_enable_all)
 1207:     for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 1210:         q_vector = &adapter->q_vectors[q_idx];
 1222:     int q_idx;  (in iavf_napi_disable_all)
 1226:     for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 1227:         q_vector = &adapter->q_vectors[q_idx];
 1819:     int q_idx = 0, num_q_vectors;  (in iavf_alloc_q_vectors)
 1852:     int q_idx, num_q_vectors;  (in iavf_free_q_vectors)
  [all ...]
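The iavf matches all share one shape: walk the adapter's q_vector array by q_idx and apply the same operation (napi_enable/napi_disable, alloc, free) to each vector. A small sketch of that enable/disable walk, with stubbed NAPI calls and a hypothetical vector count:

#include <stdbool.h>
#include <stdio.h>

#define NUM_Q_VECTORS 4   /* hypothetical vector count */

struct q_vector { bool napi_on; };

struct adapter { struct q_vector q_vectors[NUM_Q_VECTORS]; };

/* Stubs standing in for napi_enable()/napi_disable(). */
static void napi_enable_stub(struct q_vector *v)  { v->napi_on = true; }
static void napi_disable_stub(struct q_vector *v) { v->napi_on = false; }

/* Mirrors iavf_napi_enable_all()/iavf_napi_disable_all(): index every
 * vector by q_idx and flip its NAPI state. */
static void napi_set_all(struct adapter *adapter, bool enable)
{
	for (int q_idx = 0; q_idx < NUM_Q_VECTORS; q_idx++) {
		struct q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (enable)
			napi_enable_stub(q_vector);
		else
			napi_disable_stub(q_vector);
	}
}

int main(void)
{
	struct adapter adapter = { 0 };

	napi_set_all(&adapter, true);
	printf("vector 0 napi: %d\n", adapter.q_vectors[0].napi_on);
	napi_set_all(&adapter, false);
	printf("vector 0 napi: %d\n", adapter.q_vectors[0].napi_on);
	return 0;
}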