
Searched refs:xdp_rings (Results 1 - 25 of 25) sorted by relevance

/kernel/linux/linux-5.10/drivers/net/ethernet/intel/ice/
ice_xsk.c
27 memset(&vsi->xdp_rings[q_idx]->stats, 0, in ice_qp_reset_stats()
28 sizeof(vsi->xdp_rings[q_idx]->stats)); in ice_qp_reset_stats()
41 ice_clean_tx_ring(vsi->xdp_rings[q_idx]); in ice_qp_clean_rings()
177 struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx]; in ice_qp_dis()
233 struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx]; in ice_qp_ena()
404 napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi); in ice_xsk_pool_setup()
556 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index]; in ice_run_xdp_zc()
844 if (!vsi->xdp_rings[queue_id]->xsk_pool) in ice_xsk_wakeup()
847 ring = vsi->xdp_rings[queue_id]; in ice_xsk_wakeup()
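Note on the ice_xsk.c hits above: on the AF_XDP paths the queue index selects one entry of vsi->xdp_rings, and ice_xsk_wakeup() (lines 844-847) bails out unless an xsk_pool is attached to that ring. A minimal sketch of that guard, using simplified stand-in types instead of the real kernel structures; xdp_ring_for_wakeup() is a hypothetical helper name, not driver code:

#include <stddef.h>

/* Simplified stand-ins; field names follow the hits above. */
struct xsk_buff_pool;

struct ice_ring {
	struct xsk_buff_pool *xsk_pool;	/* set only while an AF_XDP socket is bound */
};

struct ice_vsi {
	struct ice_ring **xdp_rings;	/* one XDP Tx ring per queue */
};

/* Hypothetical helper mirroring the ice_xsk_wakeup() guard at lines 844-847. */
static struct ice_ring *xdp_ring_for_wakeup(struct ice_vsi *vsi, unsigned int queue_id)
{
	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return NULL;		/* no zero-copy pool on this queue, nothing to kick */
	return vsi->xdp_rings[queue_id];
}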
ice_ethtool.c
2734 struct ice_ring *xdp_rings = NULL; in ice_set_ringparam() local
2789 vsi->xdp_rings[i]->count = new_tx_cnt; in ice_set_ringparam()
2829 vsi->xdp_rings[0]->count, new_tx_cnt); in ice_set_ringparam()
2831 xdp_rings = kcalloc(vsi->num_xdp_txq, sizeof(*xdp_rings), GFP_KERNEL); in ice_set_ringparam()
2832 if (!xdp_rings) { in ice_set_ringparam()
2839 xdp_rings[i] = *vsi->xdp_rings[i]; in ice_set_ringparam()
2840 xdp_rings[i].count = new_tx_cnt; in ice_set_ringparam()
2841 xdp_rings[ in ice_set_ringparam()
[all...]
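Note on the ice_ethtool.c hits above: ice_set_ringparam() resizes the XDP Tx rings by cloning each ring into a temporary array allocated with kcalloc() and stamping the new descriptor count (lines 2831-2841) before swapping them in. A sketch of that clone step under simplified stand-in types; clone_xdp_rings() is a hypothetical name and user-space calloc() stands in for kcalloc():

#include <stdlib.h>

/* Simplified stand-in for struct ice_ring; only the field the resize touches. */
struct ice_ring {
	unsigned short count;		/* number of Tx descriptors */
};

struct ice_vsi {
	struct ice_ring **xdp_rings;
	int num_xdp_txq;
};

/*
 * Sketch of the clone-and-resize step: build scratch copies of every XDP ring
 * with the new descriptor count. Swapping the copies back into the live VSI
 * and freeing the old rings is omitted here.
 */
static struct ice_ring *clone_xdp_rings(struct ice_vsi *vsi, unsigned short new_tx_cnt)
{
	struct ice_ring *xdp_rings;
	int i;

	xdp_rings = calloc(vsi->num_xdp_txq, sizeof(*xdp_rings));	/* kcalloc in the driver */
	if (!xdp_rings)
		return NULL;

	for (i = 0; i < vsi->num_xdp_txq; i++) {
		xdp_rings[i] = *vsi->xdp_rings[i];
		xdp_rings[i].count = new_tx_cnt;
	}
	return xdp_rings;
}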
ice_main.c
2292 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); in ice_xdp_alloc_setup_rings()
2303 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) in ice_xdp_alloc_setup_rings()
2304 ice_free_tx_ring(vsi->xdp_rings[i]); in ice_xdp_alloc_setup_rings()
2353 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, in ice_prepare_xdp_rings()
2354 sizeof(*vsi->xdp_rings), GFP_KERNEL); in ice_prepare_xdp_rings()
2355 if (!vsi->xdp_rings) in ice_prepare_xdp_rings()
2375 struct ice_ring *xdp_ring = vsi->xdp_rings[q_id]; in ice_prepare_xdp_rings()
2420 if (vsi->xdp_rings[i]) { in ice_prepare_xdp_rings()
2421 kfree_rcu(vsi->xdp_rings[ in ice_prepare_xdp_rings()
[all...]
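Note on the ice_main.c hits above: ice_prepare_xdp_rings() sizes the vsi->xdp_rings pointer array with devm_kcalloc() (lines 2353-2355), ice_xdp_alloc_setup_rings() publishes each ring with WRITE_ONCE() (line 2292), and teardown uses kfree_rcu() (line 2421). A sketch of the allocate-then-publish sequence with simplified stand-in types; prepare_xdp_rings() is hypothetical, calloc() stands in for devm_kcalloc(), and a plain store stands in for WRITE_ONCE():

#include <stdlib.h>

struct ice_ring { int q_index; };	/* simplified stand-in */

struct ice_vsi {
	struct ice_ring **xdp_rings;	/* XDP ring array, published slot by slot */
	int num_xdp_txq;
};

/*
 * Two steps, as in the hits: first size the pointer array for num_xdp_txq
 * entries, then allocate each ring and store it into its slot. The driver uses
 * WRITE_ONCE() on the store and kfree_rcu() on teardown so lockless readers
 * see either NULL or a fully built ring.
 */
static int prepare_xdp_rings(struct ice_vsi *vsi)
{
	int i;

	vsi->xdp_rings = calloc(vsi->num_xdp_txq, sizeof(*vsi->xdp_rings));
	if (!vsi->xdp_rings)
		return -1;

	for (i = 0; i < vsi->num_xdp_txq; i++) {
		struct ice_ring *xdp_ring = calloc(1, sizeof(*xdp_ring));

		if (!xdp_ring)
			return -1;	/* the real code unwinds earlier allocations */
		xdp_ring->q_index = i;
		vsi->xdp_rings[i] = xdp_ring;	/* WRITE_ONCE() in the driver */
	}
	return 0;
}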
ice_txrx_lib.c
286 rx_ring->vsi->xdp_rings[rx_ring->q_index]; in ice_finalize_xdp_rx()
ice.h
326 struct ice_ring **xdp_rings; /* XDP ring array */ member
ice_lib.c
1766 ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq); in ice_vsi_cfg_xdp_txqs()
1771 vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]); in ice_vsi_cfg_xdp_txqs()
2012 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); in ice_vsi_stop_xdp_tx_rings()
ice_txrx.c
549 xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()]; in ice_run_xdp()
602 xdp_ring = vsi->xdp_rings[queue_index]; in ice_xdp_xmit()
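Note on the ice_txrx.c hits above: the driver picks an XDP Tx ring two ways, by the running CPU's id on the XDP_TX path in ice_run_xdp() (line 549) and by the caller-supplied queue index in ice_xdp_xmit() (line 602). A sketch contrasting the two lookups with simplified stand-in types; both helper names are hypothetical and a plain parameter stands in for smp_processor_id():

/* Simplified stand-ins; just enough for the two lookups below. */
struct ice_ring { int q_index; };

struct ice_vsi {
	struct ice_ring **xdp_rings;
};

/* XDP_TX path (line 549): one XDP ring per CPU, picked by the running CPU's id. */
static struct ice_ring *xdp_tx_ring(struct ice_vsi *vsi, int cpu_id)
{
	return vsi->xdp_rings[cpu_id];		/* smp_processor_id() in the driver */
}

/* ndo_xdp_xmit path (line 602): the redirecting caller supplies the queue index. */
static struct ice_ring *xdp_xmit_ring(struct ice_vsi *vsi, unsigned int queue_index)
{
	return vsi->xdp_rings[queue_index];
}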
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/ice/
ice_ethtool.c
2705 struct ice_tx_ring *xdp_rings = NULL; in ice_set_ringparam() local
2766 vsi->xdp_rings[i]->count = new_tx_cnt; in ice_set_ringparam()
2807 vsi->xdp_rings[0]->count, new_tx_cnt); in ice_set_ringparam()
2809 xdp_rings = kcalloc(vsi->num_xdp_txq, sizeof(*xdp_rings), GFP_KERNEL); in ice_set_ringparam()
2810 if (!xdp_rings) { in ice_set_ringparam()
2817 xdp_rings[i] = *vsi->xdp_rings[i]; in ice_set_ringparam()
2818 xdp_rings[i].count = new_tx_cnt; in ice_set_ringparam()
2819 xdp_rings[ in ice_set_ringparam()
[all...]
ice_xsk.c
43 memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0, in ice_qp_reset_stats()
44 sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats)); in ice_qp_reset_stats()
57 ice_clean_tx_ring(vsi->xdp_rings[q_idx]); in ice_qp_clean_rings()
193 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx]; in ice_qp_dis()
247 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx]; in ice_qp_ena()
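Note on the linux-6.6 ice_xsk.c hits above: compared with 5.10, the ring type is now struct ice_tx_ring and the counters sit behind a ring_stats pointer, so ice_qp_reset_stats() clears xdp_rings[q_idx]->ring_stats->stats (lines 43-44). A sketch of that reset with simplified stand-in types; qp_reset_xdp_stats() is a hypothetical reduction of the real function:

#include <string.h>

/* Simplified stand-ins for the 6.6 layout visible in the hits. */
struct ice_q_stats { unsigned long long pkts, bytes; };
struct ice_ring_stats { struct ice_q_stats stats; };
struct ice_tx_ring { struct ice_ring_stats *ring_stats; };
struct ice_vsi { struct ice_tx_ring **xdp_rings; };

/* Mirrors the memset at lines 43-44: clear one queue's XDP Tx counters. */
static void qp_reset_xdp_stats(struct ice_vsi *vsi, int q_idx)
{
	memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
	       sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}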
ice_main.c
2614 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); in ice_xdp_alloc_setup_rings()
2629 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) { in ice_xdp_alloc_setup_rings()
2630 kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); in ice_xdp_alloc_setup_rings()
2631 vsi->xdp_rings[i]->ring_stats = NULL; in ice_xdp_alloc_setup_rings()
2632 ice_free_tx_ring(vsi->xdp_rings[i]); in ice_xdp_alloc_setup_rings()
2683 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, in ice_prepare_xdp_rings()
2684 sizeof(*vsi->xdp_rings), GFP_KERNEL); in ice_prepare_xdp_rings()
2685 if (!vsi->xdp_rings) in ice_prepare_xdp_rings()
2709 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_i in ice_prepare_xdp_rings()
[all...]
ice.h
409 struct ice_tx_ring **xdp_rings; /* XDP ring array */ member
ice_txrx.c
654 xdp_ring = vsi->xdp_rings[queue_index]; in ice_xdp_xmit()
660 xdp_ring = vsi->xdp_rings[queue_index]; in ice_xdp_xmit()
ice_lib.c
1936 ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq); in ice_vsi_cfg_xdp_txqs()
2171 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); in ice_vsi_stop_xdp_tx_rings()
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/i40e/
i40e_xsk.c
225 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_run_xdp_zc()
606 if (!vsi->xdp_rings[queue_id]->xsk_pool) in i40e_xsk_wakeup()
609 ring = vsi->xdp_rings[queue_id]; in i40e_xsk_wakeup()
i40e_main.c
468 ring = READ_ONCE(vsi->xdp_rings[i]); in i40e_get_netdev_stats_struct()
925 p = READ_ONCE(vsi->xdp_rings[q]); in i40e_update_vsi_stats()
3172 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]); in i40e_vsi_setup_tx_resources()
3193 if (vsi->xdp_rings) { in i40e_vsi_free_tx_resources()
3195 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) in i40e_vsi_free_tx_resources()
3196 i40e_free_tx_resources(vsi->xdp_rings[i]); in i40e_vsi_free_tx_resources()
3538 err = i40e_configure_tx_ring(vsi->xdp_rings[i]); in i40e_vsi_configure_tx()
4018 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0); in i40e_vsi_disable_irq()
4339 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_id in i40e_map_vector_to_qp()
[all...]
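Note on the i40e_main.c hits above: the stats paths (i40e_get_netdev_stats_struct() at line 468, i40e_update_vsi_stats() at line 925) read each XDP ring pointer through READ_ONCE(), the reader side of the publish pattern shown for ice. A sketch of such a reader with simplified stand-in types; sum_xdp_tx_stats() is hypothetical, a plain load stands in for READ_ONCE(), and the NULL check is an assumption about skipping not-yet-published rings rather than something visible in these hits:

/* Simplified stand-ins; real i40e rings carry far more state than this. */
struct i40e_ring {
	unsigned long long packets, bytes;
};

struct i40e_vsi {
	struct i40e_ring **xdp_rings;	/* XDP Tx rings */
	int num_queue_pairs;
};

/* Snapshot each XDP ring pointer once and accumulate its Tx counters. */
static void sum_xdp_tx_stats(const struct i40e_vsi *vsi,
			     unsigned long long *pkts, unsigned long long *bytes)
{
	int q;

	*pkts = 0;
	*bytes = 0;
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		const struct i40e_ring *ring = vsi->xdp_rings[q];	/* READ_ONCE() in-kernel */

		if (!ring)
			continue;	/* assumed: queue not published yet */
		*pkts += ring->packets;
		*bytes += ring->bytes;
	}
}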
i40e_txrx.c
2203 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_run_xdp()
2300 rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_finalize_xdp_rx()
3801 xdp_ring = vsi->xdp_rings[queue_index]; in i40e_xdp_xmit()
i40e_debugfs.c
330 struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]); in i40e_dbg_dump_vsi_seid()
336 " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n", in i40e_dbg_dump_vsi_seid()
341 " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", in i40e_dbg_dump_vsi_seid()
347 " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", in i40e_dbg_dump_vsi_seid()
352 " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n", in i40e_dbg_dump_vsi_seid()
357 " xdp_rings[%i]: size = %i\n", in i40e_dbg_dump_vsi_seid()
360 " xdp_rings[%i]: DCB tc = %d\n", in i40e_dbg_dump_vsi_seid()
363 " xdp_rings[%i]: itr_setting = %d (%s)\n", in i40e_dbg_dump_vsi_seid()
587 ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL); in i40e_dbg_dump_desc()
i40e.h
768 struct i40e_ring **xdp_rings; /* XDP Tx rings */ member
i40e_ethtool.c
2001 vsi->xdp_rings[i]->count = new_tx_count; in i40e_set_ringparam()
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/i40e/
i40e_xsk.c
225 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_run_xdp_zc()
738 if (!vsi->xdp_rings[queue_id]->xsk_pool) in i40e_xsk_wakeup()
741 ring = vsi->xdp_rings[queue_id]; in i40e_xsk_wakeup()
i40e_main.c
471 ring = READ_ONCE(vsi->xdp_rings[i]); in i40e_get_netdev_stats_struct()
939 p = READ_ONCE(vsi->xdp_rings[q]); in i40e_update_vsi_stats()
3334 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]); in i40e_vsi_setup_tx_resources()
3355 if (vsi->xdp_rings) { in i40e_vsi_free_tx_resources()
3357 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) in i40e_vsi_free_tx_resources()
3358 i40e_free_tx_resources(vsi->xdp_rings[i]); in i40e_vsi_free_tx_resources()
3731 err = i40e_configure_tx_ring(vsi->xdp_rings[i]); in i40e_vsi_configure_tx()
4215 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0); in i40e_vsi_disable_irq()
4538 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_id in i40e_map_vector_to_qp()
[all...]
i40e_txrx.c
2332 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_run_xdp()
2409 rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_finalize_xdp_rx()
4042 xdp_ring = vsi->xdp_rings[queue_index]; in i40e_xdp_xmit()
i40e_debugfs.c
330 struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]); in i40e_dbg_dump_vsi_seid()
336 " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n", in i40e_dbg_dump_vsi_seid()
341 " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", in i40e_dbg_dump_vsi_seid()
347 " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", in i40e_dbg_dump_vsi_seid()
352 " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n", in i40e_dbg_dump_vsi_seid()
357 " xdp_rings[%i]: size = %i\n", in i40e_dbg_dump_vsi_seid()
360 " xdp_rings[%i]: DCB tc = %d\n", in i40e_dbg_dump_vsi_seid()
363 " xdp_rings[%i]: itr_setting = %d (%s)\n", in i40e_dbg_dump_vsi_seid()
587 ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL); in i40e_dbg_dump_desc()
i40e.h
882 struct i40e_ring **xdp_rings; /* XDP Tx rings */ member
i40e_ethtool.c
2103 vsi->xdp_rings[i]->count = new_tx_count; in i40e_set_ringparam()

Completed in 87 milliseconds