/kernel/linux/linux-6.6/drivers/net/ethernet/intel/ice/
ice_xsk.c
   193  struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];  // in ice_qp_dis() (local)
   196  ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);  // in ice_qp_dis()
   197  err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,  // in ice_qp_dis()
   247  struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];  // in ice_qp_ena() (local)
   251  err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);  // in ice_qp_ena()
   254  ice_set_ring_xdp(xdp_ring);  // in ice_qp_ena()
   426  napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);  // in ice_xsk_pool_setup()
   615  * @xdp_ring: XDP Tx ring
   617  static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)  // argument
   619  u16 ntc = xdp_ring->next_to_clean;  // in ice_clean_xdp_irq_zc()
   685  ice_xmit_xdp_tx_zc(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)  // argument
   766  ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)  // argument
   852  struct ice_tx_ring *xdp_ring;  // in ice_clean_rx_irq_zc() (local)
   990  ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc, unsigned int *total_bytes)  // argument
  1013  ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs, unsigned int *total_bytes)  // argument
  1044  ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts, unsigned int *total_bytes)  // argument
  1063  ice_xmit_zc(struct ice_tx_ring *xdp_ring)  // argument
  1183  ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)  // argument
        …
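The ice_qp_dis()/ice_qp_ena() matches above outline how the driver quiesces and restarts one queue pair around an XSK pool change. Below is a condensed sketch of that flow, assuming the helpers keep the signatures shown in the excerpt; the function name and the bare pool-swap step are hypothetical, and Rx-side teardown, interrupt masking, and timeouts are omitted.

/* Hypothetical condensed flow; the real ice_qp_dis()/ice_qp_ena() also
 * handle the Rx ring, interrupt masking, and failure timeouts. */
static int ice_qp_swap_pool_sketch(struct ice_vsi *vsi, u16 q_idx,
				   struct ice_aqc_add_tx_qgrp *qg_buf)
{
	struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
	struct ice_txq_meta txq_meta = { };
	int err;

	/* disable: detach the XDP Tx queue from the HW scheduler */
	ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring, &txq_meta);
	if (err)
		return err;

	/* ...swap the queue's xsk_pool pointer while it is quiesced... */

	/* enable: reprogram the Tx queue and mark it as an XDP ring */
	err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
	if (err)
		return err;
	ice_set_ring_xdp(xdp_ring);

	/* kick NAPI so the datapath picks up the new pool (cf. line 426) */
	napi_schedule(&vsi->rx_rings[q_idx]->xdp_ring->q_vector->napi);
	return 0;
}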
ice_txrx_lib.c
   251  * @xdp_ring: XDP ring to clean
   253  static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)  // argument
   256  struct device *dev = xdp_ring->dev;  // in ice_clean_xdp_irq()
   257  u32 ntc = xdp_ring->next_to_clean;  // in ice_clean_xdp_irq()
   259  u32 cnt = xdp_ring->count;  // in ice_clean_xdp_irq()
   266  idx = xdp_ring->tx_buf[ntc].rs_idx;  // in ice_clean_xdp_irq()
   267  tx_desc = ICE_TX_DESC(xdp_ring, idx);  // in ice_clean_xdp_irq()
   284  struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];  // in ice_clean_xdp_irq()
   300  tx_buf = &xdp_ring->tx_buf[ntc];  // in ice_clean_xdp_irq()
   315  xdp_ring…  // in ice_clean_xdp_irq()
   328  __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, bool frame)  // argument
   447  ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx)  // argument
        …
ice_txrx_lib.h
   125  * @xdp_ring: XDP Tx ring
   129  static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)  // argument
   135  writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);  // in ice_xdp_ring_update_tail()
   140  * @xdp_ring: XDP ring to produce the HW Tx descriptors on
   144  static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)  // argument
   146  u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;  // in ice_set_rs_bit()
   149  tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);  // in ice_set_rs_bit()
        …
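Taken together, the two inline helpers above implement the usual "finalize a batch" idiom: set the RS (report status) bit on the last descriptor written, then ring the tail doorbell once. A sketch of how a caller might combine them, assuming the driver's real macros (ICE_TX_DESC, ICE_TX_DESC_CMD_RS, ICE_TXD_QW1_CMD_S) and that descriptor stores were ordered before the relaxed MMIO write; the helper name is hypothetical.

static inline void ice_xdp_flush_batch_sketch(struct ice_tx_ring *xdp_ring)
{
	/* last written slot, wrapping backwards from next_to_use */
	u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1
					   : xdp_ring->count - 1;
	struct ice_tx_desc *tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);

	/* one RS bit per batch => one completion write-back per batch */
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);

	/* relaxed doorbell: the caller is assumed to have ordered the
	 * descriptor stores already */
	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}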
ice_xsk.h
    28  void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
    29  bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
    32  static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)  // argument
    72  static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }  // argument
ice_txrx.c
   553  * @xdp_ring: ring to be used for XDP_TX action
   559  ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, struct ice_rx_buf *rx_buf)  // argument
   575  spin_lock(&xdp_ring->tx_lock);  // in ice_run_xdp()
   576  ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);  // in ice_run_xdp()
   578  spin_unlock(&xdp_ring->tx_lock);  // in ice_run_xdp()
   604  * @xdp_ring: XDP ring for transmission
   606  ice_xmit_xdp_ring(const struct xdp_frame *xdpf, struct ice_tx_ring *xdp_ring)  // argument
   617  return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);  // in ice_xmit_xdp_ring()
   639  struct ice_tx_ring *xdp_ring;  // in ice_xdp_xmit() (local)
   654  xdp_ring…  // in ice_xdp_xmit()
  1152  struct ice_tx_ring *xdp_ring = NULL;  // in ice_clean_rx_irq() (local)
        …
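Lines 575–578 above show the interesting part of ice_run_xdp(): when there are fewer XDP Tx rings than CPUs, a ring may be shared between CPUs, so XDP_TX transmission takes the ring's tx_lock. A minimal sketch of that pattern; the "shared" flag stands in for the driver's actual ring-sharing check, which is not visible in the excerpt.

static int ice_xdp_tx_locked_sketch(struct xdp_buff *xdp,
				    struct ice_tx_ring *xdp_ring,
				    bool shared)
{
	int ret;

	if (shared)				/* ring shared across CPUs */
		spin_lock(&xdp_ring->tx_lock);
	ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
	if (shared)
		spin_unlock(&xdp_ring->tx_lock);
	return ret;
}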
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/ice/
ice_xsk.c
   177  struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];  // in ice_qp_dis() (local)
   180  ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);  // in ice_qp_dis()
   181  err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,  // in ice_qp_dis()
   233  struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];  // in ice_qp_ena() (local)
   237  err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);  // in ice_qp_ena()
   240  ice_set_ring_xdp(xdp_ring);  // in ice_qp_ena()
   241  xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);  // in ice_qp_ena()
   532  struct ice_ring *xdp_ring;  // in ice_run_xdp_zc() (local)
   556  xdp_ring…  // in ice_run_xdp_zc()
   694  ice_xmit_zc(struct ice_ring *xdp_ring, int budget)  // argument
   745  ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)  // argument
   760  ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)  // argument
   905  ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)  // argument
        …
ice_txrx_lib.c
   206  * @xdp_ring: XDP ring for transmission
   208  int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)  // argument
   210  u16 i = xdp_ring->next_to_use;  // in ice_xmit_xdp_ring()
   215  if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {  // in ice_xmit_xdp_ring()
   216  xdp_ring->tx_stats.tx_busy++;  // in ice_xmit_xdp_ring()
   220  dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);  // in ice_xmit_xdp_ring()
   221  if (dma_mapping_error(xdp_ring->dev, dma))  // in ice_xmit_xdp_ring()
   224  tx_buf = &xdp_ring->tx_buf[i];  // in ice_xmit_xdp_ring()
   233  tx_desc = ICE_TX_DESC(xdp_ring, i);  // in ice_xmit_xdp_ring()
   244  if (i == xdp_ring->count)  // in ice_xmit_xdp_ring()
   260  ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)  // argument
   285  struct ice_ring *xdp_ring = …  // in ice_finalize_xdp_rx() (local)
        …
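The ice_xmit_xdp_ring() excerpt above is the classic copy-mode XDP Tx enqueue: check for a free slot, dma_map_single() the frame, fill one descriptor, and wrap next_to_use. A sketch of that sequence under the 5.10 types shown above; the function name is hypothetical, and the descriptor command/size encoding and unmap bookkeeping are deliberately simplified.

static int xdp_ring_enqueue_sketch(void *data, u16 size,
				   struct ice_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;

	if (!ICE_DESC_UNUSED(xdp_ring)) {	/* ring full */
		xdp_ring->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return ICE_XDP_CONSUMED;

	tx_buf = &xdp_ring->tx_buf[i];
	tx_buf->bytecount = size;		/* completion bookkeeping, simplified */
	tx_desc = ICE_TX_DESC(xdp_ring, i);
	tx_desc->buf_addr = cpu_to_le64(dma);	/* cmd/size encoding elided */

	i++;
	xdp_ring->next_to_use = (i == xdp_ring->count) ? 0 : i;	/* wrap */
	return ICE_XDP_TX;
}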
ice_txrx_lib.h
    36  * @xdp_ring: XDP Tx ring
    40  static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)  // argument
    46  writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);  // in ice_xdp_ring_update_tail()
    50  int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
    51  int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
ice_xsk.h
    15  bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
    20  void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
    38  ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,  // argument, in ice_clean_tx_irq_zc()
    64  #define ice_xsk_clean_xdp_ring(xdp_ring) do {} while (0)
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/i40e/
i40e_xsk.c
   205  struct i40e_ring *xdp_ring;  // in i40e_run_xdp_zc() (local)
   225  xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];  // in i40e_run_xdp_zc()
   226  result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);  // in i40e_run_xdp_zc()
   537  static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,  // argument
   544  dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);  // in i40e_xmit_pkt()
   545  xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);  // in i40e_xmit_pkt()
   547  tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);  // in i40e_xmit_pkt()
   554  static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,  // argument
   557  u16 ntu = xdp_ring->next_to_use;  // in i40e_xmit_pkt_batch()
   578  i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts, unsigned int *total_bytes)  // argument
   591  i40e_set_rs_bit(struct i40e_ring *xdp_ring)  // argument
   607  i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)  // argument
        …
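What distinguishes this zero-copy path from the copy-mode enqueue is visible at lines 544–547: the AF_XDP descriptor's UMEM offset is translated to an already-mapped DMA address with xsk_buff_raw_get_dma(), so there is no per-packet dma_map_single(). A sketch of that per-descriptor step, modeled on the i40e_xmit_pkt() lines above; the function name is hypothetical and the command-field encoding is elided.

static void xsk_tx_one_sketch(struct i40e_ring *xdp_ring,
			      struct xdp_desc *desc,
			      unsigned int *total_bytes)
{
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	/* no dma_map_single(): the pool's umem was DMA-mapped at setup */
	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buffer_addr = cpu_to_le64(dma);	/* cmd/len field elided */

	*total_bytes += desc->len;
}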
i40e_txrx.c
  2298  struct i40e_ring *xdp_ring);
  2300  int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)  // argument
  2307  return i40e_xmit_xdp_ring(xdpf, xdp_ring);  // in i40e_xmit_xdp_tx_ring()
  2319  struct i40e_ring *xdp_ring;  // in i40e_run_xdp() (local)
  2332  xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];  // in i40e_run_xdp()
  2333  result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);  // in i40e_run_xdp()
  2360  * @xdp_ring: XDP Tx ring
  2364  void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)  // argument
  2370  writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);  // in i40e_xdp_ring_update_tail()
  2408  struct i40e_ring *xdp_ring = …  // in i40e_finalize_xdp_rx() (local)
  3793  i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring)  // argument
  4028  struct i40e_ring *xdp_ring;  // in i40e_xdp_xmit() (local)
        …
i40e_txrx_common.h
     7  int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
    12  void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
i40e_debugfs.c
   330  struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);  // in i40e_dbg_dump_vsi_seid() (local)
   332  if (!xdp_ring)  // in i40e_dbg_dump_vsi_seid()
   337  i, *xdp_ring->state,  // in i40e_dbg_dump_vsi_seid()
   338  xdp_ring->queue_index,  // in i40e_dbg_dump_vsi_seid()
   339  xdp_ring->reg_idx);  // in i40e_dbg_dump_vsi_seid()
   343  xdp_ring->next_to_use,  // in i40e_dbg_dump_vsi_seid()
   344  xdp_ring->next_to_clean,  // in i40e_dbg_dump_vsi_seid()
   345  xdp_ring->ring_active);  // in i40e_dbg_dump_vsi_seid()
   348  i, xdp_ring->stats.packets,  // in i40e_dbg_dump_vsi_seid()
   349  xdp_ring…  // in i40e_dbg_dump_vsi_seid()
        …
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/i40e/
i40e_xsk.c
   202  struct i40e_ring *xdp_ring;  // in i40e_run_xdp_zc() (local)
   225  xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];  // in i40e_run_xdp_zc()
   226  result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);  // in i40e_run_xdp_zc()
   451  * @xdp_ring: XDP Tx ring
   456  static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)  // argument
   465  if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))  // in i40e_xmit_zc()
   468  dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);  // in i40e_xmit_zc()
   469  xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,  // in i40e_xmit_zc()
   472  tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];  // in i40e_xmit_zc()
        …
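The 5.10 i40e_xmit_zc() matches above show the driving loop: xsk_tx_peek_desc() pulls descriptors from the AF_XDP Tx queue up to the NAPI budget, and the tail bump plus xsk_tx_release() happen once per batch, not per packet. A sketch of that loop shape under the same assumptions; the descriptor fill is reduced to a comment and the function name is hypothetical.

static bool xsk_zc_tx_loop_sketch(struct i40e_ring *xdp_ring,
				  unsigned int budget)
{
	unsigned int sent = 0;
	struct xdp_desc desc;
	dma_addr_t dma;

	while (sent < budget && xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc)) {
		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
						 desc.len);
		/* ...fill tx_bi[] and one Tx descriptor as in the excerpt... */
		sent++;
	}

	if (sent) {
		i40e_xdp_ring_update_tail(xdp_ring);	/* single doorbell */
		xsk_tx_release(xdp_ring->xsk_pool);	/* wake the producer */
	}
	return sent < budget;	/* true: queue drained within budget */
}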
i40e_txrx_common.h
     7  int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
    12  void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
i40e_txrx.c
  2166  struct i40e_ring *xdp_ring);
  2168  int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)  // argument
  2175  return i40e_xmit_xdp_ring(xdpf, xdp_ring);  // in i40e_xmit_xdp_tx_ring()
  2186  struct i40e_ring *xdp_ring;  // in i40e_run_xdp() (local)
  2203  xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];  // in i40e_run_xdp()
  2204  result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);  // in i40e_run_xdp()
  2251  * @xdp_ring: XDP Tx ring
  2255  void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)  // argument
  2261  writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);  // in i40e_xdp_ring_update_tail()
  2299  struct i40e_ring *xdp_ring = …  // in i40e_finalize_xdp_rx() (local)
  3578  i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring)  // argument
  3787  struct i40e_ring *xdp_ring;  // in i40e_xdp_xmit() (local)
        …
i40e_debugfs.c
   330  struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);  // in i40e_dbg_dump_vsi_seid() (local)
   332  if (!xdp_ring)  // in i40e_dbg_dump_vsi_seid()
   337  i, *xdp_ring->state,  // in i40e_dbg_dump_vsi_seid()
   338  xdp_ring->queue_index,  // in i40e_dbg_dump_vsi_seid()
   339  xdp_ring->reg_idx);  // in i40e_dbg_dump_vsi_seid()
   343  xdp_ring->next_to_use,  // in i40e_dbg_dump_vsi_seid()
   344  xdp_ring->next_to_clean,  // in i40e_dbg_dump_vsi_seid()
   345  xdp_ring->ring_active);  // in i40e_dbg_dump_vsi_seid()
   348  i, xdp_ring->stats.packets,  // in i40e_dbg_dump_vsi_seid()
   349  xdp_ring…  // in i40e_dbg_dump_vsi_seid()
        …
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/ixgbe/
ixgbe_xsk.c
   340  struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];  // in ixgbe_clean_rx_irq_zc()
   383  static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)  // argument
   385  struct xsk_buff_pool *pool = xdp_ring->xsk_pool;  // in ixgbe_xmit_zc()
   394  if (unlikely(!ixgbe_desc_unused(xdp_ring))) {  // in ixgbe_xmit_zc()
   399  if (!netif_carrier_ok(xdp_ring->netdev))  // in ixgbe_xmit_zc()
   408  tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];  // in ixgbe_xmit_zc()
   413  tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);  // in ixgbe_xmit_zc()
   425  xdp_ring…  // in ixgbe_xmit_zc()
        …
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/ixgbe/
ixgbe_xsk.c
   396  static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)  // argument
   398  struct xsk_buff_pool *pool = xdp_ring->xsk_pool;  // in ixgbe_xmit_zc()
   407  if (unlikely(!ixgbe_desc_unused(xdp_ring))) {  // in ixgbe_xmit_zc()
   412  if (!netif_carrier_ok(xdp_ring->netdev))  // in ixgbe_xmit_zc()
   421  tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];  // in ixgbe_xmit_zc()
   426  tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);  // in ixgbe_xmit_zc()
   438  xdp_ring->next_to_use++;  // in ixgbe_xmit_zc()
   439  if (xdp_ring…  // in ixgbe_xmit_zc()
        …
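Both ixgbe versions above begin ixgbe_xmit_zc() with the same two guards: no free hardware descriptors, or no carrier. Bailing out early keeps the XSK descriptors queued for the next poll instead of dropping them. As a tiny sketch of that check (the helper name is hypothetical):

static bool xsk_tx_may_xmit_sketch(struct ixgbe_ring *xdp_ring)
{
	if (unlikely(!ixgbe_desc_unused(xdp_ring)))
		return false;		/* HW ring full: retry next poll */
	if (!netif_carrier_ok(xdp_ring->netdev))
		return false;		/* link down: nothing would drain */
	return true;
}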
/kernel/linux/linux-6.6/drivers/net/ethernet/amazon/ena/
ena_netdev.c
    60  static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
   196  struct ena_ring *xdp_ring;  // in ena_xdp_io_poll() (local)
   200  xdp_ring = ena_napi->xdp_ring;  // in ena_xdp_io_poll()
   204  if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||  // in ena_xdp_io_poll()
   205  test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {  // in ena_xdp_io_poll()
   210  xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);  // in ena_xdp_io_poll()
   215  if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {  // in ena_xdp_io_poll()
   221  ena_unmask_interrupt(xdp_ring, NULL);  // in ena_xdp_io_poll()
   222  ena_update_ring_numa_node(xdp_ring, NULL);  // in ena_xdp_io_poll()
   237  ena_xdp_tx_map_frame(struct ena_ring *xdp_ring, struct ena_tx_buffer *tx_info, struct xdp_frame *xdpf, struct ena_com_tx_ctx *ena_tx_ctx)  // argument
   293  ena_xdp_xmit_frame(struct ena_ring *xdp_ring, struct net_device *dev, struct xdp_frame *xdpf, int flags)  // argument
   341  struct ena_ring *xdp_ring;  // in ena_xdp_xmit() (local)
   381  struct ena_ring *xdp_ring;  // in ena_xdp_execute() (local)
  1308  validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)  // argument
  1952  ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)  // argument
        …
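ENA gives each XDP Tx ring its own NAPI context, and the ena_xdp_io_poll() matches above show the skeleton: clean Tx completions against the budget, and only when work stays below budget complete NAPI and unmask the interrupt. A sketch of that structure; adapter-reset and DEV_UP handling are abbreviated, and the container_of() access assumes ena_napi embeds its napi member as the excerpts suggest.

static int xdp_napi_poll_sketch(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *xdp_ring = ena_napi->xdp_ring;
	int work_done;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
		napi_complete_done(napi, 0);	/* device resetting: stop */
		return 0;
	}

	work_done = ena_clean_xdp_irq(xdp_ring, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* below budget: re-arm the IRQ for this Tx-only ring */
		ena_unmask_interrupt(xdp_ring, NULL);
		ena_update_ring_numa_node(xdp_ring, NULL);
	}
	return work_done;
}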
/kernel/linux/linux-5.10/drivers/net/ethernet/amazon/ena/
ena_netdev.c
    63  static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
   189  struct ena_ring *xdp_ring;  // in ena_xdp_io_poll() (local)
   193  xdp_ring = ena_napi->xdp_ring;  // in ena_xdp_io_poll()
   194  xdp_ring->first_interrupt = ena_napi->first_interrupt;  // in ena_xdp_io_poll()
   198  if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||  // in ena_xdp_io_poll()
   199  test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {  // in ena_xdp_io_poll()
   204  xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);  // in ena_xdp_io_poll()
   209  if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {  // in ena_xdp_io_poll()
   215  ena_unmask_interrupt(xdp_ring, NULL);  // in ena_xdp_io_poll()
   230  ena_xdp_tx_map_buff(struct ena_ring *xdp_ring, struct ena_tx_buffer *tx_info, struct xdp_buff *xdp, void **push_hdr, u32 *push_len)  // argument
   287  struct ena_ring *xdp_ring;  // in ena_xdp_xmit_buff() (local)
  1212  validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)  // argument
  1787  ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)  // argument
        …
/kernel/linux/linux-5.10/net/xdp/
xsk_queue.h
    16  struct xdp_ring {  // struct definition
    28  struct xdp_ring ptrs;
    34  struct xdp_ring ptrs;
    43  struct xdp_ring *ring;
xsk_queue.c
    40  q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,  // in xskq_create()
/kernel/linux/linux-6.6/net/xdp/
xsk_queue.h
    16  struct xdp_ring {  // struct definition
    30  struct xdp_ring ptrs;
    36  struct xdp_ring ptrs;
    45  struct xdp_ring *ring;
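Both xsk_queue.h versions above repeat one layout trick: a small header struct (struct xdp_ring, holding the producer/consumer indices) is embedded as the first member "ptrs" of both ring flavors, so the kernel and the userspace mmap() of the ring agree on the shared prefix. A reduced sketch of that pattern; field names beyond producer/consumer/flags, and the cache-line padding of the real struct, are omitted, and the *_sketch type names are hypothetical.

#include <linux/types.h>
#include <linux/if_xdp.h>	/* struct xdp_desc */

struct xdp_ring_sketch {	/* shared header: the 'ptrs' member above */
	u32 producer;
	u32 consumer;
	u32 flags;
};

/* Rx/Tx rings carry full descriptors... */
struct xdp_rxtx_ring_sketch {
	struct xdp_ring_sketch ptrs;
	struct xdp_desc desc[];
};

/* ...Fill/Completion rings carry bare UMEM addresses */
struct xdp_umem_ring_sketch {
	struct xdp_ring_sketch ptrs;
	u64 addr[];
};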
/kernel/linux/linux-5.10/drivers/net/
veth.c
    64  struct ptr_ring xdp_ring;  // member
   267  if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {  // in veth_xdp_rx()
   451  /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive  // in veth_xdp_xmit()
   460  spin_lock(&rq->xdp_ring.producer_lock);  // in veth_xdp_xmit()
   466  __ptr_ring_produce(&rq->xdp_ring, ptr))) {  // in veth_xdp_xmit()
   471  spin_unlock(&rq->xdp_ring.producer_lock);  // in veth_xdp_xmit()
   540  /* xdp_ring is initialized on receive side? */  // in veth_xdp_flush()
   806  void *ptr = __ptr_ring_consume(&rq->xdp_ring);  // in veth_xdp_rcv()
   859  if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {  // in veth_poll()
   880  err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE,  // in veth_napi_add()
        …
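veth's per-receive-queue ptr_ring (the member at line 64) is the whole "wire": the transmitting peer produces skb or xdp_frame pointers into rq->xdp_ring, and the receiver's NAPI poll consumes them (lines 267, 460–471, 806). A minimal producer/consumer sketch; veth additionally tags pointers to distinguish xdp_frame from skb, which is omitted here, and the function names are hypothetical.

static int veth_enqueue_sketch(struct veth_rq *rq, void *ptr)
{
	/* producer side: ptr_ring_produce() takes the producer lock and
	 * returns nonzero when the ring is full */
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, ptr)))
		return -ENOSPC;		/* caller drops the packet */
	return 0;
}

static void *veth_dequeue_sketch(struct veth_rq *rq)
{
	/* consumer side: NAPI poll is the single consumer, so the
	 * lock-free __ptr_ring_consume() variant is safe here */
	return __ptr_ring_consume(&rq->xdp_ring);
}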