Lines Matching defs:pool in net/xdp/xsk.c

36 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
38 if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
41 pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
42 pool->cached_need_wakeup |= XDP_WAKEUP_RX;
46 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
50 if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
54 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
59 pool->cached_need_wakeup |= XDP_WAKEUP_TX;
63 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
65 if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
68 pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
69 pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
73 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
77 if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
81 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
86 pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
90 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
92 return pool->uses_need_wakeup;
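The setters and clearers above toggle XDP_RING_NEED_WAKEUP on the fill ring (RX) and, via pool->xsk_tx_list, on every bound socket's TX ring, caching the state in pool->cached_need_wakeup so repeated calls are cheap. A minimal sketch of how a zero-copy driver might drive them from its NAPI poll path follows; only the xsk_*() helpers are real API, the surrounding logic and names are illustrative assumptions.

#include <net/xdp_sock_drv.h>  /* driver-facing xsk_* helpers */

/* Hedged sketch: driving the RX need_wakeup flag from a driver's NAPI poll.
 * Only the xsk_*() calls are real API; the policy shown is illustrative. */
static int my_napi_poll_zc(struct xsk_buff_pool *pool, int budget, int work_done)
{
        if (xsk_uses_need_wakeup(pool)) {
                if (work_done < budget)
                        /* Ran dry: ask user space to kick us via poll()/sendto(). */
                        xsk_set_rx_need_wakeup(pool);
                else
                        /* Still busy: NAPI will be rescheduled, no wakeup needed. */
                        xsk_clear_rx_need_wakeup(pool);
        }

        return work_done;
}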
100 return dev->_rx[queue_id].pool;
102 return dev->_tx[queue_id].pool;
111 dev->_rx[queue_id].pool = NULL;
113 dev->_tx[queue_id].pool = NULL;
116 /* The buffer pool is stored both in the _rx struct and the _tx struct as we do
120 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
129 dev->_rx[queue_id].pool = pool;
131 dev->_tx[queue_id].pool = pool;
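xsk_reg_pool_at_qid() stores the pool in both dev->_rx[] and dev->_tx[] because the core cannot know whether the device has more TX queues than RX queues or the opposite; drivers later look it up per queue id. A minimal sketch, where only xsk_get_pool_from_qid() is real API and the wrapper is hypothetical:

#include <net/xdp_sock_drv.h>

/* Hedged sketch: a driver checking at ring-setup time whether an AF_XDP
 * socket is bound to this queue id. */
static bool my_ring_uses_xsk(struct net_device *netdev, u16 queue_id)
{
        /* Returns the pool stored by xsk_reg_pool_at_qid(), or NULL when no
         * socket is bound to this queue. */
        return xsk_get_pool_from_qid(netdev, queue_id) != NULL;
}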
138 xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
145 offset += xskb->pool->headroom;
146 if (!xskb->pool->unaligned)
192 if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
197 xsk_xdp = xsk_buff_alloc(xs->pool);
253 __xskq_cons_release(xs->pool->fq);
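Lines 138-253 are the pool-backed buffer handling on the RX side: xp_release() returns a buffer to the pool's free list, xp_get_handle() re-applies the pool headroom, and the copy-mode receive path bounces oversized-checked frames into buffers from xsk_buff_alloc(). The same allocator is what a zero-copy driver uses to refill its hardware RX ring; a rough sketch, with my_hw_post_rx() as a hypothetical stand-in:

#include <net/xdp_sock_drv.h>

/* Hypothetical stand-in for posting a buffer to the HW RX ring. */
static void my_hw_post_rx(dma_addr_t dma, u32 len)
{
}

/* Hedged sketch: refilling a zero-copy RX ring from the pool. xsk_buff_alloc(),
 * xsk_buff_xdp_get_dma() and xsk_pool_get_rx_frame_size() are real driver-facing
 * helpers; everything else is illustrative. */
static u32 my_refill_rx_zc(struct xsk_buff_pool *pool, u32 count)
{
        struct xdp_buff *xdp;
        u32 i;

        for (i = 0; i < count; i++) {
                xdp = xsk_buff_alloc(pool);     /* NULL once the fill ring is empty */
                if (!xdp)
                        break;
                my_hw_post_rx(xsk_buff_xdp_get_dma(xdp),
                              xsk_pool_get_rx_frame_size(pool));
        }

        return i;
}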
294 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
296 xskq_prod_submit_n(pool->cq, nb_entries);
300 void xsk_tx_release(struct xsk_buff_pool *pool)
305 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
314 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
319 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
320 if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
330 if (xskq_prod_reserve_addr(pool->cq, desc->addr))
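xsk_tx_completed(), xsk_tx_release() and xsk_tx_peek_desc() are the driver-facing TX half: peek pulls one descriptor from a socket bound to the pool and reserves its completion-ring slot (line 330), release hands the consumed TX-ring entries back to user space, and completed publishes finished frames on the completion ring. A hedged sketch of how a zero-copy driver might string them together; the my_hw_*() pieces are hypothetical:

#include <net/xdp_sock_drv.h>

/* Hypothetical stand-in for writing a HW TX descriptor and ringing the doorbell. */
static void my_hw_post_tx(void *data, u32 len)
{
}

/* Hedged sketch: zero-copy transmit built on the pool TX helpers above. */
static u32 my_xmit_zc(struct xsk_buff_pool *pool, u32 budget)
{
        struct xdp_desc desc;
        u32 sent = 0;

        while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
                /* desc.addr is a umem offset; translate it to a kernel pointer. */
                my_hw_post_tx(xsk_buff_raw_get_data(pool, desc.addr), desc.len);
                sent++;
        }

        /* Hand the consumed TX-ring entries back and wake any writers. */
        xsk_tx_release(pool);
        return sent;
}

/* Called from the TX clean-up path once the NIC is done with the frames. */
static void my_tx_clean_zc(struct xsk_buff_pool *pool, u32 completed)
{
        xsk_tx_completed(pool, completed);
}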
367 spin_lock_irqsave(&xs->pool->cq_lock, flags);
368 xskq_prod_submit_addr(xs->pool->cq, addr);
369 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
393 while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
412 buffer = xsk_buff_raw_get_data(xs->pool, addr);
419 spin_lock_irqsave(&xs->pool->cq_lock, flags);
420 if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
421 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
425 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
437 spin_lock_irqsave(&xs->pool->cq_lock, flags);
438 xskq_prod_cancel(xs->pool->cq);
439 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
500 struct xsk_buff_pool *pool;
507 pool = xs->pool;
509 if (pool->cached_need_wakeup) {
511 xsk_wakeup(xs, pool->cached_need_wakeup);
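In xsk_poll() (lines 500-511) the kernel side of the contract shows up: whenever any cached_need_wakeup bit is set, poll() first kicks the driver through xsk_wakeup() so it resumes processing. The user-space side checks the same flag on the mmapped ring before issuing a syscall at all. A minimal sketch, assuming libxdp's xsk helpers (xsk_ring_prod__needs_wakeup(), xsk_socket__fd()):

#include <sys/socket.h>
#include <xdp/xsk.h>    /* libxdp (formerly libbpf) AF_XDP ring helpers */

/* Hedged user-space sketch: only pay for a syscall when the kernel has set
 * XDP_RING_NEED_WAKEUP on the TX ring via the flag helpers shown earlier. */
static void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
        if (xsk_ring_prod__needs_wakeup(tx))
                sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
}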
552 xp_del_xsk(xs->pool, xs);
742 xs->pool = xp_create_and_assign_umem(xs,
744 if (!xs->pool) {
750 err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
753 xp_destroy(xs->pool);
754 xs->pool = NULL;
759 /* Share the buffer pool with the other socket. */
767 xp_get_pool(umem_xs->pool);
768 xs->pool = umem_xs->pool;
779 xs->pool = xp_create_and_assign_umem(xs, xs->umem);
780 if (!xs->pool) {
785 err = xp_assign_dev(xs->pool, dev, qid, flags);
787 xp_destroy(xs->pool);
788 xs->pool = NULL;
793 /* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
800 xp_add_xsk(xs->pool, xs);
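Lines 742-800 are the bind-time pool setup: with a shared umem the new socket either reuses the existing pool directly (same device and queue) or creates its own pool on top of the already-registered umem; otherwise a fresh pool is created and assigned to the device, after which the fill and completion rings are owned by the pool. From user space this path is reached by binding with XDP_SHARED_UMEM and the first socket's fd; a hedged sketch using only the raw sockaddr_xdp UAPI:

#include <string.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>       /* struct sockaddr_xdp, XDP_SHARED_UMEM */

#ifndef AF_XDP
#define AF_XDP 44               /* from <linux/socket.h>, for older libc headers */
#endif

/* Hedged sketch: binding a second AF_XDP socket that shares the first
 * socket's umem/pool, exercising the umem_xs branch above. */
static int bind_shared(int fd, int first_fd, unsigned int ifindex,
                       unsigned int queue_id)
{
        struct sockaddr_xdp sxdp;

        memset(&sxdp, 0, sizeof(sxdp));
        sxdp.sxdp_family = AF_XDP;
        sxdp.sxdp_flags = XDP_SHARED_UMEM;      /* reuse the umem on first_fd */
        sxdp.sxdp_ifindex = ifindex;
        sxdp.sxdp_queue_id = queue_id;
        sxdp.sxdp_shared_umem_fd = first_fd;

        return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}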
976 xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1133 xp_clear_dev(xs->pool);
1177 if (!xp_put_pool(xs->pool))
1178 xdp_put_umem(xs->umem, !xs->pool);