Lines matching refs:pool (AF_XDP socket core, net/xdp/xsk.c)
39 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
41 if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
44 pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
45 pool->cached_need_wakeup |= XDP_WAKEUP_RX;
49 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
53 if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
57 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
62 pool->cached_need_wakeup |= XDP_WAKEUP_TX;
66 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
68 if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
71 pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
72 pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
76 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
80 if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
84 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
89 pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
93 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
95 return pool->uses_need_wakeup;
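
The set/clear/uses helpers above are the driver-facing side of the need_wakeup protocol: a zero-copy driver raises the flag on the fill or TX ring when it wants user space to kick it with a syscall, and clears it again while it is actively processing. A minimal sketch of how a hypothetical driver might drive the RX flag from its NAPI poll; struct my_rxq and my_update_rx_need_wakeup() are invented, only the xsk_*_need_wakeup() calls are the real API:

    #include <net/xdp_sock_drv.h>

    /* Hypothetical per-queue driver context; only the pool pointer matters. */
    struct my_rxq {
        struct xsk_buff_pool *pool;
    };

    /* Called at the end of the driver's RX poll: if the fill side ran dry,
     * ask user space for a wakeup; otherwise clear the flag so the fast
     * path in user space can skip the syscall.
     */
    static void my_update_rx_need_wakeup(struct my_rxq *rxq, bool ran_dry)
    {
        if (!xsk_uses_need_wakeup(rxq->pool))
            return;

        if (ran_dry)
            xsk_set_rx_need_wakeup(rxq->pool);
        else
            xsk_clear_rx_need_wakeup(rxq->pool);
    }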
103 return dev->_rx[queue_id].pool;
105 return dev->_tx[queue_id].pool;
114 dev->_rx[queue_id].pool = NULL;
116 dev->_tx[queue_id].pool = NULL;
119 /* The buffer pool is stored both in the _rx struct and the _tx struct as we do
123 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
132 dev->_rx[queue_id].pool = pool;
134 dev->_tx[queue_id].pool = pool;
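
xsk_reg_pool_at_qid() stores the pool in both the _rx and _tx netdev queue structs so a driver can look it up by queue id alone, and xsk_clear_pool_at_qid() undoes that. A hedged sketch of the driver-side lookup; the wrapper name is invented, xsk_get_pool_from_qid() is the real helper backed by the lines above:

    #include <net/xdp_sock_drv.h>

    /* True while an AF_XDP socket's pool is registered on this queue id. */
    static bool my_queue_is_xsk(struct net_device *dev, u16 qid)
    {
        return xsk_get_pool_from_qid(dev, qid) != NULL;
    }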
174 xskb_list = &xskb->pool->xskb_list;
229 u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
243 xsk_xdp = xsk_buff_alloc(xs->pool);
261 if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
281 xsk_xdp = xsk_buff_alloc(xs->pool);
321 if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
333 __xskq_cons_release(xs->pool->fq);
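
The receive-path lines above allocate per-frame buffers from the socket's pool with xsk_buff_alloc(), bounded by xsk_pool_get_rx_frame_size(), for the copy case; zero-copy drivers use the same allocator to populate their hardware RX rings. A hedged sketch under that assumption, with my_post_rx_buffer() standing in for whatever a real driver writes into its descriptors:

    #include <net/xdp_sock_drv.h>

    /* Hypothetical: write one DMA address into a HW RX descriptor and
     * remember the xdp_buff so the frame can be built on completion.
     */
    static void my_post_rx_buffer(void *hw_ring, struct xdp_buff *xdp,
                                  dma_addr_t dma)
    {
    }

    static u32 my_fill_hw_rx_ring(struct xsk_buff_pool *pool, void *hw_ring,
                                  u32 budget)
    {
        u32 filled = 0;

        /* Cheap batched check before pulling buffers one at a time. */
        if (!xsk_buff_can_alloc(pool, budget))
            return 0;

        while (filled < budget) {
            struct xdp_buff *xdp = xsk_buff_alloc(pool);

            if (!xdp)
                break;
            my_post_rx_buffer(hw_ring, xdp, xsk_buff_xdp_get_dma(xdp));
            filled++;
        }
        return filled;
    }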
398 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
400 xskq_prod_submit_n(pool->cq, nb_entries);
404 void xsk_tx_release(struct xsk_buff_pool *pool)
409 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
418 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
423 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
424 if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
435 if (xskq_prod_reserve_addr(pool->cq, desc->addr))
449 static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
451 struct xdp_desc *descs = pool->tx_descs;
454 while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
457 xsk_tx_release(pool);
461 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
466 if (!list_is_singular(&pool->xsk_tx_list)) {
469 return xsk_tx_peek_release_fallback(pool, nb_pkts);
472 xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
486 nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
490 nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
497 xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
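
xsk_tx_peek_desc()/xsk_tx_release() form the one-descriptor-at-a-time driver interface, while xsk_tx_peek_release_desc_batch() is the batched fast path that fills pool->tx_descs and reserves the matching completion-ring slots in one call. A hedged sketch of a zero-copy TX loop built on the batched variant; the hardware-specific step is only a comment:

    #include <net/xdp_sock_drv.h>

    static void my_xsk_xmit(struct xsk_buff_pool *pool, u32 budget)
    {
        struct xdp_desc *descs = pool->tx_descs;
        u32 i, nb_pkts;

        /* Grabs up to budget descriptors from the TX ring and reserves
         * matching completion-ring entries in one pass.
         */
        nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);

        for (i = 0; i < nb_pkts; i++) {
            dma_addr_t dma = xsk_buff_raw_get_dma(pool, descs[i].addr);

            xsk_buff_raw_dma_sync_for_device(pool, dma, descs[i].len);
            /* ...write dma and descs[i].len into the HW TX descriptor... */
        }

        /* Later, from the TX completion path, report how many frames the
         * HW finished so their completion-ring entries become visible:
         * xsk_tx_completed(pool, done);
         */
    }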
518 spin_lock_irqsave(&xs->pool->cq_lock, flags);
519 ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
520 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
529 spin_lock_irqsave(&xs->pool->cq_lock, flags);
530 xskq_prod_submit_n(xs->pool->cq, n);
531 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
538 spin_lock_irqsave(&xs->pool->cq_lock, flags);
539 xskq_prod_cancel_n(xs->pool->cq, n);
540 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
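
The spin_lock_irqsave() blocks above implement the copy-mode completion-ring lifecycle: reserve a slot before an skb is built from a descriptor, submit it once the skb has been consumed, cancel it if building or sending fails. Since xsk_queue.h is private to net/xdp/, the sketch below is only an illustration of that reserve/submit/cancel pattern, with invented my_* names:

    #include <net/xsk_buff_pool.h>
    #include "xsk_queue.h"        /* private header, visible only in net/xdp/ */

    static int my_cq_reserve(struct xsk_buff_pool *pool, u64 addr)
    {
        unsigned long flags;
        int ret;

        /* One completion slot per descriptor, claimed before the skb
         * exists so a later completion can never overrun the ring.
         */
        spin_lock_irqsave(&pool->cq_lock, flags);
        ret = xskq_prod_reserve_addr(pool->cq, addr);
        spin_unlock_irqrestore(&pool->cq_lock, flags);
        return ret;
    }

    static void my_cq_submit(struct xsk_buff_pool *pool, u32 n)
    {
        unsigned long flags;

        /* skb-consumed path: make n reserved entries visible to user space. */
        spin_lock_irqsave(&pool->cq_lock, flags);
        xskq_prod_submit_n(pool->cq, n);
        spin_unlock_irqrestore(&pool->cq_lock, flags);
    }

    static void my_cq_cancel(struct xsk_buff_pool *pool, u32 n)
    {
        unsigned long flags;

        /* Error path: hand the reserved entries back. */
        spin_lock_irqsave(&pool->cq_lock, flags);
        xskq_prod_cancel_n(pool->cq, n);
        spin_unlock_irqrestore(&pool->cq_lock, flags);
    }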
581 struct xsk_buff_pool *pool = xs->pool;
601 ts = pool->unaligned ? len : pool->chunk_size;
603 buffer = xsk_buff_raw_get_data(pool, addr);
605 addr = buffer - pool->addrs;
611 page = pool->umem->pgs[addr >> PAGE_SHIFT];
648 buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
733 while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
838 struct xsk_buff_pool *pool;
851 __sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
858 pool = xs->pool;
859 if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
899 if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
921 struct xsk_buff_pool *pool;
929 pool = xs->pool;
931 if (pool->cached_need_wakeup) {
933 xsk_wakeup(xs, pool->cached_need_wakeup);
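
xsk_poll() and the sendmsg/recvmsg paths above consult cached_need_wakeup and, for zero-copy sockets, call the driver's wakeup hook. The user-space counterpart reads the same XDP_RING_NEED_WAKEUP flag from the mmapped ring and only then issues a syscall. A hedged sketch using libxdp's helpers (xdp/xsk.h here; older trees ship the same helpers as bpf/xsk.h):

    #include <poll.h>
    #include <sys/socket.h>
    #include <xdp/xsk.h>

    /* Kick TX only when the kernel asked for it via XDP_RING_NEED_WAKEUP. */
    static void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
    {
        if (xsk_ring_prod__needs_wakeup(tx))
            sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
    }

    /* The fill ring carries the RX-side flag; block in poll() only when set. */
    static void wait_for_rx_if_needed(struct xsk_socket *xsk,
                                      struct xsk_ring_prod *fill)
    {
        struct pollfd pfd = { .fd = xsk_socket__fd(xsk), .events = POLLIN };

        if (xsk_ring_prod__needs_wakeup(fill))
            poll(&pfd, 1, -1);
    }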
975 xp_del_xsk(xs->pool, xs);
1164 xs->pool = xp_create_and_assign_umem(xs,
1166 if (!xs->pool) {
1172 err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
1175 xp_destroy(xs->pool);
1176 xs->pool = NULL;
1181 /* Share the buffer pool with the other socket. */
1189 xp_get_pool(umem_xs->pool);
1190 xs->pool = umem_xs->pool;
1196 if (xs->tx && !xs->pool->tx_descs) {
1197 err = xp_alloc_tx_descs(xs->pool, xs);
1199 xp_put_pool(xs->pool);
1200 xs->pool = NULL;
1215 xs->pool = xp_create_and_assign_umem(xs, xs->umem);
1216 if (!xs->pool) {
1221 err = xp_assign_dev(xs->pool, dev, qid, flags);
1223 xp_destroy(xs->pool);
1224 xs->pool = NULL;
1229 /* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
1237 xp_add_xsk(xs->pool, xs);
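
The bind path above either creates a fresh pool for the device/queue or, when XDP_SHARED_UMEM is requested, reuses an already-bound socket's umem or pool (a new pool if the device/queue differ, the shared pool if they match). From user space the sharing case is selected in the sockaddr_xdp passed to bind(); a minimal sketch with error handling omitted:

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/if_xdp.h>

    /* Bind a second AF_XDP socket (fd) against the umem of an already-bound
     * socket (first_fd), which lands in the sharing branches above.
     */
    static int bind_shared(int fd, int first_fd, unsigned int ifindex,
                           unsigned int qid)
    {
        struct sockaddr_xdp sxdp;

        memset(&sxdp, 0, sizeof(sxdp));
        sxdp.sxdp_family = AF_XDP;
        sxdp.sxdp_ifindex = ifindex;
        sxdp.sxdp_queue_id = qid;
        sxdp.sxdp_flags = XDP_SHARED_UMEM;
        sxdp.sxdp_shared_umem_fd = first_fd;

        return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
    }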
1413 xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1529 READ_ONCE(xs->pool->fq);
1532 READ_ONCE(xs->pool->cq);
1568 xp_clear_dev(xs->pool);
1611 if (!xp_put_pool(xs->pool))
1612 xdp_put_umem(xs->umem, !xs->pool);