Lines matching references to xs (AF_XDP socket code)

48 struct xdp_sock *xs;
54 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
55 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
75 struct xdp_sock *xs;
81 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
82 xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
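The matches at lines 48-55 and 75-82 set and clear XDP_RING_NEED_WAKEUP on the TX ring of every socket attached to the pool; userspace is expected to test that flag before issuing a wakeup syscall. A minimal sketch of the userspace side, assuming the libbpf/libxdp xsk.h helpers (xsk_ring_prod__needs_wakeup(), xsk_socket__fd()):

#include <errno.h>
#include <sys/socket.h>
#include <bpf/xsk.h>	/* or <xdp/xsk.h> when using libxdp */

/* Kick the kernel TX path only when it asked for it via XDP_RING_NEED_WAKEUP. */
static int kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
	if (!xsk_ring_prod__needs_wakeup(tx))
		return 0;	/* driver is already processing the TX ring */

	/* sendto() with MSG_DONTWAIT is the conventional TX wakeup for AF_XDP. */
	if (sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0) >= 0 ||
	    errno == EAGAIN || errno == EBUSY || errno == ENETDOWN)
		return 0;
	return -errno;
}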
151 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
158 err = xskq_prod_reserve_desc(xs->rx, addr, len);
160 xs->rx_queue_full++;
186 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
192 if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
193 xs->rx_dropped++;
197 xsk_xdp = xsk_buff_alloc(xs->pool);
199 xs->rx_dropped++;
204 err = __xsk_rcv_zc(xs, xsk_xdp, len);
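The matches at lines 151-204 come from the receive fast path: __xsk_rcv_zc() reserves an RX descriptor for a frame already backed by the pool (bumping rx_queue_full when the RX ring is full), while __xsk_rcv() is the copy path that allocates a pool buffer and copies the incoming xdp_buff into it. A sketch of that copy path reconstructed from the matched lines; the copy step is simplified and exact bodies vary by kernel version:

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;		/* frame larger than a chunk */
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);	/* copy target from the fill ring */
	if (!xsk_xdp) {
		xs->rx_dropped++;		/* pool/fill ring ran dry */
		return -ENOSPC;
	}

	memcpy(xsk_xdp->data, xdp->data, len);	/* simplified: the kernel uses a
						 * metadata-aware copy helper */
	err = __xsk_rcv_zc(xs, xsk_xdp, len);	/* then queue it like a ZC frame */
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);		/* caller asked us to recycle @xdp */
	return 0;
}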
214 static bool xsk_tx_writeable(struct xdp_sock *xs)
216 if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
222 static bool xsk_is_bound(struct xdp_sock *xs)
224 if (READ_ONCE(xs->state) == XSK_BOUND) {
232 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
237 if (!xsk_is_bound(xs))
240 if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
246 __xsk_rcv_zc(xs, xdp, len) :
247 __xsk_rcv(xs, xdp, len, explicit_free);
250 static void xsk_flush(struct xdp_sock *xs)
252 xskq_prod_submit(xs->rx);
253 __xskq_cons_release(xs->pool->fq);
254 sock_def_readable(&xs->sk);
257 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
261 spin_lock_bh(&xs->rx_lock);
262 err = xsk_rcv(xs, xdp, false);
263 xsk_flush(xs);
264 spin_unlock_bh(&xs->rx_lock);
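Lines 250-264 show the flush and generic-receive wrappers almost in full: xsk_flush() publishes new RX descriptors, releases consumed fill-ring entries and wakes readers, and xsk_generic_rcv() runs one receive plus an immediate flush under rx_lock. Reassembled here for context (only the return statement is inferred):

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);		/* make new RX descriptors visible */
	__xskq_cons_release(xs->pool->fq);	/* hand back consumed fill entries */
	sock_def_readable(&xs->sk);		/* wake poll()/recvmsg() waiters */
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);	/* false: do not free @xdp here */
	xsk_flush(xs);			/* generic path flushes per packet */
	spin_unlock_bh(&xs->rx_lock);
	return err;
}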
268 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
273 err = xsk_rcv(xs, xdp, true);
277 if (!xs->flush_node.prev)
278 list_add(&xs->flush_node, flush_list);
286 struct xdp_sock *xs, *tmp;
288 list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
289 xsk_flush(xs);
290 __list_del_clearprev(&xs->flush_node);
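The matches at lines 268-290 batch redirected frames instead of flushing per packet: __xsk_map_redirect() queues the socket on a per-CPU flush list (the !flush_node.prev test keeps it from being added twice), and the flush walker submits each queued socket once at the end of the NAPI cycle. A sketch reconstructed from the fragments; the per-CPU list name is an assumption:

/* Assumed name for the per-CPU flush list defined in the same file. */
static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	/* Not queued yet? Add it so the flush pass submits it exactly once. */
	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);
	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		/* Clears ->prev so the socket can be re-queued next cycle. */
		__list_del_clearprev(&xs->flush_node);
	}
}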
302 struct xdp_sock *xs;
305 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
306 __xskq_cons_release(xs->tx);
307 if (xsk_tx_writeable(xs))
308 xs->sk.sk_write_space(&xs->sk);
316 struct xdp_sock *xs;
319 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
320 if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
321 xs->tx->queue_empty_descs++;
333 xskq_cons_release(xs->tx);
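The matches at lines 302-333 walk the pool's xsk_tx_list: one helper releases consumed TX descriptors and wakes writers, the other peeks the next TX descriptor across all sockets sharing the pool (counting queue_empty_descs when a socket has nothing queued). These back the driver-facing xsk_tx_peek_desc()/xsk_tx_release() API; a sketch of how a zero-copy driver might consume it (the my_* ring helpers are hypothetical):

/* Hypothetical driver TX poll: move up to @budget descriptors from the pool
 * to the hardware ring, then release them back to the sockets.
 */
static int my_drv_xsk_xmit(struct my_tx_ring *ring, struct xsk_buff_pool *pool,
			   int budget)
{
	struct xdp_desc desc;
	int sent = 0;

	while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
		my_post_tx_descriptor(ring, dma, desc.len);	/* hypothetical */
		sent++;
	}

	if (sent) {
		my_ring_doorbell(ring);		/* hypothetical HW kick */
		xsk_tx_release(pool);		/* lines 302-308: advance consumers,
						 * wake writable sockets */
	}
	return sent;
}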
344 static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
346 struct net_device *dev = xs->dev;
350 err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
356 static int xsk_zc_xmit(struct xdp_sock *xs)
358 return xsk_wakeup(xs, XDP_WAKEUP_TX);
364 struct xdp_sock *xs = xdp_sk(skb->sk);
367 spin_lock_irqsave(&xs->pool->cq_lock, flags);
368 xskq_prod_submit_addr(xs->pool->cq, addr);
369 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
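Lines 344-369 cover the wakeup plumbing and the copy-mode completion path: xsk_wakeup() invokes the driver's ndo_xsk_wakeup() for the bound queue, and xsk_destruct_skb() runs when a copy-mode skb is freed, publishing the UMEM address to the completion ring under cq_lock. A sketch of the destructor; stashing the address in destructor_arg matches kernels of this vintage but is version-dependent:

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	/* The slot was reserved before transmit, so submitting cannot fail. */
	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	sock_wfree(skb);	/* drop the send-buffer charge taken at alloc */
}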
376 struct xdp_sock *xs = xdp_sk(sk);
385 mutex_lock(&xs->mutex);
387 if (xs->queue_id >= xs->dev->real_num_tx_queues)
390 hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
391 tr = xs->dev->needed_tailroom;
393 while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
412 buffer = xsk_buff_raw_get_data(xs->pool, addr);
419 spin_lock_irqsave(&xs->pool->cq_lock, flags);
420 if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
421 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
425 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
427 skb->dev = xs->dev;
433 err = __dev_direct_xmit(skb, xs->queue_id);
437 spin_lock_irqsave(&xs->pool->cq_lock, flags);
438 xskq_prod_cancel(xs->pool->cq);
439 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
446 xskq_cons_release(xs->tx);
457 xs->tx->queue_empty_descs++;
461 if (xsk_tx_writeable(xs))
464 mutex_unlock(&xs->mutex);
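The matches at lines 376-464 come from xsk_generic_xmit(), the copy-mode transmit loop: each TX descriptor is copied into a freshly allocated skb, a completion-ring slot is reserved under cq_lock before the frame can possibly complete (this is the TX back-pressure), the skb is pushed with __dev_direct_xmit() on the bound queue, and the reservation is cancelled if the queue is busy. A condensed sketch; the batch limit, sent_frame bookkeeping and NET_XMIT_DROP handling are omitted and details vary by version:

static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct sk_buff *skb;
	unsigned long flags;
	struct xdp_desc desc;
	int err = 0;
	u32 hr, tr;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
	tr = xs->dev->needed_tailroom;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		char *buffer;
		u32 len = desc.len;

		skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_reserve(skb, hr);
		skb_put(skb, len);
		buffer = xsk_buff_raw_get_data(xs->pool, desc.addr);
		err = skb_store_bits(skb, 0, buffer, len);

		/* Back-pressure: reserve the completion slot before the frame
		 * can complete, so a completion is never lost.
		 */
		spin_lock_irqsave(&xs->pool->cq_lock, flags);
		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			kfree_skb(skb);
			goto out;
		}
		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

		skb->dev = xs->dev;
		skb->destructor = xsk_destruct_skb;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Undo the reservation; userspace will retry the send. */
			skb->destructor = sock_wfree;
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);	/* descriptor fully handed off */
	}

	xs->tx->queue_empty_descs++;		/* peek failed: TX ring was empty */

out:
	if (xsk_tx_writeable(xs))
		sk->sk_write_space(sk);
	mutex_unlock(&xs->mutex);
	return err;
}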
470 struct xdp_sock *xs = xdp_sk(sk);
472 if (unlikely(!(xs->dev->flags & IFF_UP)))
474 if (unlikely(!xs->tx))
477 return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
484 struct xdp_sock *xs = xdp_sk(sk);
486 if (unlikely(!xsk_is_bound(xs)))
499 struct xdp_sock *xs = xdp_sk(sk);
504 if (unlikely(!xsk_is_bound(xs)))
507 pool = xs->pool;
510 if (xs->zc)
511 xsk_wakeup(xs, pool->cached_need_wakeup);
517 if (xs->rx && !xskq_prod_is_empty(xs->rx))
519 if (xs->tx && xsk_tx_writeable(xs))
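Lines 499-519 implement poll(): after an optional driver kick in need_wakeup mode, the socket reports readable when the RX ring has entries and writable while the TX ring is less than half full (xsk_tx_writeable). The matching userspace pattern is an ordinary poll() on the socket fd:

#include <poll.h>

/* Block until the AF_XDP socket is readable (RX ring non-empty) and/or
 * writable (TX ring less than half full). Returns revents, 0 on timeout,
 * or a negative value on error.
 */
static int wait_for_xsk(int xsk_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd	= xsk_fd,
		.events	= POLLIN | POLLOUT,
	};
	int n = poll(&pfd, 1, timeout_ms);

	return n > 0 ? pfd.revents : n;
}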
543 static void xsk_unbind_dev(struct xdp_sock *xs)
545 struct net_device *dev = xs->dev;
547 if (xs->state != XSK_BOUND)
549 WRITE_ONCE(xs->state, XSK_UNBOUND);
552 xp_del_xsk(xs->pool, xs);
553 xs->dev = NULL;
558 static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
566 spin_lock_bh(&xs->map_list_lock);
567 node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
574 spin_unlock_bh(&xs->map_list_lock);
578 static void xsk_delete_from_maps(struct xdp_sock *xs)
598 while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
599 xsk_map_try_sock_delete(map, xs, map_entry);
607 struct xdp_sock *xs = xdp_sk(sk);
623 xsk_delete_from_maps(xs);
624 mutex_lock(&xs->mutex);
625 xsk_unbind_dev(xs);
626 mutex_unlock(&xs->mutex);
628 xskq_destroy(xs->rx);
629 xskq_destroy(xs->tx);
630 xskq_destroy(xs->fq_tmp);
631 xskq_destroy(xs->cq_tmp);
659 static bool xsk_validate_queues(struct xdp_sock *xs)
661 return xs->fq_tmp && xs->cq_tmp;
668 struct xdp_sock *xs = xdp_sk(sk);
689 mutex_lock(&xs->mutex);
690 if (xs->state != XSK_READY) {
701 if (!xs->rx && !xs->tx) {
719 if (xs->umem) {
742 xs->pool = xp_create_and_assign_umem(xs,
744 if (!xs->pool) {
750 err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
753 xp_destroy(xs->pool);
754 xs->pool = NULL;
760 if (xs->fq_tmp || xs->cq_tmp) {
768 xs->pool = umem_xs->pool;
772 WRITE_ONCE(xs->umem, umem_xs->umem);
774 } else if (!xs->umem || !xsk_validate_queues(xs)) {
779 xs->pool = xp_create_and_assign_umem(xs, xs->umem);
780 if (!xs->pool) {
785 err = xp_assign_dev(xs->pool, dev, qid, flags);
787 xp_destroy(xs->pool);
788 xs->pool = NULL;
794 xs->fq_tmp = NULL;
795 xs->cq_tmp = NULL;
797 xs->dev = dev;
798 xs->zc = xs->umem->zc;
799 xs->queue_id = qid;
800 xp_add_xsk(xs->pool, xs);
810 WRITE_ONCE(xs->state, XSK_BOUND);
813 mutex_unlock(&xs->mutex);
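The matches at lines 668-813 come from xsk_bind(): under xs->mutex the socket is tied to a device/queue pair, a buffer pool is created (or borrowed from another socket via XDP_SHARED_UMEM), and the state moves to XSK_BOUND. The userspace side is a plain bind() with struct sockaddr_xdp; a minimal sketch (interface name and queue id are placeholders):

#include <linux/if_xdp.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

/* Bind an AF_XDP socket to one RX/TX queue of @ifname. Assumes the rings and
 * UMEM were already configured via setsockopt(), as xsk_bind() requires.
 */
static int bind_xsk(int fd, const char *ifname, unsigned int queue_id)
{
	struct sockaddr_xdp sxdp;

	memset(&sxdp, 0, sizeof(sxdp));
	sxdp.sxdp_family   = AF_XDP;
	sxdp.sxdp_ifindex  = if_nametoindex(ifname);
	sxdp.sxdp_queue_id = queue_id;
	/* XDP_USE_NEED_WAKEUP enables the wakeup protocol; XDP_COPY or
	 * XDP_ZEROCOPY force a mode, and XDP_SHARED_UMEM (plus
	 * sxdp_shared_umem_fd) selects the shared-pool branch of xsk_bind().
	 */
	sxdp.sxdp_flags = XDP_USE_NEED_WAKEUP;

	return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}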
829 struct xdp_sock *xs = xdp_sk(sk);
847 mutex_lock(&xs->mutex);
848 if (xs->state != XSK_READY) {
849 mutex_unlock(&xs->mutex);
852 q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
856 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
857 mutex_unlock(&xs->mutex);
874 mutex_lock(&xs->mutex);
875 if (xs->state != XSK_READY || xs->umem) {
876 mutex_unlock(&xs->mutex);
882 mutex_unlock(&xs->mutex);
888 WRITE_ONCE(xs->umem, umem);
889 mutex_unlock(&xs->mutex);
901 mutex_lock(&xs->mutex);
902 if (xs->state != XSK_READY) {
903 mutex_unlock(&xs->mutex);
907 q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
908 &xs->cq_tmp;
910 mutex_unlock(&xs->mutex);
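Lines 829-910 are from xsk_setsockopt(): XDP_RX_RING/XDP_TX_RING size the descriptor rings (marking the TX ring with XDP_RING_NEED_WAKEUP right away), XDP_UMEM_REG attaches the UMEM, and XDP_UMEM_FILL_RING/XDP_UMEM_COMPLETION_RING land in fq_tmp/cq_tmp until bind time. The corresponding userspace setup, with illustrative ring sizes:

#include <linux/if_xdp.h>
#include <stdint.h>
#include <sys/socket.h>

#ifndef SOL_XDP
#define SOL_XDP 283	/* older libcs may not define it */
#endif

/* Register a page-aligned UMEM area and size all four rings before bind(). */
static int setup_xsk_queues(int fd, void *umem, unsigned long long umem_len)
{
	struct xdp_umem_reg mr = {
		.addr	    = (uintptr_t)umem,
		.len	    = umem_len,
		.chunk_size = 2048,	/* one frame per chunk */
		.headroom   = 0,
	};
	int rx = 2048, tx = 2048, fill = 2048, comp = 2048;

	if (setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) ||
	    setsockopt(fd, SOL_XDP, XDP_RX_RING, &rx, sizeof(rx)) ||
	    setsockopt(fd, SOL_XDP, XDP_TX_RING, &tx, sizeof(tx)) ||
	    setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &fill, sizeof(fill)) ||
	    setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &comp, sizeof(comp)))
		return -1;
	return 0;
}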
944 struct xdp_sock *xs = xdp_sk(sk);
971 mutex_lock(&xs->mutex);
972 stats.rx_dropped = xs->rx_dropped;
974 stats.rx_ring_full = xs->rx_queue_full;
976 xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
977 stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
979 stats.rx_dropped += xs->rx_queue_full;
981 stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
982 stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
983 mutex_unlock(&xs->mutex);
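Lines 944-983 serve the XDP_STATISTICS getsockopt: rx_dropped and rx_queue_full (reported as rx_ring_full) come straight from the socket counters, the *_empty_descs values from the fill and TX queues, and for callers passing the older, shorter struct the rx_queue_full count is folded into rx_dropped instead. Reading them from userspace:

#include <linux/if_xdp.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

static void dump_xsk_stats(int fd)
{
	struct xdp_statistics stats;
	socklen_t len = sizeof(stats);

	if (getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &len))
		return;

	printf("rx_dropped               %llu\n", (unsigned long long)stats.rx_dropped);
	printf("rx_invalid_descs         %llu\n", (unsigned long long)stats.rx_invalid_descs);
	printf("tx_invalid_descs         %llu\n", (unsigned long long)stats.tx_invalid_descs);
	if (len >= sizeof(stats)) {	/* extended fields need a recent kernel */
		printf("rx_ring_full             %llu\n", (unsigned long long)stats.rx_ring_full);
		printf("rx_fill_ring_empty_descs %llu\n", (unsigned long long)stats.rx_fill_ring_empty_descs);
		printf("tx_ring_empty_descs      %llu\n", (unsigned long long)stats.tx_ring_empty_descs);
	}
}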
1051 mutex_lock(&xs->mutex);
1052 if (xs->zc)
1054 mutex_unlock(&xs->mutex);
1076 struct xdp_sock *xs = xdp_sk(sock->sk);
1081 if (READ_ONCE(xs->state) != XSK_READY)
1085 q = READ_ONCE(xs->rx);
1087 q = READ_ONCE(xs->tx);
1092 q = READ_ONCE(xs->fq_tmp);
1094 q = READ_ONCE(xs->cq_tmp);
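The matches at lines 1076-1094 come from xsk_mmap(): the mmap page offset selects which ring to expose (RX, TX, fill or completion), and that ring must have been created first. Userspace learns the field offsets via XDP_MMAP_OFFSETS and then maps each ring; a sketch for the RX ring:

#include <linux/if_xdp.h>
#include <sys/mman.h>
#include <sys/socket.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

/* Map the RX descriptor ring; XDP_RX_RING must already be set to @nentries.
 * The mmap page offset (XDP_PGOFF_RX_RING here) selects the ring.
 */
static void *map_rx_ring(int fd, unsigned int nentries,
			 struct xdp_ring_offset *off_out)
{
	struct xdp_mmap_offsets off;
	socklen_t optlen = sizeof(off);
	size_t len;

	if (getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen))
		return MAP_FAILED;

	*off_out = off.rx;	/* producer/consumer/desc/flags field offsets */
	len = off.rx.desc + nentries * sizeof(struct xdp_desc);
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_POPULATE, fd, XDP_PGOFF_RX_RING);
}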
1122 struct xdp_sock *xs = xdp_sk(sk);
1124 mutex_lock(&xs->mutex);
1125 if (xs->dev == dev) {
1130 xsk_unbind_dev(xs);
1133 xp_clear_dev(xs->pool);
1135 mutex_unlock(&xs->mutex);
1172 struct xdp_sock *xs = xdp_sk(sk);
1177 if (!xp_put_pool(xs->pool))
1178 xdp_put_umem(xs->umem, !xs->pool);
1186 struct xdp_sock *xs;
1214 xs = xdp_sk(sk);
1215 xs->state = XSK_READY;
1216 mutex_init(&xs->mutex);
1217 spin_lock_init(&xs->rx_lock);
1219 INIT_LIST_HEAD(&xs->map_list);
1220 spin_lock_init(&xs->map_list_lock);
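Lines 1186-1220 are from xsk_create(): a new socket starts in XSK_READY with its mutex, rx_lock, map_list and map_list_lock initialized, ready for the setsockopt/mmap/bind sequence sketched above. From userspace this is simply:

#include <sys/socket.h>

#ifndef AF_XDP
#define AF_XDP 44	/* older libc headers may lack it */
#endif

static int create_xsk(void)
{
	/* The socket becomes usable only after ring setup and bind(). */
	return socket(AF_XDP, SOCK_RAW, 0);
}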