Lines matching refs:xs (struct xdp_sock references in net/xdp/xsk.c)
51 struct xdp_sock *xs;
57 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
58 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
78 struct xdp_sock *xs;
84 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
85 xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
139 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
146 err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
148 xs->rx_queue_full++;
156 static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
168 err = __xsk_rcv_zc(xs, xskb, len, contd);
179 err = __xsk_rcv_zc(xs, pos, len, contd);
227 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
229 u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
243 xsk_xdp = xsk_buff_alloc(xs->pool);
245 xs->rx_dropped++;
250 err = __xsk_rcv_zc(xs, xskb, len, 0);
261 if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
262 xs->rx_dropped++;
265 if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
266 xs->rx_queue_full++;
281 xsk_xdp = xsk_buff_alloc(xs->pool);
288 __xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
295 static bool xsk_tx_writeable(struct xdp_sock *xs)
297 if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
303 static bool xsk_is_bound(struct xdp_sock *xs)
305 if (READ_ONCE(xs->state) == XSK_BOUND) {
313 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
315 if (!xsk_is_bound(xs))
318 if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
321 if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
322 xs->rx_dropped++;
326 sk_mark_napi_id_once_xdp(&xs->sk, xdp);
330 static void xsk_flush(struct xdp_sock *xs)
332 xskq_prod_submit(xs->rx);
333 __xskq_cons_release(xs->pool->fq);
334 sock_def_readable(&xs->sk);
337 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
342 spin_lock_bh(&xs->rx_lock);
343 err = xsk_rcv_check(xs, xdp, len);
345 err = __xsk_rcv(xs, xdp, len);
346 xsk_flush(xs);
348 spin_unlock_bh(&xs->rx_lock);
352 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
357 err = xsk_rcv_check(xs, xdp, len);
363 return xsk_rcv_zc(xs, xdp, len);
366 err = __xsk_rcv(xs, xdp, len);
372 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
377 err = xsk_rcv(xs, xdp);
381 if (!xs->flush_node.prev)
382 list_add(&xs->flush_node, flush_list);
390 struct xdp_sock *xs, *tmp;
392 list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
393 xsk_flush(xs);
394 __list_del_clearprev(&xs->flush_node);
406 struct xdp_sock *xs;
409 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
410 __xskq_cons_release(xs->tx);
411 if (xsk_tx_writeable(xs))
412 xs->sk.sk_write_space(&xs->sk);
420 struct xdp_sock *xs;
423 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
424 if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
425 if (xskq_has_descs(xs->tx))
426 xskq_cons_release(xs->tx);
438 xskq_cons_release(xs->tx);
463 struct xdp_sock *xs;
472 xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
473 if (!xs) {
478 nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);
490 nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
492 xs->tx->queue_empty_descs++;
496 __xskq_cons_release(xs->tx);
498 xs->sk.sk_write_space(&xs->sk);
506 static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
508 struct net_device *dev = xs->dev;
510 return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
513 static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
518 spin_lock_irqsave(&xs->pool->cq_lock, flags);
519 ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
520 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
525 static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
529 spin_lock_irqsave(&xs->pool->cq_lock, flags);
530 xskq_prod_submit_n(xs->pool->cq, n);
531 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
534 static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
538 spin_lock_irqsave(&xs->pool->cq_lock, flags);
539 xskq_prod_cancel_n(xs->pool->cq, n);
540 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
563 struct xdp_sock *xs = xdp_sk(skb->sk);
566 xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
569 xs->skb = NULL;
578 static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
581 struct xsk_buff_pool *pool = xs->pool;
583 struct sk_buff *skb = xs->skb;
590 hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
592 skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
626 refcount_add(ts, &xs->sk.sk_wmem_alloc);
631 static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
634 struct net_device *dev = xs->dev;
635 struct sk_buff *skb = xs->skb;
639 skb = xsk_build_skb_zerocopy(xs, desc);
648 buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
654 skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
676 page = alloc_page(xs->sk.sk_allocation);
687 refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
692 skb->priority = xs->sk.sk_priority;
693 skb->mark = READ_ONCE(xs->sk.sk_mark);
702 xsk_set_destructor_arg(xs->skb);
703 xsk_drop_skb(xs->skb);
704 xskq_cons_release(xs->tx);
707 xsk_cq_cancel_locked(xs, 1);
715 struct xdp_sock *xs = xdp_sk(sk);
722 mutex_lock(&xs->mutex);
725 if (unlikely(!xsk_is_bound(xs))) {
730 if (xs->queue_id >= xs->dev->real_num_tx_queues)
733 while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
744 if (xsk_cq_reserve_addr_locked(xs, desc.addr))
747 skb = xsk_build_skb(xs, &desc);
756 xskq_cons_release(xs->tx);
759 xs->skb = skb;
763 err = __dev_direct_xmit(skb, xs->queue_id);
766 xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
776 xs->skb = NULL;
781 xs->skb = NULL;
784 if (xskq_has_descs(xs->tx)) {
785 if (xs->skb)
786 xsk_drop_skb(xs->skb);
787 xskq_cons_release(xs->tx);
792 if (xsk_tx_writeable(xs))
795 mutex_unlock(&xs->mutex);
823 static int xsk_check_common(struct xdp_sock *xs)
825 if (unlikely(!xsk_is_bound(xs)))
827 if (unlikely(!(xs->dev->flags & IFF_UP)))
837 struct xdp_sock *xs = xdp_sk(sk);
841 err = xsk_check_common(xs);
846 if (unlikely(!xs->tx))
850 if (xs->zc)
851 __sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
855 if (xs->zc && xsk_no_wakeup(sk))
858 pool = xs->pool;
860 if (xs->zc)
861 return xsk_wakeup(xs, XDP_WAKEUP_TX);
882 struct xdp_sock *xs = xdp_sk(sk);
885 err = xsk_check_common(xs);
888 if (unlikely(!xs->rx))
899 if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
900 return xsk_wakeup(xs, XDP_WAKEUP_RX);
920 struct xdp_sock *xs = xdp_sk(sk);
926 if (xsk_check_common(xs))
929 pool = xs->pool;
932 if (xs->zc)
933 xsk_wakeup(xs, pool->cached_need_wakeup);
934 else if (xs->tx)
939 if (xs->rx && !xskq_prod_is_empty(xs->rx))
941 if (xs->tx && xsk_tx_writeable(xs))
966 static void xsk_unbind_dev(struct xdp_sock *xs)
968 struct net_device *dev = xs->dev;
970 if (xs->state != XSK_BOUND)
972 WRITE_ONCE(xs->state, XSK_UNBOUND);
975 xp_del_xsk(xs->pool, xs);
980 static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
988 spin_lock_bh(&xs->map_list_lock);
989 node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
996 spin_unlock_bh(&xs->map_list_lock);
1000 static void xsk_delete_from_maps(struct xdp_sock *xs)
1020 while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
1021 xsk_map_try_sock_delete(map, xs, map_entry);
1029 struct xdp_sock *xs = xdp_sk(sk);
1037 if (xs->skb)
1038 xsk_drop_skb(xs->skb);
1046 xsk_delete_from_maps(xs);
1047 mutex_lock(&xs->mutex);
1048 xsk_unbind_dev(xs);
1049 mutex_unlock(&xs->mutex);
1051 xskq_destroy(xs->rx);
1052 xskq_destroy(xs->tx);
1053 xskq_destroy(xs->fq_tmp);
1054 xskq_destroy(xs->cq_tmp);
1081 static bool xsk_validate_queues(struct xdp_sock *xs)
1083 return xs->fq_tmp && xs->cq_tmp;
1090 struct xdp_sock *xs = xdp_sk(sk);
1111 mutex_lock(&xs->mutex);
1112 if (xs->state != XSK_READY) {
1123 if (!xs->rx && !xs->tx) {
1141 if (xs->umem) {
1164 xs->pool = xp_create_and_assign_umem(xs,
1166 if (!xs->pool) {
1172 err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
1175 xp_destroy(xs->pool);
1176 xs->pool = NULL;
1182 if (xs->fq_tmp || xs->cq_tmp) {
1190 xs->pool = umem_xs->pool;
1196 if (xs->tx && !xs->pool->tx_descs) {
1197 err = xp_alloc_tx_descs(xs->pool, xs);
1199 xp_put_pool(xs->pool);
1200 xs->pool = NULL;
1208 WRITE_ONCE(xs->umem, umem_xs->umem);
1210 } else if (!xs->umem || !xsk_validate_queues(xs)) {
1215 xs->pool = xp_create_and_assign_umem(xs, xs->umem);
1216 if (!xs->pool) {
1221 err = xp_assign_dev(xs->pool, dev, qid, flags);
1223 xp_destroy(xs->pool);
1224 xs->pool = NULL;
1230 xs->fq_tmp = NULL;
1231 xs->cq_tmp = NULL;
1233 xs->dev = dev;
1234 xs->zc = xs->umem->zc;
1235 xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
1236 xs->queue_id = qid;
1237 xp_add_xsk(xs->pool, xs);
1247 WRITE_ONCE(xs->state, XSK_BOUND);
1250 mutex_unlock(&xs->mutex);
1266 struct xdp_sock *xs = xdp_sk(sk);
1284 mutex_lock(&xs->mutex);
1285 if (xs->state != XSK_READY) {
1286 mutex_unlock(&xs->mutex);
1289 q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
1293 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
1294 mutex_unlock(&xs->mutex);
1311 mutex_lock(&xs->mutex);
1312 if (xs->state != XSK_READY || xs->umem) {
1313 mutex_unlock(&xs->mutex);
1319 mutex_unlock(&xs->mutex);
1325 WRITE_ONCE(xs->umem, umem);
1326 mutex_unlock(&xs->mutex);
1338 mutex_lock(&xs->mutex);
1339 if (xs->state != XSK_READY) {
1340 mutex_unlock(&xs->mutex);
1344 q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
1345 &xs->cq_tmp;
1347 mutex_unlock(&xs->mutex);
1381 struct xdp_sock *xs = xdp_sk(sk);
1408 mutex_lock(&xs->mutex);
1409 stats.rx_dropped = xs->rx_dropped;
1411 stats.rx_ring_full = xs->rx_queue_full;
1413 xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1414 stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
1416 stats.rx_dropped += xs->rx_queue_full;
1418 stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
1419 stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
1420 mutex_unlock(&xs->mutex);
1488 mutex_lock(&xs->mutex);
1489 if (xs->zc)
1491 mutex_unlock(&xs->mutex);
1513 struct xdp_sock *xs = xdp_sk(sock->sk);
1514 int state = READ_ONCE(xs->state);
1521 q = READ_ONCE(xs->rx);
1523 q = READ_ONCE(xs->tx);
1528 q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
1529 READ_ONCE(xs->pool->fq);
1531 q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
1532 READ_ONCE(xs->pool->cq);
1557 struct xdp_sock *xs = xdp_sk(sk);
1559 mutex_lock(&xs->mutex);
1560 if (xs->dev == dev) {
1565 xsk_unbind_dev(xs);
1568 xp_clear_dev(xs->pool);
1570 mutex_unlock(&xs->mutex);
1606 struct xdp_sock *xs = xdp_sk(sk);
1611 if (!xp_put_pool(xs->pool))
1612 xdp_put_umem(xs->umem, !xs->pool);
1618 struct xdp_sock *xs;
1645 xs = xdp_sk(sk);
1646 xs->state = XSK_READY;
1647 mutex_init(&xs->mutex);
1648 spin_lock_init(&xs->rx_lock);
1650 INIT_LIST_HEAD(&xs->map_list);
1651 spin_lock_init(&xs->map_list_lock);
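
Note: every `xs` above is a `struct xdp_sock *`. As a reading aid, the sketch below lists only the fields that the matched lines actually touch; it is an abridged, reordered approximation of the kernel's struct xdp_sock (see include/net/xdp_sock.h), not the authoritative definition.

/* Abridged sketch of struct xdp_sock, limited to the fields referenced
 * in the matches above; field order and completeness are not guaranteed.
 */
struct xdp_sock {
	struct sock sk;			/* must come first: xdp_sk() casts struct sock * to xdp_sock * */

	struct xsk_queue *rx;		/* RX descriptor ring */
	struct xsk_queue *tx;		/* TX descriptor ring */
	struct xsk_queue *fq_tmp;	/* fill queue, staged until bind */
	struct xsk_queue *cq_tmp;	/* completion queue, staged until bind */

	struct net_device *dev;
	struct xdp_umem *umem;
	struct xsk_buff_pool *pool;
	u16 queue_id;
	bool zc;			/* zero-copy mode */
	bool sg;			/* multi-buffer (scatter-gather) support */
	enum { XSK_READY = 0, XSK_BOUND, XSK_UNBOUND } state;

	struct list_head flush_node;	/* linkage for the per-CPU flush list */
	struct list_head tx_list;	/* linkage for pool->xsk_tx_list */
	struct list_head map_list;	/* XSKMAPs this socket has been inserted into */
	spinlock_t map_list_lock;	/* protects map_list */

	spinlock_t rx_lock;		/* serializes generic (copy-mode) receive */
	struct mutex mutex;		/* serializes control-path operations */

	u64 rx_dropped;			/* statistics reported via XDP_STATISTICS */
	u64 rx_queue_full;
	struct sk_buff *skb;		/* in-progress skb for copy-mode TX */
};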