Lines matching defs:xdp
26 #include <net/xdp.h>
143 u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
151 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
153 struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
186 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
203 xsk_copy_xdp(xsk_xdp, xdp, len);
210 xdp_return_buff(xdp);
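Lines 186-210 belong to the copy path taken when the frame does not already live in an XSK buffer pool: a socket-owned buffer is allocated, the packet is copied (xsk_copy_xdp, line 203), and the driver's buffer is released with xdp_return_buff (line 210). A hedged userspace model of that ownership transfer follows; fake_xsk_rcv_copy and its struct are hypothetical, and only the copy-then-return sequencing mirrors the matches.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical model of the copy path; not a kernel API. */
    struct fake_xdp_buff {
        uint8_t *data;
        uint32_t len;
    };

    static int fake_xsk_rcv_copy(struct fake_xdp_buff *xdp)
    {
        /* Allocate the socket-side buffer (kernel: an Rx ring slot). */
        uint8_t *xsk_copy = malloc(xdp->len);

        if (!xsk_copy)
            return -1;

        /* Line 203: copy the payload into the socket's buffer. */
        memcpy(xsk_copy, xdp->data, xdp->len);

        /* Line 210: the original buffer goes back to its owner. */
        free(xdp->data);
        xdp->data = NULL;

        /* Publishing xsk_copy on the Rx ring is out of scope here. */
        free(xsk_copy);
        return 0;
    }

    int main(void)
    {
        struct fake_xdp_buff xdp = { .data = malloc(64), .len = 64 };

        memset(xdp.data, 0xab, xdp.len);
        printf("rcv: %d\n", fake_xsk_rcv_copy(&xdp));
        return 0;
    }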
232 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
240 if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
243 len = xdp->data_end - xdp->data;
245 return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
246 __xsk_rcv_zc(xs, xdp, len) :
247 __xsk_rcv(xs, xdp, len, explicit_free);
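Read together, lines 240-247 are nearly the whole body of xsk_rcv: reject frames from a device or queue the socket is not bound to, compute the frame length, then dispatch on the rxq memory type. A reconstruction is sketched below; the error value returned between the matched lines is an assumption.

    static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
                       bool explicit_free)
    {
        u32 len;

        /* Line 240: only accept frames from the bound device/queue. */
        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
            return -EINVAL;        /* assumed error value */

        len = xdp->data_end - xdp->data;

        /* Lines 245-247: zero-copy if the frame already sits in an XSK
         * buffer pool, otherwise fall back to the copy path. */
        return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
               __xsk_rcv_zc(xs, xdp, len) :
               __xsk_rcv(xs, xdp, len, explicit_free);
    }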
257 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
262 err = xsk_rcv(xs, xdp, false);
268 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
273 err = xsk_rcv(xs, xdp, true);
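The two exported entry points differ only in the final argument to xsk_rcv. A plausible reading, sketched below with everything outside the matched calls stripped away: the generic (skb) path passes false because the core stack still owns and frees the buffer, while the XSKMAP redirect path passes true so the receive path disposes of the driver's buffer itself.

    /* Sketch only; locking, flushing, and error handling around the
     * matched calls are omitted. */
    int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
    {
        /* Line 262, generic XDP: buffer ownership stays with the stack. */
        return xsk_rcv(xs, xdp, false);
    }

    int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
    {
        /* Line 273, native XDP_REDIRECT into an XSKMAP: the socket
         * consumes (and, on the copy path, frees) the driver's buffer. */
        return xsk_rcv(xs, xdp, true);
    }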
551 /* Wait for driver to stop using the xdp socket. */
615 mutex_lock(&net->xdp.lock);
617 mutex_unlock(&net->xdp.lock);
1120 mutex_lock(&net->xdp.lock);
1121 sk_for_each(sk, &net->xdp.list) {
1137 mutex_unlock(&net->xdp.lock);
1222 mutex_lock(&net->xdp.lock);
1223 sk_add_node_rcu(sk, &net->xdp.list);
1224 mutex_unlock(&net->xdp.lock);
1245 mutex_init(&net->xdp.lock);
1246 INIT_HLIST_HEAD(&net->xdp.list);
1252 WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
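All of the net->xdp matches from line 615 onward implement one pattern: a per-network-namespace registry of AF_XDP sockets, guarded by a mutex, populated at socket creation (lines 1222-1224), walked for dumps (lines 1120-1137), and set up and sanity-checked by pernet init/exit hooks (lines 1245-1252). A hedged sketch of how those pieces plausibly fit together; the ops-struct wiring and the xsk_attach_to_net helper name are assumptions, while the hook bodies come from the matched lines.

    /* Pernet hooks; bodies taken from lines 1245-1252. */
    static int __net_init xsk_net_init(struct net *net)
    {
        mutex_init(&net->xdp.lock);
        INIT_HLIST_HEAD(&net->xdp.list);
        return 0;
    }

    static void __net_exit xsk_net_exit(struct net *net)
    {
        /* Line 1252: every socket must be gone before the netns dies. */
        WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
    }

    static struct pernet_operations xsk_net_ops = {
        .init = xsk_net_init,
        .exit = xsk_net_exit,
    };

    /* Hypothetical helper: socket creation registers under the mutex
     * (lines 1222-1224); readers such as the dump at lines 1120-1137
     * take the same lock before walking net->xdp.list. */
    static void xsk_attach_to_net(struct net *net, struct sock *sk)
    {
        mutex_lock(&net->xdp.lock);
        sk_add_node_rcu(sk, &net->xdp.list);
        mutex_unlock(&net->xdp.lock);
    }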