/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

/* Internal flag stored in xdp_umem.flags when a socket binds to the umem
 * with XDP_USE_SG (multi-buffer support).
 */
#define XDP_UMEM_SG_FLAG (1 << 1)

struct net_device;
struct xsk_queue;
struct xdp_buff;

struct xdp_umem {
	void *addrs;			/* vmap()ed view of the user pages */
	u64 size;			/* size of the umem in bytes */
	u32 headroom;			/* configured per-chunk headroom */
	u32 chunk_size;			/* size of each chunk (frame) */
	u32 chunks;			/* number of chunks in the umem */
	u32 npgs;			/* number of pinned pages */
	struct user_struct *user;	/* owner, for locked-memory accounting */
	refcount_t users;		/* sockets/pools referencing this umem */
	u8 flags;			/* XDP_UMEM_* flags */
	bool zc;			/* true if used in zero-copy mode */
	struct page **pgs;		/* the pinned user pages */
	int id;				/* IDA-allocated umem id */
	struct list_head xsk_dma_list;	/* per-device DMA mappings */
	struct work_struct work;	/* deferred destruction work */
};

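/* Example (sketch, not part of this header): a umem is created from
 * userspace by registering a memory area with the XDP_UMEM_REG setsockopt
 * from <linux/if_xdp.h>; the fields above are derived from that request.
 * "buffer", NUM_FRAMES and FRAME_SIZE are placeholders:
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)buffer,	// page-aligned area
 *		.len = NUM_FRAMES * FRAME_SIZE,		// -> @size
 *		.chunk_size = FRAME_SIZE,		// -> @chunk_size
 *		.headroom = 0,				// -> @headroom
 *	};
 *
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 */
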
struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	atomic_t count;
	struct xdp_sock __rcu *xsk_map[];
};

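/* Example (sketch, not part of this header): userspace stores XSK socket
 * fds in a BPF_MAP_TYPE_XSKMAP, and an XDP program selects a socket with
 * bpf_redirect_map(). A minimal libbpf-style program (map and function
 * names are placeholders):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_XSKMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} xsks_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xsk_redir(struct xdp_md *ctx)
 *	{
 *		// fall back to XDP_PASS if no socket is bound to this queue
 *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index,
 *					XDP_PASS);
 *	}
 */
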
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx ____cacheline_aligned_in_smp;
	struct net_device *dev;		/* bound device */
	struct xdp_umem *umem;
	struct list_head flush_node;	/* entry on the per-cpu flush list */
	struct xsk_buff_pool *pool;
	u16 queue_id;			/* bound queue id */
	bool zc;			/* zero-copy mode enabled */
	bool sg;			/* bound with XDP_USE_SG (multi-buffer) */
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;

	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head tx_list;	/* entry on the pool's Tx socket list */
	/* Protects generic receive. */
	spinlock_t rx_lock;

	/* Statistics */
	u64 rx_dropped;
	u64 rx_queue_full;

	/* When __xsk_generic_xmit() must return before it sees the EOP
	 * descriptor for the current packet, the partially built skb is
	 * saved here so that packet building can resume in the next call
	 * of __xsk_generic_xmit().
	 */
	struct sk_buff *skb;

	struct list_head map_list;	/* XSKMAPs this socket is inserted in */
	/* Protects map_list */
	spinlock_t map_list_lock;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};

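/* Example (sketch, not part of this header): @state above tracks the
 * userspace setup flow. A socket starts out XSK_READY, becomes XSK_BOUND
 * on a successful bind() and XSK_UNBOUND if the device unregisters.
 * Roughly (error handling omitted, ring sizes are placeholders):
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *
 *	// rings must be configured before bind()
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &nr_descs, sizeof(nr_descs));
 *	setsockopt(fd, SOL_XDP, XDP_TX_RING, &nr_descs, sizeof(nr_descs));
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = ifindex,
 *		.sxdp_queue_id = queue_id,	// -> @queue_id
 *		.sxdp_flags = XDP_COPY,		// XDP_ZEROCOPY sets @zc
 *	};
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */
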
#ifdef CONFIG_XDP_SOCKETS

/* Receive a frame on the generic (copy-mode) XDP path. */
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
/* Receive on the native XDP path and add the socket to the per-cpu
 * flush list.
 */
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
/* Flush all sockets on the per-cpu flush list; called from xdp_do_flush(). */
void __xsk_map_flush(void);
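
/* Sketch of how these are reached (driver names are hypothetical): an XDP
 * program's bpf_redirect_map() into an XSKMAP ends up in
 * __xsk_map_redirect() (native mode) or xsk_generic_rcv() (generic mode),
 * and the driver flushes once at the end of its NAPI poll:
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = mydrv_clean_rx(napi, budget);
 *
 *		xdp_do_flush();	// reaches __xsk_map_flush()
 *		return done;
 *	}
 */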

#else

static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */