/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

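/* Example (illustrative sketch only, not part of this interface): a driver's
 * zero-copy Tx path typically pairs the calls above roughly as follows; the
 * mydrv_post_tx() helper, 'priv' and 'budget' are hypothetical:
 *
 *	while (budget--) {
 *		struct xdp_desc desc;
 *		dma_addr_t dma;
 *
 *		if (!xsk_tx_peek_desc(pool, &desc))
 *			break;
 *		dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *		mydrv_post_tx(priv, dma, desc.len);
 *	}
 *	xsk_tx_release(pool);
 *
 * and, once the HW reports the sends as done, e.g. from the Tx IRQ handler:
 *
 *	xsk_tx_completed(pool, nb_completed);
 *
 * Drivers commonly guard the need_wakeup set/clear helpers with a
 * xsk_uses_need_wakeup() check, since the flag is an optional optimization.
 */
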
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

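/* Worked example: with the default 4096-byte chunk size and no extra pool
 * headroom, xsk_pool_get_headroom() returns XDP_PACKET_HEADROOM (256), so
 * the usable Rx frame size is 4096 - 256 = 3840 bytes.
 */
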
44static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
45					 struct xdp_rxq_info *rxq)
46{
47	xp_set_rxq_info(pool, rxq);
48}
49
50static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
51{
52#ifdef CONFIG_NET_RX_BUSY_POLL
53	return pool->heads[0].xdp.rxq->napi_id;
54#else
55	return 0;
56#endif
57}
58
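/* Note (editorial): the napi_id read here is the one the driver registered
 * for its Rx queue via xdp_rxq_info_reg() and propagated to the pool's
 * buffers with xsk_pool_set_rxq_info(); it reads as 0 when busy polling is
 * compiled out.
 */
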
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}

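/* Example (sketch): drivers usually map the pool once, when the XSK pool is
 * enabled for a queue (e.g. from the ndo_bpf XDP_SETUP_XSK_POOL handler),
 * and unmap it with the same attributes on teardown. The attributes below
 * mirror what several Intel drivers pass; treat them as an illustration
 * rather than a requirement:
 *
 *	err = xsk_pool_dma_map(pool, dev,
 *			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
 *	...
 *	xsk_pool_dma_unmap(pool,
 *			   DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
 */
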
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}

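/* Example (sketch, hypothetical ring helpers): a batched Rx refill loop
 * might look like this; a return value smaller than 'free_slots' (possibly
 * 0) simply means the fill ring could not supply more buffers right now:
 *
 *	nb = xsk_buff_alloc_batch(pool, xdp_arr, free_slots);
 *	for (i = 0; i < nb; i++) {
 *		dma = xsk_buff_xdp_get_dma(xdp_arr[i]);
 *		mydrv_rx_desc_set_addr(ring, i, dma);
 *	}
 */
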
static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
		list_del(&pos->xskb_list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);

	list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list);
}

static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, xskb_list_node);
	if (frag) {
		list_del(&frag->xskb_list_node);
		ret = &frag->xdp;
	}

	return ret;
}

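/* Note (editorial sketch): for a multi-buffer frame, each non-first buffer
 * is queued with xsk_buff_add_frag() as it is received, and the fragments
 * are later popped in order with xsk_buff_get_frag(first). Per
 * xsk_buff_free() above, freeing the first buffer of a frag-carrying
 * xdp_buff also releases every fragment still on the pool's xskb_list.
 */
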
static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
	struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);

	list_del(&xskb->xskb_list_node);
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       xskb_list_node);
	return &frag->xdp;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
	xdp->flags = 0;
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu(xskb);
}

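/* Example (sketch): on Rx completion, drivers typically set the frame size
 * reported by hardware and sync the buffer for the CPU before handing it to
 * the XDP program; buffers the program drops go back via xsk_buff_free().
 * 'rx_desc_len' and 'prog' stand in for driver state:
 *
 *	xsk_buff_set_size(xdp, rx_desc_len);
 *	xsk_buff_dma_sync_for_cpu(xdp, pool);
 *	act = bpf_prog_run_xdp(prog, xdp);
 *	if (act == XDP_DROP)
 *		xsk_buff_free(xdp);
 */
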
static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}

#else

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
}

static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */