/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct prev_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
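/* A sketch of typical init/teardown for a crypt_queue, modeled on how
 * device.c appears to use these APIs (wg->encrypt_queue and
 * MAX_QUEUED_PACKETS are assumptions taken from the other wireguard
 * sources, not defined in this header):
 *
 *	if (wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
 *				 MAX_QUEUED_PACKETS) < 0)
 *		goto err;
 *	...
 *	wg_packet_queue_free(&wg->encrypt_queue, true);
 */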

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};
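/* State lifecycle: a packet enters the crypto queues as UNCRYPTED, the
 * encrypt/decrypt worker flips it to CRYPTED on success or DEAD on failure
 * (e.g. a no-longer-valid keypair), and the per-peer consumer only acts on
 * a packet once it has left UNCRYPTED, freeing DEAD packets and passing
 * CRYPTED ones along.
 */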

struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
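/* PACKET_CB() overlays struct packet_cb onto the skb->cb scratch area (48
 * bytes in struct sk_buff), so the struct must stay within that size. A
 * minimal usage sketch, with keypair and nonce as illustrative locals:
 *
 *	PACKET_CB(skb)->keypair = keypair;
 *	PACKET_CB(skb)->nonce = nonce;
 *	atomic_set(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
 */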

static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = ip_tunnel_parse_protocol(skb);

	return real_protocol && skb->protocol == real_protocol;
}
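/* skb->protocol is what the stack believes the packet to be, while
 * ip_tunnel_parse_protocol() re-derives the protocol from the version
 * nibble of the actual IP header, returning 0 if it is unparseable. The
 * check therefore rejects packets whose claimed address family does not
 * match their contents.
 */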

static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;

	skb_scrub_packet(skb, true);
	memset(&skb->headers_start, 0,
	       offsetof(struct sk_buff, headers_end) -
		       offsetof(struct sk_buff, headers_start));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}
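/* The hash triplet is saved and restored above only when encapsulating: the
 * flow hash computed from the inner packet remains a useful steering hint
 * for the outer UDP flow, whereas after decapsulation the old hash no
 * longer describes the decrypted packet and is deliberately wiped along
 * with the rest of the header state.
 */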

static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu, cpu_index, i;

	if (unlikely(cpu == nr_cpumask_bits ||
		     !cpumask_test_cpu(cpu, cpu_online_mask))) {
		cpu_index = id % cpumask_weight(cpu_online_mask);
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < cpu_index; ++i)
			cpu = cpumask_next(cpu, cpu_online_mask);
		*stored_cpu = cpu;
	}
	return cpu;
}
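/* Worked example: with CPUs {0,2,3} online (weight 3) and id == 5, the first
 * call computes cpu_index = 5 % 3 = 2 and walks the online mask to CPU 3,
 * caching it in *stored_cpu; subsequent calls return CPU 3 directly until it
 * goes offline. Each id thus gets a stable, roughly balanced CPU for its
 * serialized work.
 */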

/* This function is racy, in the sense that it's called while last_cpu is
 * unlocked, so it could return the same CPU twice. Adding locking or using
 * atomic sequence numbers is slower though, and the consequences of racing
 * are harmless, so live with it. The READ_ONCE/WRITE_ONCE pair merely keeps
 * the racy accesses from being torn or elided by the compiler.
 */
static inline int wg_cpumask_next_online(int *last_cpu)
{
	int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	WRITE_ONCE(*last_cpu, cpu);
	return cpu;
}
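/* Worked example: with CPUs {0,2,3} online and *last_cpu == 3, cpumask_next()
 * runs off the end of the mask (cpu >= nr_cpu_ids), so the call wraps to
 * cpumask_first() and returns CPU 0; successive calls then yield 2, 3, 0, ...
 * round-robin. Two racing callers may both observe the same *last_cpu and
 * pick the same CPU, which the comment above accepts as harmless.
 */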

void wg_prev_queue_init(struct prev_queue *queue);

/* Multi producer */
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);

/* Single consumer */
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);

/* Single consumer */
static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
{
	if (queue->peeked)
		return queue->peeked;
	queue->peeked = wg_prev_queue_dequeue(queue);
	return queue->peeked;
}

/* Single consumer */
static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
{
	queue->peeked = NULL;
}
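/* A sketch of the single-consumer pattern these helpers enable, loosely
 * modeled on the tx worker (peer->tx_queue and the locals are assumptions
 * from the other wireguard sources):
 *
 *	while ((skb = wg_prev_queue_peek(&peer->tx_queue)) != NULL &&
 *	       (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
 *			PACKET_STATE_UNCRYPTED) {
 *		wg_prev_queue_drop_peeked(&peer->tx_queue);
 *		... transmit or free skb depending on state ...
 *	}
 *
 * Peeking without dropping lets the consumer leave a not-yet-ready packet
 * at the head of the queue and simply retry later, preserving in-order
 * delivery.
 */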

static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer to ingest, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before
	 * dequeuing it.
	 */
	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
		return -ENOSPC;

	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(&device_queue->last_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}
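/* A sketch of how the encryption path might call this; the member names
 * match their use elsewhere in the wireguard sources, but the surrounding
 * error handling is illustrative:
 *
 *	ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
 *						   &peer->tx_queue, skb,
 *						   wg->packet_crypt_wq);
 *	if (unlikely(ret == -EPIPE))
 *		wg_queue_enqueue_per_peer_tx(skb, PACKET_STATE_DEAD);
 */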

static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
		      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
	wg_peer_put(peer);
}
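/* The atomic_set_release() above pairs with the atomic_read_acquire() in
 * the consumer (see the peek loop sketched earlier): release ordering makes
 * the finished skb contents visible before the state flips, and holding a
 * peer reference across the call keeps the peer alive even if the consumer
 * runs immediately and drops its own references.
 */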

static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}
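/* Unlike the tx variant, completion here is signaled by scheduling the
 * peer's NAPI instance rather than queueing a work item, so decrypted
 * packets are handed to the stack from softirq context via
 * wg_packet_rx_poll() declared above.
 */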

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */