// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "socket.h"
#include "timers.h"
#include "device.h"
#include "ratelimiter.h"
#include "peer.h"
#include "messages.h"

#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
#include <net/dst_metadata.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
#include <net/addrconf.h>

static LIST_HEAD(device_list);

static int wg_open(struct net_device *dev)
{
	struct in_device *dev_v4 = __in_dev_get_rtnl(dev);
	struct inet6_dev *dev_v6 = __in6_dev_get(dev);
	struct wg_device *wg = netdev_priv(dev);
	struct wg_peer *peer;
	int ret;

	if (dev_v4) {
		/* At some point we might put this check near the ip_rt_send_
		 * redirect call of ip_forward in net/ipv4/ip_forward.c, similar
		 * to the current secpath check.
		 */
		IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
		IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
	}
	if (dev_v6)
		dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;

	mutex_lock(&wg->device_update_lock);
	ret = wg_socket_init(wg, wg->incoming_port);
	if (ret < 0)
		goto out;
	list_for_each_entry(peer, &wg->peer_list, peer_list) {
		wg_packet_send_staged_packets(peer);
		if (peer->persistent_keepalive_interval)
			wg_packet_send_keepalive(peer);
	}
out:
	mutex_unlock(&wg->device_update_lock);
	return ret;
}

#ifdef CONFIG_PM_SLEEP
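/* Zero out ephemeral key material before the system suspends or hibernates,
 * so that session keys are not kept in memory while the machine is asleep.
 */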
static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct wg_device *wg;
	struct wg_peer *peer;

	/* If the machine is constantly suspending and resuming, as part of
	 * its normal operation rather than as a somewhat rare event, then we
	 * don't actually want to clear keys.
	 */
	if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID))
		return 0;

	if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
		return 0;

	rtnl_lock();
	list_for_each_entry(wg, &device_list, device_list) {
		mutex_lock(&wg->device_update_lock);
		list_for_each_entry(peer, &wg->peer_list, peer_list) {
			del_timer(&peer->timer_zero_key_material);
			wg_noise_handshake_clear(&peer->handshake);
			wg_noise_keypairs_clear(&peer->keypairs);
		}
		mutex_unlock(&wg->device_update_lock);
	}
	rtnl_unlock();
	rcu_barrier();
	return 0;
}

static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };
#endif

static int wg_stop(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	struct wg_peer *peer;
	struct sk_buff *skb;

	mutex_lock(&wg->device_update_lock);
	list_for_each_entry(peer, &wg->peer_list, peer_list) {
		wg_packet_purge_staged_packets(peer);
		wg_timers_stop(peer);
		wg_noise_handshake_clear(&peer->handshake);
		wg_noise_keypairs_clear(&peer->keypairs);
		wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
	}
	mutex_unlock(&wg->device_update_lock);
	while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
		kfree_skb(skb);
	atomic_set(&wg->handshake_queue_len, 0);
	wg_socket_reinit(wg, NULL, NULL);
	return 0;
}

static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	struct sk_buff_head packets;
	struct wg_peer *peer;
	struct sk_buff *next;
	sa_family_t family;
	u32 mtu;
	int ret;

	if (unlikely(!wg_check_packet_protocol(skb))) {
		ret = -EPROTONOSUPPORT;
		net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
		goto err;
	}

	peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
	if (unlikely(!peer)) {
		ret = -ENOKEY;
		if (skb->protocol == htons(ETH_P_IP))
			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n",
					    dev->name, &ip_hdr(skb)->daddr);
		else if (skb->protocol == htons(ETH_P_IPV6))
			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
					    dev->name, &ipv6_hdr(skb)->daddr);
		goto err_icmp;
	}

	family = READ_ONCE(peer->endpoint.addr.sa_family);
	if (unlikely(family != AF_INET && family != AF_INET6)) {
		ret = -EDESTADDRREQ;
		net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n",
				    dev->name, peer->internal_id);
		goto err_peer;
	}

	mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	__skb_queue_head_init(&packets);
	if (!skb_is_gso(skb)) {
		skb_mark_not_on_list(skb);
	} else {
		struct sk_buff *segs = skb_gso_segment(skb, 0);

		if (unlikely(IS_ERR(segs))) {
			ret = PTR_ERR(segs);
			goto err_peer;
		}
		dev_kfree_skb(skb);
		skb = segs;
	}

	skb_list_walk_safe(skb, skb, next) {
		skb_mark_not_on_list(skb);

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			continue;

		/* We only need to keep the original dst around for icmp,
		 * so at this point we're in a position to drop it.
		 */
		skb_dst_drop(skb);

		PACKET_CB(skb)->mtu = mtu;

		__skb_queue_tail(&packets, skb);
	}

	spin_lock_bh(&peer->staged_packet_queue.lock);
	/* If the queue is getting too big, we start removing the oldest packets
	 * until it's small again. We do this before adding the new packet, so
	 * we don't remove GSO segments that are in excess.
	 */
	while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
		dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
		DEV_STATS_INC(dev, tx_dropped);
	}
	skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
	spin_unlock_bh(&peer->staged_packet_queue.lock);

	wg_packet_send_staged_packets(peer);

	wg_peer_put(peer);
	return NETDEV_TX_OK;

err_peer:
	wg_peer_put(peer);
err_icmp:
	if (skb->protocol == htons(ETH_P_IP))
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
	else if (skb->protocol == htons(ETH_P_IPV6))
		icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
err:
	DEV_STATS_INC(dev, tx_errors);
	kfree_skb(skb);
	return ret;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open = wg_open,
	.ndo_stop = wg_stop,
	.ndo_start_xmit = wg_xmit,
	.ndo_get_stats64 = ip_tunnel_get_stats64
};

static void wg_destruct(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);

	rtnl_lock();
	list_del(&wg->device_list);
	rtnl_unlock();
	mutex_lock(&wg->device_update_lock);
	rcu_assign_pointer(wg->creating_net, NULL);
	wg->incoming_port = 0;
	wg_socket_reinit(wg, NULL, NULL);
	/* The final references are cleared in the below calls to destroy_workqueue. */
	wg_peer_remove_all(wg);
	destroy_workqueue(wg->handshake_receive_wq);
	destroy_workqueue(wg->handshake_send_wq);
	destroy_workqueue(wg->packet_crypt_wq);
	wg_packet_queue_free(&wg->handshake_queue, true);
	wg_packet_queue_free(&wg->decrypt_queue, false);
	wg_packet_queue_free(&wg->encrypt_queue, false);
	rcu_barrier(); /* Wait for all the peers to be actually freed. */
	wg_ratelimiter_uninit();
	memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
	free_percpu(dev->tstats);
	kvfree(wg->index_hashtable);
	kvfree(wg->peer_hashtable);
	mutex_unlock(&wg->device_update_lock);

	pr_debug("%s: Interface destroyed\n", dev->name);
	free_netdev(dev);
}

static const struct device_type device_type = { .name = KBUILD_MODNAME };

static void wg_setup(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				    NETIF_F_SG | NETIF_F_GSO |
				    NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
	const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) +
			     max(sizeof(struct ipv6hdr), sizeof(struct iphdr));

	dev->netdev_ops = &netdev_ops;
	dev->header_ops = &ip_tunnel_header_ops;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
	dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE);
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->features |= NETIF_F_LLTX;
	dev->features |= WG_NETDEV_FEATURES;
	dev->hw_features |= WG_NETDEV_FEATURES;
	dev->hw_enc_features |= WG_NETDEV_FEATURES;
	dev->mtu = ETH_DATA_LEN - overhead;
	dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;

	SET_NETDEV_DEVTYPE(dev, &device_type);

	/* We need to keep the dst around in case of icmp replies. */
	netif_keep_dst(dev);

	memset(wg, 0, sizeof(*wg));
	wg->dev = dev;
}
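/* Allocate the per-device hashtables, workqueues, and packet queues, then
 * register the new WireGuard interface with the networking core.
 */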
static int wg_newlink(struct net *src_net, struct net_device *dev,
		      struct nlattr *tb[], struct nlattr *data[],
		      struct netlink_ext_ack *extack)
{
	struct wg_device *wg = netdev_priv(dev);
	int ret = -ENOMEM;

	rcu_assign_pointer(wg->creating_net, src_net);
	init_rwsem(&wg->static_identity.lock);
	mutex_init(&wg->socket_update_lock);
	mutex_init(&wg->device_update_lock);
	wg_allowedips_init(&wg->peer_allowedips);
	wg_cookie_checker_init(&wg->cookie_checker, wg);
	INIT_LIST_HEAD(&wg->peer_list);
	wg->device_update_gen = 1;

	wg->peer_hashtable = wg_pubkey_hashtable_alloc();
	if (!wg->peer_hashtable)
		return ret;

	wg->index_hashtable = wg_index_hashtable_alloc();
	if (!wg->index_hashtable)
		goto err_free_peer_hashtable;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		goto err_free_index_hashtable;

	wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
			WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
	if (!wg->handshake_receive_wq)
		goto err_free_tstats;

	wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
			WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
	if (!wg->handshake_send_wq)
		goto err_destroy_handshake_receive;

	wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
			WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
	if (!wg->packet_crypt_wq)
		goto err_destroy_handshake_send;

	ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
				   MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_destroy_packet_crypt;

	ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
				   MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_free_encrypt_queue;

	ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
				   MAX_QUEUED_INCOMING_HANDSHAKES);
	if (ret < 0)
		goto err_free_decrypt_queue;

	ret = wg_ratelimiter_init();
	if (ret < 0)
		goto err_free_handshake_queue;

	ret = register_netdevice(dev);
	if (ret < 0)
		goto err_uninit_ratelimiter;

	list_add(&wg->device_list, &device_list);

	/* We wait until the end to assign priv_destructor, so that
	 * register_netdevice doesn't call it for us if it fails.
	 */
	dev->priv_destructor = wg_destruct;

	pr_debug("%s: Interface created\n", dev->name);
	return ret;

err_uninit_ratelimiter:
	wg_ratelimiter_uninit();
err_free_handshake_queue:
	wg_packet_queue_free(&wg->handshake_queue, false);
err_free_decrypt_queue:
	wg_packet_queue_free(&wg->decrypt_queue, false);
err_free_encrypt_queue:
	wg_packet_queue_free(&wg->encrypt_queue, false);
err_destroy_packet_crypt:
	destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
	destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
	destroy_workqueue(wg->handshake_receive_wq);
err_free_tstats:
	free_percpu(dev->tstats);
err_free_index_hashtable:
	kvfree(wg->index_hashtable);
err_free_peer_hashtable:
	kvfree(wg->peer_hashtable);
	return ret;
}

static struct rtnl_link_ops link_ops __read_mostly = {
	.kind = KBUILD_MODNAME,
	.priv_size = sizeof(struct wg_device),
	.setup = wg_setup,
	.newlink = wg_newlink,
};

static void wg_netns_pre_exit(struct net *net)
{
	struct wg_device *wg;
	struct wg_peer *peer;

	rtnl_lock();
	list_for_each_entry(wg, &device_list, device_list) {
		if (rcu_access_pointer(wg->creating_net) == net) {
			pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
			netif_carrier_off(wg->dev);
			mutex_lock(&wg->device_update_lock);
			rcu_assign_pointer(wg->creating_net, NULL);
			wg_socket_reinit(wg, NULL, NULL);
			list_for_each_entry(peer, &wg->peer_list, peer_list)
				wg_socket_clear_peer_endpoint_src(peer);
			mutex_unlock(&wg->device_update_lock);
		}
	}
	rtnl_unlock();
}

static struct pernet_operations pernet_ops = {
	.pre_exit = wg_netns_pre_exit
};

int __init wg_device_init(void)
{
	int ret;

#ifdef CONFIG_PM_SLEEP
	ret = register_pm_notifier(&pm_notifier);
	if (ret)
		return ret;
#endif

	ret = register_pernet_device(&pernet_ops);
	if (ret)
		goto error_pm;

	ret = rtnl_link_register(&link_ops);
	if (ret)
		goto error_pernet;

	return 0;

error_pernet:
	unregister_pernet_device(&pernet_ops);
error_pm:
#ifdef CONFIG_PM_SLEEP
	unregister_pm_notifier(&pm_notifier);
#endif
	return ret;
}

void wg_device_uninit(void)
{
	rtnl_link_unregister(&link_ops);
	unregister_pernet_device(&pernet_ops);
#ifdef CONFIG_PM_SLEEP
	unregister_pm_notifier(&pm_notifier);
#endif
	rcu_barrier();
}