// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "socket.h"
#include "timers.h"
#include "device.h"
#include "peer.h"
#include "ratelimiter.h"
#include "messages.h"

#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h> /* assumed needed here for icmpv6_ndo_send() below */
#include <linux/suspend.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
#include <net/addrconf.h>

static LIST_HEAD(device_list);
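
/* device_list tracks every wg_device in the system. Additions and removals
 * happen under RTNL (newlink/destruct, the netns pre_exit hook, and the PM
 * notifier below all run with rtnl_lock held), so plain list primitives
 * suffice without additional locking.
 */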

static int wg_open(struct net_device *dev)
{
	struct in_device *dev_v4 = __in_dev_get_rtnl(dev);
	struct inet6_dev *dev_v6 = __in6_dev_get(dev);
	struct wg_device *wg = netdev_priv(dev);
	struct wg_peer *peer;
	int ret;

	if (dev_v4) {
		/* At some point we might put this check near the ip_rt_send_
		 * redirect call of ip_forward in net/ipv4/ip_forward.c, similar
		 * to the current secpath check.
		 */
		IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
		IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
	}
	if (dev_v6)
		dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;

	mutex_lock(&wg->device_update_lock);
	ret = wg_socket_init(wg, wg->incoming_port);
	if (ret < 0)
		goto out;
	list_for_each_entry(peer, &wg->peer_list, peer_list) {
		wg_packet_send_staged_packets(peer);
		if (peer->persistent_keepalive_interval)
			wg_packet_send_keepalive(peer);
	}
out:
	mutex_unlock(&wg->device_update_lock);
	return ret;
}
#ifdef CONFIG_PM_SLEEP
static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct wg_device *wg;
	struct wg_peer *peer;

	/* If the machine is constantly suspending and resuming, as part of
	 * its normal operation rather than as a somewhat rare event, then we
	 * don't actually want to clear keys.
	 */
	if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID))
		return 0;

	if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
		return 0;

	rtnl_lock();
	list_for_each_entry(wg, &device_list, device_list) {
		mutex_lock(&wg->device_update_lock);
		list_for_each_entry(peer, &wg->peer_list, peer_list) {
			del_timer(&peer->timer_zero_key_material);
			wg_noise_handshake_clear(&peer->handshake);
			wg_noise_keypairs_clear(&peer->keypairs);
		}
		mutex_unlock(&wg->device_update_lock);
	}
	rtnl_unlock();
	rcu_barrier();
	return 0;
}

static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };
#endif
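
/* wg_stop mirrors wg_open: it tears down the UDP socket and, like the PM
 * notifier above, discards staged packets and key material so nothing
 * sensitive outlives an administratively-down interface.
 */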
static int wg_stop(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	struct wg_peer *peer;

	mutex_lock(&wg->device_update_lock);
	list_for_each_entry(peer, &wg->peer_list, peer_list) {
		wg_packet_purge_staged_packets(peer);
		wg_timers_stop(peer);
		wg_noise_handshake_clear(&peer->handshake);
		wg_noise_keypairs_clear(&peer->keypairs);
		wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
	}
	mutex_unlock(&wg->device_update_lock);
	skb_queue_purge(&wg->incoming_handshakes);
	wg_socket_reinit(wg, NULL, NULL);
	return 0;
}
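
/* The transmit path: validate the packet, map its destination to a peer via
 * the allowedips lookup, segment GSO superpackets, then stage everything on
 * the peer's queue and kick the asynchronous encryption machinery. On
 * failure an ICMP unreachable is generated so the sender learns quickly
 * instead of timing out.
 */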
static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	struct sk_buff_head packets;
	struct wg_peer *peer;
	struct sk_buff *next;
	sa_family_t family;
	u32 mtu;
	int ret;

	if (unlikely(!wg_check_packet_protocol(skb))) {
		ret = -EPROTONOSUPPORT;
		net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
		goto err;
	}

	peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
	if (unlikely(!peer)) {
		ret = -ENOKEY;
		if (skb->protocol == htons(ETH_P_IP))
			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n",
					    dev->name, &ip_hdr(skb)->daddr);
		else if (skb->protocol == htons(ETH_P_IPV6))
			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
					    dev->name, &ipv6_hdr(skb)->daddr);
		goto err;
	}

	family = READ_ONCE(peer->endpoint.addr.sa_family);
	if (unlikely(family != AF_INET && family != AF_INET6)) {
		ret = -EDESTADDRREQ;
		net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n",
				    dev->name, peer->internal_id);
		goto err_peer;
	}

	mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	__skb_queue_head_init(&packets);
	if (!skb_is_gso(skb)) {
		skb_mark_not_on_list(skb);
	} else {
		struct sk_buff *segs = skb_gso_segment(skb, 0);

		if (unlikely(IS_ERR(segs))) {
			ret = PTR_ERR(segs);
			goto err_peer;
		}
		dev_kfree_skb(skb);
		skb = segs;
	}

	skb_list_walk_safe(skb, skb, next) {
		skb_mark_not_on_list(skb);

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			continue;

		/* We only need to keep the original dst around for icmp,
		 * so at this point we're in a position to drop it.
		 */
		skb_dst_drop(skb);

		PACKET_CB(skb)->mtu = mtu;

		__skb_queue_tail(&packets, skb);
	}

	spin_lock_bh(&peer->staged_packet_queue.lock);
	/* If the queue is getting too big, we start removing the oldest packets
	 * until it's small again. We do this before adding the new packet, so
	 * we don't remove GSO segments that are in excess.
	 */
	while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
		dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
		++dev->stats.tx_dropped;
	}
	skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
	spin_unlock_bh(&peer->staged_packet_queue.lock);

	wg_packet_send_staged_packets(peer);

	wg_peer_put(peer);
	return NETDEV_TX_OK;

err_peer:
	wg_peer_put(peer);
err:
	++dev->stats.tx_errors;
	if (skb->protocol == htons(ETH_P_IP))
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
	else if (skb->protocol == htons(ETH_P_IPV6))
		icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
	kfree_skb(skb);
	return ret;
}
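
/* .ndo_get_stats64 is wired to dev_get_tstats64, the core helper that sums
 * the per-cpu pcpu_sw_netstats counters allocated in wg_newlink below.
 */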
static const struct net_device_ops netdev_ops = {
	.ndo_open		= wg_open,
	.ndo_stop		= wg_stop,
	.ndo_start_xmit		= wg_xmit,
	.ndo_get_stats64	= dev_get_tstats64
};
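
/* Teardown ordering: the device is unhooked from device_list first, then
 * peers and workqueues are destroyed under device_update_lock; the
 * rcu_barrier() must run before freeing the hashtables, since peers are
 * freed via RCU callbacks that may still reference them.
 */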
static void wg_destruct(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);

	rtnl_lock();
	list_del(&wg->device_list);
	rtnl_unlock();
	mutex_lock(&wg->device_update_lock);
	rcu_assign_pointer(wg->creating_net, NULL);
	wg->incoming_port = 0;
	wg_socket_reinit(wg, NULL, NULL);
	/* The final references are cleared in the below calls to destroy_workqueue. */
	wg_peer_remove_all(wg);
	destroy_workqueue(wg->handshake_receive_wq);
	destroy_workqueue(wg->handshake_send_wq);
	destroy_workqueue(wg->packet_crypt_wq);
	wg_packet_queue_free(&wg->decrypt_queue, true);
	wg_packet_queue_free(&wg->encrypt_queue, true);
	rcu_barrier(); /* Wait for all the peers to be actually freed. */
	wg_ratelimiter_uninit();
	memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
	skb_queue_purge(&wg->incoming_handshakes);
	free_percpu(dev->tstats);
	free_percpu(wg->incoming_handshakes_worker);
	kvfree(wg->index_hashtable);
	kvfree(wg->peer_hashtable);
	mutex_unlock(&wg->device_update_lock);

	pr_debug("%s: Interface destroyed\n", dev->name);

	free_netdev(dev);
}

static const struct device_type device_type = { .name = KBUILD_MODNAME };
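
/* Worked example of the overhead arithmetic in wg_setup below: assuming
 * MESSAGE_MINIMUM_LENGTH is 32 (16-byte data-message header plus the 16-byte
 * authentication tag of an empty payload), overhead = 32 + 8 (udphdr) + 40
 * (ipv6hdr, the larger of the two IP headers) = 80 bytes, which yields the
 * familiar default MTU of 1500 - 80 = 1420.
 */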
static void wg_setup(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				    NETIF_F_SG | NETIF_F_GSO |
				    NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
	const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) +
			     max(sizeof(struct ipv6hdr), sizeof(struct iphdr));

	dev->netdev_ops = &netdev_ops;
	dev->header_ops = &ip_tunnel_header_ops;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
	dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE);
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->features |= NETIF_F_LLTX;
	dev->features |= WG_NETDEV_FEATURES;
	dev->hw_features |= WG_NETDEV_FEATURES;
	dev->hw_enc_features |= WG_NETDEV_FEATURES;
	dev->mtu = ETH_DATA_LEN - overhead;
	dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;

	SET_NETDEV_DEVTYPE(dev, &device_type);

	/* We need to keep the dst around in case of icmp replies. */
	netif_keep_dst(dev);

	memset(wg, 0, sizeof(*wg));
	wg->dev = dev;
}
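
/* Allocations below happen in a fixed order, and the error-unwind ladder at
 * the bottom releases them in exactly the reverse order; each goto target
 * frees everything allocated before the failing step.
 */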
static int wg_newlink(struct net *src_net, struct net_device *dev,
		      struct nlattr *tb[], struct nlattr *data[],
		      struct netlink_ext_ack *extack)
{
	struct wg_device *wg = netdev_priv(dev);
	int ret = -ENOMEM;

	rcu_assign_pointer(wg->creating_net, src_net);
	init_rwsem(&wg->static_identity.lock);
	mutex_init(&wg->socket_update_lock);
	mutex_init(&wg->device_update_lock);
	skb_queue_head_init(&wg->incoming_handshakes);
	wg_allowedips_init(&wg->peer_allowedips);
	wg_cookie_checker_init(&wg->cookie_checker, wg);
	INIT_LIST_HEAD(&wg->peer_list);
	wg->device_update_gen = 1;

	wg->peer_hashtable = wg_pubkey_hashtable_alloc();
	if (!wg->peer_hashtable)
		return ret;

	wg->index_hashtable = wg_index_hashtable_alloc();
	if (!wg->index_hashtable)
		goto err_free_peer_hashtable;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		goto err_free_index_hashtable;

	wg->incoming_handshakes_worker =
		wg_packet_percpu_multicore_worker_alloc(
				wg_packet_handshake_receive_worker, wg);
	if (!wg->incoming_handshakes_worker)
		goto err_free_tstats;

	wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
			WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
	if (!wg->handshake_receive_wq)
		goto err_free_incoming_handshakes;

	wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
			WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
	if (!wg->handshake_send_wq)
		goto err_destroy_handshake_receive;

	wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
			WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
	if (!wg->packet_crypt_wq)
		goto err_destroy_handshake_send;

	ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
				   true, MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_destroy_packet_crypt;

	ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
				   true, MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_free_encrypt_queue;

	ret = wg_ratelimiter_init();
	if (ret < 0)
		goto err_free_decrypt_queue;

	ret = register_netdevice(dev);
	if (ret < 0)
		goto err_uninit_ratelimiter;

	list_add(&wg->device_list, &device_list);

	/* We wait until the end to assign priv_destructor, so that
	 * register_netdevice doesn't call it for us if it fails.
	 */
	dev->priv_destructor = wg_destruct;

	pr_debug("%s: Interface created\n", dev->name);
	return ret;

err_uninit_ratelimiter:
	wg_ratelimiter_uninit();
err_free_decrypt_queue:
	wg_packet_queue_free(&wg->decrypt_queue, true);
err_free_encrypt_queue:
	wg_packet_queue_free(&wg->encrypt_queue, true);
err_destroy_packet_crypt:
	destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
	destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
	destroy_workqueue(wg->handshake_receive_wq);
err_free_incoming_handshakes:
	free_percpu(wg->incoming_handshakes_worker);
err_free_tstats:
	free_percpu(dev->tstats);
err_free_index_hashtable:
	kvfree(wg->index_hashtable);
err_free_peer_hashtable:
	kvfree(wg->peer_hashtable);
	return ret;
}
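
/* With link_ops registered, interfaces are created through the usual rtnl
 * machinery, e.g. "ip link add wg0 type wireguard": rtnetlink allocates the
 * netdev with priv_size bytes of wg_device private data, calls wg_setup,
 * then wg_newlink.
 */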
static struct rtnl_link_ops link_ops __read_mostly = {
	.kind			= KBUILD_MODNAME,
	.priv_size		= sizeof(struct wg_device),
	.setup			= wg_setup,
	.newlink		= wg_newlink,
};
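
/* A wireguard device holds a UDP socket in the namespace it was created in
 * (creating_net). When that namespace exits, the socket must be released
 * here, or the socket's reference would keep the namespace alive forever.
 */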
static void wg_netns_pre_exit(struct net *net)
{
	struct wg_device *wg;

	rtnl_lock();
	list_for_each_entry(wg, &device_list, device_list) {
		if (rcu_access_pointer(wg->creating_net) == net) {
			pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
			netif_carrier_off(wg->dev);
			mutex_lock(&wg->device_update_lock);
			rcu_assign_pointer(wg->creating_net, NULL);
			wg_socket_reinit(wg, NULL, NULL);
			mutex_unlock(&wg->device_update_lock);
		}
	}
	rtnl_unlock();
}

static struct pernet_operations pernet_ops = {
	.pre_exit = wg_netns_pre_exit
};
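
/* Module bring-up registers the PM notifier, the pernet ops, and the rtnl
 * link ops in that order; any failure unwinds whatever was registered so
 * far, and wg_device_uninit below undoes all three in reverse.
 */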
int __init wg_device_init(void)
{
	int ret;

#ifdef CONFIG_PM_SLEEP
	ret = register_pm_notifier(&pm_notifier);
	if (ret)
		return ret;
#endif

	ret = register_pernet_device(&pernet_ops);
	if (ret)
		goto error_pm;

	ret = rtnl_link_register(&link_ops);
	if (ret)
		goto error_pernet;

	return 0;

error_pernet:
	unregister_pernet_device(&pernet_ops);
error_pm:
#ifdef CONFIG_PM_SLEEP
	unregister_pm_notifier(&pm_notifier);
#endif
	return ret;
}
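
/* Tear down in strict reverse order of wg_device_init; the final
 * rcu_barrier() waits for any in-flight RCU callbacks to finish before the
 * module text they reference can vanish.
 */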
void wg_device_uninit(void)
{
	rtnl_link_unregister(&link_ops);
	unregister_pernet_device(&pernet_ops);
#ifdef CONFIG_PM_SLEEP
	unregister_pm_notifier(&pm_notifier);
#endif
	rcu_barrier();
}