// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peer.h"
#include "device.h"
#include "queueing.h"
#include "timers.h"
#include "peerlookup.h"
#include "noise.h"

#include <linux/kref.h>
#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

static atomic64_t peer_counter = ATOMIC64_INIT(0);

struct wg_peer *wg_peer_create(struct wg_device *wg,
			       const u8 public_key[NOISE_PUBLIC_KEY_LEN],
			       const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
{
	struct wg_peer *peer;
	int ret = -ENOMEM;

	lockdep_assert_held(&wg->device_update_lock);

	if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
		return ERR_PTR(ret);

	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
	if (unlikely(!peer))
		return ERR_PTR(ret);
	peer->device = wg;

	wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
				public_key, preshared_key, peer);
	if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
		goto err_1;
	if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
				 MAX_QUEUED_PACKETS))
		goto err_2;
	if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
				 MAX_QUEUED_PACKETS))
		goto err_3;

	peer->internal_id = atomic64_inc_return(&peer_counter);
	peer->serial_work_cpu = nr_cpumask_bits;
	wg_cookie_init(&peer->latest_cookie);
	wg_timers_init(peer);
	wg_cookie_checker_precompute_peer_keys(peer);
	spin_lock_init(&peer->keypairs.keypair_update_lock);
	INIT_WORK(&peer->transmit_handshake_work,
		  wg_packet_handshake_send_worker);
	rwlock_init(&peer->endpoint_lock);
	kref_init(&peer->refcount);
	skb_queue_head_init(&peer->staged_packet_queue);
	wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
	set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
	netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&peer->napi);
	list_add_tail(&peer->peer_list, &wg->peer_list);
	INIT_LIST_HEAD(&peer->allowedips_list);
	wg_pubkey_hashtable_add(wg->peer_hashtable, peer);
	++wg->num_peers;
	pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
	return peer;

err_3:
	wg_packet_queue_free(&peer->tx_queue, false);
err_2:
	dst_cache_destroy(&peer->endpoint_cache);
err_1:
	kfree(peer);
	return ERR_PTR(ret);
}
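
/* Usage sketch (not part of the original file): a configuration-path caller
 * is expected to hold device_update_lock and to check the ERR_PTR-encoded
 * return value. The surrounding context here is an assumption, shown only
 * for illustration:
 *
 *	mutex_lock(&wg->device_update_lock);
 *	peer = wg_peer_create(wg, public_key, preshared_key);
 *	if (IS_ERR(peer)) {
 *		ret = PTR_ERR(peer);
 *		goto out;
 *	}
 *	...
 * out:
 *	mutex_unlock(&wg->device_update_lock);
 */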

struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
			 "Taking peer reference without holding the RCU read lock");
	if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount)))
		return NULL;
	return peer;
}
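
/* Usage sketch (illustrative only): a data-path lookup takes the BH variant
 * of the RCU read lock, tries to pin the peer, and releases it with
 * wg_peer_put() when finished; lookup_under_rcu() is a hypothetical
 * stand-in for any of the RCU-protected lookup helpers:
 *
 *	rcu_read_lock_bh();
 *	peer = wg_peer_get_maybe_zero(lookup_under_rcu(...));
 *	rcu_read_unlock_bh();
 *	if (!peer)
 *		return;
 *	... use peer ...
 *	wg_peer_put(peer);
 */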

static void peer_make_dead(struct wg_peer *peer)
{
	/* Remove from configuration-time lookup structures. */
	list_del_init(&peer->peer_list);
	wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
				     &peer->device->device_update_lock);
	wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer);

	/* Mark as dead, so that we don't allow jumping contexts after. */
	WRITE_ONCE(peer->is_dead, true);

	/* The caller must now synchronize_rcu() for this to take effect. */
}
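
/* The intended two-phase sequence, exactly as wg_peer_remove() below
 * performs it:
 *
 *	peer_make_dead(peer);
 *	synchronize_rcu();
 *	peer_remove_after_dead(peer);
 */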

static void peer_remove_after_dead(struct wg_peer *peer)
{
	WARN_ON(!peer->is_dead);

	/* No more keypairs can be created for this peer, since is_dead protects
	 * add_new_keypair, so we can now destroy existing ones.
	 */
	wg_noise_keypairs_clear(&peer->keypairs);

	/* Destroy all ongoing timers that were in-flight at the beginning of
	 * this function.
	 */
	wg_timers_stop(peer);

	/* The transition between packet encryption/decryption queues isn't
	 * guarded by is_dead, but each reference's life is strictly bounded by
	 * two generations: once for parallel crypto and once for serial
	 * ingestion, so we can simply flush twice, and be sure that we no
	 * longer have references inside these queues.
	 */

	/* a) For encrypt/decrypt. */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* b.1) For send (but not receive, since that's napi). */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* b.2.1) For receive (but not send, since that's wq). */
	napi_disable(&peer->napi);
	/* b.2.2) It's now safe to remove the napi struct, which must be done
	 * here from process context.
	 */
	netif_napi_del(&peer->napi);

	/* Ensure any workstructs we own (like transmit_handshake_work or
	 * clear_peer_work) no longer are in use.
	 */
	flush_workqueue(peer->device->handshake_send_wq);

	/* After the above flushes, a peer might still be active in a few
	 * different contexts: 1) from xmit(), before hitting is_dead and
	 * returning, 2) from wg_packet_consume_data(), before hitting is_dead
	 * and returning, 3) from wg_receive_handshake_packet() after a point
	 * where it has processed an incoming handshake packet, but where all
	 * calls to pass it off to timers fail because of is_dead. We won't
	 * have new references in (1) eventually, because we're removed from
	 * allowedips; we won't have new references in (2) eventually, because
	 * wg_index_hashtable_lookup will always return NULL, since we removed
	 * all existing keypairs and no more can be created; we won't have new
	 * references in (3) eventually, because we're removed from the pubkey
	 * hash table, which allows for a maximum of one handshake response,
	 * via the still-uncleared index hashtable entry, but not more than one,
	 * and in wg_cookie_message_consume, the lookup eventually gets a peer
	 * with a refcount of zero, so no new reference is taken.
	 */

	--peer->device->num_peers;
	wg_peer_put(peer);
}

/* We have a separate "remove" function to make sure that all active places
 * where a peer is currently operating will eventually come to an end and not
 * pass their reference onto another context.
 */
void wg_peer_remove(struct wg_peer *peer)
{
	if (unlikely(!peer))
		return;
	lockdep_assert_held(&peer->device->device_update_lock);

	peer_make_dead(peer);
	synchronize_rcu();
	peer_remove_after_dead(peer);
}

void wg_peer_remove_all(struct wg_device *wg)
{
	struct wg_peer *peer, *temp;
	LIST_HEAD(dead_peers);

	lockdep_assert_held(&wg->device_update_lock);

	/* Avoid having to traverse individually for each one. */
	wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);

	list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
		peer_make_dead(peer);
		list_add_tail(&peer->peer_list, &dead_peers);
	}
	synchronize_rcu();
	list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
		peer_remove_after_dead(peer);
}
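
/* Usage sketch (illustrative only): whole-device teardown, e.g. the netdev
 * destructor in device.c, removes every peer in one pass while holding the
 * device update lock:
 *
 *	mutex_lock(&wg->device_update_lock);
 *	wg_peer_remove_all(wg);
 *	mutex_unlock(&wg->device_update_lock);
 */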

static void rcu_release(struct rcu_head *rcu)
{
	struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);

	dst_cache_destroy(&peer->endpoint_cache);
	wg_packet_queue_free(&peer->rx_queue, false);
	wg_packet_queue_free(&peer->tx_queue, false);

	/* The final zeroing takes care of clearing any remaining handshake key
	 * material and other potentially sensitive information.
	 */
	kfree_sensitive(peer);
}

static void kref_release(struct kref *refcount)
{
	struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount);

	pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n",
		 peer->device->dev->name, peer->internal_id,
		 &peer->endpoint.addr);

	/* Remove ourselves from dynamic runtime lookup structures, now that
	 * the last reference is gone.
	 */
	wg_index_hashtable_remove(peer->device->index_hashtable,
				  &peer->handshake.entry);

	/* Remove any lingering packets that didn't have a chance to be
	 * transmitted.
	 */
	wg_packet_purge_staged_packets(peer);

	/* Free the memory used. */
	call_rcu(&peer->rcu, rcu_release);
}

void wg_peer_put(struct wg_peer *peer)
{
	if (unlikely(!peer))
		return;
	kref_put(&peer->refcount, kref_release);
}