drivers/net/wireguard/peer.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */
#include "peer.h"
#include "device.h"
#include "queueing.h"
#include "timers.h"
#include "peerlookup.h"
#include "noise.h"

#include <linux/kref.h>
#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
static atomic64_t peer_counter = ATOMIC64_INIT(0);
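/* Allocates and initializes a new peer for @wg, keyed by @public_key, and
 * registers it in the device's peer list and pubkey hashtable. Returns the
 * new peer on success or an ERR_PTR() on failure. Callers must hold
 * wg->device_update_lock.
 */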
struct wg_peer *wg_peer_create(struct wg_device *wg,
			       const u8 public_key[NOISE_PUBLIC_KEY_LEN],
			       const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
{
	struct wg_peer *peer;
	int ret = -ENOMEM;

	lockdep_assert_held(&wg->device_update_lock);

	if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
		return ERR_PTR(ret);

	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
	if (unlikely(!peer))
		return ERR_PTR(ret);
	peer->device = wg;

	if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
				     public_key, preshared_key, peer)) {
		ret = -EKEYREJECTED;
		goto err_1;
	}
	if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
		goto err_1;
	if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
				 MAX_QUEUED_PACKETS))
		goto err_2;
	if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
				 MAX_QUEUED_PACKETS))
		goto err_3;

	peer->internal_id = atomic64_inc_return(&peer_counter);
	peer->serial_work_cpu = nr_cpumask_bits;
	wg_cookie_init(&peer->latest_cookie);
	wg_timers_init(peer);
	wg_cookie_checker_precompute_peer_keys(peer);
	spin_lock_init(&peer->keypairs.keypair_update_lock);
	INIT_WORK(&peer->transmit_handshake_work,
		  wg_packet_handshake_send_worker);
	rwlock_init(&peer->endpoint_lock);
	kref_init(&peer->refcount);
	skb_queue_head_init(&peer->staged_packet_queue);
	wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
	set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
	netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&peer->napi);
	list_add_tail(&peer->peer_list, &wg->peer_list);
	INIT_LIST_HEAD(&peer->allowedips_list);
	wg_pubkey_hashtable_add(wg->peer_hashtable, peer);
	++wg->num_peers;
	pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
	return peer;

err_3:
	wg_packet_queue_free(&peer->tx_queue, false);
err_2:
	dst_cache_destroy(&peer->endpoint_cache);
err_1:
	kfree(peer);
	return ERR_PTR(ret);
}
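/* Takes a reference on @peer unless its refcount has already dropped to zero,
 * in which case NULL is returned. Must be called under rcu_read_lock_bh(). A
 * rough caller sketch (candidate_peer is a hypothetical pointer found under
 * RCU, not an in-tree variable):
 *
 *	rcu_read_lock_bh();
 *	peer = wg_peer_get_maybe_zero(candidate_peer);
 *	rcu_read_unlock_bh();
 *	if (peer) {
 *		... use peer ...
 *		wg_peer_put(peer);
 *	}
 */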
struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
			 "Taking peer reference without holding the RCU read lock");
	if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount)))
		return NULL;
	return peer;
}
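/* First half of peer removal: unhooks the peer from all configuration-time
 * lookup structures and marks it dead. The second half,
 * peer_remove_after_dead(), may only run once a synchronize_rcu() grace
 * period has elapsed.
 */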
static void peer_make_dead(struct wg_peer *peer)
{
	/* Remove from configuration-time lookup structures. */
	list_del_init(&peer->peer_list);
	wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
				     &peer->device->device_update_lock);
	wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer);

	/* Mark as dead, so that we don't allow jumping contexts after. */
	WRITE_ONCE(peer->is_dead, true);

	/* The caller must now synchronize_rcu() for this to take effect. */
}
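/* Second half of peer removal: waits out every context that might still be
 * operating on the now-dead peer, then drops the removal reference.
 */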
static void peer_remove_after_dead(struct wg_peer *peer)
{
	WARN_ON(!peer->is_dead);

	/* No more keypairs can be created for this peer, since is_dead protects
	 * add_new_keypair, so we can now destroy existing ones.
	 */
	wg_noise_keypairs_clear(&peer->keypairs);

	/* Destroy all ongoing timers that were in-flight at the beginning of
	 * this function.
	 */
	wg_timers_stop(peer);

	/* The transition between packet encryption/decryption queues isn't
	 * guarded by is_dead, but each reference's life is strictly bounded by
	 * two generations: once for parallel crypto and once for serial
	 * ingestion, so we can simply flush twice, and be sure that we no
	 * longer have references inside these queues.
	 */

	/* a) For encrypt/decrypt. */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* b.1) For send (but not receive, since that's napi). */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* b.2.1) For receive (but not send, since that's wq). */
	napi_disable(&peer->napi);
	/* b.2.2) It's now safe to remove the napi struct, which must be done
	 * here from process context.
	 */
	netif_napi_del(&peer->napi);

	/* Ensure any workstructs we own (like transmit_handshake_work or
	 * clear_peer_work) no longer are in use.
	 */
	flush_workqueue(peer->device->handshake_send_wq);

	/* After the above flushes, a peer might still be active in a few
	 * different contexts: 1) from xmit(), before hitting is_dead and
	 * returning, 2) from wg_packet_consume_data(), before hitting is_dead
	 * and returning, 3) from wg_receive_handshake_packet() after a point
	 * where it has processed an incoming handshake packet, but where
	 * all calls to pass it off to timers fail because of is_dead. We won't
	 * have new references in (1) eventually, because we're removed from
	 * allowedips; we won't have new references in (2) eventually, because
	 * wg_index_hashtable_lookup will always return NULL, since we removed
	 * all existing keypairs and no more can be created; we won't have new
	 * references in (3) eventually, because we're removed from the pubkey
	 * hash table, which allows for a maximum of one handshake response,
	 * via the still-uncleared index hashtable entry, but not more than one,
	 * and in wg_cookie_message_consume, the lookup eventually gets a peer
	 * with a refcount of zero, so no new reference is taken.
	 */

	--peer->device->num_peers;
	wg_peer_put(peer);
}
/* We have a separate "remove" function to make sure that all active places
 * where a peer is currently operating will eventually come to an end and not
 * pass their reference onto another context.
 */
void wg_peer_remove(struct wg_peer *peer)
{
	if (unlikely(!peer))
		return;
	lockdep_assert_held(&peer->device->device_update_lock);

	peer_make_dead(peer);
	synchronize_rcu();
	peer_remove_after_dead(peer);
}
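/* Removes every peer on @wg at once. Rather than paying for one RCU grace
 * period per peer, all peers are first marked dead and collected on a local
 * list, a single synchronize_rcu() covers them all, and only then is each
 * one torn down.
 */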
void wg_peer_remove_all(struct wg_device *wg)
{
	struct wg_peer *peer, *temp;
	LIST_HEAD(dead_peers);

	lockdep_assert_held(&wg->device_update_lock);

	/* Avoid having to traverse individually for each one. */
	wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);

	list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
		peer_make_dead(peer);
		list_add_tail(&peer->peer_list, &dead_peers);
	}
	synchronize_rcu();
	list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
		peer_remove_after_dead(peer);
}
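/* Final stage of destruction, running after an RCU grace period: by now no
 * reader can still observe the peer, so its queues and memory can be freed.
 */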
static void rcu_release(struct rcu_head *rcu)
{
	struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);

	dst_cache_destroy(&peer->endpoint_cache);
	wg_packet_queue_free(&peer->rx_queue, false);
	wg_packet_queue_free(&peer->tx_queue, false);

	/* The final zeroing takes care of clearing any remaining handshake key
	 * material and other potentially sensitive information.
	 */
	kzfree(peer);
}
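/* Called when the last reference is dropped: unhooks the peer from the
 * runtime lookup structures and defers the actual freeing to rcu_release()
 * via call_rcu().
 */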
static void kref_release(struct kref *refcount)
{
	struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount);

	pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n",
		 peer->device->dev->name, peer->internal_id,
		 &peer->endpoint.addr);

	/* Remove ourself from dynamic runtime lookup structures, now that the
	 * last reference is gone.
	 */
	wg_index_hashtable_remove(peer->device->index_hashtable,
				  &peer->handshake.entry);

	/* Remove any lingering packets that didn't have a chance to be
	 * transmitted.
	 */
	wg_packet_purge_staged_packets(peer);

	/* Free the memory used. */
	call_rcu(&peer->rcu, rcu_release);
}
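/* Drops a previously taken peer reference; a NULL @peer is tolerated so that
 * cleanup paths can call this unconditionally.
 */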
void wg_peer_put(struct wg_peer *peer)
{
	if (unlikely(!peer))
		return;
	kref_put(&peer->refcount, kref_release);
}