// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peerlookup.h"
#include "peer.h"
#include "noise.h"
static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
					const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	/* siphash gives us a secure 64bit number based on a random key. Since
	 * the bits are uniformly distributed, we can then mask off to get the
	 * bits we need.
	 */
	const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);

	return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
}
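
/* Illustrative sketch (not part of the driver): how the mask above selects a
 * bucket. HASH_SIZE(table->hashtable) is a power of two (1 << BITS for a table
 * declared with DECLARE_HASHTABLE(hashtable, BITS)), so the mask keeps only
 * the low BITS bits of the siphash output, e.g.:
 *
 *	u64 hash = 0x5ca1ab1edeadbeefULL;	// hypothetical siphash output
 *	u64 buckets = 1ULL << 11;		// hypothetical table order
 *	u64 bucket = hash & (buckets - 1);	// index into ->hashtable[]
 */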
struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
{
	struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	get_random_bytes(&table->key, sizeof(table->key));
	hash_init(table->hashtable);
	mutex_init(&table->lock);
	return table;
}
void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
			     struct wg_peer *peer)
{
	mutex_lock(&table->lock);
	hlist_add_head_rcu(&peer->pubkey_hash,
			   pubkey_bucket(table, peer->handshake.remote_static));
	mutex_unlock(&table->lock);
}
void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
				struct wg_peer *peer)
{
	mutex_lock(&table->lock);
	hlist_del_init_rcu(&peer->pubkey_hash);
	mutex_unlock(&table->lock);
}
/* Returns a strong reference to a peer */
struct wg_peer *
wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
			   const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	struct wg_peer *iter_peer, *peer = NULL;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
				    pubkey_hash) {
		if (!memcmp(pubkey, iter_peer->handshake.remote_static,
			    NOISE_PUBLIC_KEY_LEN)) {
			peer = iter_peer;
			break;
		}
	}
	peer = wg_peer_get_maybe_zero(peer);
	rcu_read_unlock_bh();
	return peer;
}
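
/* Usage sketch (illustrative, not part of the driver): the lookup above takes
 * a reference via wg_peer_get_maybe_zero(), so a caller is expected to release
 * it with wg_peer_put() once done, along the lines of:
 *
 *	struct wg_peer *peer = wg_pubkey_hashtable_lookup(table, pubkey);
 *
 *	if (peer) {
 *		// peer cannot be freed while this reference is held
 *		wg_peer_put(peer);
 *	}
 */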
static struct hlist_head *index_bucket(struct index_hashtable *table,
				       const __le32 index)
{
	/* Since the indices are random and thus all bits are uniformly
	 * distributed, we can find its bucket simply by masking.
	 */
	return &table->hashtable[(__force u32)index &
				 (HASH_SIZE(table->hashtable) - 1)];
}
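
/* Note (illustrative): the (__force u32) cast only drops the __le32 endianness
 * annotation for sparse; since the index is random, its raw bits are already
 * uniformly distributed, so the value can be masked directly, without any
 * byte-swapping, to pick a bucket.
 */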
struct index_hashtable *wg_index_hashtable_alloc(void)
{
	struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	hash_init(table->hashtable);
	spin_lock_init(&table->lock);
	return table;
}
/* At the moment, we limit ourselves to 2^20 total peers, which generally might
 * amount to 2^20*3 items in this hashtable. The algorithm below works by
 * picking a random number and testing it. We can see that these limits mean we
 * usually succeed pretty quickly:
 *
 * >>> def calculation(tries, size):
 * ...     return (size / 2**32)**(tries - 1) * (1 - (size / 2**32))
 * ...
 * >>> calculation(1, 2**20 * 3)
 * 0.999267578125
 * >>> calculation(2, 2**20 * 3)
 * 0.0007318854331970215
 * >>> calculation(3, 2**20 * 3)
 * 5.360489012673497e-07
 * >>> calculation(4, 2**20 * 3)
 * 3.9261394135792216e-10
 *
 * At the moment, we don't do any masking, so this algorithm isn't exactly
 * constant time in either the random guessing or in the hash list lookup. We
 * could require a minimum of 3 tries, which would successfully mask the
 * guessing. This would not, however, help with the growing hash lengths, which
 * is another thing to consider moving forward.
 */
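
/* The same check, sketched in plain C for illustration (userspace only, not
 * part of the driver): calculation() is the geometric-distribution probability
 * that exactly `tries` random 32-bit guesses are needed when `size` indices
 * are already taken.
 *
 *	#include <math.h>
 *	#include <stdio.h>
 *
 *	static double calculation(int tries, double size)
 *	{
 *		double p = size / 4294967296.0;		// size / 2^32
 *		return pow(p, tries - 1) * (1.0 - p);
 *	}
 *
 *	int main(void)
 *	{
 *		printf("%.12f\n", calculation(1, 3.0 * (1 << 20)));	// ~0.99927
 *		printf("%.12f\n", calculation(2, 3.0 * (1 << 20)));	// ~0.00073
 *		return 0;
 *	}
 */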
__le32 wg_index_hashtable_insert(struct index_hashtable *table,
				 struct index_hashtable_entry *entry)
{
	struct index_hashtable_entry *existing_entry;

	spin_lock_bh(&table->lock);
	hlist_del_init_rcu(&entry->index_hash);
	spin_unlock_bh(&table->lock);

	rcu_read_lock_bh();

search_unused_slot:
	/* First we try to find an unused slot, randomly, while unlocked. */
	entry->index = (__force __le32)get_random_u32();
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index),
				    index_hash) {
		if (existing_entry->index == entry->index)
			/* If it's already in use, we continue searching. */
			goto search_unused_slot;
	}

	/* Once we've found an unused slot, we lock it, and then double-check
	 * that nobody else stole it from us.
	 */
	spin_lock_bh(&table->lock);
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index),
				    index_hash) {
		if (existing_entry->index == entry->index) {
			spin_unlock_bh(&table->lock);
			/* If it was stolen, we start over. */
			goto search_unused_slot;
		}
	}
	/* Otherwise, we know we have it exclusively (since we're locked),
	 * so we insert.
	 */
	hlist_add_head_rcu(&entry->index_hash,
			   index_bucket(table, entry->index));
	spin_unlock_bh(&table->lock);

	rcu_read_unlock_bh();

	return entry->index;
}
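
/* Usage sketch (illustrative only): the owner of an index_hashtable_entry
 * embedded in its session state would typically set it up and publish it
 * roughly like
 *
 *	entry->type = INDEX_HASHTABLE_HANDSHAKE;
 *	entry->peer = peer;
 *	local_index = wg_index_hashtable_insert(table, entry);
 *
 * and then send local_index on the wire so the remote side can address replies
 * to this entry.
 */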
bool wg_index_hashtable_replace(struct index_hashtable *table,
				struct index_hashtable_entry *old,
				struct index_hashtable_entry *new)
{
	if (unlikely(hlist_unhashed(&old->index_hash)))
		return false;
	spin_lock_bh(&table->lock);
	new->index = old->index;
	hlist_replace_rcu(&old->index_hash, &new->index_hash);

	/* Calling init here NULLs out index_hash, and in fact after this
	 * function returns, it's theoretically possible for this to get
	 * reinserted elsewhere. That means the RCU lookup below might either
	 * terminate early or jump between buckets, in which case the packet
	 * simply gets dropped, which isn't terrible.
	 */
	INIT_HLIST_NODE(&old->index_hash);
	spin_unlock_bh(&table->lock);
	return true;
}
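
/* Note (illustrative): because hlist_replace_rcu() publishes the new node with
 * a single rcu_assign_pointer() update, a concurrent wg_index_hashtable_lookup()
 * traversing the bucket observes either the old entry or the new one at that
 * position; the worst case is the dropped packet described in the comment above.
 */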
void wg_index_hashtable_remove(struct index_hashtable *table,
			       struct index_hashtable_entry *entry)
{
	spin_lock_bh(&table->lock);
	hlist_del_init_rcu(&entry->index_hash);
	spin_unlock_bh(&table->lock);
}
/* Returns a strong reference to an entry->peer */
struct index_hashtable_entry *
wg_index_hashtable_lookup(struct index_hashtable *table,
			  const enum index_hashtable_type type_mask,
			  const __le32 index, struct wg_peer **peer)
{
	struct index_hashtable_entry *iter_entry, *entry = NULL;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
				    index_hash) {
		if (iter_entry->index == index) {
			if (likely(iter_entry->type & type_mask))
				entry = iter_entry;
			break;
		}
	}
	if (likely(entry)) {
		entry->peer = wg_peer_get_maybe_zero(entry->peer);
		if (likely(entry->peer))
			*peer = entry->peer;
		else
			entry = NULL;
	}
	rcu_read_unlock_bh();
	return entry;
}
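
/* Usage sketch (illustrative, not part of the driver): a receive path that has
 * parsed a receiver index out of a packet could resolve it with a type mask,
 * getting back both the entry and a strong peer reference to drop later:
 *
 *	struct wg_peer *peer = NULL;
 *	struct index_hashtable_entry *entry =
 *		wg_index_hashtable_lookup(table, INDEX_HASHTABLE_KEYPAIR,
 *					  receiver_index, &peer);
 *
 *	if (entry) {
 *		// use entry; the peer reference keeps entry->peer alive
 *		wg_peer_put(peer);
 *	}
 */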