// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

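/* Every reuseport group gets a unique id from this IDA; it is assigned
 * in reuseport_alloc() and released in reuseport_free_rcu() when the
 * last socket leaves the group.
 */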
static DEFINE_IDA(reuseport_ida);

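/* Allocate a group with a trailing array of max_socks socket pointers.
 * GFP_ATOMIC because callers may hold the bh-disabled reuseport_lock.
 */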
static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
			    sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;
	int id, ret = 0;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path. Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		/* Only set reuse->bind_inany if the bind_inany is true.
		 * Otherwise, it will overwrite the reuse->bind_inany
		 * which was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		ret = -ENOMEM;
		goto out;
	}

	id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
	if (id < 0) {
		kfree(reuse);
		ret = id;
		goto out;
	}

	reuse->reuseport_id = id;
	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	reuse->bind_inany = bind_inany;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return ret;
}
EXPORT_SYMBOL(reuseport_alloc);

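/* Double the group's socket array. Capacity is capped at U16_MAX since
 * max_socks and num_socks are u16. All member sockets are switched over
 * to the new group; the old one is freed after an RCU grace period.
 */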
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;
	more_reuse->has_conns = reuse->has_conns;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

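/* RCU callback: drop the group's reference on the attached BPF program,
 * return its id to the IDA, and free the struct itself.
 */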
static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	ida_free(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
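	/* sk may only switch to sk2's group if it is the lone member of
	 * its current group.
	 */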
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* Notify the bpf side. The sk may be added to a sockarray
	 * map. If so, sockarray logic will remove it from the map.
	 *
	 * Other bpf map types that work with reuseport, like sockmap,
	 * don't need an explicit callback from here. They override sk
	 * unhash/close ops to remove the sk from the map before we
	 * get to this point.
	 */
	bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

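/* Run a cBPF or socket-filter eBPF program (attached via
 * SO_ATTACH_REUSEPORT_CBPF/EBPF) over the skb. The program returns an
 * index into the socket array; anything out of range means no selection.
 * A shared skb is cloned first, and the data pointer is temporarily
 * advanced past the protocol header so the filter sees the payload.
 */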
static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

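/* Entry point for the receive-path lookup; e.g. the UDP and TCP listener
 * lookup paths (udp4_lib_lookup2(), inet_lhash2_lookup()) call this to
 * pick the destination socket of the group.
 */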
/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data. If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2) {
			int i, j;

			i = j = reciprocal_scale(hash, socks);
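			/* Skip connected (TCP_ESTABLISHED) sockets; wrap
			 * around, and give up if we come back to the start.
			 */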
			while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
				i++;
				if (i >= reuse->num_socks)
					i = 0;
				if (i == j)
					goto out;
			}
			sk2 = reuse->socks[i];
		}
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);

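/* Attach a BPF selection program to the group. Reached from filter.c
 * when userspace does, e.g.:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
 *		   &prog_fd, sizeof(prog_fd));
 *
 * An unhashed socket with SO_REUSEPORT set may create the group here;
 * otherwise the socket must already be part of one.
 */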
int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		int err = reuseport_alloc(sk, false);

		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);

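/* Detach the group's BPF program, if any. Reached via
 * setsockopt(SO_DETACH_REUSEPORT_BPF). Returns -ENOENT when there is no
 * program to detach, -EINVAL when SO_REUSEPORT was never enabled.
 */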
int reuseport_detach_prog(struct sock *sk)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return sk->sk_reuseport ? -ENOENT : -EINVAL;

	old_prog = NULL;
	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_replace_pointer(reuse->prog, old_prog,
				       lockdep_is_held(&reuseport_lock));
	spin_unlock_bh(&reuseport_lock);

	if (!old_prog)
		return -ENOENT;

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);