net/core/sock_reuseport.c
// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
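
/*
 * Illustrative sketch (not part of this file): a userspace server joins a
 * reuseport group simply by enabling SO_REUSEPORT before bind(); the code
 * below then builds and maintains the socket array on its behalf.  The
 * address setup is elided and the values are hypothetical:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, SOMAXCONN);
 */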

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

#define REUSEPORT_MIN_ID 1
static DEFINE_IDA(reuseport_ida);

int reuseport_get_id(struct sock_reuseport *reuse)
{
	int id;

	if (reuse->reuseport_id)
		return reuse->reuseport_id;

	id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
			    /* Called under reuseport_lock */
			    GFP_ATOMIC);
	if (id < 0)
		return id;

	reuse->reuseport_id = id;

	return reuse->reuseport_id;
}

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
			    sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;
	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		/* Only set reuse->bind_inany if the bind_inany is true.
		 * Otherwise, it will overwrite the reuse->bind_inany
		 * which was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	reuse->bind_inany = bind_inany;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}
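
/*
 * Worked example (derived from the code above, not part of this file):
 * starting from INIT_SOCKS, the array doubles on each overflow:
 *
 *	128 -> 256 -> 512 -> ... -> 16384 -> 32768
 *
 * At max_socks == 32768 the next doubling (65536) exceeds U16_MAX, so
 * reuseport_grow() returns NULL and a group caps out at 32768 sockets.
 */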

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	if (reuse->reuseport_id)
		ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk: New socket to add to the group.
 * @sk2: Socket belonging to the existing reuseport group.
 * @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 * May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);
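
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * protocol's bind/hash path typically seeds the group with the first
 * socket on a port and chains later ones onto it, roughly:
 *
 *	if (existing)				// another sk already bound
 *		err = reuseport_add_sock(sk, existing, inany);
 *	else					// first sk on this port
 *		err = reuseport_alloc(sk, inany);
 */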

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* At least one of the sk in this reuseport group is added to
	 * a bpf map.  Notify the bpf side.  The bpf map logic will
	 * remove the sk if it is indeed added to a bpf map.
	 */
	if (reuse->reuseport_id)
		bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}
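
/*
 * Illustrative sketch (not part of this file): run_bpf_filter() handles
 * classic BPF attached with SO_ATTACH_REUSEPORT_CBPF, whose return value
 * is an index into the socket array.  A userspace program that steers
 * each packet to the socket matching the receiving CPU might look like:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_LD  | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
 *		{ BPF_RET | BPF_A, 0, 0, 0 },	// return CPU id as index
 *	};
 *	struct sock_fprog fprog = { .len = 2, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &fprog, sizeof(fprog));
 */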

/**
 * reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 * @sk: First socket in the group.
 * @hash: When no BPF filter is available, use this hash to select.
 * @skb: skb to run through BPF filter.
 * @hdr_len: BPF filter expects skb data pointer at payload data.  If
 * the skb does not yet point at the payload, this parameter represents
 * how far the pointer needs to advance to reach the payload.
 * Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2) {
			int i, j;

			i = j = reciprocal_scale(hash, socks);
			while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
				i++;
				if (i >= reuse->num_socks)
					i = 0;
				if (i == j)
					goto out;
			}
			sk2 = reuse->socks[i];
		}
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
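
/*
 * Illustrative sketch (simplified from the UDP receive path; details vary
 * by kernel version): a demultiplexer hands the group's first matching
 * socket plus a flow hash to reuseport_select_sock() and short-circuits
 * the rest of its scoring loop when a socket comes back:
 *
 *	result = reuseport_select_sock(sk, hash, skb,
 *				       sizeof(struct udphdr));
 *	if (result)
 *		return result;
 */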

int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		int err = reuseport_alloc(sk, false);

		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);
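
/*
 * Illustrative sketch (not part of this file): userspace reaches
 * reuseport_attach_prog() via setsockopt().  The fd wraps a loaded
 * BPF_PROG_TYPE_SK_REUSEPORT (or socket filter) program; the loading
 * step is elided:
 *
 *	int prog_fd = ...;	// fd of a loaded eBPF program
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
 *		   &prog_fd, sizeof(prog_fd));
 */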

int reuseport_detach_prog(struct sock *sk)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return sk->sk_reuseport ? -ENOENT : -EINVAL;

	old_prog = NULL;
	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_replace_pointer(reuse->prog, old_prog,
				       lockdep_is_held(&reuseport_lock));
	spin_unlock_bh(&reuseport_lock);

	if (!old_prog)
		return -ENOENT;

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);
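
/*
 * Illustrative sketch (not part of this file): the matching userspace
 * detach takes no argument beyond the option itself:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_REUSEPORT_BPF, NULL, 0);
 */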