// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
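
/*
 * Userspace side, for orientation (an illustrative sketch, not part of
 * this file): every socket joining a group must enable SO_REUSEPORT
 * before bind(), e.g.
 *
 *	int one = 1;
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * Each socket bound this way to the same address:port ends up in the
 * sock_reuseport array managed below.
 */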

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

#define REUSEPORT_MIN_ID 1
static DEFINE_IDA(reuseport_ida);

int reuseport_get_id(struct sock_reuseport *reuse)
{
	int id;

	if (reuse->reuseport_id)
		return reuse->reuseport_id;

	id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
			    /* Called under reuseport_lock */
			    GFP_ATOMIC);
	if (id < 0)
		return id;

	reuse->reuseport_id = id;

	return reuse->reuseport_id;
}

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
			    sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);

	return reuse;
}

int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		/* Only set reuse->bind_inany if bind_inany is true.
		 * Otherwise we would overwrite a reuse->bind_inany
		 * that was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	reuse->bind_inany = bind_inany;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}
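
/*
 * Worked out from the checks above: capacities start at INIT_SOCKS (128)
 * and double per grow, i.e. 128, 256, ..., 32768. The next doubling,
 * 65536, exceeds U16_MAX (65535), so reuseport_grow() returns NULL and
 * 32768 sockets is the effective per-group ceiling.
 */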

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	if (reuse->reuseport_id)
		ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);
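
/*
 * The smp_wmb() above and the smp_rmb() in reuseport_select_sock()
 * form the usual publish/consume pairing; condensed sketch:
 *
 *	writer (this function)		reader (reuseport_select_sock)
 *	reuse->socks[n] = sk;		socks = READ_ONCE(reuse->num_socks);
 *	smp_wmb();			smp_rmb();
 *	reuse->num_socks = n + 1;	sk2 = reuse->socks[i]; // i < socks
 *
 * A reader that observes the incremented num_socks is therefore
 * guaranteed to also observe the pointer stored in the new slot.
 */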

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* At least one of the sockets in this reuseport group was added
	 * to a bpf map.  Notify the bpf side.  The bpf map logic will
	 * remove this sk if it was indeed added to a bpf map.
	 */
	if (reuse->reuseport_id)
		bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2)
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
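
/*
 * For reference (implementation from linux/kernel.h), the hash fallback
 * above maps the 32-bit hash onto [0, socks) without a division:
 *
 *	static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
 *	{
 *		return (u32)(((u64) val * ep_ro) >> 32);
 *	}
 *
 * i.e. index = (hash * socks) >> 32, a uniform slice of the hash space
 * rather than hash % socks.
 */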

int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		int err = reuseport_alloc(sk, false);

		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);
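
/*
 * Userspace reaches reuseport_attach_prog() through the
 * SO_ATTACH_REUSEPORT_CBPF and SO_ATTACH_REUSEPORT_EBPF socket options.
 * An illustrative classic-BPF sketch (assumed setup: fd is already in a
 * reuseport group) that steers packets to the socket whose array index
 * equals the receiving CPU:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_LD  | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
 *		{ BPF_RET | BPF_A, 0, 0, 0 },
 *	};
 *	struct sock_fprog prog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &prog, sizeof(prog));
 *
 * The program's return value is the index consumed by run_bpf_filter();
 * an out-of-range index falls back to hash selection above.
 */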