/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A BPF sock_map is used to store sock objects. This is primarily used
 * for doing socket redirect with BPF helper routines.
 *
 * A sock map may have BPF programs attached to it; currently a program
 * used to parse packets and a program to provide a verdict and redirect
 * decision on the packet are supported. Any programs attached to a sock
 * map are inherited by sock objects when they are added to the map. If
 * no BPF programs are attached the sock object may only be used for sock
 * redirect.
 *
 * A sock object may be in multiple maps, but can only inherit a single
 * parse or verdict program. If adding a sock object to a map would result
 * in having multiple parsing programs the update will return an EBUSY error.
 *
 * For reference this map is similar to the devmap used in the XDP context,
 * so reviewing these together may be useful. For an example please review
 * ./samples/bpf/sockmap/.
 */

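/* As a rough illustration of the intended flow (a minimal sketch, not the
 * authoritative sample -- see ./samples/bpf/sockmap/ for the real thing),
 * a BPF_PROG_TYPE_SK_SKB verdict program attached to a sockmap might look
 * roughly like the following, where "sock_map" and "idx" are hypothetical
 * names chosen for the example:
 *
 *	SEC("sk_skb/stream_verdict")
 *	int prog_verdict(struct __sk_buff *skb)
 *	{
 *		u32 idx = 0;
 *
 *		// Redirect every parsed record to the socket stored at
 *		// index 0 of the map; SK_DROP is returned on failure.
 *		return bpf_sk_redirect_map(skb, &sock_map, idx, 0);
 *	}
 *
 * Userspace then attaches the parser/verdict programs to the map and adds
 * established TCP sockets to it with the normal map update syscall (see
 * sock_map_prog() and sock_map_update_elem() below).
 */
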
#include <linux/bpf.h>
#include <net/sock.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <net/strparser.h>
#include <net/tcp.h>

#define SOCK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct bpf_stab {
	struct bpf_map map;
	struct sock **sock_map;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
};

enum smap_psock_state {
	SMAP_TX_RUNNING,
};

struct smap_psock_map_entry {
	struct list_head list;
	struct sock **entry;
};

struct smap_psock {
	struct rcu_head rcu;
	/* refcnt is used inside sk_callback_lock */
	u32 refcnt;

	/* datapath variables */
	struct sk_buff_head rxqueue;
	bool strp_enabled;

	/* datapath error path cache across tx work invocations */
	int save_rem;
	int save_off;
	struct sk_buff *save_skb;

	struct strparser strp;
	struct bpf_prog *bpf_parse;
	struct bpf_prog *bpf_verdict;
	struct list_head maps;

	/* Back reference used when sock callbacks trigger sockmap operations */
	struct sock *sock;
	unsigned long state;

	struct work_struct tx_work;
	struct work_struct gc_work;

	struct proto *sk_proto;
	void (*save_close)(struct sock *sk, long timeout);
	void (*save_data_ready)(struct sock *sk);
	void (*save_write_space)(struct sock *sk);
};

static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}

static struct proto tcp_bpf_proto;
static int bpf_tcp_init(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (unlikely(psock->sk_proto)) {
		rcu_read_unlock();
		return -EBUSY;
	}

	psock->save_close = sk->sk_prot->close;
	psock->sk_proto = sk->sk_prot;
	sk->sk_prot = &tcp_bpf_proto;
	rcu_read_unlock();
	return 0;
}

static void bpf_tcp_release(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);

	if (likely(psock)) {
		sk->sk_prot = psock->sk_proto;
		psock->sk_proto = NULL;
	}
	rcu_read_unlock();
}

static void smap_release_sock(struct smap_psock *psock, struct sock *sock);

static void bpf_tcp_close(struct sock *sk, long timeout)
{
	void (*close_fun)(struct sock *sk, long timeout);
	struct smap_psock_map_entry *e, *tmp;
	struct smap_psock *psock;
	struct sock *osk;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (unlikely(!psock)) {
		rcu_read_unlock();
		return sk->sk_prot->close(sk, timeout);
	}

	/* The psock may be destroyed anytime after exiting the RCU critical
	 * section so by the time we use close_fun the psock may no longer
	 * be valid. However, bpf_tcp_close is called with the sock lock
	 * held so the close hook and sk are still valid.
	 */
	close_fun = psock->save_close;

	write_lock_bh(&sk->sk_callback_lock);
	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		osk = cmpxchg(e->entry, sk, NULL);
		if (osk == sk) {
			list_del(&e->list);
			smap_release_sock(psock, sk);
		}
	}
	write_unlock_bh(&sk->sk_callback_lock);
	rcu_read_unlock();
	close_fun(sk, timeout);
}

enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
};

static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
	.name		= "bpf_tcp",
	.uid		= TCP_ULP_BPF,
	.user_visible	= false,
	.owner		= NULL,
	.init		= bpf_tcp_init,
	.release	= bpf_tcp_release,
};

static int bpf_tcp_ulp_register(void)
{
	tcp_bpf_proto = tcp_prot;
	tcp_bpf_proto.close = bpf_tcp_close;
	return tcp_register_ulp(&bpf_tcp_ulp_ops);
}

static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
	int rc;

	if (unlikely(!prog))
		return __SK_DROP;

	skb_orphan(skb);
	/* We need to ensure that BPF metadata for maps is also cleared
	 * when we orphan the skb so that we don't have the possibility
	 * to reference a stale map.
	 */
	TCP_SKB_CB(skb)->bpf.map = NULL;
	skb->sk = psock->sock;
	bpf_compute_data_pointers(skb);
	preempt_disable();
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	preempt_enable();
	skb->sk = NULL;

	/* Moving return codes from UAPI namespace into internal namespace */
	return rc == SK_PASS ?
		(TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
		__SK_DROP;
}

static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
	struct sock *sk;
	int rc;

	rc = smap_verdict_func(psock, skb);
	switch (rc) {
	case __SK_REDIRECT:
		sk = do_sk_redirect_map(skb);
		if (likely(sk)) {
			struct smap_psock *peer = smap_psock_sk(sk);

			if (likely(peer &&
				   test_bit(SMAP_TX_RUNNING, &peer->state) &&
				   !sock_flag(sk, SOCK_DEAD) &&
				   sock_writeable(sk))) {
				skb_set_owner_w(skb, sk);
				skb_queue_tail(&peer->rxqueue, skb);
				schedule_work(&peer->tx_work);
				break;
			}
		}
		/* Fall through and free skb otherwise */
	case __SK_DROP:
	default:
		kfree_skb(skb);
	}
}

static void smap_report_sk_error(struct smap_psock *psock, int err)
{
	struct sock *sk = psock->sock;

	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static void smap_read_sock_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	smap_do_verdict(psock, skb);
	rcu_read_unlock();
}

/* Called with lock held on socket */
static void smap_data_ready(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock)) {
		write_lock_bh(&sk->sk_callback_lock);
		strp_data_ready(&psock->strp);
		write_unlock_bh(&sk->sk_callback_lock);
	}
	rcu_read_unlock();
}

static void smap_tx_work(struct work_struct *w)
{
	struct smap_psock *psock;
	struct sk_buff *skb;
	int rem, off, n;

	psock = container_of(w, struct smap_psock, tx_work);

	/* lock sock to avoid losing sk_socket at some point during loop */
	lock_sock(psock->sock);
	if (psock->save_skb) {
		skb = psock->save_skb;
		rem = psock->save_rem;
		off = psock->save_off;
		psock->save_skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->rxqueue))) {
		rem = skb->len;
		off = 0;
start:
		do {
			if (likely(psock->sock->sk_socket))
				n = skb_send_sock_locked(psock->sock,
							 skb, off, rem);
			else
				n = -EINVAL;
			if (n <= 0) {
				if (n == -EAGAIN) {
					/* Retry when space is available */
					psock->save_skb = skb;
					psock->save_rem = rem;
					psock->save_off = off;
					goto out;
				}
				/* Hard errors break pipe and stop xmit */
				smap_report_sk_error(psock, n ? -n : EPIPE);
				clear_bit(SMAP_TX_RUNNING, &psock->state);
				kfree_skb(skb);
				goto out;
			}
			rem -= n;
			off += n;
		} while (rem);
		kfree_skb(skb);
	}
out:
	release_sock(psock->sock);
}

static void smap_write_space(struct sock *sk)
{
	struct smap_psock *psock;

	rcu_read_lock();
	psock = smap_psock_sk(sk);
	if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
		schedule_work(&psock->tx_work);
	rcu_read_unlock();
}

static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
{
	if (!psock->strp_enabled)
		return;
	sk->sk_data_ready = psock->save_data_ready;
	sk->sk_write_space = psock->save_write_space;
	psock->save_data_ready = NULL;
	psock->save_write_space = NULL;
	strp_stop(&psock->strp);
	psock->strp_enabled = false;
}

static void smap_destroy_psock(struct rcu_head *rcu)
{
	struct smap_psock *psock = container_of(rcu,
						struct smap_psock, rcu);

	/* Now that a grace period has passed there is no longer
	 * any reference to this sock in the sockmap so we can
	 * destroy the psock, strparser, and bpf programs. But,
	 * because we use workqueue sync operations we can not
	 * do it in rcu context.
	 */
	schedule_work(&psock->gc_work);
}

static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
{
	psock->refcnt--;
	if (psock->refcnt)
		return;

	tcp_cleanup_ulp(sock);
	smap_stop_sock(psock, sock);
	clear_bit(SMAP_TX_RUNNING, &psock->state);
	rcu_assign_sk_user_data(sock, NULL);
	call_rcu_sched(&psock->rcu, smap_destroy_psock);
}

static int smap_parse_func_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	struct smap_psock *psock;
	struct bpf_prog *prog;
	int rc;

	rcu_read_lock();
	psock = container_of(strp, struct smap_psock, strp);
	prog = READ_ONCE(psock->bpf_parse);

	if (unlikely(!prog)) {
		rcu_read_unlock();
		return skb->len;
	}

	/* Attach the socket for the bpf program to use if needed. We can do
	 * this because strparser clones the skb before handing it to an upper
	 * layer, meaning skb_orphan has been called. We NULL sk on the
	 * way out to ensure we don't trigger a BUG_ON() in skb/sk operations
	 * later and because we are not charging the memory of this skb to
	 * any socket yet.
	 */
	skb->sk = psock->sock;
	bpf_compute_data_pointers(skb);
	rc = (*prog->bpf_func)(skb, prog->insnsi);
	skb->sk = NULL;
	rcu_read_unlock();
	return rc;
}

static int smap_read_sock_done(struct strparser *strp, int err)
{
	return err;
}

static int smap_init_sock(struct smap_psock *psock,
			  struct sock *sk)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = smap_read_sock_strparser,
		.parse_msg = smap_parse_func_strparser,
		.read_sock_done = smap_read_sock_done,
	};

	return strp_init(&psock->strp, sk, &cb);
}

static void smap_init_progs(struct smap_psock *psock,
			    struct bpf_stab *stab,
			    struct bpf_prog *verdict,
			    struct bpf_prog *parse)
{
	struct bpf_prog *orig_parse, *orig_verdict;

	orig_parse = xchg(&psock->bpf_parse, parse);
	orig_verdict = xchg(&psock->bpf_verdict, verdict);

	if (orig_verdict)
		bpf_prog_put(orig_verdict);
	if (orig_parse)
		bpf_prog_put(orig_parse);
}

static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
{
	if (sk->sk_data_ready == smap_data_ready)
		return;
	psock->save_data_ready = sk->sk_data_ready;
	psock->save_write_space = sk->sk_write_space;
	sk->sk_data_ready = smap_data_ready;
	sk->sk_write_space = smap_write_space;
	psock->strp_enabled = true;
}

static void sock_map_remove_complete(struct bpf_stab *stab)
{
	bpf_map_area_free(stab->sock_map);
	kfree(stab);
}

static void smap_gc_work(struct work_struct *w)
{
	struct smap_psock_map_entry *e, *tmp;
	struct smap_psock *psock;

	psock = container_of(w, struct smap_psock, gc_work);

	/* no callback lock needed because we already detached sockmap ops */
	if (psock->strp_enabled)
		strp_done(&psock->strp);

	cancel_work_sync(&psock->tx_work);
	__skb_queue_purge(&psock->rxqueue);

	/* At this point all strparser and xmit work must be complete */
	if (psock->bpf_parse)
		bpf_prog_put(psock->bpf_parse);
	if (psock->bpf_verdict)
		bpf_prog_put(psock->bpf_verdict);

	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		list_del(&e->list);
		kfree(e);
	}

	sock_put(psock->sock);
	kfree(psock);
}

static struct smap_psock *smap_init_psock(struct sock *sock,
					  struct bpf_stab *stab)
{
	struct smap_psock *psock;

	psock = kzalloc_node(sizeof(struct smap_psock),
			     GFP_ATOMIC | __GFP_NOWARN,
			     stab->map.numa_node);
	if (!psock)
		return ERR_PTR(-ENOMEM);

	psock->sock = sock;
	skb_queue_head_init(&psock->rxqueue);
	INIT_WORK(&psock->tx_work, smap_tx_work);
	INIT_WORK(&psock->gc_work, smap_gc_work);
	INIT_LIST_HEAD(&psock->maps);
	psock->refcnt = 1;

	rcu_assign_sk_user_data(sock, psock);
	sock_hold(sock);
	return psock;
}

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
	struct bpf_stab *stab;
	u64 cost;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	err = bpf_tcp_ulp_register();
	if (err && err != -EEXIST)
		return ERR_PTR(err);

	stab = kzalloc(sizeof(*stab), GFP_USER);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&stab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
	err = -EINVAL;
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_stab;

	stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(stab->map.pages);
	if (err)
		goto free_stab;

	err = -ENOMEM;
	stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
					    sizeof(struct sock *),
					    stab->map.numa_node);
	if (!stab->sock_map)
		goto free_stab;

	return &stab->map;
free_stab:
	kfree(stab);
	return ERR_PTR(err);
}

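/* For illustration, the attribute checks above translate to a userspace
 * creation call roughly like the sketch below (assuming the libbpf helper
 * bpf_create_map() from tools/lib/bpf; error handling is elided and the
 * names are illustrative only):
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP,
 *				    sizeof(int),  // key_size must be 4
 *				    sizeof(int),  // value_size must be 4
 *				    1024,         // max_entries must be > 0
 *				    0);           // flags limited to SOCK_CREATE_FLAG_MASK
 *	if (map_fd < 0)
 *		// creation failed, e.g. -EINVAL or -EPERM per the checks above
 */
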
static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
{
	struct smap_psock_map_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
		if (e->entry == entry) {
			list_del(&e->list);
			break;
		}
	}
}

static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	synchronize_rcu();

	/* At this point no update, lookup or delete operations can happen.
	 * However, be aware we can still get sock state event updates and
	 * data ready callbacks that reference the psock from sk_user_data.
	 * Also psock worker threads are still in-flight. So smap_release_sock
	 * will only free the psock after cancel_sync on the worker threads
	 * and a grace period expires to ensure the psock is really safe to
	 * remove.
	 */
	rcu_read_lock();
	for (i = 0; i < stab->map.max_entries; i++) {
		struct smap_psock *psock;
		struct sock *sock;

		sock = xchg(&stab->sock_map[i], NULL);
		if (!sock)
			continue;

		write_lock_bh(&sock->sk_callback_lock);
		psock = smap_psock_sk(sock);
		/* This check handles a racing sock event that can get the
		 * sk_callback_lock before this case but after xchg happens
		 * causing the refcnt to hit zero and sock user data (psock)
		 * to be null and queued for garbage collection.
		 */
		if (likely(psock)) {
			smap_list_remove(psock, &stab->sock_map[i]);
			smap_release_sock(psock, sock);
		}
		write_unlock_bh(&sock->sk_callback_lock);
	}
	rcu_read_unlock();

	sock_map_remove_complete(stab);
}

static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (i >= stab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (i == stab->map.max_entries - 1)
		return -ENOENT;

	*next = i + 1;
	return 0;
}

struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	if (key >= map->max_entries)
		return NULL;

	return READ_ONCE(stab->sock_map[key]);
}

static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct smap_psock *psock;
	int k = *(u32 *)key;
	struct sock *sock;

	if (k >= map->max_entries)
		return -EINVAL;

	sock = xchg(&stab->sock_map[k], NULL);
	if (!sock)
		return -EINVAL;

	write_lock_bh(&sock->sk_callback_lock);
	psock = smap_psock_sk(sock);
	if (!psock)
		goto out;

	if (psock->bpf_parse)
		smap_stop_sock(psock, sock);
	smap_list_remove(psock, &stab->sock_map[k]);
	smap_release_sock(psock, sock);
out:
	write_unlock_bh(&sock->sk_callback_lock);
	return 0;
}

/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
 * done inside rcu critical sections. This ensures on updates that the psock
 * will not be released via smap_release_sock() until concurrent updates/deletes
 * complete. All operations operate on sock_map using cmpxchg and xchg
 * operations to ensure we do not get stale references. Any reads into the
 * map must be done with READ_ONCE() because of this.
 *
 * A psock is destroyed via call_rcu and after any worker threads are cancelled
 * and synced so we are certain all references from the update/lookup/delete
 * operations as well as references in the data path are no longer in use.
 *
 * Psocks may exist in multiple maps, but only a single set of parse/verdict
 * programs may be inherited from the maps it belongs to. A reference count
 * is kept with the total number of references to the psock from all maps. The
 * psock will not be released until this reaches zero. The psock and sock
 * user data use the sk_callback_lock to protect critical data structures
 * from concurrent access. This allows us to avoid two updates modifying the
 * user data in sock at the same time; the lock is required anyway for
 * modifying callbacks, we simply increase its scope slightly.
 *
 * Rules to follow:
 * - psock must always be read inside an RCU critical section
 * - sk_user_data must only be modified inside sk_callback_lock and read
 *   inside an RCU critical section.
 * - psock->maps list must only be read & modified inside sk_callback_lock
 * - sock_map must use READ_ONCE and (cmp)xchg operations
 * - BPF verdict/parse programs must use READ_ONCE and xchg operations
 */

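/* A minimal sketch of the access pattern the rules above imply (not code
 * from this file, just an illustration; "idx" is a made-up name):
 *
 *	rcu_read_lock();
 *	sock = READ_ONCE(stab->sock_map[idx]);	// never a plain load
 *	if (sock) {
 *		write_lock_bh(&sock->sk_callback_lock);
 *		psock = smap_psock_sk(sock);	// sk_user_data under RCU
 *		if (psock)
 *			;	// safe to walk psock->maps or adjust refcnt here
 *		write_unlock_bh(&sock->sk_callback_lock);
 *	}
 *	rcu_read_unlock();
 */
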
static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
				    struct bpf_map *map,
				    void *key, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct smap_psock_map_entry *e = NULL;
	struct bpf_prog *verdict, *parse;
	struct sock *osock, *sock;
	struct smap_psock *psock;
	u32 i = *(u32 *)key;
	int err;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	if (unlikely(i >= stab->map.max_entries))
		return -E2BIG;

	sock = READ_ONCE(stab->sock_map[i]);
	if (flags == BPF_EXIST && !sock)
		return -ENOENT;
	else if (flags == BPF_NOEXIST && sock)
		return -EEXIST;

	sock = skops->sk;

	/* 1. If the sock map has BPF programs those will be inherited by the
	 * sock being added. If the sock is already attached to BPF programs
	 * this results in an error.
	 */
	verdict = READ_ONCE(stab->bpf_verdict);
	parse = READ_ONCE(stab->bpf_parse);

	if (parse && verdict) {
		/* bpf prog refcnt may be zero if a concurrent attach operation
		 * removes the program after the above READ_ONCE() but before
		 * we increment the refcnt. If this is the case abort with an
		 * error.
		 */
		verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
		if (IS_ERR(verdict))
			return PTR_ERR(verdict);

		parse = bpf_prog_inc_not_zero(stab->bpf_parse);
		if (IS_ERR(parse)) {
			bpf_prog_put(verdict);
			return PTR_ERR(parse);
		}
	}

	write_lock_bh(&sock->sk_callback_lock);
	psock = smap_psock_sk(sock);

	/* 2. Do not allow inheriting programs if a psock exists and has
	 * already inherited programs. This would create confusion about
	 * which parser/verdict program is running. If no psock exists,
	 * create one. Inside sk_callback_lock to ensure a concurrent create
	 * doesn't update user data.
	 */
	if (psock) {
		if (READ_ONCE(psock->bpf_parse) && parse) {
			err = -EBUSY;
			goto out_progs;
		}
		psock->refcnt++;
	} else {
		psock = smap_init_psock(sock, stab);
		if (IS_ERR(psock)) {
			err = PTR_ERR(psock);
			goto out_progs;
		}

		err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
		if (err)
			goto out_progs;

		set_bit(SMAP_TX_RUNNING, &psock->state);
	}

	e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
	if (!e) {
		err = -ENOMEM;
		goto out_progs;
	}
	e->entry = &stab->sock_map[i];

	/* 3. At this point we have a reference to a valid psock that is
	 * running. Attach any BPF programs needed.
	 */
	if (parse && verdict && !psock->strp_enabled) {
		err = smap_init_sock(psock, sock);
		if (err)
			goto out_free;
		smap_init_progs(psock, stab, verdict, parse);
		smap_start_sock(psock, sock);
	}

	/* 4. Place psock in sockmap for use and stop any programs on
	 * the old sock assuming it's not the same sock we are replacing
	 * it with. Because we can only have a single set of programs if
	 * old_sock has a strp we can stop it.
	 */
	list_add_tail(&e->list, &psock->maps);
	write_unlock_bh(&sock->sk_callback_lock);

	osock = xchg(&stab->sock_map[i], sock);
	if (osock) {
		struct smap_psock *opsock = smap_psock_sk(osock);

		write_lock_bh(&osock->sk_callback_lock);
		if (osock != sock && parse)
			smap_stop_sock(opsock, osock);
		smap_list_remove(opsock, &stab->sock_map[i]);
		smap_release_sock(opsock, osock);
		write_unlock_bh(&osock->sk_callback_lock);
	}
	return 0;
out_free:
	smap_release_sock(psock, sock);
out_progs:
	if (verdict)
		bpf_prog_put(verdict);
	if (parse)
		bpf_prog_put(parse);
	write_unlock_bh(&sock->sk_callback_lock);
	kfree(e);
	return err;
}

int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *orig;

	if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
		return -EINVAL;

	switch (type) {
	case BPF_SK_SKB_STREAM_PARSER:
		orig = xchg(&stab->bpf_parse, prog);
		break;
	case BPF_SK_SKB_STREAM_VERDICT:
		orig = xchg(&stab->bpf_verdict, prog);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (orig)
		bpf_prog_put(orig);

	return 0;
}

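/* From userspace the parser and verdict programs are attached to the map
 * itself, roughly as sketched below (assuming the libbpf helper
 * bpf_prog_attach(); prog_parser_fd, prog_verdict_fd and map_fd are
 * illustrative names):
 *
 *	bpf_prog_attach(prog_parser_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
 *	bpf_prog_attach(prog_verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 *
 * Any other attach type lands in the default case above and returns
 * -EOPNOTSUPP.
 */
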
static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	return NULL;
}

static int sock_map_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_sock_ops_kern skops;
	u32 fd = *(u32 *)value;
	struct socket *socket;
	int err;

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	skops.sk = socket->sk;
	if (!skops.sk) {
		fput(socket->file);
		return -EINVAL;
	}

	if (skops.sk->sk_type != SOCK_STREAM ||
	    skops.sk->sk_protocol != IPPROTO_TCP) {
		fput(socket->file);
		return -EOPNOTSUPP;
	}

	err = sock_map_ctx_update_elem(&skops, map, key, flags);
	fput(socket->file);
	return err;
}

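/* The value written from userspace is the file descriptor of an established
 * TCP socket, so a typical update looks roughly like this sketch (assuming
 * the libbpf helper bpf_map_update_elem(); sock_fd and map_fd are
 * illustrative names):
 *
 *	int key = 0;
 *	// sock_fd must be SOCK_STREAM/IPPROTO_TCP or -EOPNOTSUPP is returned
 *	bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY);
 */
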
static void sock_map_release(struct bpf_map *map, struct file *map_file)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *orig;

	orig = xchg(&stab->bpf_parse, NULL);
	if (orig)
		bpf_prog_put(orig);
	orig = xchg(&stab->bpf_verdict, NULL);
	if (orig)
		bpf_prog_put(orig);
}

const struct bpf_map_ops sock_map_ops = {
	.map_alloc = sock_map_alloc,
	.map_free = sock_map_free,
	.map_lookup_elem = sock_map_lookup,
	.map_get_next_key = sock_map_get_next_key,
	.map_update_elem = sock_map_update_elem,
	.map_delete_elem = sock_map_delete_elem,
	.map_release = sock_map_release,
};

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func		= bpf_sock_map_update,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};
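
/* For reference, a BPF_PROG_TYPE_SOCK_OPS program can populate the map
 * itself through this helper, roughly as in the sketch below (the map and
 * index names are illustrative):
 *
 *	SEC("sockops")
 *	int prog_sockops(struct bpf_sock_ops *skops)
 *	{
 *		u32 idx = 0;
 *
 *		if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
 *			bpf_sock_map_update(skops, &sock_map, &idx, BPF_NOEXIST);
 *		return 0;
 *	}
 */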