Linux 4.14.14
kernel/bpf/sockmap.c
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/* A BPF sock_map is used to store sock objects. This is primarily used
 * for doing socket redirect with BPF helper routines.
 *
 * A sock map may have BPF programs attached to it, currently a program
 * used to parse packets and a program to provide a verdict and redirect
 * decision on the packet are supported. Any programs attached to a sock
 * map are inherited by sock objects when they are added to the map. If
 * no BPF programs are attached the sock object may only be used for sock
 * redirect.
 *
 * A sock object may be in multiple maps, but can only inherit a single
 * parse or verdict program. If adding a sock object to a map would result
 * in having multiple parsing programs the update will return an EBUSY error.
 *
 * For reference this program is similar to devmap used in the XDP context;
 * reviewing these together may be useful. For an example please review
 * ./samples/bpf/sockmap/.
 */
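
/* A minimal user-space sketch of driving a sockmap, assuming the
 * tools/lib/bpf helpers bpf_create_map(), bpf_prog_attach() and
 * bpf_map_update_elem(); names, sizes and error handling are illustrative
 * only, see samples/sockmap/ for the reference programs.
 *
 *      int map_fd, parse_fd, verdict_fd, sock_fd;
 *      int key = 0;
 *
 *      map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP,
 *                              sizeof(int), sizeof(int), 32, 0);
 *
 *      // Optional: attach parser/verdict programs to the map. Sockets
 *      // added later inherit them (see sock_map_prog() below).
 *      bpf_prog_attach(parse_fd, map_fd, BPF_SK_SKB_STREAM_PARSER, 0);
 *      bpf_prog_attach(verdict_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0);
 *
 *      // Add an established TCP socket at index 0; the value written
 *      // into the map is the socket fd (key and value size are both
 *      // 4 bytes, see sock_map_alloc() and sock_map_update_elem() below).
 *      bpf_map_update_elem(map_fd, &key, &sock_fd, BPF_ANY);
 */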
#include <linux/bpf.h>
#include <net/sock.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <net/strparser.h>
#include <net/tcp.h>

struct bpf_stab {
        struct bpf_map map;
        struct sock **sock_map;
        struct bpf_prog *bpf_parse;
        struct bpf_prog *bpf_verdict;
};

enum smap_psock_state {
        SMAP_TX_RUNNING,
};

struct smap_psock_map_entry {
        struct list_head list;
        struct sock **entry;
};
struct smap_psock {
        struct rcu_head rcu;
        /* refcnt is used inside sk_callback_lock */
        u32 refcnt;

        /* datapath variables */
        struct sk_buff_head rxqueue;
        bool strp_enabled;

        /* datapath error path cache across tx work invocations */
        int save_rem;
        int save_off;
        struct sk_buff *save_skb;

        struct strparser strp;
        struct bpf_prog *bpf_parse;
        struct bpf_prog *bpf_verdict;
        struct list_head maps;

        /* Back reference used when sock callbacks trigger sockmap operations */
        struct sock *sock;
        unsigned long state;

        struct work_struct tx_work;
        struct work_struct gc_work;

        void (*save_data_ready)(struct sock *sk);
        void (*save_write_space)(struct sock *sk);
        void (*save_state_change)(struct sock *sk);
};
static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
        return rcu_dereference_sk_user_data(sk);
}

/* compute the linear packet data range [data, data_end) for skb when
 * sk_skb type programs are in use.
 */
static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
{
        TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
}

enum __sk_action {
        __SK_DROP = 0,
        __SK_PASS,
        __SK_REDIRECT,
};
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
        struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
        int rc;

        if (unlikely(!prog))
                return __SK_DROP;

        skb_orphan(skb);
        /* We need to ensure that BPF metadata for maps is also cleared
         * when we orphan the skb so that we don't have the possibility
         * to reference a stale map.
         */
        TCP_SKB_CB(skb)->bpf.map = NULL;
        skb->sk = psock->sock;
        bpf_compute_data_end_sk_skb(skb);
        preempt_disable();
        rc = (*prog->bpf_func)(skb, prog->insnsi);
        preempt_enable();
        skb->sk = NULL;

        /* Moving return codes from UAPI namespace into internal namespace */
        return rc == SK_PASS ?
                (TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
                __SK_DROP;
}
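
/* A minimal sketch of the BPF side these return codes come from: a stream
 * verdict program returns the UAPI SK_PASS/SK_DROP values that
 * smap_verdict_func() maps into the internal __SK_* namespace. A program
 * that wants __SK_REDIRECT would additionally call the bpf_sk_redirect_map()
 * helper so that TCP_SKB_CB(skb)->bpf.map is set when SK_PASS is returned.
 * Section name and drop policy below are illustrative only.
 *
 *      SEC("sk_skb_verdict")
 *      int prog_verdict(struct __sk_buff *skb)
 *      {
 *              // drop empty records, deliver everything else locally
 *              return skb->len == 0 ? SK_DROP : SK_PASS;
 *      }
 */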
static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
        struct sock *sk;
        int rc;

        rc = smap_verdict_func(psock, skb);
        switch (rc) {
        case __SK_REDIRECT:
                sk = do_sk_redirect_map(skb);
                if (likely(sk)) {
                        struct smap_psock *peer = smap_psock_sk(sk);

                        if (likely(peer &&
                                   test_bit(SMAP_TX_RUNNING, &peer->state) &&
                                   !sock_flag(sk, SOCK_DEAD) &&
                                   sock_writeable(sk))) {
                                skb_set_owner_w(skb, sk);
                                skb_queue_tail(&peer->rxqueue, skb);
                                schedule_work(&peer->tx_work);
                                break;
                        }
                }
                /* Fall through and free skb otherwise */
        case __SK_DROP:
        default:
                kfree_skb(skb);
        }
}

static void smap_report_sk_error(struct smap_psock *psock, int err)
{
        struct sock *sk = psock->sock;

        sk->sk_err = err;
        sk->sk_error_report(sk);
}

static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
/* Called with lock_sock(sk) held */
static void smap_state_change(struct sock *sk)
{
        struct smap_psock_map_entry *e, *tmp;
        struct smap_psock *psock;
        struct socket_wq *wq;
        struct sock *osk;

        rcu_read_lock();

        /* Allowing transitions into established and syn_recv states allows
         * for early binding sockets to a smap object before the connection
         * is established.
         */
        switch (sk->sk_state) {
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
        case TCP_ESTABLISHED:
                break;
        case TCP_CLOSE_WAIT:
        case TCP_CLOSING:
        case TCP_LAST_ACK:
        case TCP_FIN_WAIT1:
        case TCP_FIN_WAIT2:
        case TCP_LISTEN:
                break;
        case TCP_CLOSE:
                /* Only release if the map entry is in fact the sock in
                 * question. There is a case where the operator deletes
                 * the sock from the map, but the TCP sock is closed before
                 * the psock is detached. Use cmpxchg to verify correct
                 * sock is removed.
                 */
                psock = smap_psock_sk(sk);
                if (unlikely(!psock))
                        break;
                write_lock_bh(&sk->sk_callback_lock);
                list_for_each_entry_safe(e, tmp, &psock->maps, list) {
                        osk = cmpxchg(e->entry, sk, NULL);
                        if (osk == sk) {
                                list_del(&e->list);
                                smap_release_sock(psock, sk);
                        }
                }
                write_unlock_bh(&sk->sk_callback_lock);
                break;
        default:
                psock = smap_psock_sk(sk);
                if (unlikely(!psock))
                        break;
                smap_report_sk_error(psock, EPIPE);
                break;
        }

        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        rcu_read_unlock();
}
static void smap_read_sock_strparser(struct strparser *strp,
                                     struct sk_buff *skb)
{
        struct smap_psock *psock;

        rcu_read_lock();
        psock = container_of(strp, struct smap_psock, strp);
        smap_do_verdict(psock, skb);
        rcu_read_unlock();
}

/* Called with lock held on socket */
static void smap_data_ready(struct sock *sk)
{
        struct smap_psock *psock;

        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (likely(psock)) {
                write_lock_bh(&sk->sk_callback_lock);
                strp_data_ready(&psock->strp);
                write_unlock_bh(&sk->sk_callback_lock);
        }
        rcu_read_unlock();
}
static void smap_tx_work(struct work_struct *w)
{
        struct smap_psock *psock;
        struct sk_buff *skb;
        int rem, off, n;

        psock = container_of(w, struct smap_psock, tx_work);

        /* lock sock to avoid losing sk_socket at some point during loop */
        lock_sock(psock->sock);
        if (psock->save_skb) {
                skb = psock->save_skb;
                rem = psock->save_rem;
                off = psock->save_off;
                psock->save_skb = NULL;
                goto start;
        }

        while ((skb = skb_dequeue(&psock->rxqueue))) {
                rem = skb->len;
                off = 0;
start:
                do {
                        if (likely(psock->sock->sk_socket))
                                n = skb_send_sock_locked(psock->sock,
                                                         skb, off, rem);
                        else
                                n = -EINVAL;
                        if (n <= 0) {
                                if (n == -EAGAIN) {
                                        /* Retry when space is available */
                                        psock->save_skb = skb;
                                        psock->save_rem = rem;
                                        psock->save_off = off;
                                        goto out;
                                }
                                /* Hard errors break pipe and stop xmit */
                                smap_report_sk_error(psock, n ? -n : EPIPE);
                                clear_bit(SMAP_TX_RUNNING, &psock->state);
                                kfree_skb(skb);
                                goto out;
                        }
                        rem -= n;
                        off += n;
                } while (rem);
                kfree_skb(skb);
        }
out:
        release_sock(psock->sock);
}

static void smap_write_space(struct sock *sk)
{
        struct smap_psock *psock;

        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
                schedule_work(&psock->tx_work);
        rcu_read_unlock();
}
static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
{
        if (!psock->strp_enabled)
                return;
        sk->sk_data_ready = psock->save_data_ready;
        sk->sk_write_space = psock->save_write_space;
        sk->sk_state_change = psock->save_state_change;
        psock->save_data_ready = NULL;
        psock->save_write_space = NULL;
        psock->save_state_change = NULL;
        strp_stop(&psock->strp);
        psock->strp_enabled = false;
}

static void smap_destroy_psock(struct rcu_head *rcu)
{
        struct smap_psock *psock = container_of(rcu,
                                                struct smap_psock, rcu);

        /* Now that a grace period has passed there is no longer
         * any reference to this sock in the sockmap so we can
         * destroy the psock, strparser, and bpf programs. But,
         * because we use workqueue sync operations we can not
         * do it in rcu context
         */
        schedule_work(&psock->gc_work);
}

static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
{
        psock->refcnt--;
        if (psock->refcnt)
                return;

        smap_stop_sock(psock, sock);
        clear_bit(SMAP_TX_RUNNING, &psock->state);
        rcu_assign_sk_user_data(sock, NULL);
        call_rcu_sched(&psock->rcu, smap_destroy_psock);
}
static int smap_parse_func_strparser(struct strparser *strp,
                                     struct sk_buff *skb)
{
        struct smap_psock *psock;
        struct bpf_prog *prog;
        int rc;

        rcu_read_lock();
        psock = container_of(strp, struct smap_psock, strp);
        prog = READ_ONCE(psock->bpf_parse);

        if (unlikely(!prog)) {
                rcu_read_unlock();
                return skb->len;
        }

        /* Attach socket for bpf program to use if needed. We can do this
         * because strparser clones the skb before handing it to an upper
         * layer, meaning skb_orphan has been called. We NULL sk on the
         * way out to ensure we don't trigger a BUG_ON in skb/sk operations
         * later and because we are not charging the memory of this skb to
         * any socket yet.
         */
        skb->sk = psock->sock;
        bpf_compute_data_end_sk_skb(skb);
        rc = (*prog->bpf_func)(skb, prog->insnsi);
        skb->sk = NULL;
        rcu_read_unlock();
        return rc;
}
static int smap_read_sock_done(struct strparser *strp, int err)
{
        return err;
}

static int smap_init_sock(struct smap_psock *psock,
                          struct sock *sk)
{
        static const struct strp_callbacks cb = {
                .rcv_msg = smap_read_sock_strparser,
                .parse_msg = smap_parse_func_strparser,
                .read_sock_done = smap_read_sock_done,
        };

        return strp_init(&psock->strp, sk, &cb);
}

static void smap_init_progs(struct smap_psock *psock,
                            struct bpf_stab *stab,
                            struct bpf_prog *verdict,
                            struct bpf_prog *parse)
{
        struct bpf_prog *orig_parse, *orig_verdict;

        orig_parse = xchg(&psock->bpf_parse, parse);
        orig_verdict = xchg(&psock->bpf_verdict, verdict);

        if (orig_verdict)
                bpf_prog_put(orig_verdict);
        if (orig_parse)
                bpf_prog_put(orig_parse);
}

static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
{
        if (sk->sk_data_ready == smap_data_ready)
                return;
        psock->save_data_ready = sk->sk_data_ready;
        psock->save_write_space = sk->sk_write_space;
        psock->save_state_change = sk->sk_state_change;
        sk->sk_data_ready = smap_data_ready;
        sk->sk_write_space = smap_write_space;
        sk->sk_state_change = smap_state_change;
        psock->strp_enabled = true;
}
static void sock_map_remove_complete(struct bpf_stab *stab)
{
        bpf_map_area_free(stab->sock_map);
        kfree(stab);
}

static void smap_gc_work(struct work_struct *w)
{
        struct smap_psock_map_entry *e, *tmp;
        struct smap_psock *psock;

        psock = container_of(w, struct smap_psock, gc_work);

        /* no callback lock needed because we already detached sockmap ops */
        if (psock->strp_enabled)
                strp_done(&psock->strp);

        cancel_work_sync(&psock->tx_work);
        __skb_queue_purge(&psock->rxqueue);

        /* At this point all strparser and xmit work must be complete */
        if (psock->bpf_parse)
                bpf_prog_put(psock->bpf_parse);
        if (psock->bpf_verdict)
                bpf_prog_put(psock->bpf_verdict);

        list_for_each_entry_safe(e, tmp, &psock->maps, list) {
                list_del(&e->list);
                kfree(e);
        }

        sock_put(psock->sock);
        kfree(psock);
}
static struct smap_psock *smap_init_psock(struct sock *sock,
                                          struct bpf_stab *stab)
{
        struct smap_psock *psock;

        psock = kzalloc_node(sizeof(struct smap_psock),
                             GFP_ATOMIC | __GFP_NOWARN,
                             stab->map.numa_node);
        if (!psock)
                return ERR_PTR(-ENOMEM);

        psock->sock = sock;
        skb_queue_head_init(&psock->rxqueue);
        INIT_WORK(&psock->tx_work, smap_tx_work);
        INIT_WORK(&psock->gc_work, smap_gc_work);
        INIT_LIST_HEAD(&psock->maps);
        psock->refcnt = 1;

        rcu_assign_sk_user_data(sock, psock);
        sock_hold(sock);
        return psock;
}
static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
        struct bpf_stab *stab;
        int err = -EINVAL;
        u64 cost;

        if (!capable(CAP_NET_ADMIN))
                return ERR_PTR(-EPERM);

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
                return ERR_PTR(-EINVAL);

        if (attr->value_size > KMALLOC_MAX_SIZE)
                return ERR_PTR(-E2BIG);

        stab = kzalloc(sizeof(*stab), GFP_USER);
        if (!stab)
                return ERR_PTR(-ENOMEM);

        /* mandatory map attributes */
        stab->map.map_type = attr->map_type;
        stab->map.key_size = attr->key_size;
        stab->map.value_size = attr->value_size;
        stab->map.max_entries = attr->max_entries;
        stab->map.map_flags = attr->map_flags;
        stab->map.numa_node = bpf_map_attr_numa_node(attr);

        /* make sure page count doesn't overflow */
        cost = (u64) stab->map.max_entries * sizeof(struct sock *);
        if (cost >= U32_MAX - PAGE_SIZE)
                goto free_stab;

        stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

        /* if map size is larger than memlock limit, reject it early */
        err = bpf_map_precharge_memlock(stab->map.pages);
        if (err)
                goto free_stab;

        err = -ENOMEM;
        stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
                                            sizeof(struct sock *),
                                            stab->map.numa_node);
        if (!stab->sock_map)
                goto free_stab;

        return &stab->map;
free_stab:
        kfree(stab);
        return ERR_PTR(err);
}
static void smap_list_remove(struct smap_psock *psock, struct sock **entry)
{
        struct smap_psock_map_entry *e, *tmp;

        list_for_each_entry_safe(e, tmp, &psock->maps, list) {
                if (e->entry == entry) {
                        list_del(&e->list);
                        break;
                }
        }
}
static void sock_map_free(struct bpf_map *map)
{
        struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
        int i;

        synchronize_rcu();

        /* At this point no update, lookup or delete operations can happen.
         * However, be aware we can still get socket state event updates
         * and data ready callbacks that reference the psock from sk_user_data.
         * Also psock worker threads are still in-flight. So smap_release_sock
         * will only free the psock after cancel_sync on the worker threads
         * and a grace period expires to ensure psock is really safe to remove.
         */
        rcu_read_lock();
        for (i = 0; i < stab->map.max_entries; i++) {
                struct smap_psock *psock;
                struct sock *sock;

                sock = xchg(&stab->sock_map[i], NULL);
                if (!sock)
                        continue;

                write_lock_bh(&sock->sk_callback_lock);
                psock = smap_psock_sk(sock);
                smap_list_remove(psock, &stab->sock_map[i]);
                smap_release_sock(psock, sock);
                write_unlock_bh(&sock->sk_callback_lock);
        }
        rcu_read_unlock();

        if (stab->bpf_verdict)
                bpf_prog_put(stab->bpf_verdict);
        if (stab->bpf_parse)
                bpf_prog_put(stab->bpf_parse);

        sock_map_remove_complete(stab);
}
static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
        u32 i = key ? *(u32 *)key : U32_MAX;
        u32 *next = (u32 *)next_key;

        if (i >= stab->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (i == stab->map.max_entries - 1)
                return -ENOENT;

        *next = i + 1;
        return 0;
}

struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
        struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

        if (key >= map->max_entries)
                return NULL;

        return READ_ONCE(stab->sock_map[key]);
}
static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
        struct smap_psock *psock;
        int k = *(u32 *)key;
        struct sock *sock;

        if (k >= map->max_entries)
                return -EINVAL;

        sock = xchg(&stab->sock_map[k], NULL);
        if (!sock)
                return -EINVAL;

        write_lock_bh(&sock->sk_callback_lock);
        psock = smap_psock_sk(sock);
        if (!psock)
                goto out;

        if (psock->bpf_parse)
                smap_stop_sock(psock, sock);
        smap_list_remove(psock, &stab->sock_map[k]);
        smap_release_sock(psock, sock);
out:
        write_unlock_bh(&sock->sk_callback_lock);
        return 0;
}
/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
 * done inside rcu critical sections. This ensures on updates that the psock
 * will not be released via smap_release_sock() until concurrent updates/deletes
 * complete. All operations operate on sock_map using cmpxchg and xchg
 * operations to ensure we do not get stale references. Any reads into the
 * map must be done with READ_ONCE() because of this.
 *
 * A psock is destroyed via call_rcu and after any worker threads are cancelled
 * and synced, so we are certain all references from the update/lookup/delete
 * operations as well as references in the data path are no longer in use.
 *
 * Psocks may exist in multiple maps, but only a single set of parse/verdict
 * programs may be inherited from the maps it belongs to. A reference count
 * is kept with the total number of references to the psock from all maps. The
 * psock will not be released until this reaches zero. The psock and sock
 * user data use the sk_callback_lock to protect critical data structures
 * from concurrent access. This allows us to avoid two updates from modifying
 * the user data in sock, and the lock is required anyway for modifying
 * callbacks; we simply increase its scope slightly.
 *
 * Rules to follow:
 *  - psock must always be read inside an RCU critical section
 *  - sk_user_data must only be modified inside sk_callback_lock and read
 *    inside an RCU critical section.
 *  - psock->maps list must only be read & modified inside sk_callback_lock
 *  - sock_map must use READ_ONCE and (cmp)xchg operations
 *  - BPF verdict/parse programs must use READ_ONCE and xchg operations
 */
static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
                                    struct bpf_map *map,
                                    void *key, u64 flags)
{
        struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
        struct smap_psock_map_entry *e = NULL;
        struct bpf_prog *verdict, *parse;
        struct sock *osock, *sock;
        struct smap_psock *psock;
        u32 i = *(u32 *)key;
        int err;

        if (unlikely(flags > BPF_EXIST))
                return -EINVAL;

        if (unlikely(i >= stab->map.max_entries))
                return -E2BIG;

        sock = READ_ONCE(stab->sock_map[i]);
        if (flags == BPF_EXIST && !sock)
                return -ENOENT;
        else if (flags == BPF_NOEXIST && sock)
                return -EEXIST;

        sock = skops->sk;

        /* 1. If sock map has BPF programs those will be inherited by the
         * sock being added. If the sock is already attached to BPF programs
         * this results in an error.
         */
        verdict = READ_ONCE(stab->bpf_verdict);
        parse = READ_ONCE(stab->bpf_parse);

        if (parse && verdict) {
                /* bpf prog refcnt may be zero if a concurrent attach operation
                 * removes the program after the above READ_ONCE() but before
                 * we increment the refcnt. If this is the case abort with an
                 * error.
                 */
                verdict = bpf_prog_inc_not_zero(stab->bpf_verdict);
                if (IS_ERR(verdict))
                        return PTR_ERR(verdict);

                parse = bpf_prog_inc_not_zero(stab->bpf_parse);
                if (IS_ERR(parse)) {
                        bpf_prog_put(verdict);
                        return PTR_ERR(parse);
                }
        }

        write_lock_bh(&sock->sk_callback_lock);
        psock = smap_psock_sk(sock);

        /* 2. Do not allow inheriting programs if psock exists and has
         * already inherited programs. This would create confusion on
         * which parser/verdict program is running. If no psock exists
         * create one. Inside sk_callback_lock to ensure concurrent create
         * doesn't update user data.
         */
        if (psock) {
                if (READ_ONCE(psock->bpf_parse) && parse) {
                        err = -EBUSY;
                        goto out_progs;
                }
                psock->refcnt++;
        } else {
                psock = smap_init_psock(sock, stab);
                if (IS_ERR(psock)) {
                        err = PTR_ERR(psock);
                        goto out_progs;
                }

                set_bit(SMAP_TX_RUNNING, &psock->state);
        }

        e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
        if (!e) {
                err = -ENOMEM;
                goto out_progs;
        }
        e->entry = &stab->sock_map[i];

        /* 3. At this point we have a reference to a valid psock that is
         * running. Attach any BPF programs needed.
         */
        if (parse && verdict && !psock->strp_enabled) {
                err = smap_init_sock(psock, sock);
                if (err)
                        goto out_free;
                smap_init_progs(psock, stab, verdict, parse);
                smap_start_sock(psock, sock);
        }

        /* 4. Place psock in sockmap for use and stop any programs on
         * the old sock assuming it's not the same sock we are replacing
         * it with. Because we can only have a single set of programs if
         * old_sock has a strp we can stop it.
         */
        list_add_tail(&e->list, &psock->maps);
        write_unlock_bh(&sock->sk_callback_lock);

        osock = xchg(&stab->sock_map[i], sock);
        if (osock) {
                struct smap_psock *opsock = smap_psock_sk(osock);

                write_lock_bh(&osock->sk_callback_lock);
                if (osock != sock && parse)
                        smap_stop_sock(opsock, osock);
                smap_list_remove(opsock, &stab->sock_map[i]);
                smap_release_sock(opsock, osock);
                write_unlock_bh(&osock->sk_callback_lock);
        }
        return 0;
out_free:
        smap_release_sock(psock, sock);
out_progs:
        if (verdict)
                bpf_prog_put(verdict);
        if (parse)
                bpf_prog_put(parse);
        write_unlock_bh(&sock->sk_callback_lock);
        kfree(e);
        return err;
}
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
{
        struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
        struct bpf_prog *orig;

        if (unlikely(map->map_type != BPF_MAP_TYPE_SOCKMAP))
                return -EINVAL;

        switch (type) {
        case BPF_SK_SKB_STREAM_PARSER:
                orig = xchg(&stab->bpf_parse, prog);
                break;
        case BPF_SK_SKB_STREAM_VERDICT:
                orig = xchg(&stab->bpf_verdict, prog);
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (orig)
                bpf_prog_put(orig);

        return 0;
}

static void *sock_map_lookup(struct bpf_map *map, void *key)
{
        return NULL;
}

static int sock_map_update_elem(struct bpf_map *map,
                                void *key, void *value, u64 flags)
{
        struct bpf_sock_ops_kern skops;
        u32 fd = *(u32 *)value;
        struct socket *socket;
        int err;

        socket = sockfd_lookup(fd, &err);
        if (!socket)
                return err;

        skops.sk = socket->sk;
        if (!skops.sk) {
                fput(socket->file);
                return -EINVAL;
        }

        if (skops.sk->sk_type != SOCK_STREAM ||
            skops.sk->sk_protocol != IPPROTO_TCP) {
                fput(socket->file);
                return -EOPNOTSUPP;
        }

        err = sock_map_ctx_update_elem(&skops, map, key, flags);
        fput(socket->file);
        return err;
}
const struct bpf_map_ops sock_map_ops = {
        .map_alloc = sock_map_alloc,
        .map_free = sock_map_free,
        .map_lookup_elem = sock_map_lookup,
        .map_get_next_key = sock_map_get_next_key,
        .map_update_elem = sock_map_update_elem,
        .map_delete_elem = sock_map_delete_elem,
};

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
           struct bpf_map *, map, void *, key, u64, flags)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
        .func           = bpf_sock_map_update,
        .gpl_only       = false,
        .pkt_access     = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_PTR_TO_MAP_KEY,
        .arg4_type      = ARG_ANYTHING,
};
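
/* A minimal sketch of the BPF-side counterpart to sock_map_update_elem():
 * a sock_ops program typically calls the bpf_sock_map_update() helper above
 * once a connection reaches the established callbacks. Map name, key choice
 * and section name are illustrative only; see samples/sockmap/ for the
 * reference programs.
 *
 *      SEC("sockops")
 *      int bpf_add_to_sockmap(struct bpf_sock_ops *skops)
 *      {
 *              __u32 key = 0;
 *
 *              switch (skops->op) {
 *              case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
 *              case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 *                      bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
 *                      break;
 *              }
 *              return 0;
 *      }
 */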