net/ipv4/tcp_ulp.c

/*
 * Pluggable TCP upper layer protocol support.
 *
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <net/tcp.h>

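/*
 * Registered ULPs live on tcp_ulp_list: writers serialize on
 * tcp_ulp_list_lock, readers walk the list under RCU.
 */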
static DEFINE_SPINLOCK(tcp_ulp_list_lock);
static LIST_HEAD(tcp_ulp_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_ulp_ops *tcp_ulp_find(const char *name)
{
        struct tcp_ulp_ops *e;

        list_for_each_entry_rcu(e, &tcp_ulp_list, list) {
                if (strcmp(e->name, name) == 0)
                        return e;
        }

        return NULL;
}

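/*
 * Look up a ULP by name, autoloading a module with the "tcp-ulp-<name>"
 * alias when the caller has CAP_NET_ADMIN.  On success a reference on the
 * owning module is held so it cannot be unloaded while in use.
 */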
static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
{
        const struct tcp_ulp_ops *ulp = NULL;

        rcu_read_lock();
        ulp = tcp_ulp_find(name);

#ifdef CONFIG_MODULES
        if (!ulp && capable(CAP_NET_ADMIN)) {
                rcu_read_unlock();
                request_module("tcp-ulp-%s", name);
                rcu_read_lock();
                ulp = tcp_ulp_find(name);
        }
#endif
        if (!ulp || !try_module_get(ulp->owner))
                ulp = NULL;

        rcu_read_unlock();
        return ulp;
}

/* Attach new upper layer protocol to the list
 * of available protocols.
 */
int tcp_register_ulp(struct tcp_ulp_ops *ulp)
{
        int ret = 0;

        spin_lock(&tcp_ulp_list_lock);
        if (tcp_ulp_find(ulp->name))
                ret = -EEXIST;
        else
                list_add_tail_rcu(&ulp->list, &tcp_ulp_list);
        spin_unlock(&tcp_ulp_list_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_ulp);

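/*
 * Remove a protocol from the list.  synchronize_rcu() guarantees no RCU
 * reader can still be walking the entry once this returns.
 */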
void tcp_unregister_ulp(struct tcp_ulp_ops *ulp)
{
        spin_lock(&tcp_ulp_list_lock);
        list_del_rcu(&ulp->list);
        spin_unlock(&tcp_ulp_list_lock);

        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_ulp);

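/* The result is reported to user space as a space-separated list, e.g. by
 * the tcp_available_ulp sysctl handler.
 */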
/* Build string with list of available upper layer protocol values */
void tcp_get_available_ulp(char *buf, size_t maxlen)
{
        struct tcp_ulp_ops *ulp_ops;
        size_t offs = 0;

        *buf = '\0';
        rcu_read_lock();
        list_for_each_entry_rcu(ulp_ops, &tcp_ulp_list, list) {
                offs += snprintf(buf + offs, maxlen - offs,
                                 "%s%s",
                                 offs == 0 ? "" : " ", ulp_ops->name);
        }
        rcu_read_unlock();
}

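/*
 * Detach and release the ULP when the socket is being destroyed; drops the
 * module reference taken by __tcp_ulp_find_autoload().
 */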
void tcp_cleanup_ulp(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        /* No sock_owned_by_me() check here as at the time the
         * stack calls this function, the socket is dead and
         * about to be destroyed.
         */
        if (!icsk->icsk_ulp_ops)
                return;

        if (icsk->icsk_ulp_ops->release)
                icsk->icsk_ulp_ops->release(sk);
        module_put(icsk->icsk_ulp_ops->owner);

        icsk->icsk_ulp_ops = NULL;
}

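/*
 * Bind an already looked-up ULP to the socket.  A socket carries at most
 * one ULP; on failure the module reference taken during lookup is dropped.
 */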
static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        int err;

        err = -EEXIST;
        if (icsk->icsk_ulp_ops)
                goto out_err;

        err = ulp_ops->init(sk);
        if (err)
                goto out_err;

        icsk->icsk_ulp_ops = ulp_ops;
        return 0;
out_err:
        module_put(ulp_ops->owner);
        return err;
}

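/*
 * Entry point used when user space selects a ULP by name (the TCP_ULP
 * socket option); the caller must hold the socket lock, which
 * sock_owned_by_me() asserts.
 */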
int tcp_set_ulp(struct sock *sk, const char *name)
{
        const struct tcp_ulp_ops *ulp_ops;

        sock_owned_by_me(sk);

        ulp_ops = __tcp_ulp_find_autoload(name);
        if (!ulp_ops)
                return -ENOENT;

        return __tcp_set_ulp(sk, ulp_ops);
}
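
/*
 * Illustrative sketch only, not part of tcp_ulp.c: a minimal ULP module
 * registering against the API above, loosely modeled on net/tls.  The
 * "demo" name, the empty callbacks and the alias below are hypothetical.
 * User space would then select it with
 * setsockopt(fd, SOL_TCP, TCP_ULP, "demo", sizeof("demo")).
 */
#include <linux/module.h>
#include <net/tcp.h>

static int demo_ulp_init(struct sock *sk)
{
        /* A real ULP would hook its own protocol ops into the socket here. */
        return 0;
}

static void demo_ulp_release(struct sock *sk)
{
        /* Undo whatever init() set up; called from tcp_cleanup_ulp(). */
}

static struct tcp_ulp_ops demo_tcp_ulp_ops = {
        .name    = "demo",
        .owner   = THIS_MODULE,
        .init    = demo_ulp_init,
        .release = demo_ulp_release,
};

static int __init demo_ulp_module_init(void)
{
        return tcp_register_ulp(&demo_tcp_ulp_ops);
}

static void __exit demo_ulp_module_exit(void)
{
        tcp_unregister_ulp(&demo_tcp_ulp_ops);
}

module_init(demo_ulp_module_init);
module_exit(demo_ulp_module_exit);
MODULE_LICENSE("GPL");
/* Matches the request_module("tcp-ulp-%s", name) pattern used above. */
MODULE_ALIAS("tcp-ulp-demo");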