// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2022, SUSE.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include "protocol.h"

static DEFINE_SPINLOCK(mptcp_sched_list_lock);
static LIST_HEAD(mptcp_sched_list);

/* The default scheduler: pick one subflow via the built-in heuristics
 * and flag it as scheduled.
 */
static int mptcp_sched_default_get_subflow(struct mptcp_sock *msk,
					   struct mptcp_sched_data *data)
{
	struct sock *ssk;

	ssk = data->reinject ? mptcp_subflow_get_retrans(msk) :
			       mptcp_subflow_get_send(msk);
	if (!ssk)
		return -EINVAL;

	mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
	return 0;
}

static struct mptcp_sched_ops mptcp_sched_default = {
	.get_subflow	= mptcp_sched_default_get_subflow,
	.name		= "default",
	.owner		= THIS_MODULE,
};

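/* Illustrative sketch, not part of the original file: an out-of-tree
 * scheduler would supply an ops table of the same shape. The names
 * my_get_subflow() and my_sched below are hypothetical.
 *
 *	static int my_get_subflow(struct mptcp_sock *msk,
 *				  struct mptcp_sched_data *data)
 *	{
 *		struct sock *ssk = data->reinject ?
 *				   mptcp_subflow_get_retrans(msk) :
 *				   mptcp_subflow_get_send(msk);
 *
 *		if (!ssk)
 *			return -EINVAL;
 *		mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
 *		return 0;
 *	}
 *
 *	static struct mptcp_sched_ops my_sched = {
 *		.get_subflow	= my_get_subflow,
 *		.name		= "my_sched",
 *		.owner		= THIS_MODULE,
 *	};
 */
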
/* Must be called with rcu read lock held */
struct mptcp_sched_ops *mptcp_sched_find(const char *name)
{
	struct mptcp_sched_ops *sched, *ret = NULL;

	list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
		if (!strcmp(sched->name, name)) {
			ret = sched;
			break;
		}
	}

	return ret;
}

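/* Illustrative call pattern, not from this file: the caller owns the
 * RCU read-side critical section and must not use the returned pointer
 * outside of it without taking a reference first.
 *
 *	rcu_read_lock();
 *	sched = mptcp_sched_find(name);
 *	if (!sched)
 *		err = -ENOENT;
 *	rcu_read_unlock();
 */
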
/* Build string with list of available scheduler values.
 * Similar to tcp_get_available_congestion_control()
 */
void mptcp_get_available_schedulers(char *buf, size_t maxlen)
{
	struct mptcp_sched_ops *sched;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", sched->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}

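/* Usage sketch, not taken from this file (the buffer size is arbitrary
 * for the example): the result is a space-separated name list, e.g.
 * just "default" when only the built-in scheduler is registered.
 *
 *	char buf[64];
 *
 *	mptcp_get_available_schedulers(buf, sizeof(buf));
 */
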
/* Schedulers are kept on a global list; names must be unique and a
 * get_subflow() hook is mandatory.
 */
int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
{
	if (!sched->get_subflow)
		return -EINVAL;

	spin_lock(&mptcp_sched_list_lock);
	if (mptcp_sched_find(sched->name)) {
		spin_unlock(&mptcp_sched_list_lock);
		return -EEXIST;
	}
	list_add_tail_rcu(&sched->list, &mptcp_sched_list);
	spin_unlock(&mptcp_sched_list_lock);

	pr_debug("%s registered\n", sched->name);
	return 0;
}

void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched)
{
	/* the default scheduler is built in and can never go away */
	if (sched == &mptcp_sched_default)
		return;

	spin_lock(&mptcp_sched_list_lock);
	list_del_rcu(&sched->list);
	spin_unlock(&mptcp_sched_list_lock);
}

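/* Illustrative sketch: a hypothetical scheduler module would pair the
 * register/unregister calls above in its module hooks; my_sched is the
 * made-up ops table from the earlier example.
 *
 *	static int __init my_sched_init(void)
 *	{
 *		return mptcp_register_scheduler(&my_sched);
 *	}
 *
 *	static void __exit my_sched_exit(void)
 *	{
 *		mptcp_unregister_scheduler(&my_sched);
 *	}
 *
 *	module_init(my_sched_init);
 *	module_exit(my_sched_exit);
 */
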
void mptcp_sched_init(void)
{
	mptcp_register_scheduler(&mptcp_sched_default);
}

/* Attach a scheduler to the msk; a NULL sched selects the default one. */
int mptcp_init_sched(struct mptcp_sock *msk,
		     struct mptcp_sched_ops *sched)
{
	if (!sched)
		sched = &mptcp_sched_default;

	if (!bpf_try_module_get(sched, sched->owner))
		return -EBUSY;

	msk->sched = sched;
	if (msk->sched->init)
		msk->sched->init(msk);

	pr_debug("sched=%s\n", msk->sched->name);

	return 0;
}

void mptcp_release_sched(struct mptcp_sock *msk)
{
	struct mptcp_sched_ops *sched = msk->sched;

	if (!sched)
		return;

	msk->sched = NULL;
	if (sched->release)
		sched->release(msk);

	bpf_module_put(sched, sched->owner);
}

void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
				 bool scheduled)
{
	WRITE_ONCE(subflow->scheduled, scheduled);
}

int mptcp_sched_get_send(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sched_data data;

	msk_owned_by_me(msk);

	/* the following check is moved out of mptcp_subflow_get_send */
	if (__mptcp_check_fallback(msk)) {
		if (msk->first &&
		    __tcp_can_send(msk->first) &&
		    sk_stream_memory_free(msk->first)) {
			mptcp_subflow_set_scheduled(mptcp_subflow_ctx(msk->first), true);
			return 0;
		}
		return -EINVAL;
	}

	/* a subflow is already scheduled, nothing more to do */
	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->scheduled))
			return 0;
	}

	data.reinject = false;
	if (msk->sched == &mptcp_sched_default || !msk->sched)
		return mptcp_sched_default_get_subflow(msk, &data);
	return msk->sched->get_subflow(msk, &data);
}

int mptcp_sched_get_retrans(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sched_data data;

	msk_owned_by_me(msk);

	/* the following check is moved out of mptcp_subflow_get_retrans */
	if (__mptcp_check_fallback(msk))
		return -EINVAL;

	/* a subflow is already scheduled, nothing more to do */
	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->scheduled))
			return 0;
	}

	data.reinject = true;
	if (msk->sched == &mptcp_sched_default || !msk->sched)
		return mptcp_sched_default_get_subflow(msk, &data);
	return msk->sched->get_subflow(msk, &data);
}
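
/* Sketch of the contract both helpers above rely on: a scheduler's
 * get_subflow() marks its pick(s) with mptcp_subflow_set_scheduled()
 * and returns 0, or a negative errno when no subflow is usable. A
 * hypothetical "first sendable subflow" policy could look like:
 *
 *	static int my_first_get_subflow(struct mptcp_sock *msk,
 *					struct mptcp_sched_data *data)
 *	{
 *		struct mptcp_subflow_context *subflow;
 *
 *		mptcp_for_each_subflow(msk, subflow) {
 *			if (__tcp_can_send(mptcp_subflow_tcp_sock(subflow))) {
 *				mptcp_subflow_set_scheduled(subflow, true);
 *				return 0;
 *			}
 *		}
 *		return -EINVAL;
 *	}
 */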