/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <net/tcp.h>
/* Protects writers of tcp_cong_list; readers traverse it under RCU. */
static DEFINE_SPINLOCK(tcp_cong_list_lock);

/* Registered congestion control algorithms; the head entry is the default. */
static LIST_HEAD(tcp_cong_list);
18 /* Simple linear search, don't expect many entries! */
19 static struct tcp_congestion_ops
*tcp_ca_find(const char *name
)
21 struct tcp_congestion_ops
*e
;
23 list_for_each_entry_rcu(e
, &tcp_cong_list
, list
) {
24 if (strcmp(e
->name
, name
) == 0)
32 * Attach new congestion control algorthim to the list
33 * of available options.
35 int tcp_register_congestion_control(struct tcp_congestion_ops
*ca
)
39 /* all algorithms must implement ssthresh and cong_avoid ops */
40 if (!ca
->ssthresh
|| !ca
->cong_avoid
) {
41 printk(KERN_ERR
"TCP %s does not implement required ops\n",
46 spin_lock(&tcp_cong_list_lock
);
47 if (tcp_ca_find(ca
->name
)) {
48 printk(KERN_NOTICE
"TCP %s already registered\n", ca
->name
);
51 list_add_tail_rcu(&ca
->list
, &tcp_cong_list
);
52 printk(KERN_INFO
"TCP %s registered\n", ca
->name
);
54 spin_unlock(&tcp_cong_list_lock
);
58 EXPORT_SYMBOL_GPL(tcp_register_congestion_control
);
61 * Remove congestion control algorithm, called from
62 * the module's remove function. Module ref counts are used
63 * to ensure that this can't be done till all sockets using
64 * that method are closed.
66 void tcp_unregister_congestion_control(struct tcp_congestion_ops
*ca
)
68 spin_lock(&tcp_cong_list_lock
);
69 list_del_rcu(&ca
->list
);
70 spin_unlock(&tcp_cong_list_lock
);
72 EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control
);
74 /* Assign choice of congestion control. */
75 void tcp_init_congestion_control(struct sock
*sk
)
77 struct inet_connection_sock
*icsk
= inet_csk(sk
);
78 struct tcp_congestion_ops
*ca
;
80 if (icsk
->icsk_ca_ops
!= &tcp_init_congestion_ops
)
84 list_for_each_entry_rcu(ca
, &tcp_cong_list
, list
) {
85 if (try_module_get(ca
->owner
)) {
86 icsk
->icsk_ca_ops
= ca
;
93 if (icsk
->icsk_ca_ops
->init
)
94 icsk
->icsk_ca_ops
->init(sk
);
97 /* Manage refcounts on socket close. */
98 void tcp_cleanup_congestion_control(struct sock
*sk
)
100 struct inet_connection_sock
*icsk
= inet_csk(sk
);
102 if (icsk
->icsk_ca_ops
->release
)
103 icsk
->icsk_ca_ops
->release(sk
);
104 module_put(icsk
->icsk_ca_ops
->owner
);
107 /* Used by sysctl to change default congestion control */
108 int tcp_set_default_congestion_control(const char *name
)
110 struct tcp_congestion_ops
*ca
;
113 spin_lock(&tcp_cong_list_lock
);
114 ca
= tcp_ca_find(name
);
117 spin_unlock(&tcp_cong_list_lock
);
119 request_module("tcp_%s", name
);
120 spin_lock(&tcp_cong_list_lock
);
121 ca
= tcp_ca_find(name
);
126 list_move(&ca
->list
, &tcp_cong_list
);
129 spin_unlock(&tcp_cong_list_lock
);
134 /* Set default value from kernel configuration at bootup */
135 static int __init
tcp_congestion_default(void)
137 return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG
);
139 late_initcall(tcp_congestion_default
);
142 /* Get current default congestion control */
143 void tcp_get_default_congestion_control(char *name
)
145 struct tcp_congestion_ops
*ca
;
146 /* We will always have reno... */
147 BUG_ON(list_empty(&tcp_cong_list
));
150 ca
= list_entry(tcp_cong_list
.next
, struct tcp_congestion_ops
, list
);
151 strncpy(name
, ca
->name
, TCP_CA_NAME_MAX
);
155 /* Change congestion control for socket */
156 int tcp_set_congestion_control(struct sock
*sk
, const char *name
)
158 struct inet_connection_sock
*icsk
= inet_csk(sk
);
159 struct tcp_congestion_ops
*ca
;
163 ca
= tcp_ca_find(name
);
164 if (ca
== icsk
->icsk_ca_ops
)
170 else if (!try_module_get(ca
->owner
))
174 tcp_cleanup_congestion_control(sk
);
175 icsk
->icsk_ca_ops
= ca
;
176 if (icsk
->icsk_ca_ops
->init
)
177 icsk
->icsk_ca_ops
->init(sk
);
186 * Linear increase during slow start
188 void tcp_slow_start(struct tcp_sock
*tp
)
190 if (sysctl_tcp_abc
) {
191 /* RFC3465: Slow Start
192 * TCP sender SHOULD increase cwnd by the number of
193 * previously unacknowledged bytes ACKed by each incoming
194 * acknowledgment, provided the increase is not more than L
196 if (tp
->bytes_acked
< tp
->mss_cache
)
199 /* We MAY increase by 2 if discovered delayed ack */
200 if (sysctl_tcp_abc
> 1 && tp
->bytes_acked
>= 2*tp
->mss_cache
) {
201 if (tp
->snd_cwnd
< tp
->snd_cwnd_clamp
)
207 if (tp
->snd_cwnd
< tp
->snd_cwnd_clamp
)
210 EXPORT_SYMBOL_GPL(tcp_slow_start
);
213 * TCP Reno congestion control
214 * This is special case used for fallback as well.
216 /* This is Jacobson's slow start and congestion avoidance.
217 * SIGCOMM '88, p. 328.
219 void tcp_reno_cong_avoid(struct sock
*sk
, u32 ack
, u32 rtt
, u32 in_flight
,
222 struct tcp_sock
*tp
= tcp_sk(sk
);
224 if (!tcp_is_cwnd_limited(sk
, in_flight
))
227 /* In "safe" area, increase. */
228 if (tp
->snd_cwnd
<= tp
->snd_ssthresh
)
231 /* In dangerous area, increase slowly. */
232 else if (sysctl_tcp_abc
) {
233 /* RFC3465: Appropriate Byte Count
234 * increase once for each full cwnd acked
236 if (tp
->bytes_acked
>= tp
->snd_cwnd
*tp
->mss_cache
) {
237 tp
->bytes_acked
-= tp
->snd_cwnd
*tp
->mss_cache
;
238 if (tp
->snd_cwnd
< tp
->snd_cwnd_clamp
)
242 /* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd */
243 if (tp
->snd_cwnd_cnt
>= tp
->snd_cwnd
) {
244 if (tp
->snd_cwnd
< tp
->snd_cwnd_clamp
)
246 tp
->snd_cwnd_cnt
= 0;
251 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid
);
253 /* Slow start threshold is half the congestion window (min 2) */
254 u32
tcp_reno_ssthresh(struct sock
*sk
)
256 const struct tcp_sock
*tp
= tcp_sk(sk
);
257 return max(tp
->snd_cwnd
>> 1U, 2U);
259 EXPORT_SYMBOL_GPL(tcp_reno_ssthresh
);
261 /* Lower bound on congestion window with halving. */
262 u32
tcp_reno_min_cwnd(const struct sock
*sk
)
264 const struct tcp_sock
*tp
= tcp_sk(sk
);
265 return tp
->snd_ssthresh
/2;
267 EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd
);
269 struct tcp_congestion_ops tcp_reno
= {
271 .owner
= THIS_MODULE
,
272 .ssthresh
= tcp_reno_ssthresh
,
273 .cong_avoid
= tcp_reno_cong_avoid
,
274 .min_cwnd
= tcp_reno_min_cwnd
,
277 /* Initial congestion control used (until SYN)
278 * really reno under another name so we can tell difference
279 * during tcp_set_default_congestion_control
281 struct tcp_congestion_ops tcp_init_congestion_ops
= {
283 .owner
= THIS_MODULE
,
284 .ssthresh
= tcp_reno_ssthresh
,
285 .cong_avoid
= tcp_reno_cong_avoid
,
286 .min_cwnd
= tcp_reno_min_cwnd
,
288 EXPORT_SYMBOL_GPL(tcp_init_congestion_ops
);