// SPDX-License-Identifier: GPL-2.0
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>
int sysctl_tcp_fastopen __read_mostly = TFO_CLIENT_ENABLE;

struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;

static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
void tcp_fastopen_init_key_once(bool publish)
{
	static u8 key[TCP_FASTOPEN_KEY_LENGTH];

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race happening here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	if (net_get_random_once(key, sizeof(key)) && publish)
		tcp_fastopen_reset_cipher(key, sizeof(key));
}
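
/* Note: net_get_random_once() fills @key with random bytes only on the
 * call that actually initializes it; later calls return false and leave
 * the key untouched. Callers that pass publish == false only make sure a
 * random key exists; installing a context is left to a later call with
 * publish == true, or to an explicit tcp_fastopen_reset_cipher() (for
 * example from the net.ipv4.tcp_fastopen_key sysctl handler).
 */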
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
	    container_of(head, struct tcp_fastopen_context, rcu);

	crypto_free_cipher(ctx->tfm);
	kfree(ctx);
}
int tcp_fastopen_reset_cipher(void *key, unsigned int len)
{
	int err;
	struct tcp_fastopen_context *ctx, *octx;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		kfree(ctx);
		return err;
	}
	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		crypto_free_cipher(ctx->tfm);
		kfree(ctx);
		return err;
	}
	memcpy(ctx->key, key, len);

	spin_lock(&tcp_fastopen_ctx_lock);

	octx = rcu_dereference_protected(tcp_fastopen_ctx,
				lockdep_is_held(&tcp_fastopen_ctx_lock));
	rcu_assign_pointer(tcp_fastopen_ctx, ctx);
	spin_unlock(&tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return err;
}
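
/* The update above is a classic RCU publish: readers pick up the new
 * context lock-free via rcu_dereference(tcp_fastopen_ctx) in
 * __tcp_fastopen_cookie_gen(), while the previous context (octx) is only
 * freed after a grace period via call_rcu(). Besides
 * tcp_fastopen_init_key_once(), the usual way a new key reaches this
 * function is the net.ipv4.tcp_fastopen_key sysctl (an assumption based
 * on the sysctl handler living outside this file).
 */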
static bool __tcp_fastopen_cookie_gen(const void *path,
				      struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;
	bool ok = false;

	rcu_read_lock();
	ctx = rcu_dereference(tcp_fastopen_ctx);
	if (ctx) {
		crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		ok = true;
	}
	rcu_read_unlock();
	return ok;
}
/* Generate the fastopen cookie by doing aes128 encryption on both
 * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
 * addresses. For the longer IPv6 addresses use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
static bool tcp_fastopen_cookie_gen(struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
		return __tcp_fastopen_cookie_gen(path, foc);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
		struct tcp_fastopen_cookie tmp;

		if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
			struct in6_addr *buf = &tmp.addr;
			int i;

			for (i = 0; i < 4; i++)
				buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
			return __tcp_fastopen_cookie_gen(buf, foc);
		}
	}
#endif
	return false;
}
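
/* Informal sketch of the IPv6 branch above, with K the key installed by
 * tcp_fastopen_reset_cipher():
 *
 *	tmp    = AES_K(saddr)
 *	cookie = AES_K(tmp ^ daddr)
 *
 * i.e. the two 128-bit address blocks are chained CBC-MAC style (zero IV),
 * so both endpoints are bound into the cookie. The IPv4 branch simply
 * encrypts one zero-padded block holding { saddr, daddr, 0, 0 }.
 */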
/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting. Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen. Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}
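
/* Note on the sequence-number fixups above: the clone still describes the
 * original SYN, whose seq covers the SYN bit itself. Bumping seq by one and
 * clearing TCPHDR_SYN turns it into a plain data segment that can sit on
 * sk_receive_queue as if it had arrived after the handshake, which is why
 * rcv_nxt can simply be advanced to end_seq.
 */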
static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_metrics(child);
	tcp_call_bpf(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
	tcp_init_buffer_space(child);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}
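
/* At this point the connection sits in the listener's accept queue even
 * though the three-way handshake has not completed: the SYNACK is sent by
 * tcp_conn_request() after this function returns, and any data carried in
 * the SYN has already been queued by tcp_fastopen_add_skb(), so a server
 * doing accept() + read() can see that data immediately.
 */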
static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}
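
/* Illustrative userspace view of the queue limit checked above (example
 * only, not part of this file): a listener opts in to server-side TFO and
 * sets fastopenq.max_qlen with the TCP_FASTOPEN socket option:
 *
 *	int qlen = 16;	// becomes fastopenq.max_qlen
 *	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *	listen(fd, backlog);
 *
 * The server side must additionally be enabled globally via the
 * TFO_SERVER_ENABLE bit of the net.ipv4.tcp_fastopen sysctl, which is
 * tested in tcp_try_fastopen() below.
 */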
/* Returns the child socket if we should perform Fast Open on the SYN,
 * otherwise NULL. The cookie (foc) may be updated and returned to the
 * client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	struct sock *child;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len >= 0 &&  /* Client presents or requests a cookie */
	    tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    foc->len == valid_foc.len &&
	    !memcmp(foc->val, valid_foc.val, foc->len)) {
		/* Cookie is valid. Create a (full) child socket to accept
		 * the data in SYN before returning a SYN-ACK to ack the
		 * data. If we fail to create the socket, fall back and
		 * ack the ISN only but include the same cookie.
		 *
		 * Note: Data-less SYN with valid cookie is allowed to send
		 * data in SYN_RECV state.
		 */
fastopen:
		child = tcp_fastopen_create_child(sk, skb, req);
		if (child) {
			foc->len = -1;
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVE);
			return child;
		}
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (foc->len > 0) /* Client presents an invalid cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}
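
/* Summary of the outcomes of tcp_try_fastopen() (restating the code above,
 * not additional behaviour):
 *   - valid cookie, or TFO_SERVER_COOKIE_NOT_REQD with SYN data: a full
 *     child socket is returned with the SYN payload already queued;
 *   - cookie request or invalid cookie: NULL is returned and *foc is
 *     overwritten with the freshly generated valid_foc (when cookie
 *     generation succeeded), so the SYN-ACK hands the client a cookie for
 *     future connections;
 *   - TFO disabled or queue check failed: NULL is returned with
 *     foc->len = -1, so no cookie option is sent at all.
 */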
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	unsigned long last_syn_loss = 0;
	int syn_loss = 0;

	tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);

	/* Recurring FO SYN losses: no cookie or data in SYN */
	if (syn_loss > 1 &&
	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
		cookie->len = -1;
		return false;
	}

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) {
		cookie->len = -1;
		return true;
	}
	return cookie->len > 0;
}
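
/* The (60*HZ << syn_loss) term above backs off exponentially: after, say,
 * three consecutive SYN-with-data losses recorded in the metrics cache,
 * Fast Open is skipped for 60s << 3 = 8 minutes from the last loss before
 * the client tries sending a cookie or data in the SYN again.
 */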
/* This function checks if we want to defer sending SYN until the first
 * write(). We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
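
/* Illustrative userspace view of the defer-connect path above (example
 * only, not part of this file): with TCP_FASTOPEN_CONNECT set, connect()
 * returns immediately and the SYN, carrying the cached cookie plus the
 * first payload, goes out on the first write():
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &on, sizeof(on));
 *	connect(fd, (struct sockaddr *)&daddr, sizeof(daddr));
 *	write(fd, buf, len);	// SYN + data are sent here
 *
 * The older sendto(..., MSG_FASTOPEN, ...) interface achieves the same
 * effect without the socket option.
 */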
/*
 * The following code block is to deal with middle box issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Default to 1hr */
unsigned int sysctl_tcp_fastopen_blackhole_timeout __read_mostly = 60 * 60;
static atomic_t tfo_active_disable_times __read_mostly = ATOMIC_INIT(0);
static unsigned long tfo_active_disable_stamp __read_mostly;
/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	atomic_inc(&tfo_active_disable_times);
	tfo_active_disable_stamp = jiffies;
	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENBLACKHOLE);
}
/* Reset tfo_active_disable_times to 0 */
void tcp_fastopen_active_timeout_reset(void)
{
	atomic_set(&tfo_active_disable_times, 0);
}
/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	int tfo_da_times = atomic_read(&tfo_active_disable_times);
	int multiplier;
	unsigned long timeout;

	if (!tfo_da_times)
		return false;

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);
	timeout = multiplier * sysctl_tcp_fastopen_blackhole_timeout * HZ;
	if (time_before(jiffies, tfo_active_disable_stamp + timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}
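
/* Worked example of the backoff above, with the default
 * sysctl_tcp_fastopen_blackhole_timeout of 60 * 60 seconds: after the 1st
 * blackhole event active TFO stays disabled for 1h, after the 2nd for 2h,
 * the 3rd for 4h, and so on, capped at 2^6 = 64h because
 * min(tfo_da_times - 1, 6) bounds the shift.
 */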
/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			tcp_fastopen_active_timeout_reset();
		dst_release(dst);
	}
}