// SPDX-License-Identifier: GPL-2.0
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>
void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, NULL, key, sizeof(key));
}
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
		container_of(head, struct tcp_fastopen_context, rcu);

	crypto_free_cipher(ctx->tfm);
	kfree(ctx);
}
void tcp_fastopen_destroy_cipher(struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference_protected(
		inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
	if (ctx)
		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}
void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
		lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *key, unsigned int len)
{
	struct tcp_fastopen_context *ctx, *octx;
	struct fastopen_queue *q;
	int err;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		kfree(ctx);
		return err;
	}

	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		crypto_free_cipher(ctx->tfm);
		kfree(ctx);
		return err;
	}
	memcpy(ctx->key, key, len);

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
	if (sk) {
		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
		octx = rcu_dereference_protected(q->ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(q->ctx, ctx);
	} else {
		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
	}
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return err;
}
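/* Readers pair with the rcu_assign_pointer() publish above using
 * rcu_read_lock()/rcu_dereference(), as in __tcp_fastopen_cookie_gen()
 * below; the old context is freed only via call_rcu(), i.e. after all
 * readers that might still hold it have left their RCU read-side sections.
 */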
static bool __tcp_fastopen_cookie_gen(struct sock *sk, const void *path,
				      struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;
	bool ok = false;

	rcu_read_lock();

	/* Prefer the listener's own context, fall back to the net-wide one. */
	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
	if (!ctx)
		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);

	if (ctx) {
		crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		ok = true;
	}
	rcu_read_unlock();
	return ok;
}
/* Generate the fastopen cookie by doing aes128 encryption on both
 * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
 * addresses. For the longer IPv6 addresses use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
static bool tcp_fastopen_cookie_gen(struct sock *sk,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		__be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };

		return __tcp_fastopen_cookie_gen(sk, path, foc);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
		struct tcp_fastopen_cookie tmp;

		if (__tcp_fastopen_cookie_gen(sk, &ip6h->saddr, &tmp)) {
			struct in6_addr *buf = &tmp.addr;
			int i;

			for (i = 0; i < 4; i++)
				buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
			return __tcp_fastopen_cookie_gen(sk, buf, foc);
		}
	}
#endif
	return false;
}
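/* Illustrative sketch of the construction above (not upstream code):
 *
 *   IPv4:  cookie = AES-128(key, saddr || daddr || 0 || 0)   -- one block
 *   IPv6:  tmp    = AES-128(key, saddr)                      -- block 1
 *          cookie = AES-128(key, tmp XOR daddr)              -- block 2
 *
 * i.e. for IPv6 a two-block CBC-MAC over the 32-byte address pair.
 */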
/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting. Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen. Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}
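/* Worked example for the seq accounting above: a SYN with ISN 1000 carrying
 * 100 bytes has seq == 1000 (the SYN itself) and end_seq == 1101. After
 * seq++ strips the SYN, the queued skb covers 1001..1100 and rcv_nxt
 * advances to end_seq == 1101.
 */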
static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct tcp_sock *tp;
	struct sock *child;
	bool own_req;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}
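
/* Note on refcount_set(&req->rsk_refcnt, 2) above: this appears to account
 * for the request socket being reachable both via the child's
 * tp->fastopen_rsk pointer and via the listener's accept queue until the
 * handshake fully completes (a reading of the code, not an upstream comment).
 */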
static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}
static bool tcp_fastopen_no_cookie(const struct sock *sk,
				   const struct dst_entry *dst,
				   int flag)
{
	return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
	       tcp_sk(sk)->fastopen_no_cookie ||
	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}
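/* For reference (flag values as defined in include/net/tcp.h at the time of
 * writing): TFO_CLIENT_ENABLE 0x1, TFO_SERVER_ENABLE 0x2,
 * TFO_CLIENT_NO_COOKIE 0x4, TFO_SERVER_COOKIE_NOT_REQD 0x200. E.g. setting
 * sysctl net.ipv4.tcp_fastopen=0x203 enables both client and server mode
 * and lets the server accept SYN data without any cookie.
 */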
/* Returns the fastopen child socket if we should perform Fast Open on the
 * SYN, or NULL otherwise. The cookie (foc) may be updated and returned to
 * the client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (syn_data &&
	    tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len >= 0 &&  /* Client presents or requests a cookie */
	    tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc) &&
	    foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    foc->len == valid_foc.len &&
	    !memcmp(foc->val, valid_foc.val, foc->len)) {
		/* Cookie is valid. Create a (full) child socket to accept
		 * the data in SYN before returning a SYN-ACK to ack the
		 * data. If we fail to create the socket, fall back and
		 * ack the ISN only but include the same cookie.
		 *
		 * Note: Data-less SYN with valid cookie is allowed to send
		 * data in SYN_RECV state.
		 */
fastopen:
		child = tcp_fastopen_create_child(sk, skb, req);
		if (child) {
			foc->len = -1;
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVE);
			return child;
		}
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (foc->len > 0) /* Client presents an invalid cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);

	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}
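/* When the presented cookie is missing or stale, *foc is overwritten with
 * valid_foc above, so the outgoing SYN-ACK carries a fresh cookie that the
 * client can cache for its next attempt (a pure cookie request is signalled
 * by foc->len == 0).
 */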
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	const struct dst_entry *dst;

	tcp_fastopen_cache_get(sk, mss, cookie);

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	dst = __sk_dst_get(sk);

	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
		cookie->len = -1;
		return true;
	}
	return cookie->len > 0;
}
/* This function checks if we want to defer sending SYN until the first
 * write(). We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: true if we want to defer until the application writes data
 *               false if we want to send out the SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
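
/* Userspace sketch of the deferred-connect path this enables (illustrative,
 * not kernel code; error handling omitted):
 *
 *	int one = 1, fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr)); // SYN deferred
 *	send(fd, buf, len, 0); // SYN goes out now, carrying data + cookie
 */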
/*
 * The following code block is to deal with middle box issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data being
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 *   3. client side TFO socket has timed out three times consecutively during
 *      or after the handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	atomic_inc(&net->ipv4.tfo_active_disable_times);
	net->ipv4.tfo_active_disable_stamp = jiffies;
	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}
/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
	int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	unsigned long timeout;
	int multiplier;

	if (!tfo_da_times)
		return false;

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);
	timeout = multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies,
			sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}
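
/* Worked example of the backoff above: with a blackhole timeout of 3600s
 * (the 1hr initial period described in the block comment) and
 * tfo_da_times == 3, multiplier = 1 << min(3 - 1, 6) = 4, so active TFO
 * stays disabled for 4 hours (the 1h, 2h, 4h, ... progression), capped at
 * 2^6 = 64 times the base timeout.
 */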
/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
	u32 timeouts = inet_csk(sk)->icsk_retransmits;
	struct tcp_sock *tp = tcp_sk(sk);

	/* Broken middle-boxes may black-hole Fast Open connections during or
	 * even after the handshake. Be extremely conservative and pause
	 * Fast Open globally after hitting the third consecutive timeout or
	 * exceeding the configured timeout limit.
	 */
	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
	    (timeouts == 2 || (timeouts < 2 && expired))) {
		tcp_fastopen_active_disable(sk);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
	}
}