// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include <net/hotdata.h>
#include <net/xfrm.h>
#include <asm/ioctls.h>

#include "protocol.h"
#include "mib.h"

#define CREATE_TRACE_POINTS
#include <trace/events/mptcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

enum {
	MPTCP_CMSG_TS = BIT(0),
	MPTCP_CMSG_INQ = BIT(1),
};

static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;

static void __mptcp_destroy_sock(struct sock *sk);
static void mptcp_check_send_data_fin(struct sock *sk);

DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
static struct net_device mptcp_napi_dev;
/* Returns end sequence number of the receiver's advertised window */
static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
{
	return READ_ONCE(msk->wnd_end);
}
static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (sk->sk_prot == &tcpv6_prot)
		return &inet6_stream_ops;
#endif

	WARN_ON_ONCE(sk->sk_prot != &tcp_prot);
	return &inet_stream_ops;
}
static int __mptcp_socket_create(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock);
	if (err)
		return err;

	msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio;
	WRITE_ONCE(msk->first, ssock->sk);
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	sock_hold(ssock->sk);
	subflow->request_mptcp = 1;
	subflow->subflow_id = msk->subflow_id++;

	/* This is the first subflow, always with id 0 */
	WRITE_ONCE(subflow->local_id, 0);
	mptcp_sock_graft(msk->first, sk->sk_socket);
	iput(SOCK_INODE(ssock));

	return 0;
}
/* If the MPC handshake is not started, returns the first subflow,
 * eventually allocating it.
 */
struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;
	int ret;

	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		return ERR_PTR(-EINVAL);

	if (!msk->first) {
		ret = __mptcp_socket_create(msk);
		if (ret)
			return ERR_PTR(ret);
	}

	return msk->first;
}
static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	__kfree_skb(skb);
}

static void mptcp_rmem_fwd_alloc_add(struct sock *sk, int size)
{
	WRITE_ONCE(mptcp_sk(sk)->rmem_fwd_alloc,
		   mptcp_sk(sk)->rmem_fwd_alloc + size);
}

static void mptcp_rmem_charge(struct sock *sk, int size)
{
	mptcp_rmem_fwd_alloc_add(sk, -size);
}
static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
			       struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (MPTCP_SKB_CB(from)->offset ||
	    ((to->len + from->len) > (sk->sk_rcvbuf >> 3)) ||
	    !skb_try_coalesce(to, from, &fragstolen, &delta))
		return false;

	pr_debug("coalesced seq %llx into %llx new len %d new end seq %llx\n",
		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
		 to->len, MPTCP_SKB_CB(from)->end_seq);
	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;

	/* note the fwd memory can reach a negative value after accounting
	 * for the delta, but the later skb free will restore a non
	 * negative one
	 */
	atomic_add(delta, &sk->sk_rmem_alloc);
	mptcp_rmem_charge(sk, delta);
	kfree_skb_partial(from, fragstolen);

	return true;
}
static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
				   struct sk_buff *from)
{
	if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
		return false;

	return mptcp_try_coalesce((struct sock *)msk, to, from);
}
static void __mptcp_rmem_reclaim(struct sock *sk, int amount)
{
	amount >>= PAGE_SHIFT;
	mptcp_rmem_charge(sk, amount << PAGE_SHIFT);
	__sk_mem_reduce_allocated(sk, amount);
}

static void mptcp_rmem_uncharge(struct sock *sk, int size)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int reclaimable;

	mptcp_rmem_fwd_alloc_add(sk, size);
	reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);

	/* see sk_mem_uncharge() for the rationale behind the following schema */
	if (unlikely(reclaimable >= PAGE_SIZE))
		__mptcp_rmem_reclaim(sk, reclaimable);
}

static void mptcp_rfree(struct sk_buff *skb)
{
	unsigned int len = skb->truesize;
	struct sock *sk = skb->sk;

	atomic_sub(len, &sk->sk_rmem_alloc);
	mptcp_rmem_uncharge(sk, len);
}

void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = mptcp_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	mptcp_rmem_charge(sk, skb->truesize);
}
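
/* Accounting sketch (informal note, not from the original source): each skb
 * owned via mptcp_set_owner_r() charges sk_rmem_alloc and consumes forward
 * allocation from msk->rmem_fwd_alloc; freeing it via mptcp_rfree() returns
 * the truesize to rmem_fwd_alloc and, once at least PAGE_SIZE bytes are
 * reclaimable beyond the reserved memory, __mptcp_rmem_reclaim() hands whole
 * pages back to the core socket accounting, mirroring sk_mem_uncharge().
 */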
/* "inspired" by tcp_data_queue_ofo(), main differences:
 * - use mptcp seqs
 * - don't cope with sacks
 */
static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)msk;
	struct rb_node **p, *parent;
	u64 seq, end_seq, max_seq;
	struct sk_buff *skb1;

	seq = MPTCP_SKB_CB(skb)->map_seq;
	end_seq = MPTCP_SKB_CB(skb)->end_seq;
	max_seq = atomic64_read(&msk->rcv_wnd_sent);

	pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
	if (after64(end_seq, max_seq)) {
		/* out of window */
		mptcp_drop(sk, skb);
		pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
			 (unsigned long long)end_seq - (unsigned long long)max_seq,
			 (unsigned long long)atomic64_read(&msk->rcv_wnd_sent));
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
		return;
	}

	p = &msk->out_of_order_queue.rb_node;
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
	if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
		rb_link_node(&skb->rbnode, NULL, p);
		rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
		msk->ooo_last_skb = skb;
		goto end;
	}

	/* with 2 subflows, adding at end of ooo queue is quite likely
	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
	 */
	if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		return;
	}

	/* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
	if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		parent = &msk->ooo_last_skb->rbnode;
		p = &parent->rb_right;
		goto insert;
	}

	/* Find place to insert this segment. Handle overlaps on the way. */
	parent = NULL;
	while (*p) {
		parent = *p;
		skb1 = rb_to_skb(parent);
		if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
			p = &parent->rb_left;
			continue;
		}
		if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
			if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
				/* All the bits are present. Drop. */
				mptcp_drop(sk, skb);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				return;
			}
			if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
				/* partial overlap:
				 *     |     skb      |
				 *  |     skb1    |
				 * continue traversing
				 */
			} else {
				/* skb's seq == skb1's seq and skb covers skb1.
				 * Replace skb1 with skb.
				 */
				rb_replace_node(&skb1->rbnode, &skb->rbnode,
						&msk->out_of_order_queue);
				mptcp_drop(sk, skb1);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				goto merge_right;
			}
		} else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
			return;
		}
		p = &parent->rb_right;
	}

insert:
	/* Insert segment into RB tree. */
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);

merge_right:
	/* Remove other segments covered by skb. */
	while ((skb1 = skb_rb_next(skb)) != NULL) {
		if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
			break;
		rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
		mptcp_drop(sk, skb1);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
	}
	/* If there is no skb after us, we are the last_skb ! */
	if (!skb1)
		msk->ooo_last_skb = skb;

end:
	skb_condense(skb);
	mptcp_set_owner_r(skb, sk);
}
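
/* Example (informal, added for illustration): with msk->ack_seq at 1000 and
 * rcv_wnd_sent at 65535, a segment mapped at [2000, 2500) is parked in the
 * out-of-order rbtree above, keyed by its 64-bit MPTCP data sequence number;
 * it is merged back into the receive queue by __mptcp_ofo_queue() once
 * ack_seq catches up to 2000, while anything ending beyond rcv_wnd_sent is
 * dropped and counted as MPTCP_MIB_NODSSWINDOW.
 */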
static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int amt, amount;

	if (size <= msk->rmem_fwd_alloc)
		return true;

	size -= msk->rmem_fwd_alloc;
	amt = sk_mem_pages(size);
	amount = amt << PAGE_SHIFT;
	if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
		return false;

	mptcp_rmem_fwd_alloc_add(sk, amount);
	return true;
}
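
/* Worked example (informal, added for illustration): asking to schedule 6000
 * bytes while rmem_fwd_alloc holds 1000 leaves 5000 bytes to cover;
 * sk_mem_pages(5000) is 2 pages with 4 KiB pages, so 8192 bytes are raised
 * from the core accounting and added to rmem_fwd_alloc, leaving headroom for
 * later skbs.
 */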
static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
			     struct sk_buff *skb, unsigned int offset,
			     size_t copy_len)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *tail;
	bool has_rxtstamp;

	__skb_unlink(skb, &ssk->sk_receive_queue);

	skb_ext_reset(skb);
	skb_orphan(skb);

	/* try to fetch required memory from subflow */
	if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
		goto drop;
	}

	has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;

	/* the skb map_seq accounts for the skb offset:
	 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
	 * value
	 */
	MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
	MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
	MPTCP_SKB_CB(skb)->offset = offset;
	MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;

	if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
		/* in sequence */
		msk->bytes_received += copy_len;
		WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (tail && mptcp_try_coalesce(sk, tail, skb))
			return true;

		mptcp_set_owner_r(skb, sk);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		return true;
	} else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
		mptcp_data_queue_ofo(msk, skb);
		return false;
	}

	/* old data, keep it simple and drop the whole pkt, sender
	 * will retransmit as needed, if needed.
	 */
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
drop:
	mptcp_drop(sk, skb);
	return false;
}
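
/* Flow sketch (informal, added for illustration): a segment whose DSS map_seq
 * equals msk->ack_seq is appended (or coalesced) to the msk receive queue
 * above and advances ack_seq by copy_len; a segment ahead of ack_seq waits in
 * the out-of-order rbtree; anything behind ack_seq is dropped and accounted
 * as MPTCP_MIB_DUPDATA, relying on the sender to retransmit what is needed.
 */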
static void mptcp_stop_rtx_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	mptcp_sk(sk)->timer_ival = 0;
}

static void mptcp_close_wake_up(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		return;

	sk->sk_state_change(sk);
	if (sk->sk_shutdown == SHUTDOWN_MASK ||
	    sk->sk_state == TCP_CLOSE)
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	else
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
/* called under the msk socket lock */
static bool mptcp_pending_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return ((1 << sk->sk_state) &
		(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
	       msk->write_seq == READ_ONCE(msk->snd_una);
}

static void mptcp_check_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* Look for an acknowledged DATA_FIN */
	if (mptcp_pending_data_fin_ack(sk)) {
		WRITE_ONCE(msk->snd_data_fin_enable, 0);

		switch (sk->sk_state) {
		case TCP_FIN_WAIT1:
			mptcp_set_state(sk, TCP_FIN_WAIT2);
			break;
		case TCP_CLOSING:
		case TCP_LAST_ACK:
			mptcp_set_state(sk, TCP_CLOSE);
			break;
		}

		mptcp_close_wake_up(sk);
	}
}
/* can be called with no lock acquired */
static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (READ_ONCE(msk->rcv_data_fin) &&
	    ((1 << inet_sk_state_load(sk)) &
	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);

		if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) {
			if (seq)
				*seq = rcv_data_fin_seq;

			return true;
		}
	}

	return false;
}

static void mptcp_set_datafin_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 retransmits;

	retransmits = min_t(u32, icsk->icsk_retransmits,
			    ilog2(TCP_RTO_MAX / TCP_RTO_MIN));

	mptcp_sk(sk)->timer_ival = TCP_RTO_MIN << retransmits;
}

static void __mptcp_set_timeout(struct sock *sk, long tout)
{
	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}
static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
{
	const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

	return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
	       inet_csk(ssk)->icsk_timeout - jiffies : 0;
}

static void mptcp_set_timeout(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	long tout = 0;

	mptcp_for_each_subflow(mptcp_sk(sk), subflow)
		tout = max(tout, mptcp_timeout_from_subflow(subflow));
	__mptcp_set_timeout(sk, tout);
}
static inline bool tcp_can_send_ack(const struct sock *ssk)
{
	return !((1 << inet_sk_state_load(ssk)) &
	       (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
}

void __mptcp_subflow_send_ack(struct sock *ssk)
{
	if (tcp_can_send_ack(ssk))
		tcp_send_ack(ssk);
}

static void mptcp_subflow_send_ack(struct sock *ssk)
{
	bool slow;

	slow = lock_sock_fast(ssk);
	__mptcp_subflow_send_ack(ssk);
	unlock_sock_fast(ssk, slow);
}

static void mptcp_send_ack(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow)
		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
}
static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
{
	bool slow;

	slow = lock_sock_fast(ssk);
	if (tcp_can_send_ack(ssk))
		tcp_cleanup_rbuf(ssk, copied);
	unlock_sock_fast(ssk, slow);
}

static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
{
	const struct inet_connection_sock *icsk = inet_csk(ssk);
	u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending);
	const struct tcp_sock *tp = tcp_sk(ssk);

	return (ack_pending & ICSK_ACK_SCHED) &&
		((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) >
		  READ_ONCE(icsk->icsk_ack.rcv_mss)) ||
		 (rx_empty && ack_pending &
			      (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
}
static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
{
	int old_space = READ_ONCE(msk->old_wspace);
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int space = __mptcp_space(sk);
	bool cleanup, rx_empty;

	cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
	rx_empty = !__mptcp_rmem(sk) && copied;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
			mptcp_subflow_cleanup_rbuf(ssk, copied);
	}
}
static bool mptcp_check_data_fin(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	u64 rcv_data_fin_seq;
	bool ret = false;

	/* Need to ack a DATA_FIN received from a peer while this side
	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
	 * msk->rcv_data_fin was set when parsing the incoming options
	 * at the subflow level and the msk lock was not held, so this
	 * is the first opportunity to act on the DATA_FIN and change
	 * the msk state.
	 *
	 * If we are caught up to the sequence number of the incoming
	 * DATA_FIN, send the DATA_ACK now and do state transition.  If
	 * not caught up, do nothing and let the recv code send DATA_ACK
	 * when catching up.
	 */
	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
		WRITE_ONCE(msk->rcv_data_fin, 0);

		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */

		switch (sk->sk_state) {
		case TCP_ESTABLISHED:
			mptcp_set_state(sk, TCP_CLOSE_WAIT);
			break;
		case TCP_FIN_WAIT1:
			mptcp_set_state(sk, TCP_CLOSING);
			break;
		case TCP_FIN_WAIT2:
			mptcp_set_state(sk, TCP_CLOSE);
			break;
		default:
			/* Other states not expected */
			WARN_ON_ONCE(1);
			break;
		}

		ret = true;
		if (!__mptcp_check_fallback(msk))
			mptcp_send_ack(msk);
		mptcp_close_wake_up(sk);
	}
	return ret;
}
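
/* State-machine note (informal, added for illustration): acting on the peer's
 * DATA_FIN moves the msk from ESTABLISHED to CLOSE_WAIT, from FIN_WAIT1 to
 * CLOSING and from FIN_WAIT2 to CLOSE, mirroring plain TCP; the corresponding
 * DATA_ACK is sent via mptcp_send_ack() unless the connection has fallen
 * back to plain TCP.
 */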
static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
{
	if (READ_ONCE(msk->allow_infinite_fallback)) {
		MPTCP_INC_STATS(sock_net(ssk),
				MPTCP_MIB_DSSCORRUPTIONFALLBACK);
		mptcp_do_fallback(ssk);
	} else {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET);
		mptcp_subflow_reset(ssk);
	}
}
static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk,
					   unsigned int *bytes)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;
	bool more_data_avail;
	struct tcp_sock *tp;
	bool done = false;
	int sk_rbuf;

	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);

		if (unlikely(ssk_rbuf > sk_rbuf)) {
			WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf);
			sk_rbuf = ssk_rbuf;
		}
	}

	pr_debug("msk=%p ssk=%p\n", msk, ssk);
	tp = tcp_sk(ssk);
	do {
		u32 map_remaining, offset;
		u32 seq = tp->copied_seq;
		struct sk_buff *skb;
		bool fin;

		/* try to move as much data as available */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);

		skb = skb_peek(&ssk->sk_receive_queue);
		if (!skb) {
			/* With racing move_skbs_to_msk() and __mptcp_move_skbs(),
			 * a different CPU can have already processed the pending
			 * data, stop here or we can enter an infinite loop
			 */
			if (!moved)
				done = true;
			break;
		}

		if (__mptcp_check_fallback(msk)) {
			/* Under fallback skbs have no MPTCP extension and TCP could
			 * collapse them between the dummy map creation and the
			 * current dequeue. Be sure to adjust the map size.
			 */
			map_remaining = skb->len;
			subflow->map_data_len = skb->len;
		}

		offset = seq - TCP_SKB_CB(skb)->seq;
		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
		if (fin) {
			done = true;
			seq++;
		}

		if (offset < skb->len) {
			size_t len = skb->len - offset;

			if (tp->urg_data)
				done = true;

			if (__mptcp_move_skb(msk, ssk, skb, offset, len))
				moved += len;
			seq += len;

			if (unlikely(map_remaining < len)) {
				DEBUG_NET_WARN_ON_ONCE(1);
				mptcp_dss_corruption(msk, ssk);
			}
		} else {
			if (unlikely(!fin)) {
				DEBUG_NET_WARN_ON_ONCE(1);
				mptcp_dss_corruption(msk, ssk);
			}

			sk_eat_skb(ssk, skb);
			done = true;
		}

		WRITE_ONCE(tp->copied_seq, seq);
		more_data_avail = mptcp_subflow_data_available(ssk);

		if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) {
			done = true;
			break;
		}
	} while (more_data_avail);

	if (moved > 0)
		msk->last_data_recv = tcp_jiffies32;
	*bytes += moved;
	return done;
}
737 static bool __mptcp_ofo_queue(struct mptcp_sock
*msk
)
739 struct sock
*sk
= (struct sock
*)msk
;
740 struct sk_buff
*skb
, *tail
;
745 p
= rb_first(&msk
->out_of_order_queue
);
746 pr_debug("msk=%p empty=%d\n", msk
, RB_EMPTY_ROOT(&msk
->out_of_order_queue
));
749 if (after64(MPTCP_SKB_CB(skb
)->map_seq
, msk
->ack_seq
))
753 rb_erase(&skb
->rbnode
, &msk
->out_of_order_queue
);
755 if (unlikely(!after64(MPTCP_SKB_CB(skb
)->end_seq
,
758 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_DUPDATA
);
762 end_seq
= MPTCP_SKB_CB(skb
)->end_seq
;
763 tail
= skb_peek_tail(&sk
->sk_receive_queue
);
764 if (!tail
|| !mptcp_ooo_try_coalesce(msk
, tail
, skb
)) {
765 int delta
= msk
->ack_seq
- MPTCP_SKB_CB(skb
)->map_seq
;
767 /* skip overlapping data, if any */
768 pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
769 MPTCP_SKB_CB(skb
)->map_seq
, msk
->ack_seq
,
771 MPTCP_SKB_CB(skb
)->offset
+= delta
;
772 MPTCP_SKB_CB(skb
)->map_seq
+= delta
;
773 __skb_queue_tail(&sk
->sk_receive_queue
, skb
);
775 msk
->bytes_received
+= end_seq
- msk
->ack_seq
;
776 WRITE_ONCE(msk
->ack_seq
, end_seq
);
782 static bool __mptcp_subflow_error_report(struct sock
*sk
, struct sock
*ssk
)
784 int err
= sock_error(ssk
);
790 /* only propagate errors on fallen-back sockets or
793 if (sk
->sk_state
!= TCP_SYN_SENT
&& !__mptcp_check_fallback(mptcp_sk(sk
)))
796 /* We need to propagate only transition to CLOSE state.
797 * Orphaned socket will see such state change via
798 * subflow_sched_work_if_closed() and that path will properly
799 * destroy the msk as needed.
801 ssk_state
= inet_sk_state_load(ssk
);
802 if (ssk_state
== TCP_CLOSE
&& !sock_flag(sk
, SOCK_DEAD
))
803 mptcp_set_state(sk
, ssk_state
);
804 WRITE_ONCE(sk
->sk_err
, -err
);
806 /* This barrier is coupled with smp_rmb() in mptcp_poll() */
812 void __mptcp_error_report(struct sock
*sk
)
814 struct mptcp_subflow_context
*subflow
;
815 struct mptcp_sock
*msk
= mptcp_sk(sk
);
817 mptcp_for_each_subflow(msk
, subflow
)
818 if (__mptcp_subflow_error_report(sk
, mptcp_subflow_tcp_sock(subflow
)))
822 /* In most cases we will be able to lock the mptcp socket. If its already
823 * owned, we need to defer to the work queue to avoid ABBA deadlock.
825 static bool move_skbs_to_msk(struct mptcp_sock
*msk
, struct sock
*ssk
)
827 struct sock
*sk
= (struct sock
*)msk
;
828 unsigned int moved
= 0;
830 __mptcp_move_skbs_from_subflow(msk
, ssk
, &moved
);
831 __mptcp_ofo_queue(msk
);
832 if (unlikely(ssk
->sk_err
)) {
833 if (!sock_owned_by_user(sk
))
834 __mptcp_error_report(sk
);
836 __set_bit(MPTCP_ERROR_REPORT
, &msk
->cb_flags
);
839 /* If the moves have caught up with the DATA_FIN sequence number
840 * it's time to ack the DATA_FIN and change socket state, but
841 * this is not a good place to change state. Let the workqueue
844 if (mptcp_pending_data_fin(sk
, NULL
))
845 mptcp_schedule_work(sk
);
849 void mptcp_data_ready(struct sock
*sk
, struct sock
*ssk
)
851 struct mptcp_subflow_context
*subflow
= mptcp_subflow_ctx(ssk
);
852 struct mptcp_sock
*msk
= mptcp_sk(sk
);
853 int sk_rbuf
, ssk_rbuf
;
855 /* The peer can send data while we are shutting down this
856 * subflow at msk destruction time, but we must avoid enqueuing
857 * more data to the msk receive queue
859 if (unlikely(subflow
->disposable
))
862 ssk_rbuf
= READ_ONCE(ssk
->sk_rcvbuf
);
863 sk_rbuf
= READ_ONCE(sk
->sk_rcvbuf
);
864 if (unlikely(ssk_rbuf
> sk_rbuf
))
867 /* over limit? can't append more skbs to msk, Also, no need to wake-up*/
868 if (__mptcp_rmem(sk
) > sk_rbuf
)
871 /* Wake-up the reader only for in-sequence data */
873 if (move_skbs_to_msk(msk
, ssk
) && mptcp_epollin_ready(sk
))
874 sk
->sk_data_ready(sk
);
875 mptcp_data_unlock(sk
);
878 static void mptcp_subflow_joined(struct mptcp_sock
*msk
, struct sock
*ssk
)
880 mptcp_subflow_ctx(ssk
)->map_seq
= READ_ONCE(msk
->ack_seq
);
881 WRITE_ONCE(msk
->allow_infinite_fallback
, false);
882 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED
, msk
, ssk
, GFP_ATOMIC
);
885 static bool __mptcp_finish_join(struct mptcp_sock
*msk
, struct sock
*ssk
)
887 struct sock
*sk
= (struct sock
*)msk
;
889 if (sk
->sk_state
!= TCP_ESTABLISHED
)
892 /* attach to msk socket only after we are sure we will deal with it
895 if (sk
->sk_socket
&& !ssk
->sk_socket
)
896 mptcp_sock_graft(ssk
, sk
->sk_socket
);
898 mptcp_subflow_ctx(ssk
)->subflow_id
= msk
->subflow_id
++;
899 mptcp_sockopt_sync_locked(msk
, ssk
);
900 mptcp_subflow_joined(msk
, ssk
);
901 mptcp_stop_tout_timer(sk
);
902 __mptcp_propagate_sndbuf(sk
, ssk
);
906 static void __mptcp_flush_join_list(struct sock
*sk
, struct list_head
*join_list
)
908 struct mptcp_subflow_context
*tmp
, *subflow
;
909 struct mptcp_sock
*msk
= mptcp_sk(sk
);
911 list_for_each_entry_safe(subflow
, tmp
, join_list
, node
) {
912 struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
913 bool slow
= lock_sock_fast(ssk
);
915 list_move_tail(&subflow
->node
, &msk
->conn_list
);
916 if (!__mptcp_finish_join(msk
, ssk
))
917 mptcp_subflow_reset(ssk
);
918 unlock_sock_fast(ssk
, slow
);
922 static bool mptcp_rtx_timer_pending(struct sock
*sk
)
924 return timer_pending(&inet_csk(sk
)->icsk_retransmit_timer
);
927 static void mptcp_reset_rtx_timer(struct sock
*sk
)
929 struct inet_connection_sock
*icsk
= inet_csk(sk
);
932 /* prevent rescheduling on close */
933 if (unlikely(inet_sk_state_load(sk
) == TCP_CLOSE
))
936 tout
= mptcp_sk(sk
)->timer_ival
;
937 sk_reset_timer(sk
, &icsk
->icsk_retransmit_timer
, jiffies
+ tout
);
940 bool mptcp_schedule_work(struct sock
*sk
)
942 if (inet_sk_state_load(sk
) != TCP_CLOSE
&&
943 schedule_work(&mptcp_sk(sk
)->work
)) {
944 /* each subflow already holds a reference to the sk, and the
945 * workqueue is invoked by a subflow, so sk can't go away here.
953 static struct sock
*mptcp_subflow_recv_lookup(const struct mptcp_sock
*msk
)
955 struct mptcp_subflow_context
*subflow
;
957 msk_owned_by_me(msk
);
959 mptcp_for_each_subflow(msk
, subflow
) {
960 if (READ_ONCE(subflow
->data_avail
))
961 return mptcp_subflow_tcp_sock(subflow
);
967 static bool mptcp_skb_can_collapse_to(u64 write_seq
,
968 const struct sk_buff
*skb
,
969 const struct mptcp_ext
*mpext
)
971 if (!tcp_skb_can_collapse_to(skb
))
974 /* can collapse only if MPTCP level sequence is in order and this
975 * mapping has not been xmitted yet
977 return mpext
&& mpext
->data_seq
+ mpext
->data_len
== write_seq
&&
981 /* we can append data to the given data frag if:
982 * - there is space available in the backing page_frag
983 * - the data frag tail matches the current page_frag free offset
984 * - the data frag end sequence number matches the current write seq
986 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock
*msk
,
987 const struct page_frag
*pfrag
,
988 const struct mptcp_data_frag
*df
)
990 return df
&& pfrag
->page
== df
->page
&&
991 pfrag
->size
- pfrag
->offset
> 0 &&
992 pfrag
->offset
== (df
->offset
+ df
->data_len
) &&
993 df
->data_seq
+ df
->data_len
== msk
->write_seq
;
996 static void dfrag_uncharge(struct sock
*sk
, int len
)
998 sk_mem_uncharge(sk
, len
);
999 sk_wmem_queued_add(sk
, -len
);
1002 static void dfrag_clear(struct sock
*sk
, struct mptcp_data_frag
*dfrag
)
1004 int len
= dfrag
->data_len
+ dfrag
->overhead
;
1006 list_del(&dfrag
->list
);
1007 dfrag_uncharge(sk
, len
);
1008 put_page(dfrag
->page
);
1011 /* called under both the msk socket lock and the data lock */
1012 static void __mptcp_clean_una(struct sock
*sk
)
1014 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1015 struct mptcp_data_frag
*dtmp
, *dfrag
;
1018 snd_una
= msk
->snd_una
;
1019 list_for_each_entry_safe(dfrag
, dtmp
, &msk
->rtx_queue
, list
) {
1020 if (after64(dfrag
->data_seq
+ dfrag
->data_len
, snd_una
))
1023 if (unlikely(dfrag
== msk
->first_pending
)) {
1024 /* in recovery mode can see ack after the current snd head */
1025 if (WARN_ON_ONCE(!msk
->recovery
))
1028 WRITE_ONCE(msk
->first_pending
, mptcp_send_next(sk
));
1031 dfrag_clear(sk
, dfrag
);
1034 dfrag
= mptcp_rtx_head(sk
);
1035 if (dfrag
&& after64(snd_una
, dfrag
->data_seq
)) {
1036 u64 delta
= snd_una
- dfrag
->data_seq
;
1038 /* prevent wrap around in recovery mode */
1039 if (unlikely(delta
> dfrag
->already_sent
)) {
1040 if (WARN_ON_ONCE(!msk
->recovery
))
1042 if (WARN_ON_ONCE(delta
> dfrag
->data_len
))
1044 dfrag
->already_sent
+= delta
- dfrag
->already_sent
;
1047 dfrag
->data_seq
+= delta
;
1048 dfrag
->offset
+= delta
;
1049 dfrag
->data_len
-= delta
;
1050 dfrag
->already_sent
-= delta
;
1052 dfrag_uncharge(sk
, delta
);
1055 /* all retransmitted data acked, recovery completed */
1056 if (unlikely(msk
->recovery
) && after64(msk
->snd_una
, msk
->recovery_snd_nxt
))
1057 msk
->recovery
= false;
1060 if (snd_una
== msk
->snd_nxt
&& snd_una
== msk
->write_seq
) {
1061 if (mptcp_rtx_timer_pending(sk
) && !mptcp_data_fin_enabled(msk
))
1062 mptcp_stop_rtx_timer(sk
);
1064 mptcp_reset_rtx_timer(sk
);
1067 if (mptcp_pending_data_fin_ack(sk
))
1068 mptcp_schedule_work(sk
);
1071 static void __mptcp_clean_una_wakeup(struct sock
*sk
)
1073 lockdep_assert_held_once(&sk
->sk_lock
.slock
);
1075 __mptcp_clean_una(sk
);
1076 mptcp_write_space(sk
);
1079 static void mptcp_clean_una_wakeup(struct sock
*sk
)
1081 mptcp_data_lock(sk
);
1082 __mptcp_clean_una_wakeup(sk
);
1083 mptcp_data_unlock(sk
);
1086 static void mptcp_enter_memory_pressure(struct sock
*sk
)
1088 struct mptcp_subflow_context
*subflow
;
1089 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1092 mptcp_for_each_subflow(msk
, subflow
) {
1093 struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
1096 tcp_enter_memory_pressure(ssk
);
1097 sk_stream_moderate_sndbuf(ssk
);
1101 __mptcp_sync_sndbuf(sk
);
1104 /* ensure we get enough memory for the frag hdr, beyond some minimal amount of
1107 static bool mptcp_page_frag_refill(struct sock
*sk
, struct page_frag
*pfrag
)
1109 if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag
),
1110 pfrag
, sk
->sk_allocation
)))
1113 mptcp_enter_memory_pressure(sk
);
1117 static struct mptcp_data_frag
*
1118 mptcp_carve_data_frag(const struct mptcp_sock
*msk
, struct page_frag
*pfrag
,
1121 int offset
= ALIGN(orig_offset
, sizeof(long));
1122 struct mptcp_data_frag
*dfrag
;
1124 dfrag
= (struct mptcp_data_frag
*)(page_to_virt(pfrag
->page
) + offset
);
1125 dfrag
->data_len
= 0;
1126 dfrag
->data_seq
= msk
->write_seq
;
1127 dfrag
->overhead
= offset
- orig_offset
+ sizeof(struct mptcp_data_frag
);
1128 dfrag
->offset
= offset
+ sizeof(struct mptcp_data_frag
);
1129 dfrag
->already_sent
= 0;
1130 dfrag
->page
= pfrag
->page
;
1135 struct mptcp_sendmsg_info
{
1141 bool data_lock_held
;
1144 static int mptcp_check_allowed_size(const struct mptcp_sock
*msk
, struct sock
*ssk
,
1145 u64 data_seq
, int avail_size
)
1147 u64 window_end
= mptcp_wnd_end(msk
);
1150 if (__mptcp_check_fallback(msk
))
1153 mptcp_snd_wnd
= window_end
- data_seq
;
1154 avail_size
= min_t(unsigned int, mptcp_snd_wnd
, avail_size
);
1156 if (unlikely(tcp_sk(ssk
)->snd_wnd
< mptcp_snd_wnd
)) {
1157 tcp_sk(ssk
)->snd_wnd
= min_t(u64
, U32_MAX
, mptcp_snd_wnd
);
1158 MPTCP_INC_STATS(sock_net(ssk
), MPTCP_MIB_SNDWNDSHARED
);
1164 static bool __mptcp_add_ext(struct sk_buff
*skb
, gfp_t gfp
)
1166 struct skb_ext
*mpext
= __skb_ext_alloc(gfp
);
1170 __skb_ext_set(skb
, SKB_EXT_MPTCP
, mpext
);
1174 static struct sk_buff
*__mptcp_do_alloc_tx_skb(struct sock
*sk
, gfp_t gfp
)
1176 struct sk_buff
*skb
;
1178 skb
= alloc_skb_fclone(MAX_TCP_HEADER
, gfp
);
1180 if (likely(__mptcp_add_ext(skb
, gfp
))) {
1181 skb_reserve(skb
, MAX_TCP_HEADER
);
1182 skb
->ip_summed
= CHECKSUM_PARTIAL
;
1183 INIT_LIST_HEAD(&skb
->tcp_tsorted_anchor
);
1188 mptcp_enter_memory_pressure(sk
);
1193 static struct sk_buff
*__mptcp_alloc_tx_skb(struct sock
*sk
, struct sock
*ssk
, gfp_t gfp
)
1195 struct sk_buff
*skb
;
1197 skb
= __mptcp_do_alloc_tx_skb(sk
, gfp
);
1201 if (likely(sk_wmem_schedule(ssk
, skb
->truesize
))) {
1202 tcp_skb_entail(ssk
, skb
);
1205 tcp_skb_tsorted_anchor_cleanup(skb
);
1210 static struct sk_buff
*mptcp_alloc_tx_skb(struct sock
*sk
, struct sock
*ssk
, bool data_lock_held
)
1212 gfp_t gfp
= data_lock_held
? GFP_ATOMIC
: sk
->sk_allocation
;
1214 return __mptcp_alloc_tx_skb(sk
, ssk
, gfp
);
1217 /* note: this always recompute the csum on the whole skb, even
1218 * if we just appended a single frag. More status info needed
1220 static void mptcp_update_data_checksum(struct sk_buff
*skb
, int added
)
1222 struct mptcp_ext
*mpext
= mptcp_get_ext(skb
);
1223 __wsum csum
= ~csum_unfold(mpext
->csum
);
1224 int offset
= skb
->len
- added
;
1226 mpext
->csum
= csum_fold(csum_block_add(csum
, skb_checksum(skb
, offset
, added
, 0), offset
));
1229 static void mptcp_update_infinite_map(struct mptcp_sock
*msk
,
1231 struct mptcp_ext
*mpext
)
1236 mpext
->infinite_map
= 1;
1237 mpext
->data_len
= 0;
1239 MPTCP_INC_STATS(sock_net(ssk
), MPTCP_MIB_INFINITEMAPTX
);
1240 mptcp_subflow_ctx(ssk
)->send_infinite_map
= 0;
1242 mptcp_do_fallback(ssk
);
1245 #define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
1247 static int mptcp_sendmsg_frag(struct sock
*sk
, struct sock
*ssk
,
1248 struct mptcp_data_frag
*dfrag
,
1249 struct mptcp_sendmsg_info
*info
)
1251 u64 data_seq
= dfrag
->data_seq
+ info
->sent
;
1252 int offset
= dfrag
->offset
+ info
->sent
;
1253 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1254 bool zero_window_probe
= false;
1255 struct mptcp_ext
*mpext
= NULL
;
1256 bool can_coalesce
= false;
1257 bool reuse_skb
= true;
1258 struct sk_buff
*skb
;
1262 pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
1263 msk
, ssk
, dfrag
->data_seq
, dfrag
->data_len
, info
->sent
);
1265 if (WARN_ON_ONCE(info
->sent
> info
->limit
||
1266 info
->limit
> dfrag
->data_len
))
1269 if (unlikely(!__tcp_can_send(ssk
)))
1272 /* compute send limit */
1273 if (unlikely(ssk
->sk_gso_max_size
> MPTCP_MAX_GSO_SIZE
))
1274 ssk
->sk_gso_max_size
= MPTCP_MAX_GSO_SIZE
;
1275 info
->mss_now
= tcp_send_mss(ssk
, &info
->size_goal
, info
->flags
);
1276 copy
= info
->size_goal
;
1278 skb
= tcp_write_queue_tail(ssk
);
1279 if (skb
&& copy
> skb
->len
) {
1280 /* Limit the write to the size available in the
1281 * current skb, if any, so that we create at most a new skb.
1282 * Explicitly tells TCP internals to avoid collapsing on later
1283 * queue management operation, to avoid breaking the ext <->
1284 * SSN association set here
1286 mpext
= mptcp_get_ext(skb
);
1287 if (!mptcp_skb_can_collapse_to(data_seq
, skb
, mpext
)) {
1288 TCP_SKB_CB(skb
)->eor
= 1;
1289 tcp_mark_push(tcp_sk(ssk
), skb
);
1293 i
= skb_shinfo(skb
)->nr_frags
;
1294 can_coalesce
= skb_can_coalesce(skb
, i
, dfrag
->page
, offset
);
1295 if (!can_coalesce
&& i
>= READ_ONCE(net_hotdata
.sysctl_max_skb_frags
)) {
1296 tcp_mark_push(tcp_sk(ssk
), skb
);
1303 skb
= mptcp_alloc_tx_skb(sk
, ssk
, info
->data_lock_held
);
1307 i
= skb_shinfo(skb
)->nr_frags
;
1309 mpext
= mptcp_get_ext(skb
);
1312 /* Zero window and all data acked? Probe. */
1313 copy
= mptcp_check_allowed_size(msk
, ssk
, data_seq
, copy
);
1315 u64 snd_una
= READ_ONCE(msk
->snd_una
);
1317 if (snd_una
!= msk
->snd_nxt
|| tcp_write_queue_tail(ssk
)) {
1318 tcp_remove_empty_skb(ssk
);
1322 zero_window_probe
= true;
1323 data_seq
= snd_una
- 1;
1327 copy
= min_t(size_t, copy
, info
->limit
- info
->sent
);
1328 if (!sk_wmem_schedule(ssk
, copy
)) {
1329 tcp_remove_empty_skb(ssk
);
1334 skb_frag_size_add(&skb_shinfo(skb
)->frags
[i
- 1], copy
);
1336 get_page(dfrag
->page
);
1337 skb_fill_page_desc(skb
, i
, dfrag
->page
, offset
, copy
);
1341 skb
->data_len
+= copy
;
1342 skb
->truesize
+= copy
;
1343 sk_wmem_queued_add(ssk
, copy
);
1344 sk_mem_charge(ssk
, copy
);
1345 WRITE_ONCE(tcp_sk(ssk
)->write_seq
, tcp_sk(ssk
)->write_seq
+ copy
);
1346 TCP_SKB_CB(skb
)->end_seq
+= copy
;
1347 tcp_skb_pcount_set(skb
, 0);
1349 /* on skb reuse we just need to update the DSS len */
1351 TCP_SKB_CB(skb
)->tcp_flags
&= ~TCPHDR_PSH
;
1352 mpext
->data_len
+= copy
;
1356 memset(mpext
, 0, sizeof(*mpext
));
1357 mpext
->data_seq
= data_seq
;
1358 mpext
->subflow_seq
= mptcp_subflow_ctx(ssk
)->rel_write_seq
;
1359 mpext
->data_len
= copy
;
1363 pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
1364 mpext
->data_seq
, mpext
->subflow_seq
, mpext
->data_len
,
1367 if (zero_window_probe
) {
1368 mptcp_subflow_ctx(ssk
)->rel_write_seq
+= copy
;
1370 if (READ_ONCE(msk
->csum_enabled
))
1371 mptcp_update_data_checksum(skb
, copy
);
1372 tcp_push_pending_frames(ssk
);
1376 if (READ_ONCE(msk
->csum_enabled
))
1377 mptcp_update_data_checksum(skb
, copy
);
1378 if (mptcp_subflow_ctx(ssk
)->send_infinite_map
)
1379 mptcp_update_infinite_map(msk
, ssk
, mpext
);
1380 trace_mptcp_sendmsg_frag(mpext
);
1381 mptcp_subflow_ctx(ssk
)->rel_write_seq
+= copy
;
1385 #define MPTCP_SEND_BURST_SIZE ((1 << 16) - \
1386 sizeof(struct tcphdr) - \
1387 MAX_TCP_OPTION_SPACE - \
1388 sizeof(struct ipv6hdr) - \
1389 sizeof(struct frag_hdr))
1391 struct subflow_send_info
{
1396 void mptcp_subflow_set_active(struct mptcp_subflow_context
*subflow
)
1398 if (!subflow
->stale
)
1402 MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow
)), MPTCP_MIB_SUBFLOWRECOVER
);
1405 bool mptcp_subflow_active(struct mptcp_subflow_context
*subflow
)
1407 if (unlikely(subflow
->stale
)) {
1408 u32 rcv_tstamp
= READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow
))->rcv_tstamp
);
1410 if (subflow
->stale_rcv_tstamp
== rcv_tstamp
)
1413 mptcp_subflow_set_active(subflow
);
1415 return __mptcp_subflow_active(subflow
);
1418 #define SSK_MODE_ACTIVE 0
1419 #define SSK_MODE_BACKUP 1
1420 #define SSK_MODE_MAX 2
1422 /* implement the mptcp packet scheduler;
1423 * returns the subflow that will transmit the next DSS
1424 * additionally updates the rtx timeout
1426 struct sock
*mptcp_subflow_get_send(struct mptcp_sock
*msk
)
1428 struct subflow_send_info send_info
[SSK_MODE_MAX
];
1429 struct mptcp_subflow_context
*subflow
;
1430 struct sock
*sk
= (struct sock
*)msk
;
1431 u32 pace
, burst
, wmem
;
1432 int i
, nr_active
= 0;
1437 /* pick the subflow with the lower wmem/wspace ratio */
1438 for (i
= 0; i
< SSK_MODE_MAX
; ++i
) {
1439 send_info
[i
].ssk
= NULL
;
1440 send_info
[i
].linger_time
= -1;
1443 mptcp_for_each_subflow(msk
, subflow
) {
1444 bool backup
= subflow
->backup
|| subflow
->request_bkup
;
1446 trace_mptcp_subflow_get_send(subflow
);
1447 ssk
= mptcp_subflow_tcp_sock(subflow
);
1448 if (!mptcp_subflow_active(subflow
))
1451 tout
= max(tout
, mptcp_timeout_from_subflow(subflow
));
1452 nr_active
+= !backup
;
1453 pace
= subflow
->avg_pacing_rate
;
1454 if (unlikely(!pace
)) {
1455 /* init pacing rate from socket */
1456 subflow
->avg_pacing_rate
= READ_ONCE(ssk
->sk_pacing_rate
);
1457 pace
= subflow
->avg_pacing_rate
;
1462 linger_time
= div_u64((u64
)READ_ONCE(ssk
->sk_wmem_queued
) << 32, pace
);
1463 if (linger_time
< send_info
[backup
].linger_time
) {
1464 send_info
[backup
].ssk
= ssk
;
1465 send_info
[backup
].linger_time
= linger_time
;
1468 __mptcp_set_timeout(sk
, tout
);
1470 /* pick the best backup if no other subflow is active */
1472 send_info
[SSK_MODE_ACTIVE
].ssk
= send_info
[SSK_MODE_BACKUP
].ssk
;
1474 /* According to the blest algorithm, to avoid HoL blocking for the
1475 * faster flow, we need to:
1476 * - estimate the faster flow linger time
1477 * - use the above to estimate the amount of byte transferred
1478 * by the faster flow
1479 * - check that the amount of queued data is greter than the above,
1480 * otherwise do not use the picked, slower, subflow
1481 * We select the subflow with the shorter estimated time to flush
1482 * the queued mem, which basically ensure the above. We just need
1483 * to check that subflow has a non empty cwin.
1485 ssk
= send_info
[SSK_MODE_ACTIVE
].ssk
;
1486 if (!ssk
|| !sk_stream_memory_free(ssk
))
1489 burst
= min_t(int, MPTCP_SEND_BURST_SIZE
, mptcp_wnd_end(msk
) - msk
->snd_nxt
);
1490 wmem
= READ_ONCE(ssk
->sk_wmem_queued
);
1494 subflow
= mptcp_subflow_ctx(ssk
);
1495 subflow
->avg_pacing_rate
= div_u64((u64
)subflow
->avg_pacing_rate
* wmem
+
1496 READ_ONCE(ssk
->sk_pacing_rate
) * burst
,
1498 msk
->snd_burst
= burst
;
1502 static void mptcp_push_release(struct sock
*ssk
, struct mptcp_sendmsg_info
*info
)
1504 tcp_push(ssk
, 0, info
->mss_now
, tcp_sk(ssk
)->nonagle
, info
->size_goal
);
1508 static void mptcp_update_post_push(struct mptcp_sock
*msk
,
1509 struct mptcp_data_frag
*dfrag
,
1512 u64 snd_nxt_new
= dfrag
->data_seq
;
1514 dfrag
->already_sent
+= sent
;
1516 msk
->snd_burst
-= sent
;
1518 snd_nxt_new
+= dfrag
->already_sent
;
1520 /* snd_nxt_new can be smaller than snd_nxt in case mptcp
1521 * is recovering after a failover. In that event, this re-sends
1524 * Thus compute snd_nxt_new candidate based on
1525 * the dfrag->data_seq that was sent and the data
1526 * that has been handed to the subflow for transmission
1527 * and skip update in case it was old dfrag.
1529 if (likely(after64(snd_nxt_new
, msk
->snd_nxt
))) {
1530 msk
->bytes_sent
+= snd_nxt_new
- msk
->snd_nxt
;
1531 WRITE_ONCE(msk
->snd_nxt
, snd_nxt_new
);
1535 void mptcp_check_and_set_pending(struct sock
*sk
)
1537 if (mptcp_send_head(sk
)) {
1538 mptcp_data_lock(sk
);
1539 mptcp_sk(sk
)->cb_flags
|= BIT(MPTCP_PUSH_PENDING
);
1540 mptcp_data_unlock(sk
);
1544 static int __subflow_push_pending(struct sock
*sk
, struct sock
*ssk
,
1545 struct mptcp_sendmsg_info
*info
)
1547 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1548 struct mptcp_data_frag
*dfrag
;
1549 int len
, copied
= 0, err
= 0;
1551 while ((dfrag
= mptcp_send_head(sk
))) {
1552 info
->sent
= dfrag
->already_sent
;
1553 info
->limit
= dfrag
->data_len
;
1554 len
= dfrag
->data_len
- dfrag
->already_sent
;
1558 ret
= mptcp_sendmsg_frag(sk
, ssk
, dfrag
, info
);
1560 err
= copied
? : ret
;
1568 mptcp_update_post_push(msk
, dfrag
, ret
);
1570 WRITE_ONCE(msk
->first_pending
, mptcp_send_next(sk
));
1572 if (msk
->snd_burst
<= 0 ||
1573 !sk_stream_memory_free(ssk
) ||
1574 !mptcp_subflow_active(mptcp_subflow_ctx(ssk
))) {
1578 mptcp_set_timeout(sk
);
1584 msk
->last_data_sent
= tcp_jiffies32
;
1588 void __mptcp_push_pending(struct sock
*sk
, unsigned int flags
)
1590 struct sock
*prev_ssk
= NULL
, *ssk
= NULL
;
1591 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1592 struct mptcp_sendmsg_info info
= {
1595 bool do_check_data_fin
= false;
1598 while (mptcp_send_head(sk
) && (push_count
> 0)) {
1599 struct mptcp_subflow_context
*subflow
;
1602 if (mptcp_sched_get_send(msk
))
1607 mptcp_for_each_subflow(msk
, subflow
) {
1608 if (READ_ONCE(subflow
->scheduled
)) {
1609 mptcp_subflow_set_scheduled(subflow
, false);
1612 ssk
= mptcp_subflow_tcp_sock(subflow
);
1613 if (ssk
!= prev_ssk
) {
1614 /* First check. If the ssk has changed since
1615 * the last round, release prev_ssk
1618 mptcp_push_release(prev_ssk
, &info
);
1620 /* Need to lock the new subflow only if different
1621 * from the previous one, otherwise we are still
1622 * helding the relevant lock
1629 ret
= __subflow_push_pending(sk
, ssk
, &info
);
1631 if (ret
!= -EAGAIN
||
1632 (1 << ssk
->sk_state
) &
1633 (TCPF_FIN_WAIT1
| TCPF_FIN_WAIT2
| TCPF_CLOSE
))
1637 do_check_data_fin
= true;
1642 /* at this point we held the socket lock for the last subflow we used */
1644 mptcp_push_release(ssk
, &info
);
1646 /* ensure the rtx timer is running */
1647 if (!mptcp_rtx_timer_pending(sk
))
1648 mptcp_reset_rtx_timer(sk
);
1649 if (do_check_data_fin
)
1650 mptcp_check_send_data_fin(sk
);
1653 static void __mptcp_subflow_push_pending(struct sock
*sk
, struct sock
*ssk
, bool first
)
1655 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1656 struct mptcp_sendmsg_info info
= {
1657 .data_lock_held
= true,
1659 bool keep_pushing
= true;
1660 struct sock
*xmit_ssk
;
1664 while (mptcp_send_head(sk
) && keep_pushing
) {
1665 struct mptcp_subflow_context
*subflow
= mptcp_subflow_ctx(ssk
);
1668 /* check for a different subflow usage only after
1669 * spooling the first chunk of data
1672 mptcp_subflow_set_scheduled(subflow
, false);
1673 ret
= __subflow_push_pending(sk
, ssk
, &info
);
1681 if (mptcp_sched_get_send(msk
))
1684 if (READ_ONCE(subflow
->scheduled
)) {
1685 mptcp_subflow_set_scheduled(subflow
, false);
1686 ret
= __subflow_push_pending(sk
, ssk
, &info
);
1688 keep_pushing
= false;
1692 mptcp_for_each_subflow(msk
, subflow
) {
1693 if (READ_ONCE(subflow
->scheduled
)) {
1694 xmit_ssk
= mptcp_subflow_tcp_sock(subflow
);
1695 if (xmit_ssk
!= ssk
) {
1696 mptcp_subflow_delegate(subflow
,
1697 MPTCP_DELEGATE_SEND
);
1698 keep_pushing
= false;
1705 /* __mptcp_alloc_tx_skb could have released some wmem and we are
1706 * not going to flush it via release_sock()
1709 tcp_push(ssk
, 0, info
.mss_now
, tcp_sk(ssk
)->nonagle
,
1711 if (!mptcp_rtx_timer_pending(sk
))
1712 mptcp_reset_rtx_timer(sk
);
1714 if (msk
->snd_data_fin_enable
&&
1715 msk
->snd_nxt
+ 1 == msk
->write_seq
)
1716 mptcp_schedule_work(sk
);
1720 static int mptcp_disconnect(struct sock
*sk
, int flags
);
1722 static int mptcp_sendmsg_fastopen(struct sock
*sk
, struct msghdr
*msg
,
1723 size_t len
, int *copied_syn
)
1725 unsigned int saved_flags
= msg
->msg_flags
;
1726 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1730 /* on flags based fastopen the mptcp is supposed to create the
1731 * first subflow right now. Otherwise we are in the defer_connect
1732 * path, and the first subflow must be already present.
1733 * Since the defer_connect flag is cleared after the first succsful
1734 * fastopen attempt, no need to check for additional subflow status.
1736 if (msg
->msg_flags
& MSG_FASTOPEN
) {
1737 ssk
= __mptcp_nmpc_sk(msk
);
1739 return PTR_ERR(ssk
);
1747 msg
->msg_flags
|= MSG_DONTWAIT
;
1748 msk
->fastopening
= 1;
1749 ret
= tcp_sendmsg_fastopen(ssk
, msg
, copied_syn
, len
, NULL
);
1750 msk
->fastopening
= 0;
1751 msg
->msg_flags
= saved_flags
;
1754 /* do the blocking bits of inet_stream_connect outside the ssk socket lock */
1755 if (ret
== -EINPROGRESS
&& !(msg
->msg_flags
& MSG_DONTWAIT
)) {
1756 ret
= __inet_stream_connect(sk
->sk_socket
, msg
->msg_name
,
1757 msg
->msg_namelen
, msg
->msg_flags
, 1);
1759 /* Keep the same behaviour of plain TCP: zero the copied bytes in
1760 * case of any error, except timeout or signal
1762 if (ret
&& ret
!= -EINPROGRESS
&& ret
!= -ERESTARTSYS
&& ret
!= -EINTR
)
1764 } else if (ret
&& ret
!= -EINPROGRESS
) {
1765 /* The disconnect() op called by tcp_sendmsg_fastopen()/
1766 * __inet_stream_connect() can fail, due to looking check,
1767 * see mptcp_disconnect().
1768 * Attempt it again outside the problematic scope.
1770 if (!mptcp_disconnect(sk
, 0))
1771 sk
->sk_socket
->state
= SS_UNCONNECTED
;
1773 inet_clear_bit(DEFER_CONNECT
, sk
);
1778 static int do_copy_data_nocache(struct sock
*sk
, int copy
,
1779 struct iov_iter
*from
, char *to
)
1781 if (sk
->sk_route_caps
& NETIF_F_NOCACHE_COPY
) {
1782 if (!copy_from_iter_full_nocache(to
, copy
, from
))
1784 } else if (!copy_from_iter_full(to
, copy
, from
)) {
1790 /* open-code sk_stream_memory_free() plus sent limit computation to
1791 * avoid indirect calls in fast-path.
1792 * Called under the msk socket lock, so we can avoid a bunch of ONCE
1795 static u32
mptcp_send_limit(const struct sock
*sk
)
1797 const struct mptcp_sock
*msk
= mptcp_sk(sk
);
1798 u32 limit
, not_sent
;
1800 if (sk
->sk_wmem_queued
>= READ_ONCE(sk
->sk_sndbuf
))
1803 limit
= mptcp_notsent_lowat(sk
);
1804 if (limit
== UINT_MAX
)
1807 not_sent
= msk
->write_seq
- msk
->snd_nxt
;
1808 if (not_sent
>= limit
)
1811 return limit
- not_sent
;
1814 static int mptcp_sendmsg(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1816 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1817 struct page_frag
*pfrag
;
1822 /* silently ignore everything else */
1823 msg
->msg_flags
&= MSG_MORE
| MSG_DONTWAIT
| MSG_NOSIGNAL
| MSG_FASTOPEN
;
1827 if (unlikely(inet_test_bit(DEFER_CONNECT
, sk
) ||
1828 msg
->msg_flags
& MSG_FASTOPEN
)) {
1831 ret
= mptcp_sendmsg_fastopen(sk
, msg
, len
, &copied_syn
);
1832 copied
+= copied_syn
;
1833 if (ret
== -EINPROGRESS
&& copied_syn
> 0)
1839 timeo
= sock_sndtimeo(sk
, msg
->msg_flags
& MSG_DONTWAIT
);
1841 if ((1 << sk
->sk_state
) & ~(TCPF_ESTABLISHED
| TCPF_CLOSE_WAIT
)) {
1842 ret
= sk_stream_wait_connect(sk
, &timeo
);
1848 if (unlikely(sk
->sk_err
|| (sk
->sk_shutdown
& SEND_SHUTDOWN
)))
1851 pfrag
= sk_page_frag(sk
);
1853 while (msg_data_left(msg
)) {
1854 int total_ts
, frag_truesize
= 0;
1855 struct mptcp_data_frag
*dfrag
;
1856 bool dfrag_collapsed
;
1857 size_t psize
, offset
;
1860 /* ensure fitting the notsent_lowat() constraint */
1861 copy_limit
= mptcp_send_limit(sk
);
1863 goto wait_for_memory
;
1865 /* reuse tail pfrag, if possible, or carve a new one from the
1868 dfrag
= mptcp_pending_tail(sk
);
1869 dfrag_collapsed
= mptcp_frag_can_collapse_to(msk
, pfrag
, dfrag
);
1870 if (!dfrag_collapsed
) {
1871 if (!mptcp_page_frag_refill(sk
, pfrag
))
1872 goto wait_for_memory
;
1874 dfrag
= mptcp_carve_data_frag(msk
, pfrag
, pfrag
->offset
);
1875 frag_truesize
= dfrag
->overhead
;
1878 /* we do not bound vs wspace, to allow a single packet.
1879 * memory accounting will prevent execessive memory usage
1882 offset
= dfrag
->offset
+ dfrag
->data_len
;
1883 psize
= pfrag
->size
- offset
;
1884 psize
= min_t(size_t, psize
, msg_data_left(msg
));
1885 psize
= min_t(size_t, psize
, copy_limit
);
1886 total_ts
= psize
+ frag_truesize
;
1888 if (!sk_wmem_schedule(sk
, total_ts
))
1889 goto wait_for_memory
;
1891 ret
= do_copy_data_nocache(sk
, psize
, &msg
->msg_iter
,
1892 page_address(dfrag
->page
) + offset
);
1896 /* data successfully copied into the write queue */
1897 sk_forward_alloc_add(sk
, -total_ts
);
1899 dfrag
->data_len
+= psize
;
1900 frag_truesize
+= psize
;
1901 pfrag
->offset
+= frag_truesize
;
1902 WRITE_ONCE(msk
->write_seq
, msk
->write_seq
+ psize
);
1904 /* charge data on mptcp pending queue to the msk socket
1905 * Note: we charge such data both to sk and ssk
1907 sk_wmem_queued_add(sk
, frag_truesize
);
1908 if (!dfrag_collapsed
) {
1909 get_page(dfrag
->page
);
1910 list_add_tail(&dfrag
->list
, &msk
->rtx_queue
);
1911 if (!msk
->first_pending
)
1912 WRITE_ONCE(msk
->first_pending
, dfrag
);
1914 pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk
,
1915 dfrag
->data_seq
, dfrag
->data_len
, dfrag
->already_sent
,
1921 set_bit(SOCK_NOSPACE
, &sk
->sk_socket
->flags
);
1922 __mptcp_push_pending(sk
, msg
->msg_flags
);
1923 ret
= sk_stream_wait_memory(sk
, &timeo
);
1929 __mptcp_push_pending(sk
, msg
->msg_flags
);
1939 copied
= sk_stream_error(sk
, msg
->msg_flags
, ret
);
1943 static void mptcp_rcv_space_adjust(struct mptcp_sock
*msk
, int copied
);
1945 static int __mptcp_recvmsg_mskq(struct mptcp_sock
*msk
,
1947 size_t len
, int flags
,
1948 struct scm_timestamping_internal
*tss
,
1951 struct sk_buff
*skb
, *tmp
;
1954 skb_queue_walk_safe(&msk
->receive_queue
, skb
, tmp
) {
1955 u32 offset
= MPTCP_SKB_CB(skb
)->offset
;
1956 u32 data_len
= skb
->len
- offset
;
1957 u32 count
= min_t(size_t, len
- copied
, data_len
);
1960 if (!(flags
& MSG_TRUNC
)) {
1961 err
= skb_copy_datagram_msg(skb
, offset
, msg
, count
);
1962 if (unlikely(err
< 0)) {
1969 if (MPTCP_SKB_CB(skb
)->has_rxtstamp
) {
1970 tcp_update_recv_tstamps(skb
, tss
);
1971 *cmsg_flags
|= MPTCP_CMSG_TS
;
1976 if (count
< data_len
) {
1977 if (!(flags
& MSG_PEEK
)) {
1978 MPTCP_SKB_CB(skb
)->offset
+= count
;
1979 MPTCP_SKB_CB(skb
)->map_seq
+= count
;
1980 msk
->bytes_consumed
+= count
;
1985 if (!(flags
& MSG_PEEK
)) {
1986 /* we will bulk release the skb memory later */
1987 skb
->destructor
= NULL
;
1988 WRITE_ONCE(msk
->rmem_released
, msk
->rmem_released
+ skb
->truesize
);
1989 __skb_unlink(skb
, &msk
->receive_queue
);
1991 msk
->bytes_consumed
+= count
;
1998 mptcp_rcv_space_adjust(msk
, copied
);
2002 /* receive buffer autotuning. See tcp_rcv_space_adjust for more information.
2004 * Only difference: Use highest rtt estimate of the subflows in use.
2006 static void mptcp_rcv_space_adjust(struct mptcp_sock
*msk
, int copied
)
2008 struct mptcp_subflow_context
*subflow
;
2009 struct sock
*sk
= (struct sock
*)msk
;
2010 u8 scaling_ratio
= U8_MAX
;
2011 u32 time
, advmss
= 1;
2014 msk_owned_by_me(msk
);
2019 if (!msk
->rcvspace_init
)
2020 mptcp_rcv_space_init(msk
, msk
->first
);
2022 msk
->rcvq_space
.copied
+= copied
;
2024 mstamp
= div_u64(tcp_clock_ns(), NSEC_PER_USEC
);
2025 time
= tcp_stamp_us_delta(mstamp
, msk
->rcvq_space
.time
);
2027 rtt_us
= msk
->rcvq_space
.rtt_us
;
2028 if (rtt_us
&& time
< (rtt_us
>> 3))
2032 mptcp_for_each_subflow(msk
, subflow
) {
2033 const struct tcp_sock
*tp
;
2037 tp
= tcp_sk(mptcp_subflow_tcp_sock(subflow
));
2039 sf_rtt_us
= READ_ONCE(tp
->rcv_rtt_est
.rtt_us
);
2040 sf_advmss
= READ_ONCE(tp
->advmss
);
2042 rtt_us
= max(sf_rtt_us
, rtt_us
);
2043 advmss
= max(sf_advmss
, advmss
);
2044 scaling_ratio
= min(tp
->scaling_ratio
, scaling_ratio
);
2047 msk
->rcvq_space
.rtt_us
= rtt_us
;
2048 msk
->scaling_ratio
= scaling_ratio
;
2049 if (time
< (rtt_us
>> 3) || rtt_us
== 0)
2052 if (msk
->rcvq_space
.copied
<= msk
->rcvq_space
.space
)
2055 if (READ_ONCE(sock_net(sk
)->ipv4
.sysctl_tcp_moderate_rcvbuf
) &&
2056 !(sk
->sk_userlocks
& SOCK_RCVBUF_LOCK
)) {
2060 rcvwin
= ((u64
)msk
->rcvq_space
.copied
<< 1) + 16 * advmss
;
2062 grow
= rcvwin
* (msk
->rcvq_space
.copied
- msk
->rcvq_space
.space
);
2064 do_div(grow
, msk
->rcvq_space
.space
);
2065 rcvwin
+= (grow
<< 1);
2067 rcvbuf
= min_t(u64
, mptcp_space_from_win(sk
, rcvwin
),
2068 READ_ONCE(sock_net(sk
)->ipv4
.sysctl_tcp_rmem
[2]));
2070 if (rcvbuf
> sk
->sk_rcvbuf
) {
2073 window_clamp
= mptcp_win_from_space(sk
, rcvbuf
);
2074 WRITE_ONCE(sk
->sk_rcvbuf
, rcvbuf
);
2076 /* Make subflows follow along. If we do not do this, we
2077 * get drops at subflow level if skbs can't be moved to
2078 * the mptcp rx queue fast enough (announced rcv_win can
2079 * exceed ssk->sk_rcvbuf).
2081 mptcp_for_each_subflow(msk
, subflow
) {
2085 ssk
= mptcp_subflow_tcp_sock(subflow
);
2086 slow
= lock_sock_fast(ssk
);
2087 WRITE_ONCE(ssk
->sk_rcvbuf
, rcvbuf
);
2088 WRITE_ONCE(tcp_sk(ssk
)->window_clamp
, window_clamp
);
2089 if (tcp_can_send_ack(ssk
))
2090 tcp_cleanup_rbuf(ssk
, 1);
2091 unlock_sock_fast(ssk
, slow
);
2096 msk
->rcvq_space
.space
= msk
->rcvq_space
.copied
;
2098 msk
->rcvq_space
.copied
= 0;
2099 msk
->rcvq_space
.time
= mstamp
;
2102 static void __mptcp_update_rmem(struct sock
*sk
)
2104 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2106 if (!msk
->rmem_released
)
2109 atomic_sub(msk
->rmem_released
, &sk
->sk_rmem_alloc
);
2110 mptcp_rmem_uncharge(sk
, msk
->rmem_released
);
2111 WRITE_ONCE(msk
->rmem_released
, 0);
2114 static void __mptcp_splice_receive_queue(struct sock
*sk
)
2116 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2118 skb_queue_splice_tail_init(&sk
->sk_receive_queue
, &msk
->receive_queue
);
2121 static bool __mptcp_move_skbs(struct mptcp_sock
*msk
)
2123 struct sock
*sk
= (struct sock
*)msk
;
2124 unsigned int moved
= 0;
2128 struct sock
*ssk
= mptcp_subflow_recv_lookup(msk
);
2131 /* we can have data pending in the subflows only if the msk
2132 * receive buffer was full at subflow_data_ready() time,
2133 * that is an unlikely slow path.
2138 slowpath
= lock_sock_fast(ssk
);
2139 mptcp_data_lock(sk
);
2140 __mptcp_update_rmem(sk
);
2141 done
= __mptcp_move_skbs_from_subflow(msk
, ssk
, &moved
);
2142 mptcp_data_unlock(sk
);
2144 if (unlikely(ssk
->sk_err
))
2145 __mptcp_error_report(sk
);
2146 unlock_sock_fast(ssk
, slowpath
);
2149 /* acquire the data lock only if some input data is pending */
2151 if (!RB_EMPTY_ROOT(&msk
->out_of_order_queue
) ||
2152 !skb_queue_empty_lockless(&sk
->sk_receive_queue
)) {
2153 mptcp_data_lock(sk
);
2154 __mptcp_update_rmem(sk
);
2155 ret
|= __mptcp_ofo_queue(msk
);
2156 __mptcp_splice_receive_queue(sk
);
2157 mptcp_data_unlock(sk
);
2160 mptcp_check_data_fin((struct sock
*)msk
);
2161 return !skb_queue_empty(&msk
->receive_queue
);
2164 static unsigned int mptcp_inq_hint(const struct sock
*sk
)
2166 const struct mptcp_sock
*msk
= mptcp_sk(sk
);
2167 const struct sk_buff
*skb
;
2169 skb
= skb_peek(&msk
->receive_queue
);
2171 u64 hint_val
= READ_ONCE(msk
->ack_seq
) - MPTCP_SKB_CB(skb
)->map_seq
;
2173 if (hint_val
>= INT_MAX
)
2176 return (unsigned int)hint_val
;
2179 if (sk
->sk_state
== TCP_CLOSE
|| (sk
->sk_shutdown
& RCV_SHUTDOWN
))
2185 static int mptcp_recvmsg(struct sock
*sk
, struct msghdr
*msg
, size_t len
,
2186 int flags
, int *addr_len
)
2188 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2189 struct scm_timestamping_internal tss
;
2190 int copied
= 0, cmsg_flags
= 0;
2194 /* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */
2195 if (unlikely(flags
& MSG_ERRQUEUE
))
2196 return inet_recv_error(sk
, msg
, len
, addr_len
);
2199 if (unlikely(sk
->sk_state
== TCP_LISTEN
)) {
2204 timeo
= sock_rcvtimeo(sk
, flags
& MSG_DONTWAIT
);
2206 len
= min_t(size_t, len
, INT_MAX
);
2207 target
= sock_rcvlowat(sk
, flags
& MSG_WAITALL
, len
);
2209 if (unlikely(msk
->recvmsg_inq
))
2210 cmsg_flags
= MPTCP_CMSG_INQ
;
2212 while (copied
< len
) {
2213 int err
, bytes_read
;
2215 bytes_read
= __mptcp_recvmsg_mskq(msk
, msg
, len
- copied
, flags
, &tss
, &cmsg_flags
);
2216 if (unlikely(bytes_read
< 0)) {
2218 copied
= bytes_read
;
2222 copied
+= bytes_read
;
2224 if (skb_queue_empty(&msk
->receive_queue
) && __mptcp_move_skbs(msk
))
2227 /* only the MPTCP socket status is relevant here. The exit
2228 * conditions mirror closely tcp_recvmsg()
2230 if (copied
>= target
)
2235 sk
->sk_state
== TCP_CLOSE
||
2236 (sk
->sk_shutdown
& RCV_SHUTDOWN
) ||
2238 signal_pending(current
))
2242 copied
= sock_error(sk
);
2246 if (sk
->sk_shutdown
& RCV_SHUTDOWN
) {
2247 /* race breaker: the shutdown could be after the
2248 * previous receive queue check
2250 if (__mptcp_move_skbs(msk
))
2255 if (sk
->sk_state
== TCP_CLOSE
) {
2265 if (signal_pending(current
)) {
2266 copied
= sock_intr_errno(timeo
);
2271 pr_debug("block timeout %ld\n", timeo
);
2272 mptcp_cleanup_rbuf(msk
, copied
);
2273 err
= sk_wait_data(sk
, &timeo
, NULL
);
2275 err
= copied
? : err
;
2280 mptcp_cleanup_rbuf(msk
, copied
);
2283 if (cmsg_flags
&& copied
>= 0) {
2284 if (cmsg_flags
& MPTCP_CMSG_TS
)
2285 tcp_recv_timestamp(msg
, sk
, &tss
);
2287 if (cmsg_flags
& MPTCP_CMSG_INQ
) {
2288 unsigned int inq
= mptcp_inq_hint(sk
);
2290 put_cmsg(msg
, SOL_TCP
, TCP_CM_INQ
, sizeof(inq
), &inq
);
2294 pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
2295 msk
, skb_queue_empty_lockless(&sk
->sk_receive_queue
),
2296 skb_queue_empty(&msk
->receive_queue
), copied
);
static void mptcp_retransmit_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk = from_timer(icsk, t,
						       icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!sock_owned_by_user(sk)) {
		/* we need a process context to retransmit */
		if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags))
			mptcp_schedule_work(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		__set_bit(MPTCP_RETRANSMIT, &msk->cb_flags);
	}
}

static void mptcp_tout_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);

	mptcp_schedule_work(sk);
}

/* Find an idle subflow. Return NULL if there is unacked data at tcp
 * level.
 *
 * A backup subflow is returned only if that is the only kind available.
 */
struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
{
	struct sock *backup = NULL, *pick = NULL;
	struct mptcp_subflow_context *subflow;
	int min_stale_count = INT_MAX;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (!__mptcp_subflow_active(subflow))
			continue;

		/* still data outstanding at TCP level? skip this */
		if (!tcp_rtx_and_write_queues_empty(ssk)) {
			mptcp_pm_subflow_chk_stale(msk, ssk);
			min_stale_count = min_t(int, min_stale_count, subflow->stale_count);
			continue;
		}

		if (subflow->backup || subflow->request_bkup) {
			if (!backup)
				backup = ssk;
			continue;
		}

		if (!pick)
			pick = ssk;
	}

	if (pick)
		return pick;

	/* use backup only if there is no progress anywhere */
	return min_stale_count > 1 ? backup : NULL;
}

bool __mptcp_retransmit_pending_data(struct sock *sk)
{
	struct mptcp_data_frag *cur, *rtx_head;
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (__mptcp_check_fallback(msk))
		return false;

	/* the closing socket has some data untransmitted and/or unacked:
	 * some data in the mptcp rtx queue has not really xmitted yet.
	 * keep it simple and re-inject the whole mptcp level rtx queue
	 */
	mptcp_data_lock(sk);
	__mptcp_clean_una_wakeup(sk);
	rtx_head = mptcp_rtx_head(sk);
	if (!rtx_head) {
		mptcp_data_unlock(sk);
		return false;
	}

	msk->recovery_snd_nxt = msk->snd_nxt;
	msk->recovery = true;
	mptcp_data_unlock(sk);

	msk->first_pending = rtx_head;

	/* be sure to clear the "sent status" on all re-injected fragments */
	list_for_each_entry(cur, &msk->rtx_queue, list) {
		if (!cur->already_sent)
			break;
		cur->already_sent = 0;
	}

	return true;
}

/* flags for __mptcp_close_ssk() */
#define MPTCP_CF_PUSH		BIT(1)
#define MPTCP_CF_FASTCLOSE	BIT(2)

/* be sure to send a reset only if the caller asked for it, also
 * clean completely the subflow status when the subflow reaches
 * TCP_CLOSE state
 */
static void __mptcp_subflow_disconnect(struct sock *ssk,
				       struct mptcp_subflow_context *subflow,
				       unsigned int flags)
{
	if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    (flags & MPTCP_CF_FASTCLOSE)) {
		/* The MPTCP code never waits on the subflow sockets, TCP-level
		 * disconnect should never fail
		 */
		WARN_ON_ONCE(tcp_disconnect(ssk, 0));
		mptcp_subflow_ctx_reset(subflow);
	} else {
		tcp_shutdown(ssk, SEND_SHUTDOWN);
	}
}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow,
			      unsigned int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool dispose_it, need_push = false;

	/* If the first subflow moved to a close state before accept, e.g. due
	 * to an incoming reset or listener shutdown, the subflow socket is
	 * already deleted by inet_child_forget() and the mptcp socket can't
	 * survive too.
	 */
	if (msk->in_accept_queue && msk->first == ssk &&
	    (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) {
		/* ensure later check in mptcp_worker() will dispose the msk */
		sock_set_flag(sk, SOCK_DEAD);
		mptcp_set_close_tout(sk, tcp_jiffies32 - (mptcp_close_timeout(sk) + 1));
		lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
		mptcp_subflow_drop_ctx(ssk);
		goto out_release;
	}

	dispose_it = msk->free_first || ssk != msk->first;
	if (dispose_it)
		list_del(&subflow->node);

	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);

	if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
		/* be sure to force the tcp_close path
		 * to generate the egress reset
		 */
		ssk->sk_lingertime = 0;
		sock_set_flag(ssk, SOCK_LINGER);
		subflow->send_fastclose = 1;
	}

	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
	if (!dispose_it) {
		__mptcp_subflow_disconnect(ssk, subflow, flags);
		goto out;
	}

	subflow->disposable = 1;

	/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
	 * the ssk has been already destroyed, we just need to release the
	 * reference owned by msk;
	 */
	if (!inet_csk(ssk)->icsk_ulp_ops) {
		WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
		kfree_rcu(subflow, rcu);
	} else {
		/* otherwise tcp will dispose of the ssk and subflow ctx */
		__tcp_close(ssk, 0);

		/* close acquired an extra ref */
	}

out_release:
	__mptcp_subflow_error_report(sk, ssk);

	if (ssk == msk->first)
		WRITE_ONCE(msk->first, NULL);

out:
	__mptcp_sync_sndbuf(sk);
	if (need_push)
		__mptcp_push_pending(sk, 0);

	/* Catch every 'all subflows closed' scenario, including peers silently
	 * closing them, e.g. due to timeout.
	 * For established sockets, allow an additional timeout before closing,
	 * as the protocol can still create more subflows.
	 */
	if (list_is_singular(&msk->conn_list) && msk->first &&
	    inet_sk_state_load(msk->first) == TCP_CLOSE) {
		if (sk->sk_state != TCP_ESTABLISHED ||
		    msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
			mptcp_set_state(sk, TCP_CLOSE);
			mptcp_close_wake_up(sk);
		} else {
			mptcp_start_tout_timer(sk);
		}
	}
}

void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		     struct mptcp_subflow_context *subflow)
{
	/* The first subflow can already be closed and still in the list */
	if (subflow->close_event_done)
		return;

	subflow->close_event_done = true;

	if (sk->sk_state == TCP_ESTABLISHED)
		mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);

	/* subflow aborted before reaching the fully_established status
	 * attempt the creation of the next subflow
	 */
	mptcp_pm_subflow_check_next(mptcp_sk(sk), subflow);

	__mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH);
}

static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
{
	return 0;
}

static void __mptcp_close_subflow(struct sock *sk)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		int ssk_state = inet_sk_state_load(ssk);

		if (ssk_state != TCP_CLOSE &&
		    (ssk_state != TCP_CLOSE_WAIT ||
		     inet_sk_state_load(sk) != TCP_ESTABLISHED))
			continue;

		/* 'subflow_data_ready' will re-sched once rx queue is empty */
		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
			continue;

		mptcp_close_ssk(sk, ssk, subflow);
	}
}

static bool mptcp_close_tout_expired(const struct sock *sk)
{
	if (!inet_csk(sk)->icsk_mtup.probe_timestamp ||
	    sk->sk_state == TCP_CLOSE)
		return false;

	return time_after32(tcp_jiffies32,
			    inet_csk(sk)->icsk_mtup.probe_timestamp + mptcp_close_timeout(sk));
}

static void mptcp_check_fastclose(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct sock *sk = (struct sock *)msk;

	if (likely(!READ_ONCE(msk->rcv_fastclose)))
		return;

	mptcp_token_destroy(msk);

	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
		bool slow;

		slow = lock_sock_fast(tcp_sk);
		if (tcp_sk->sk_state != TCP_CLOSE) {
			mptcp_send_active_reset_reason(tcp_sk);
			tcp_set_state(tcp_sk, TCP_CLOSE);
		}
		unlock_sock_fast(tcp_sk, slow);
	}

	/* Mirror the tcp_reset() error propagation */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
		WRITE_ONCE(sk->sk_err, ECONNREFUSED);
		break;
	case TCP_CLOSE_WAIT:
		WRITE_ONCE(sk->sk_err, EPIPE);
		break;
	default:
		WRITE_ONCE(sk->sk_err, ECONNRESET);
	}

	mptcp_set_state(sk, TCP_CLOSE);
	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
	smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
	set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);

	/* the calling mptcp_worker will properly destroy the socket */
	if (sock_flag(sk, SOCK_DEAD))
		return;

	sk->sk_state_change(sk);
	sk_error_report(sk);
}

static void __mptcp_retrans(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct mptcp_sendmsg_info info = {};
	struct mptcp_data_frag *dfrag;
	struct sock *ssk;
	int ret, err;
	u16 len = 0;

	mptcp_clean_una_wakeup(sk);

	/* first check ssk: need to kick "stale" logic */
	err = mptcp_sched_get_retrans(msk);
	dfrag = mptcp_rtx_head(sk);
	if (!dfrag) {
		if (mptcp_data_fin_enabled(msk)) {
			struct inet_connection_sock *icsk = inet_csk(sk);

			icsk->icsk_retransmits++;
			mptcp_set_datafin_timeout(sk);
			mptcp_send_ack(msk);

			goto reset_timer;
		}

		if (!mptcp_send_head(sk))
			return;

		goto reset_timer;
	}

	if (err)
		goto reset_timer;

	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->scheduled)) {
			u16 copied = 0;

			mptcp_subflow_set_scheduled(subflow, false);

			ssk = mptcp_subflow_tcp_sock(subflow);

			/* limit retransmission to the bytes already sent on some subflows */
			info.sent = 0;
			info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len :
								    dfrag->already_sent;
			while (info.sent < info.limit) {
				ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
				if (ret <= 0)
					break;

				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
				copied += ret;
				info.sent += ret;
			}
			if (copied) {
				len = max(copied, len);
				tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
					 info.size_goal);
				WRITE_ONCE(msk->allow_infinite_fallback, false);
			}
		}
	}

	msk->bytes_retrans += len;
	dfrag->already_sent = max(dfrag->already_sent, len);

reset_timer:
	mptcp_check_and_set_pending(sk);

	if (!mptcp_rtx_timer_pending(sk))
		mptcp_reset_rtx_timer(sk);
}

/* schedule the timeout timer for the relevant event: either close timeout
 * or mp_fail timeout. The close timeout takes precedence on the mp_fail one
 */
void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
{
	struct sock *sk = (struct sock *)msk;
	unsigned long timeout, close_timeout;

	if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
		return;

	close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp -
			tcp_jiffies32 + jiffies + mptcp_close_timeout(sk);

	/* the close timeout takes precedence on the fail one, and here at least one of
	 * them is active
	 */
	timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout;

	sk_reset_timer(sk, &sk->sk_timer, timeout);
}

static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
{
	struct sock *ssk = msk->first;
	bool slow;

	if (!ssk)
		return;

	pr_debug("MP_FAIL doesn't respond, reset the subflow\n");

	slow = lock_sock_fast(ssk);
	mptcp_subflow_reset(ssk);
	WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
	unlock_sock_fast(ssk, slow);
}

static void mptcp_do_fastclose(struct sock *sk)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_set_state(sk, TCP_CLOSE);
	mptcp_for_each_subflow_safe(msk, subflow, tmp)
		__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
				  subflow, MPTCP_CF_FASTCLOSE);
}

static void mptcp_worker(struct work_struct *work)
{
	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
	struct sock *sk = (struct sock *)msk;
	unsigned long fail_tout;
	int state;

	state = sk->sk_state;
	if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN)))
		goto unlock;

	mptcp_check_fastclose(msk);

	mptcp_pm_nl_work(msk);

	mptcp_check_send_data_fin(sk);
	mptcp_check_data_fin_ack(sk);
	mptcp_check_data_fin(sk);

	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
		__mptcp_close_subflow(sk);

	if (mptcp_close_tout_expired(sk)) {
		mptcp_do_fastclose(sk);
		mptcp_close_wake_up(sk);
	}

	if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) {
		__mptcp_destroy_sock(sk);
		goto unlock;
	}

	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
		__mptcp_retrans(sk);

	fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0;
	if (fail_tout && time_after(jiffies, fail_tout))
		mptcp_mp_fail_no_response(msk);

unlock:
	release_sock(sk);
}

static void __mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	INIT_LIST_HEAD(&msk->conn_list);
	INIT_LIST_HEAD(&msk->join_list);
	INIT_LIST_HEAD(&msk->rtx_queue);
	INIT_WORK(&msk->work, mptcp_worker);
	__skb_queue_head_init(&msk->receive_queue);
	msk->out_of_order_queue = RB_ROOT;
	msk->first_pending = NULL;
	WRITE_ONCE(msk->rmem_fwd_alloc, 0);
	WRITE_ONCE(msk->rmem_released, 0);
	msk->timer_ival = TCP_RTO_MIN;
	msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;

	WRITE_ONCE(msk->first, NULL);
	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
	WRITE_ONCE(msk->allow_infinite_fallback, true);
	msk->recovery = false;
	msk->subflow_id = 1;
	msk->last_data_sent = tcp_jiffies32;
	msk->last_data_recv = tcp_jiffies32;
	msk->last_ack_recv = tcp_jiffies32;

	mptcp_pm_data_init(msk);

	/* re-use the csk retrans timer for MPTCP-level retrans */
	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
	timer_setup(&sk->sk_timer, mptcp_tout_timer, 0);
}

static void mptcp_ca_reset(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_assign_congestion_control(sk);
	strscpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name,
		sizeof(mptcp_sk(sk)->ca_name));

	/* no need to keep a reference to the ops, the name will suffice */
	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = NULL;
}

static int mptcp_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	int ret;

	__mptcp_init_sock(sk);

	if (!mptcp_is_enabled(net))
		return -ENOPROTOOPT;

	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
		return -ENOMEM;

	ret = mptcp_init_sched(mptcp_sk(sk),
			       mptcp_sched_find(mptcp_get_scheduler(net)));
	if (ret)
		return ret;

	set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);

	/* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
	 * propagate the correct value
	 */
	mptcp_ca_reset(sk);

	sk_sockets_allocated_inc(sk);
	sk->sk_rcvbuf = READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]);
	sk->sk_sndbuf = READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]);

	return 0;
}

static void __mptcp_clear_xmit(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;

	WRITE_ONCE(msk->first_pending, NULL);
	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
		dfrag_clear(sk, dfrag);
}

void mptcp_cancel_work(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (cancel_work_sync(&msk->work))
		__sock_put(sk);
}

void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
{
	switch (ssk->sk_state) {
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		fallthrough;
	case TCP_SYN_SENT:
		WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK));
		break;
	default:
		if (__mptcp_check_fallback(mptcp_sk(sk))) {
			pr_debug("Fallback\n");
			ssk->sk_shutdown |= how;
			tcp_shutdown(ssk, how);

			/* simulate the data_fin ack reception to let the state
			 * machine move forward
			 */
			WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
			mptcp_schedule_work(sk);
		} else {
			pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
			if (!mptcp_rtx_timer_pending(sk))
				mptcp_reset_rtx_timer(sk);
		}
		break;
	}
}

void mptcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
		break;
	case TCP_CLOSE_WAIT:
		/* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
		 * MPTCP "accepted" sockets will be created later on. So no
		 * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
		 */
		break;
	default:
		if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
			MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
	}

	inet_sk_state_store(sk, state);
}

static const unsigned char new_state[16] = {
	/* current state:     new state:      action:	*/
	[0 /* (Invalid) */]	= TCP_CLOSE,
	[TCP_ESTABLISHED]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_SYN_SENT]		= TCP_CLOSE,
	[TCP_SYN_RECV]		= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_FIN_WAIT1]		= TCP_FIN_WAIT1,
	[TCP_FIN_WAIT2]		= TCP_FIN_WAIT2,
	[TCP_TIME_WAIT]		= TCP_CLOSE,	/* should not happen ! */
	[TCP_CLOSE]		= TCP_CLOSE,
	[TCP_CLOSE_WAIT]	= TCP_LAST_ACK | TCP_ACTION_FIN,
	[TCP_LAST_ACK]		= TCP_LAST_ACK,
	[TCP_LISTEN]		= TCP_CLOSE,
	[TCP_CLOSING]		= TCP_CLOSING,
	[TCP_NEW_SYN_RECV]	= TCP_CLOSE,	/* should not happen ! */
};

static int mptcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	mptcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}

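/* Worked example of how the table above drives mptcp_close_state(): for a
 * socket in TCP_ESTABLISHED, new_state[TCP_ESTABLISHED] is
 * TCP_FIN_WAIT1 | TCP_ACTION_FIN, so the msk moves to TCP_FIN_WAIT1 and the
 * non-zero TCP_ACTION_FIN bit tells the caller (e.g. __mptcp_close() or
 * mptcp_shutdown()) to emit a DATA_FIN via __mptcp_wr_shutdown(). For
 * TCP_FIN_WAIT2 the entry carries no action bit, so the state is left
 * unchanged and no new DATA_FIN is scheduled.
 */
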
static void mptcp_check_send_data_fin(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
		 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
		 msk->snd_nxt, msk->write_seq);

	/* we still need to enqueue subflows or not really shutting down,
	 * skip this
	 */
	if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
	    mptcp_send_head(sk))
		return;

	WRITE_ONCE(msk->snd_nxt, msk->write_seq);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

		mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN);
	}
}

static void __mptcp_wr_shutdown(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
		 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
		 !!mptcp_send_head(sk));

	/* will be ignored by fallback sockets */
	WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
	WRITE_ONCE(msk->snd_data_fin_enable, 1);

	mptcp_check_send_data_fin(sk);
}

static void __mptcp_destroy_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	pr_debug("msk=%p\n", msk);

	mptcp_stop_rtx_timer(sk);
	sk_stop_timer(sk, &sk->sk_timer);
	mptcp_release_sched(msk);

	sk->sk_prot->destroy(sk);

	WARN_ON_ONCE(READ_ONCE(msk->rmem_fwd_alloc));
	WARN_ON_ONCE(msk->rmem_released);
	sk_stream_kill_queues(sk);
	xfrm_sk_free_policy(sk);
}

void __mptcp_unaccepted_force_close(struct sock *sk)
{
	sock_set_flag(sk, SOCK_DEAD);
	mptcp_do_fastclose(sk);
	__mptcp_destroy_sock(sk);
}

static __poll_t mptcp_check_readable(struct sock *sk)
{
	return mptcp_epollin_ready(sk) ? EPOLLIN | EPOLLRDNORM : 0;
}

static void mptcp_check_listen_stop(struct sock *sk)
{
	struct sock *ssk;

	if (inet_sk_state_load(sk) != TCP_LISTEN)
		return;

	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	ssk = mptcp_sk(sk)->first;
	if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN))
		return;

	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
	tcp_set_state(ssk, TCP_CLOSE);
	mptcp_subflow_queue_clean(sk, ssk);
	inet_csk_listen_stop(ssk);
	mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
	release_sock(ssk);
}

bool __mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool do_cancel_work = false;
	int subflows_alive = 0;

	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
		mptcp_check_listen_stop(sk);
		mptcp_set_state(sk, TCP_CLOSE);
		goto cleanup;
	}

	if (mptcp_data_avail(msk) || timeout < 0) {
		/* If the msk has read data, or the caller explicitly asks for it,
		 * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose
		 */
		mptcp_do_fastclose(sk);
		timeout = 0;
	} else if (mptcp_close_state(sk)) {
		__mptcp_wr_shutdown(sk);
	}

	sk_stream_wait_close(sk, timeout);

cleanup:
	/* orphan all the subflows */
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast_nested(ssk);

		subflows_alive += ssk->sk_state != TCP_CLOSE;

		/* since the close timeout takes precedence on the fail one,
		 * cancel the latter
		 */
		if (ssk == msk->first)
			subflow->fail_tout = 0;

		/* detach from the parent socket, but allow data_ready to
		 * push incoming data into the mptcp stack, to properly ack it
		 */
		ssk->sk_socket = NULL;
		unlock_sock_fast(ssk, slow);
	}

	/* all the subflows are closed, only timeout can change the msk
	 * state, let's not keep resources busy for no reasons
	 */
	if (subflows_alive == 0)
		mptcp_set_state(sk, TCP_CLOSE);

	pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
	mptcp_pm_connection_closed(msk);

	if (sk->sk_state == TCP_CLOSE) {
		__mptcp_destroy_sock(sk);
		do_cancel_work = true;
	} else {
		mptcp_start_tout_timer(sk);
	}

	return do_cancel_work;
}

static void mptcp_close(struct sock *sk, long timeout)
{
	bool do_cancel_work;

	do_cancel_work = __mptcp_close(sk, timeout);
	if (do_cancel_work)
		mptcp_cancel_work(sk);
}

static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}

static int mptcp_disconnect(struct sock *sk, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* We are on the fastopen error path. We can't call straight into the
	 * subflows cleanup code due to lock nesting (we are already under
	 * msk->firstsocket lock).
	 */
	if (msk->fastopening)
		return -EBUSY;

	mptcp_check_listen_stop(sk);
	mptcp_set_state(sk, TCP_CLOSE);

	mptcp_stop_rtx_timer(sk);
	mptcp_stop_tout_timer(sk);

	mptcp_pm_connection_closed(msk);

	/* msk->subflow is still intact, the following will not free the first
	 * subflow
	 */
	mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
	WRITE_ONCE(msk->flags, 0);
	msk->recovery = false;
	WRITE_ONCE(msk->can_ack, false);
	WRITE_ONCE(msk->fully_established, false);
	WRITE_ONCE(msk->rcv_data_fin, false);
	WRITE_ONCE(msk->snd_data_fin_enable, false);
	WRITE_ONCE(msk->rcv_fastclose, false);
	WRITE_ONCE(msk->use_64bit_ack, false);
	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
	mptcp_pm_data_reset(msk);

	msk->bytes_consumed = 0;
	msk->bytes_acked = 0;
	msk->bytes_received = 0;
	msk->bytes_sent = 0;
	msk->bytes_retrans = 0;
	msk->rcvspace_init = 0;

	WRITE_ONCE(sk->sk_shutdown, 0);
	sk_error_report(sk);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{
	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}

static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk)
{
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct ipv6_pinfo *newnp;

	newnp = inet6_sk(newsk);

	rcu_read_lock();
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		if (!opt)
			net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__);
	}
	RCU_INIT_POINTER(newnp->opt, opt);
	rcu_read_unlock();
}
#endif

static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk)
{
	struct ip_options_rcu *inet_opt, *newopt = NULL;
	const struct inet_sock *inet = inet_sk(sk);
	struct inet_sock *newinet;

	newinet = inet_sk(newsk);

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt) {
		newopt = sock_kmalloc(newsk, sizeof(*inet_opt) +
				      inet_opt->opt.optlen, GFP_ATOMIC);
		if (newopt)
			memcpy(newopt, inet_opt, sizeof(*inet_opt) +
			       inet_opt->opt.optlen);
		else
			net_warn_ratelimited("%s: Failed to copy ip options\n", __func__);
	}
	RCU_INIT_POINTER(newinet->inet_opt, newopt);
	rcu_read_unlock();
}

struct sock *mptcp_sk_clone_init(const struct sock *sk,
				 const struct mptcp_options_received *mp_opt,
				 struct sock *ssk,
				 struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;

	if (!nsk)
		return NULL;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (nsk->sk_family == AF_INET6)
		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif

	__mptcp_init_sock(nsk);

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (nsk->sk_family == AF_INET6)
		mptcp_copy_ip6_options(nsk, sk);
	else
#endif
		mptcp_copy_ip_options(nsk, sk);

	msk = mptcp_sk(nsk);
	WRITE_ONCE(msk->local_key, subflow_req->local_key);
	WRITE_ONCE(msk->token, subflow_req->token);
	msk->in_accept_queue = 1;
	WRITE_ONCE(msk->fully_established, false);
	if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
		WRITE_ONCE(msk->csum_enabled, true);

	WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1);
	WRITE_ONCE(msk->snd_nxt, msk->write_seq);
	WRITE_ONCE(msk->snd_una, msk->write_seq);
	WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
	msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
	mptcp_init_sched(msk, mptcp_sk(sk)->sched);

	/* passive msk is created after the first/MPC subflow */
	msk->subflow_id = 2;

	sock_reset_flag(nsk, SOCK_RCU_FREE);
	security_inet_csk_clone(nsk, req);

	/* this can't race with mptcp_close(), as the msk is
	 * not yet exposed to user-space
	 */
	mptcp_set_state(nsk, TCP_ESTABLISHED);

	/* The msk maintains a ref to each subflow in the connections list */
	WRITE_ONCE(msk->first, ssk);
	subflow = mptcp_subflow_ctx(ssk);
	list_add(&subflow->node, &msk->conn_list);

	/* new mpc subflow takes ownership of the newly
	 * created mptcp socket
	 */
	mptcp_token_accept(subflow_req, msk);

	/* set msk addresses early to ensure mptcp_pm_get_local_id()
	 * uses the correct data
	 */
	mptcp_copy_inaddrs(nsk, ssk);
	__mptcp_propagate_sndbuf(nsk, ssk);

	mptcp_rcv_space_init(msk, ssk);

	if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK)
		__mptcp_subflow_fully_established(msk, subflow, mp_opt);
	bh_unlock_sock(nsk);

	/* note: the newly allocated socket refcount is 2 now */
	return nsk;
}

void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
{
	const struct tcp_sock *tp = tcp_sk(ssk);

	msk->rcvspace_init = 1;
	msk->rcvq_space.copied = 0;
	msk->rcvq_space.rtt_us = 0;

	msk->rcvq_space.time = tp->tcp_mstamp;

	/* initial rcv_space offering made to peer */
	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
				      TCP_INIT_CWND * tp->advmss);
	if (msk->rcvq_space.space == 0)
		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
}

void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct sock *sk = (struct sock *)msk;

	__mptcp_clear_xmit(sk);

	/* join list will be eventually flushed (with rst) at sock lock release time */
	mptcp_for_each_subflow_safe(msk, subflow, tmp)
		__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);

	/* move to sk_receive_queue, sk_stream_kill_queues will purge it */
	mptcp_data_lock(sk);
	skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_receive_queue);
	skb_rbtree_purge(&msk->out_of_order_queue);
	mptcp_data_unlock(sk);

	/* move all the rx fwd alloc into the sk_mem_reclaim_final in
	 * inet_sock_destruct(), which will dispose of it
	 */
	sk_forward_alloc_add(sk, msk->rmem_fwd_alloc);
	WRITE_ONCE(msk->rmem_fwd_alloc, 0);
	mptcp_token_destroy(msk);
	mptcp_pm_free_anno_list(msk);
	mptcp_free_local_addr_list(msk);
}

static void mptcp_destroy(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* allow the following to close even the initial subflow */
	msk->free_first = 1;
	mptcp_destroy_common(msk, 0);
	sk_sockets_allocated_dec(sk);
}

void __mptcp_data_acked(struct sock *sk)
{
	if (!sock_owned_by_user(sk))
		__mptcp_clean_una(sk);
	else
		__set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);
}

void __mptcp_check_push(struct sock *sk, struct sock *ssk)
{
	if (!mptcp_send_head(sk))
		return;

	if (!sock_owned_by_user(sk))
		__mptcp_subflow_push_pending(sk, ssk, false);
	else
		__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
}

#define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
				      BIT(MPTCP_RETRANSMIT) | \
				      BIT(MPTCP_FLUSH_JOIN_LIST))

/* processes deferred events and flush wmem */
static void mptcp_release_cb(struct sock *sk)
	__must_hold(&sk->sk_lock.slock)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	for (;;) {
		unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
		struct list_head join_list;

		if (!flags)
			break;

		INIT_LIST_HEAD(&join_list);
		list_splice_init(&msk->join_list, &join_list);

		/* the following actions acquire the subflow socket lock
		 *
		 * 1) can't be invoked in atomic scope
		 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
		 *    datapath acquires the msk socket spinlock while holding
		 *    the subflow socket lock
		 */
		msk->cb_flags &= ~flags;
		spin_unlock_bh(&sk->sk_lock.slock);

		if (flags & BIT(MPTCP_FLUSH_JOIN_LIST))
			__mptcp_flush_join_list(sk, &join_list);
		if (flags & BIT(MPTCP_PUSH_PENDING))
			__mptcp_push_pending(sk, 0);
		if (flags & BIT(MPTCP_RETRANSMIT))
			__mptcp_retrans(sk);

		spin_lock_bh(&sk->sk_lock.slock);
	}

	if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
		__mptcp_clean_una_wakeup(sk);
	if (unlikely(msk->cb_flags)) {
		/* be sure to sync the msk state before taking actions
		 * depending on sk_state (MPTCP_ERROR_REPORT)
		 * On sk release avoid actions depending on the first subflow
		 */
		if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first)
			__mptcp_sync_state(sk, msk->pending_state);
		if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
			__mptcp_error_report(sk);
		if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags))
			__mptcp_sync_sndbuf(sk);
	}

	__mptcp_update_rmem(sk);
}

/* MP_JOIN client subflow must wait for 4th ack before sending any data:
 * TCP can't schedule delack timer before the subflow is fully established.
 * MPTCP uses the delack timer to do 3rd ack retransmissions
 */
static void schedule_3rdack_retransmission(struct sock *ssk)
{
	struct inet_connection_sock *icsk = inet_csk(ssk);
	struct tcp_sock *tp = tcp_sk(ssk);
	unsigned long timeout;

	if (READ_ONCE(mptcp_subflow_ctx(ssk)->fully_established))
		return;

	/* reschedule with a timeout above RTT, as we must look only for drop */
	if (tp->srtt_us)
		timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
	else
		timeout = TCP_TIMEOUT_INIT;
	timeout += jiffies;

	WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
	smp_store_release(&icsk->icsk_ack.pending,
			  icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER);
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
}

void mptcp_subflow_process_delegated(struct sock *ssk, long status)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	if (status & BIT(MPTCP_DELEGATE_SEND)) {
		mptcp_data_lock(sk);
		if (!sock_owned_by_user(sk))
			__mptcp_subflow_push_pending(sk, ssk, true);
		else
			__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
		mptcp_data_unlock(sk);
	}
	if (status & BIT(MPTCP_DELEGATE_SNDBUF)) {
		mptcp_data_lock(sk);
		if (!sock_owned_by_user(sk))
			__mptcp_sync_sndbuf(sk);
		else
			__set_bit(MPTCP_SYNC_SNDBUF, &mptcp_sk(sk)->cb_flags);
		mptcp_data_unlock(sk);
	}
	if (status & BIT(MPTCP_DELEGATE_ACK))
		schedule_3rdack_retransmission(ssk);
}

static int mptcp_hash(struct sock *sk)
{
	/* should never be called,
	 * we hash the TCP subflows not the MPTCP socket
	 */
	WARN_ON_ONCE(1);
	return 0;
}

static void mptcp_unhash(struct sock *sk)
{
	/* called from sk_common_release(), but nothing to do here */
}

static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	pr_debug("msk=%p, ssk=%p\n", msk, msk->first);
	if (WARN_ON_ONCE(!msk->first))
		return -EINVAL;

	return inet_csk_get_port(msk->first, snum);
}

void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;

	subflow = mptcp_subflow_ctx(ssk);
	sk = subflow->conn;
	msk = mptcp_sk(sk);

	pr_debug("msk=%p, token=%u\n", sk, subflow->token);

	subflow->map_seq = subflow->iasn;
	subflow->map_subflow_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can access/race
	 * accessing the field below
	 */
	WRITE_ONCE(msk->local_key, subflow->local_key);

	mptcp_pm_new_connection(msk, ssk, 0);
}

void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}

bool mptcp_finish_join(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct sock *parent = (void *)msk;
	bool ret = true;

	pr_debug("msk=%p, subflow=%p\n", msk, subflow);

	/* mptcp socket already closing? */
	if (!mptcp_is_fully_established(parent)) {
		subflow->reset_reason = MPTCP_RST_EMPTCP;
		return false;
	}

	/* active subflow, already present inside the conn_list */
	if (!list_empty(&subflow->node)) {
		mptcp_subflow_joined(msk, ssk);
		mptcp_propagate_sndbuf(parent, ssk);
		return true;
	}

	if (!mptcp_pm_allow_new_subflow(msk))
		goto err_prohibited;

	/* If we can't acquire msk socket lock here, let the release callback
	 * handle it
	 */
	mptcp_data_lock(parent);
	if (!sock_owned_by_user(parent)) {
		ret = __mptcp_finish_join(msk, ssk);
		if (ret)
			list_add_tail(&subflow->node, &msk->conn_list);
	} else {
		list_add_tail(&subflow->node, &msk->join_list);
		__set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);
	}
	mptcp_data_unlock(parent);

	if (!ret) {
err_prohibited:
		subflow->reset_reason = MPTCP_RST_EPROHIBIT;
		return false;
	}

	return true;
}

static void mptcp_shutdown(struct sock *sk, int how)
{
	pr_debug("sk=%p, how=%d\n", sk, how);

	if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
		__mptcp_wr_shutdown(sk);
}

static int mptcp_forward_alloc_get(const struct sock *sk)
{
	return READ_ONCE(sk->sk_forward_alloc) +
	       READ_ONCE(mptcp_sk(sk)->rmem_fwd_alloc);
}

static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
{
	const struct sock *sk = (void *)msk;
	u64 delta;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
		return 0;

	delta = msk->write_seq - v;
	if (__mptcp_check_fallback(msk) && msk->first) {
		struct tcp_sock *tp = tcp_sk(msk->first);

		/* the first subflow is disconnected after close - see
		 * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq,
		 * so ignore that status, too.
		 */
		if (!((1 << msk->first->sk_state) &
		      (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)))
			delta += READ_ONCE(tp->write_seq) - tp->snd_una;
	}
	if (delta > INT_MAX)
		delta = INT_MAX;

	return (int)delta;
}

static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		__mptcp_move_skbs(msk);
		*karg = mptcp_inq_hint(sk);
		break;
	case SIOCOUTQ:
		slow = lock_sock_fast(sk);
		*karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una));
		unlock_sock_fast(sk, slow);
		break;
	case SIOCOUTQNSD:
		slow = lock_sock_fast(sk);
		*karg = mptcp_ioctl_outq(msk, msk->snd_nxt);
		unlock_sock_fast(sk, slow);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

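/* Illustrative userspace sketch, not part of the kernel sources: how the
 * ioctls handled above look from an application. SIOCINQ reports the
 * MPTCP-level bytes ready to read (mptcp_inq_hint()), SIOCOUTQ the bytes not
 * yet acked and SIOCOUTQNSD the bytes not yet sent, mirroring plain TCP.
 * Assumes a connected MPTCP socket 'fd'; error handling omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int inq = 0, outq = 0, notsent = 0;
 *
 *	ioctl(fd, SIOCINQ, &inq);		// a.k.a. FIONREAD
 *	ioctl(fd, SIOCOUTQ, &outq);		// a.k.a. TIOCOUTQ
 *	ioctl(fd, SIOCOUTQNSD, &notsent);
 */
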
static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
	int err = -EINVAL;
	struct sock *ssk;

	ssk = __mptcp_nmpc_sk(msk);
	if (IS_ERR(ssk))
		return PTR_ERR(ssk);

	mptcp_set_state(sk, TCP_SYN_SENT);
	subflow = mptcp_subflow_ctx(ssk);
#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info))
		mptcp_subflow_early_fallback(msk, subflow);
#endif
	if (subflow->request_mptcp) {
		if (mptcp_active_should_disable(sk)) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEACTIVEDISABLED);
			mptcp_subflow_early_fallback(msk, subflow);
		} else if (mptcp_token_new_connect(ssk) < 0) {
			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
			mptcp_subflow_early_fallback(msk, subflow);
		}
	}

	WRITE_ONCE(msk->write_seq, subflow->idsn);
	WRITE_ONCE(msk->snd_nxt, subflow->idsn);
	WRITE_ONCE(msk->snd_una, subflow->idsn);
	if (likely(!__mptcp_check_fallback(msk)))
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);

	/* if reaching here via the fastopen/sendmsg path, the caller already
	 * acquired the subflow socket lock, too.
	 */
	if (!msk->fastopening)
		lock_sock(ssk);

	/* the following mirrors closely a very small chunk of code from
	 * __inet_stream_connect()
	 */
	if (ssk->sk_state != TCP_CLOSE)
		goto out;

	if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) {
		err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len);
		if (err)
			goto out;
	}

	err = ssk->sk_prot->connect(ssk, uaddr, addr_len);
	if (err < 0)
		goto out;

	inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk));

out:
	if (!msk->fastopening)
		release_sock(ssk);

	/* on successful connect, the msk state will be moved to established by
	 * subflow_finish_connect()
	 */
	if (unlikely(err)) {
		/* avoid leaving a dangling token in an unconnected socket */
		mptcp_token_destroy(msk);
		mptcp_set_state(sk, TCP_CLOSE);
		return err;
	}

	mptcp_copy_inaddrs(sk, ssk);
	return 0;
}

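/* Illustrative userspace sketch, not part of the kernel sources: the connect
 * path above is reached through an ordinary stream socket created with
 * IPPROTO_MPTCP; everything else is the usual BSD socket sequence. On older
 * libcs IPPROTO_MPTCP may need to be defined manually (protocol number 262).
 * Minimal IPv4 client, error handling omitted:
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int mptcp_client(const char *addr, unsigned short port)
 *	{
 *		struct sockaddr_in sin = { .sin_family = AF_INET };
 *		int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *
 *		sin.sin_port = htons(port);
 *		inet_pton(AF_INET, addr, &sin.sin_addr);
 *		if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		// additional subflows are created by the in-kernel path manager
 *		return fd;
 *	}
 */
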
static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.connect	= mptcp_connect,
	.disconnect	= mptcp_disconnect,
	.close		= mptcp_close,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= mptcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.ioctl		= mptcp_ioctl,
	.recvmsg	= mptcp_recvmsg,
	.release_cb	= mptcp_release_cb,
	.hash		= mptcp_hash,
	.unhash		= mptcp_unhash,
	.get_port	= mptcp_get_port,
	.forward_alloc_get	= mptcp_forward_alloc_get,
	.stream_memory_free	= mptcp_stream_memory_free,
	.sockets_allocated	= &mptcp_sockets_allocated,

	.memory_allocated	= &tcp_memory_allocated,
	.per_cpu_fw_alloc	= &tcp_memory_per_cpu_fw_alloc,

	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.sysctl_mem	= sysctl_tcp_mem,
	.obj_size	= sizeof(struct mptcp_sock),
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
	.no_autobind	= true,
};

static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct sock *ssk, *sk = sock->sk;
	int err = -EINVAL;

	lock_sock(sk);
	ssk = __mptcp_nmpc_sk(msk);
	if (IS_ERR(ssk)) {
		err = PTR_ERR(ssk);
		goto unlock;
	}

	if (sk->sk_family == AF_INET)
		err = inet_bind_sk(ssk, uaddr, addr_len);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (sk->sk_family == AF_INET6)
		err = inet6_bind_sk(ssk, uaddr, addr_len);
#endif
	if (!err)
		mptcp_copy_inaddrs(sk, ssk);

unlock:
	release_sock(sk);
	return err;
}

static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sock *ssk;
	int err;

	pr_debug("msk=%p\n", msk);

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
		goto unlock;

	ssk = __mptcp_nmpc_sk(msk);
	if (IS_ERR(ssk)) {
		err = PTR_ERR(ssk);
		goto unlock;
	}

	mptcp_set_state(sk, TCP_LISTEN);
	sock_set_flag(sk, SOCK_RCU_FREE);

	err = __inet_listen_sk(ssk, backlog);
	mptcp_set_state(sk, inet_sk_state_load(ssk));

	if (!err) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		mptcp_copy_inaddrs(sk, ssk);
		mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
	}

unlock:
	release_sock(sk);
	return err;
}

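/* Illustrative userspace sketch, not part of the kernel sources: the matching
 * passive side for mptcp_listen() above and mptcp_stream_accept() below is
 * again the plain sockets API with IPPROTO_MPTCP. Accepted sockets are MPTCP
 * sockets unless the peer was not MP_CAPABLE, in which case the accept path
 * transparently hands the socket over to TCP ops. Minimal IPv4 server, error
 * handling omitted:
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int mptcp_server(unsigned short port)
 *	{
 *		struct sockaddr_in sin = {
 *			.sin_family = AF_INET,
 *			.sin_addr.s_addr = htonl(INADDR_ANY),
 *		};
 *		int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *
 *		sin.sin_port = htons(port);
 *		bind(fd, (struct sockaddr *)&sin, sizeof(sin));
 *		listen(fd, 128);
 *		return accept(fd, NULL, NULL);
 *	}
 */
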
static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       struct proto_accept_arg *arg)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct sock *ssk, *newsk;

	pr_debug("msk=%p\n", msk);

	/* Buggy applications can call accept on socket states other than LISTEN
	 * but no need to allocate the first subflow just to error out.
	 */
	ssk = READ_ONCE(msk->first);
	if (!ssk)
		return -EINVAL;

	pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk));
	newsk = inet_csk_accept(ssk, arg);
	if (!newsk)
		return arg->err;

	pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk));
	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;

		subflow = mptcp_subflow_ctx(newsk);
		new_mptcp_sock = subflow->conn;

		/* is_mptcp should be false if subflow->conn is missing, see
		 * subflow_syn_recv_sock()
		 */
		if (WARN_ON_ONCE(!new_mptcp_sock)) {
			tcp_sk(newsk)->is_mptcp = 0;
			goto tcpfallback;
		}

		newsk = new_mptcp_sock;
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK);

		newsk->sk_kern_sock = arg->kern;
		__inet_accept(sock, newsock, newsk);

		set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
		msk = mptcp_sk(newsk);
		msk->in_accept_queue = 0;

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		mptcp_for_each_subflow(msk, subflow) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}

		/* Do late cleanup for the first subflow as necessary. Also
		 * deal with bad peers not doing a complete shutdown.
		 */
		if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
			__mptcp_close_ssk(newsk, msk->first,
					  mptcp_subflow_ctx(msk->first), 0);
			if (unlikely(list_is_singular(&msk->conn_list)))
				mptcp_set_state(newsk, TCP_CLOSE);
		}
	} else {
tcpfallback:
		newsk->sk_kern_sock = arg->kern;
		__inet_accept(sock, newsock, newsk);
		/* we are being invoked after accepting a non-mp-capable
		 * flow: sk is a tcp_sk, not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		WRITE_ONCE(newsock->sk->sk_socket->ops,
			   mptcp_fallback_tcp_ops(newsock->sk));
	}
	release_sock(newsk);

	return 0;
}

static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;

	if (__mptcp_stream_is_writeable(sk, 1))
		return EPOLLOUT | EPOLLWRNORM;

	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
	smp_mb__after_atomic(); /* NOSPACE is changed by mptcp_write_space() */
	if (__mptcp_stream_is_writeable(sk, 1))
		return EPOLLOUT | EPOLLWRNORM;

	return 0;
}

static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	__poll_t mask = 0;
	u8 shutdown;
	int state;

	msk = mptcp_sk(sk);
	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
	if (state == TCP_LISTEN) {
		struct sock *ssk = READ_ONCE(msk->first);

		if (WARN_ON_ONCE(!ssk))
			return 0;

		return inet_csk_listen_poll(ssk);
	}

	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
		mask |= mptcp_check_readable(sk);
		if (shutdown & SEND_SHUTDOWN)
			mask |= EPOLLOUT | EPOLLWRNORM;
		else
			mask |= mptcp_check_writeable(msk);
	} else if (state == TCP_SYN_SENT &&
		   inet_test_bit(DEFER_CONNECT, sk)) {
		/* cf tcp_poll() note about TFO */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}

	/* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
	smp_rmb();
	if (READ_ONCE(sk->sk_err))
		mask |= EPOLLERR;

	return mask;
}

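/* Illustrative userspace sketch, not part of the kernel sources: the mask
 * computed above is consumed through the usual poll()/epoll interfaces,
 * exactly as for TCP. Minimal example waiting on a connected MPTCP socket
 * 'fd', error handling omitted:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, 1000) > 0) {
 *		if (pfd.revents & POLLIN)
 *			;	// MPTCP-level data is queued, recv() will not block
 *		if (pfd.revents & POLLOUT)
 *			;	// the MPTCP-level send buffer has room
 *	}
 */
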
static const struct proto_ops mptcp_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = mptcp_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.set_rcvlowat	   = mptcp_set_rcvlowat,
};

static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

static int mptcp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mptcp_delegated_action *delegated;
	struct mptcp_subflow_context *subflow;
	int work_done = 0;

	delegated = container_of(napi, struct mptcp_delegated_action, napi);
	while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		bh_lock_sock_nested(ssk);
		if (!sock_owned_by_user(ssk)) {
			mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0));
		} else {
			/* tcp_release_cb_override already processed
			 * the action or will do at next release_sock().
			 * In both cases we must dequeue the subflow here - on the same
			 * CPU that scheduled it.
			 */
			clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status);
		}
		bh_unlock_sock(ssk);

		if (++work_done == budget)
			return budget;
	}

	/* always provide a 0 'work_done' argument, so that napi_complete_done
	 * will not try accessing the NULL napi->dev ptr
	 */
	napi_complete_done(napi, 0);
	return work_done;
}

void __init mptcp_proto_init(void)
{
	struct mptcp_delegated_action *delegated;
	int cpu;

	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
		panic("Failed to allocate MPTCP pcpu counter\n");

	init_dummy_netdev(&mptcp_napi_dev);
	for_each_possible_cpu(cpu) {
		delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
		INIT_LIST_HEAD(&delegated->head);
		netif_napi_add_tx(&mptcp_napi_dev, &delegated->napi,
				  mptcp_napi_poll);
		napi_enable(&delegated->napi);
	}

	mptcp_subflow_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);

	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = mptcp_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet6_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet6_sendmsg,
	.recvmsg	   = inet6_recvmsg,
	.mmap		   = sock_no_mmap,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet6_compat_ioctl,
#endif
	.set_rcvlowat	   = mptcp_set_rcvlowat,
};

static struct proto mptcp_v6_prot;

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int __init mptcp_proto_v6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strscpy(mptcp_v6_prot.name, "MPTCPv6", sizeof(mptcp_v6_prot.name));
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
	mptcp_v6_prot.ipv6_pinfo_offset = offsetof(struct mptcp6_sock, np);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif