#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>

#include <net/genetlink.h>
int sysctl_tcp_nometrics_save __read_mostly;
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
                                                   const struct inetpeer_addr *daddr,
                                                   struct net *net, unsigned int hash);
struct tcp_fastopen_metrics {
        u16             mss;
        u16             syn_loss:10;    /* Recurring Fast Open SYN losses */
        unsigned long   last_syn_loss;  /* Last Fast Open SYN loss */
        struct tcp_fastopen_cookie      cookie;
};
struct tcp_metrics_block {
        struct tcp_metrics_block __rcu  *tcpm_next;
        struct inetpeer_addr            tcpm_saddr;
        struct inetpeer_addr            tcpm_daddr;
        unsigned long                   tcpm_stamp;
        u32                             tcpm_ts;
        u32                             tcpm_ts_stamp;
        u32                             tcpm_lock;
        u32                             tcpm_vals[TCP_METRIC_MAX + 1];
        struct tcp_fastopen_metrics     tcpm_fastopen;

        struct rcu_head                 rcu_head;
};
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
{
        return tm->tcpm_lock & (1 << idx);
}
static u32 tcp_metric_get(struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
{
        return tm->tcpm_vals[idx];
}
static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
                                  enum tcp_metric_index idx)
{
        return msecs_to_jiffies(tm->tcpm_vals[idx]);
}
static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
{
        tm->tcpm_vals[idx] = val;
}
static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
                                 enum tcp_metric_index idx,
                                 u32 val)
{
        tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}
static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
{
        const struct in6_addr *a6, *b6;

        if (a->family != b->family)
                return false;
        if (a->family == AF_INET)
                return a->addr.a4 == b->addr.a4;

        a6 = (const struct in6_addr *) &a->addr.a6[0];
        b6 = (const struct in6_addr *) &b->addr.a6[0];

        return ipv6_addr_equal(a6, b6);
}
struct tcpm_hash_bucket {
        struct tcp_metrics_block __rcu  *chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);
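
/* The metrics cache is a per-netns hash table of tcpm_hash_bucket
 * chains.  Readers walk a chain under rcu_read_lock() only; writers
 * (entry creation, deletion, flush) serialize on tcp_metrics_lock and
 * publish new entries with rcu_assign_pointer().
 */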
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
                          bool fastopen_clear)
{
        u32 val;

        tm->tcpm_stamp = jiffies;

        val = 0;
        if (dst_metric_locked(dst, RTAX_RTT))
                val |= 1 << TCP_METRIC_RTT;
        if (dst_metric_locked(dst, RTAX_RTTVAR))
                val |= 1 << TCP_METRIC_RTTVAR;
        if (dst_metric_locked(dst, RTAX_SSTHRESH))
                val |= 1 << TCP_METRIC_SSTHRESH;
        if (dst_metric_locked(dst, RTAX_CWND))
                val |= 1 << TCP_METRIC_CWND;
        if (dst_metric_locked(dst, RTAX_REORDERING))
                val |= 1 << TCP_METRIC_REORDERING;
        tm->tcpm_lock = val;

        tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
        tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
        tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
        tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
        tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
        tm->tcpm_ts = 0;
        tm->tcpm_ts_stamp = 0;
        if (fastopen_clear) {
                tm->tcpm_fastopen.mss = 0;
                tm->tcpm_fastopen.syn_loss = 0;
                tm->tcpm_fastopen.cookie.len = 0;
        }
}
#define TCP_METRICS_TIMEOUT     (60 * 60 * HZ)
static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
                tcpm_suck_dst(tm, dst, false);
}
#define TCP_METRICS_RECLAIM_DEPTH       5
#define TCP_METRICS_RECLAIM_PTR         (struct tcp_metrics_block *) 0x1UL
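
/* A lookup that walks more than TCP_METRICS_RECLAIM_DEPTH entries
 * without a match returns the TCP_METRICS_RECLAIM_PTR sentinel rather
 * than NULL; tcpm_new() then recycles the oldest entry of that chain
 * instead of growing it further.
 */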
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *saddr,
                                          struct inetpeer_addr *daddr,
                                          unsigned int hash)
{
        struct tcp_metrics_block *tm;
        struct net *net;
        bool reclaim = false;

        spin_lock_bh(&tcp_metrics_lock);
        net = dev_net(dst->dev);

        /* While waiting for the spin-lock the cache might have been populated
         * with this entry and so we have to check again.
         */
        tm = __tcp_get_metrics(saddr, daddr, net, hash);
        if (tm == TCP_METRICS_RECLAIM_PTR) {
                reclaim = true;
                tm = NULL;
        }
        if (tm) {
                tcpm_check_stamp(tm, dst);
                goto out_unlock;
        }

        if (unlikely(reclaim)) {
                struct tcp_metrics_block *oldest;

                oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
                for (tm = rcu_dereference(oldest->tcpm_next); tm;
                     tm = rcu_dereference(tm->tcpm_next)) {
                        if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
                                oldest = tm;
                }
                tm = oldest;
        } else {
                tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
                if (!tm)
                        goto out_unlock;
        }
        tm->tcpm_saddr = *saddr;
        tm->tcpm_daddr = *daddr;

        tcpm_suck_dst(tm, dst, true);

        if (likely(!reclaim)) {
                tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
                rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
        }

out_unlock:
        spin_unlock_bh(&tcp_metrics_lock);
        return tm;
}
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
        if (tm)
                return tm;
        if (depth > TCP_METRICS_RECLAIM_DEPTH)
                return TCP_METRICS_RECLAIM_PTR;
        return NULL;
}
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
                                                   const struct inetpeer_addr *daddr,
                                                   struct net *net, unsigned int hash)
{
        struct tcp_metrics_block *tm;
        int depth = 0;

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, saddr) &&
                    addr_same(&tm->tcpm_daddr, daddr))
                        break;
                depth++;
        }
        return tcp_get_encode(tm, depth);
}
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                                                       struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net;

        saddr.family = req->rsk_ops->family;
        daddr.family = req->rsk_ops->family;
        switch (daddr.family) {
        case AF_INET:
                saddr.addr.a4 = inet_rsk(req)->ir_loc_addr;
                daddr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
                hash = (__force unsigned int) daddr.addr.a4;
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                *(struct in6_addr *)saddr.addr.a6 = inet_rsk(req)->ir_v6_loc_addr;
                *(struct in6_addr *)daddr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
                hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
                break;
#endif
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, &saddr) &&
                    addr_same(&tm->tcpm_daddr, &daddr))
                        break;
        }
        tcpm_check_stamp(tm, dst);
        return tm;
}
static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net;

        if (tw->tw_family == AF_INET) {
                saddr.family = AF_INET;
                saddr.addr.a4 = tw->tw_rcv_saddr;
                daddr.family = AF_INET;
                daddr.addr.a4 = tw->tw_daddr;
                hash = (__force unsigned int) daddr.addr.a4;
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (tw->tw_family == AF_INET6) {
                if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
                        saddr.family = AF_INET;
                        saddr.addr.a4 = tw->tw_rcv_saddr;
                        daddr.family = AF_INET;
                        daddr.addr.a4 = tw->tw_daddr;
                        hash = (__force unsigned int) daddr.addr.a4;
                } else {
                        saddr.family = AF_INET6;
                        *(struct in6_addr *)saddr.addr.a6 = tw->tw_v6_rcv_saddr;
                        daddr.family = AF_INET6;
                        *(struct in6_addr *)daddr.addr.a6 = tw->tw_v6_daddr;
                        hash = ipv6_addr_hash(&tw->tw_v6_daddr);
                }
        }
#endif
        else
                return NULL;

        net = twsk_net(tw);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_saddr, &saddr) &&
                    addr_same(&tm->tcpm_daddr, &daddr))
                        break;
        }
        return tm;
}
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net;

        if (sk->sk_family == AF_INET) {
                saddr.family = AF_INET;
                saddr.addr.a4 = inet_sk(sk)->inet_saddr;
                daddr.family = AF_INET;
                daddr.addr.a4 = inet_sk(sk)->inet_daddr;
                hash = (__force unsigned int) daddr.addr.a4;
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (sk->sk_family == AF_INET6) {
                if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
                        saddr.family = AF_INET;
                        saddr.addr.a4 = inet_sk(sk)->inet_saddr;
                        daddr.family = AF_INET;
                        daddr.addr.a4 = inet_sk(sk)->inet_daddr;
                        hash = (__force unsigned int) daddr.addr.a4;
                } else {
                        saddr.family = AF_INET6;
                        *(struct in6_addr *)saddr.addr.a6 = sk->sk_v6_rcv_saddr;
                        daddr.family = AF_INET6;
                        *(struct in6_addr *)daddr.addr.a6 = sk->sk_v6_daddr;
                        hash = ipv6_addr_hash(&sk->sk_v6_daddr);
                }
        }
#endif
        else
                return NULL;

        net = dev_net(dst->dev);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
        if (tm == TCP_METRICS_RECLAIM_PTR)
                tm = NULL;
        if (!tm && create)
                tm = tcpm_new(dst, &saddr, &daddr, hash);
        else
                tcpm_check_stamp(tm, dst);

        return tm;
}
/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
        int m;

        if (sysctl_tcp_nometrics_save || !dst)
                return;

        if (dst->flags & DST_HOST)
                dst_confirm(dst);

        rcu_read_lock();
        if (icsk->icsk_backoff || !tp->srtt) {
                /* This session failed to estimate rtt. Why?
                 * Probably, no packets returned in time.  Reset our
                 * results.
                 */
                tm = tcp_get_metrics(sk, dst, false);
                if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
                        tcp_metric_set(tm, TCP_METRIC_RTT, 0);
                goto out_unlock;
        } else
                tm = tcp_get_metrics(sk, dst, true);

        if (!tm)
                goto out_unlock;

        rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
        m = rtt - tp->srtt;

        /* If newly calculated rtt larger than stored one, store new
         * one. Otherwise, use EWMA. Remember, rtt overestimation is
         * always better than underestimation.
         */
        if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
                if (m <= 0)
                        rtt = tp->srtt;
                else
                        rtt -= (m >> 3);
                tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
        }

        if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
                unsigned long var;

                if (m < 0)
                        m = -m;

                /* Scale deviation to rttvar fixed point */
                m >>= 1;
                if (m < tp->mdev)
                        m = tp->mdev;

                var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
                if (m >= var)
                        var = m;
                else
                        var -= (var - m) >> 2;

                tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
        }

        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tp->snd_cwnd >> 1) > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_cwnd >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        if (tp->snd_cwnd > val)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
        } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
                }
        } else {
                /* Else slow start did not finish, cwnd is non-sense,
                 * ssthresh may be also invalid.
                 */
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_ssthresh);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
        }
        tm->tcpm_stamp = jiffies;
out_unlock:
        rcu_read_unlock();
}
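
/* Worked example of the RTT EWMA above (illustration only, not from
 * the original source): with a cached value of 120 and a final srtt of
 * 96 (both in the same scaled-by-8 units), m = 24 and the new cached
 * value becomes 120 - (24 >> 3) = 117.  A measurement above the cached
 * value replaces it outright, so the cache errs toward overestimation.
 */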
/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        u32 val, crtt = 0; /* cached RTT scaled by 8 */

        if (dst == NULL)
                goto reset;

        dst_confirm(dst);

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (!tm) {
                rcu_read_unlock();
                goto reset;
        }

        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
        } else {
                /* ssthresh may have been reduced unnecessarily during
                 * 3WHS. Restore it back to its initial default.
                 */
                tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        }
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val) {
                tcp_disable_fack(tp);
                tcp_disable_early_retrans(tp);
                tp->reordering = val;
        }

        crtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
        rcu_read_unlock();
reset:
        /* The initial RTT measurement from the SYN/SYN-ACK is not ideal
         * to seed the RTO for later data packets because SYN packets are
         * small. Use the per-dst cached values to seed the RTO but keep
         * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
         * Later the RTO will be updated immediately upon obtaining the first
         * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
         * influences the first RTO but not later RTT estimation.
         *
         * But if RTT is not available from the SYN (due to retransmits or
         * syn cookies) or the cache, force a conservative 3secs timeout.
         *
         * A bit of theory. RTT is time passed after "normal" sized packet
         * is sent until it is ACKed. In normal circumstances sending small
         * packets forces the peer to delay ACKs and the calculation is
         * correct too. The algorithm is adaptive and, provided we follow
         * specs, it NEVER underestimates RTT. BUT! If peer tries to make
         * some clever tricks sort of "quick acks" for time long enough to
         * decrease RTT to low value, and then abruptly stops to do it and
         * starts to delay ACKs, wait for troubles.
         */
        if (crtt > tp->srtt) {
                /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
                crtt >>= 3;
                inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
        } else if (tp->srtt == 0) {
                /* RFC6298: 5.7 We've failed to get a valid RTT sample from
                 * 3WHS. This is most likely due to retransmission,
                 * including spurious one. Reset the RTO back to 3secs
                 * from the more aggressive 1sec to avoid more spurious
                 * retransmission.
                 */
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
        /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
         * retransmitted. In light of RFC6298 more aggressive 1sec
         * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
         * retransmission has occurred.
         */
        if (tp->total_retrans > 1)
                tp->snd_cwnd = 1;
        else
                tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
}
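
/* Illustration of the RTO seeding above (not part of the original
 * source): a cached RTT of roughly 200 msec arrives here scaled by 8;
 * after crtt >>= 3 the RTO is seeded with about
 * 200 + max(400, tcp_rto_min(sk)) msec worth of jiffies, mirroring
 * the srtt-plus-variance shape used by tcp_rtt_estimator().
 */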
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
        struct tcp_metrics_block *tm;
        bool ret;

        if (!dst)
                return false;

        rcu_read_lock();
        tm = __tcp_get_metrics_req(req, dst);
        if (paws_check) {
                if (tm &&
                    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
                    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
                        ret = false;
                else
                        ret = true;
        } else {
                if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
                        ret = true;
                else
                        ret = false;
        }
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_sock *tp = tcp_sk(sk);

                if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
                        tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
                        tp->rx_opt.ts_recent = tm->tcpm_ts;
                }
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
/* VJ's idea. Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        bool ret = false;

        if (dst) {
                struct tcp_metrics_block *tm;

                rcu_read_lock();
                tm = tcp_get_metrics(sk, dst, true);
                if (tm) {
                        struct tcp_sock *tp = tcp_sk(sk);

                        if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
                            ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                             tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
                                tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
                                tm->tcpm_ts = tp->rx_opt.ts_recent;
                        }
                        ret = true;
                }
                rcu_read_unlock();
        }

        return ret;
}
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
        struct tcp_metrics_block *tm;
        bool ret = false;

        rcu_read_lock();
        tm = __tcp_get_metrics_tw(tw);
        if (tm) {
                const struct tcp_timewait_sock *tcptw;
                struct sock *sk = (struct sock *) tw;

                tcptw = tcp_twsk(sk);
                if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
                    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
                        tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
                        tm->tcpm_ts = tcptw->tw_ts_recent;
                }
                ret = true;
        }
        rcu_read_unlock();

        return ret;
}
static DEFINE_SEQLOCK(fastopen_seqlock);
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie,
                            int *syn_loss, unsigned long *last_syn_loss)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        if (tfom->mss)
                                *mss = tfom->mss;
                        *cookie = tfom->cookie;
                        *syn_loss = tfom->syn_loss;
                        *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
}
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_metrics_block *tm;

        if (!dst)
                return;
        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

                write_seqlock_bh(&fastopen_seqlock);
                if (mss)
                        tfom->mss = mss;
                if (cookie && cookie->len > 0)
                        tfom->cookie = *cookie;
                if (syn_lost) {
                        ++tfom->syn_loss;
                        tfom->last_syn_loss = jiffies;
                } else
                        tfom->syn_loss = 0;
                write_sequnlock_bh(&fastopen_seqlock);
        }
        rcu_read_unlock();
}
static struct genl_family tcp_metrics_nl_family = {
        .id             = GENL_ID_GENERATE,
        .hdrsize        = 0,
        .name           = TCP_METRICS_GENL_NAME,
        .version        = TCP_METRICS_GENL_VERSION,
        .maxattr        = TCP_METRICS_ATTR_MAX,
        .netnsok        = true,
};
static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
        [TCP_METRICS_ATTR_ADDR_IPV4]    = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_ADDR_IPV6]    = { .type = NLA_BINARY,
                                            .len = sizeof(struct in6_addr), },
        /* Following attributes are not received for GET/DEL,
         * we keep them for reference
         */
#if 0
        [TCP_METRICS_ATTR_AGE]                  = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_TW_TSVAL]             = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_TW_TS_STAMP]          = { .type = NLA_S32, },
        [TCP_METRICS_ATTR_VALS]                 = { .type = NLA_NESTED, },
        [TCP_METRICS_ATTR_FOPEN_MSS]            = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROPS]      = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]    = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_FOPEN_COOKIE]         = { .type = NLA_BINARY,
                                                    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};
/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
                                 struct tcp_metrics_block *tm)
{
        struct nlattr *nest;
        int i;

        switch (tm->tcpm_daddr.family) {
        case AF_INET:
                if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
                                tm->tcpm_daddr.addr.a4) < 0)
                        goto nla_put_failure;
                if (nla_put_be32(msg, TCP_METRICS_ATTR_SADDR_IPV4,
                                tm->tcpm_saddr.addr.a4) < 0)
                        goto nla_put_failure;
                break;
        case AF_INET6:
                if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
                            tm->tcpm_daddr.addr.a6) < 0)
                        goto nla_put_failure;
                if (nla_put(msg, TCP_METRICS_ATTR_SADDR_IPV6, 16,
                            tm->tcpm_saddr.addr.a6) < 0)
                        goto nla_put_failure;
                break;
        default:
                return -EAFNOSUPPORT;
        }

        if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
                          jiffies - tm->tcpm_stamp) < 0)
                goto nla_put_failure;
        if (tm->tcpm_ts_stamp) {
                if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
                                (s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
                        goto nla_put_failure;
                if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
                                tm->tcpm_ts) < 0)
                        goto nla_put_failure;
        }

        {
                int n = 0;

                nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
                if (!nest)
                        goto nla_put_failure;
                for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
                        if (!tm->tcpm_vals[i])
                                continue;
                        if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
                                goto nla_put_failure;
                        n++;
                }
                if (n)
                        nla_nest_end(msg, nest);
                else
                        nla_nest_cancel(msg, nest);
        }

        {
                struct tcp_fastopen_metrics tfom_copy[1], *tfom;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        tfom_copy[0] = tm->tcpm_fastopen;
                } while (read_seqretry(&fastopen_seqlock, seq));

                tfom = tfom_copy;
                if (tfom->mss &&
                    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
                                tfom->mss) < 0)
                        goto nla_put_failure;
                if (tfom->syn_loss &&
                    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
                                tfom->syn_loss) < 0 ||
                     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
                                jiffies - tfom->last_syn_loss) < 0))
                        goto nla_put_failure;
                if (tfom->cookie.len > 0 &&
                    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
                            tfom->cookie.len, tfom->cookie.val) < 0)
                        goto nla_put_failure;
        }
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
static int tcp_metrics_dump_info(struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 struct tcp_metrics_block *tm)
{
        void *hdr;

        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &tcp_metrics_nl_family, NLM_F_MULTI,
                          TCP_METRICS_CMD_GET);
        if (!hdr)
                return -EMSGSIZE;

        if (tcp_metrics_fill_info(skb, tm) < 0)
                goto nla_put_failure;

        return genlmsg_end(skb, hdr);

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}
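
/* The dump below stores its position (hash row in cb->args[0], chain
 * column in cb->args[1]) so that a dump cut short by a full skb
 * resumes at the next entry on the following callback invocation.
 */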
static int tcp_metrics_nl_dump(struct sk_buff *skb,
                               struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
        unsigned int row, s_row = cb->args[0];
        int s_col = cb->args[1], col = s_col;

        for (row = s_row; row < max_rows; row++, s_col = 0) {
                struct tcp_metrics_block *tm;
                struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

                rcu_read_lock();
                for (col = 0, tm = rcu_dereference(hb->chain); tm;
                     tm = rcu_dereference(tm->tcpm_next), col++) {
                        if (col < s_col)
                                continue;
                        if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
                                rcu_read_unlock();
                                goto done;
                        }
                }
                rcu_read_unlock();
        }

done:
        cb->args[0] = row;
        cb->args[1] = col;
        return skb->len;
}
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                           unsigned int *hash, int optional, int v4, int v6)
{
        struct nlattr *a;

        a = info->attrs[v4];
        if (a) {
                addr->family = AF_INET;
                addr->addr.a4 = nla_get_be32(a);
                if (hash)
                        *hash = (__force unsigned int) addr->addr.a4;
                return 0;
        }
        a = info->attrs[v6];
        if (a) {
                if (nla_len(a) != sizeof(struct in6_addr))
                        return -EINVAL;
                addr->family = AF_INET6;
                memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
                if (hash)
                        *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
                return 0;
        }
        return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                         unsigned int *hash, int optional)
{
        return __parse_nl_addr(info, addr, hash, optional,
                               TCP_METRICS_ATTR_ADDR_IPV4,
                               TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
        return __parse_nl_addr(info, addr, NULL, 0,
                               TCP_METRICS_ATTR_SADDR_IPV4,
                               TCP_METRICS_ATTR_SADDR_IPV6);
}
static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct sk_buff *msg;
        struct net *net = genl_info_net(info);
        void *reply;
        int ret;
        bool src = true;

        ret = parse_nl_addr(info, &daddr, &hash, 0);
        if (ret < 0)
                return ret;

        ret = parse_nl_saddr(info, &saddr);
        if (ret < 0)
                src = false;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
                                  info->genlhdr->cmd);
        if (!reply)
                goto nla_put_failure;

        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
        ret = -ESRCH;
        rcu_read_lock();
        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
                    (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
                        ret = tcp_metrics_fill_info(msg, tm);
                        break;
                }
        }
        rcu_read_unlock();
        if (ret < 0)
                goto out_free;

        genlmsg_end(msg, reply);
        return genlmsg_reply(msg, info);

nla_put_failure:
        ret = -EMSGSIZE;

out_free:
        nlmsg_free(msg);
        return ret;
}
#define deref_locked_genl(p)    \
        rcu_dereference_protected(p, lockdep_genl_is_held() && \
                                     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)   rcu_dereference_protected(p, lockdep_genl_is_held())
static int tcp_metrics_flush_all(struct net *net)
{
        unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
        struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
        struct tcp_metrics_block *tm;
        unsigned int row;

        for (row = 0; row < max_rows; row++, hb++) {
                spin_lock_bh(&tcp_metrics_lock);
                tm = deref_locked_genl(hb->chain);
                if (tm)
                        hb->chain = NULL;
                spin_unlock_bh(&tcp_metrics_lock);
                while (tm) {
                        struct tcp_metrics_block *next;

                        next = deref_genl(tm->tcpm_next);
                        kfree_rcu(tm, rcu_head);
                        tm = next;
                }
        }
        return 0;
}
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct tcpm_hash_bucket *hb;
        struct tcp_metrics_block *tm;
        struct tcp_metrics_block __rcu **pp;
        struct inetpeer_addr saddr, daddr;
        unsigned int hash;
        struct net *net = genl_info_net(info);
        int ret;
        bool src = true, found = false;

        ret = parse_nl_addr(info, &daddr, &hash, 1);
        if (ret < 0)
                return ret;
        if (ret > 0)
                return tcp_metrics_flush_all(net);
        ret = parse_nl_saddr(info, &saddr);
        if (ret < 0)
                src = false;

        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
        hb = net->ipv4.tcp_metrics_hash + hash;
        pp = &hb->chain;
        spin_lock_bh(&tcp_metrics_lock);
        for (tm = deref_locked_genl(*pp); tm; tm = deref_locked_genl(*pp)) {
                if (addr_same(&tm->tcpm_daddr, &daddr) &&
                    (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
                        *pp = tm->tcpm_next;
                        kfree_rcu(tm, rcu_head);
                        found = true;
                } else {
                        pp = &tm->tcpm_next;
                }
        }
        spin_unlock_bh(&tcp_metrics_lock);
        if (!found)
                return -ESRCH;
        return 0;
}
static const struct genl_ops tcp_metrics_nl_ops[] = {
        {
                .cmd = TCP_METRICS_CMD_GET,
                .doit = tcp_metrics_nl_cmd_get,
                .dumpit = tcp_metrics_nl_dump,
                .policy = tcp_metrics_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = TCP_METRICS_CMD_DEL,
                .doit = tcp_metrics_nl_cmd_del,
                .policy = tcp_metrics_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
};
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
        ssize_t ret;

        if (!str)
                return 0;

        ret = kstrtouint(str, 0, &tcpmhash_entries);
        if (ret)
                return 0;

        return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
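
/* Example (standard __setup semantics): booting with
 * "tcpmhash_entries=16384" on the kernel command line sizes the
 * per-netns hash table to 16384 slots instead of the memory-based
 * default chosen in tcp_net_metrics_init() below.
 */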
static int __net_init tcp_net_metrics_init(struct net *net)
{
        size_t size;
        unsigned int slots;

        slots = tcpmhash_entries;
        if (!slots) {
                if (totalram_pages >= 128 * 1024)
                        slots = 16 * 1024;
                else
                        slots = 8 * 1024;
        }

        net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
        size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

        net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!net->ipv4.tcp_metrics_hash)
                net->ipv4.tcp_metrics_hash = vzalloc(size);

        if (!net->ipv4.tcp_metrics_hash)
                return -ENOMEM;

        return 0;
}
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
        unsigned int i;

        for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
                struct tcp_metrics_block *tm, *next;

                tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
                while (tm) {
                        next = rcu_dereference_protected(tm->tcpm_next, 1);
                        kfree(tm);
                        tm = next;
                }
        }
        if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
                vfree(net->ipv4.tcp_metrics_hash);
        else
                kfree(net->ipv4.tcp_metrics_hash);
}
static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
        .init = tcp_net_metrics_init,
        .exit = tcp_net_metrics_exit,
};
void __init tcp_metrics_init(void)
{
        int ret;

        ret = register_pernet_subsys(&tcp_net_metrics_ops);
        if (ret < 0)
                goto cleanup;
        ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
                                            tcp_metrics_nl_ops);
        if (ret < 0)
                goto cleanup_subsys;
        return;

cleanup_subsys:
        unregister_pernet_subsys(&tcp_net_metrics_ops);
cleanup:
        return;
}
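
/* Userspace reaches this generic netlink family (TCP_METRICS_GENL_NAME,
 * "tcp_metrics") through iproute2, e.g. (sketch, assuming a recent
 * iproute2 build):
 *
 *      ip tcp_metrics show             # TCP_METRICS_CMD_GET dump
 *      ip tcp_metrics delete 192.0.2.1 # TCP_METRICS_CMD_DEL, one entry
 *      ip tcp_metrics flush            # DEL with no address: flush all
 *
 * Both commands require CAP_NET_ADMIN (GENL_ADMIN_PERM above).
 */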