#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);
struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};
/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility.
 * The kernel only stores RTT and RTTVAR in usec resolution.
 */
#define TCP_METRIC_MAX_KERNEL	(TCP_METRIC_MAX - 2)
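/* A worked example of the unit split (nothing beyond this file assumed):
 * tcp_update_metrics() stores tp->srtt_us, the usec smoothed RTT scaled
 * by 8, straight into tcpm_vals[TCP_METRIC_RTT].  When dumping,
 * tcp_metrics_fill_info() exports that value twice: verbatim under the
 * usec attribute (TCP_METRIC_RTT_US) and as max(val / 1000, 1U) under
 * the legacy msec slot - those usec attributes are the two extra
 * userspace-only indices referred to above.
 */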
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};
static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}
static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}
static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}
static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;
	return ipv6_addr_equal(&a->addr.in6, &b->addr.in6);
}
struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}
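/* Both refresh paths funnel through tcpm_suck_dst().  A periodic
 * refresh (tcpm_check_stamp() below) passes fastopen_clear == false so
 * the learned Fast Open cookie and SYN-loss history survive the reset;
 * only a brand new or recycled entry (tcpm_new()) wipes the Fast Open
 * state with fastopen_clear == true.
 */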
#define TCP_METRICS_TIMEOUT	(60 * 60 * HZ)
static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))
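/* deref_locked() is the writer-side accessor: lockdep verifies that
 * tcp_metrics_lock is held, so the chain may be rewritten in place.
 * Readers instead walk the chain under rcu_read_lock(); a sketch of the
 * reader pattern used throughout this file:
 *
 *	rcu_read_lock();
 *	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
 *	     tm = rcu_dereference(tm->tcpm_next))
 *		...
 *	rcu_read_unlock();
 */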
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		/* Recycle the least recently refreshed entry on this chain. */
		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}
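/* The lookup result is a three-way encode: a real block on a hit, NULL
 * on a miss in a short chain, and the TCP_METRICS_RECLAIM_PTR sentinel
 * on a miss in a chain already deeper than TCP_METRICS_RECLAIM_DEPTH.
 * tcpm_new() translates the sentinel into "recycle the oldest entry in
 * this bucket" instead of growing the chain further.
 */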
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		saddr.addr.a4 = inet_rsk(req)->ir_loc_addr;
		daddr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
		hash = (__force unsigned int) daddr.addr.a4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		saddr.addr.in6 = inet_rsk(req)->ir_v6_loc_addr;
		daddr.addr.in6 = inet_rsk(req)->ir_v6_rmt_addr;
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}
static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (tw->tw_family == AF_INET) {
		saddr.family = AF_INET;
		saddr.addr.a4 = tw->tw_rcv_saddr;
		daddr.family = AF_INET;
		daddr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) daddr.addr.a4;
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (tw->tw_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
			saddr.family = AF_INET;
			saddr.addr.a4 = tw->tw_rcv_saddr;
			daddr.family = AF_INET;
			daddr.addr.a4 = tw->tw_daddr;
			hash = (__force unsigned int) daddr.addr.a4;
		} else {
			saddr.family = AF_INET6;
			saddr.addr.in6 = tw->tw_v6_rcv_saddr;
			daddr.family = AF_INET6;
			daddr.addr.in6 = tw->tw_v6_daddr;
			hash = ipv6_addr_hash(&tw->tw_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = twsk_net(tw);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	return tm;
}
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		saddr.family = AF_INET;
		saddr.addr.a4 = inet_sk(sk)->inet_saddr;
		daddr.family = AF_INET;
		daddr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) daddr.addr.a4;
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			saddr.family = AF_INET;
			saddr.addr.a4 = inet_sk(sk)->inet_saddr;
			daddr.family = AF_INET;
			daddr.addr.a4 = inet_sk(sk)->inet_daddr;
			hash = (__force unsigned int) daddr.addr.a4;
		} else {
			saddr.family = AF_INET6;
			saddr.addr.in6 = sk->sk_v6_rcv_saddr;
			daddr.family = AF_INET6;
			daddr.addr.in6 = sk->sk_v6_daddr;
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}
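/* The create flag keeps read-only paths from growing the cache: the
 * failed-RTT branch of tcp_update_metrics() and tcp_fastopen_cache_get()
 * pass false, while paths that legitimately record new state
 * (tcp_update_metrics() proper, tcp_fetch_timewait_stamp(),
 * tcp_fastopen_cache_set()) pass true.
 */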
/* Save metrics learned by this TCP session. This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time. Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If the newly calculated rtt is larger than the stored one, store
	 * the new one. Otherwise, use EWMA. Remember, rtt overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is non-sense,
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}
/* Initialize metrics on socket. */
void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	if (!dst)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is the time passed after a "normal" sized
	 * packet is sent until it is ACKed. In normal circumstances sending
	 * small packets forces the peer to delay ACKs and the calculation
	 * is correct too. The algorithm is adaptive and, provided we follow
	 * the specs, it NEVER underestimates RTT. BUT! If the peer tries
	 * clever tricks like "quick acks" for long enough to drive the RTT
	 * down, and then abruptly stops and starts to delay ACKs, expect
	 * trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including a spurious one. Reset the RTO back to 3 secs
		 * from the more aggressive 1 sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298's more aggressive 1 sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
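/* Worked example of the RTO seeding above, assuming HZ == 1000 and the
 * default 200 msec tcp_rto_min(): a cached RTT of 200 msec sits in the
 * cache as 8 * 200000 usec, so crtt /= 8 * USEC_PER_SEC / HZ yields
 * 200 jiffies, and the seeded RTO becomes 200 + max(2 * 200, 200) =
 * 600 jiffies (600 msec) rather than the 1 sec RFC6298 initial RTO.
 */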
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
			bool paws_check, bool timestamps)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW ||
		     !timestamps))
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
/* VJ's idea. Save the last timestamp seen from this destination and hold
 * it at least for the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections, before they enter the
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}

	return ret;
}
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}
static DEFINE_SEQLOCK(fastopen_seqlock);
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}
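/* A seqlock suits the Fast Open metrics: writes are rare (one per
 * connection attempt) and never sleep, and readers retry only when a
 * writer raced with them.  The reader side in tcp_fastopen_cache_get()
 * above follows the canonical pattern:
 *
 *	do {
 *		seq = read_seqbegin(&fastopen_seqlock);
 *		... copy the tfom fields ...
 *	} while (read_seqretry(&fastopen_seqlock, seq));
 */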
static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};
static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL;
	 * we keep them for reference.
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};
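/* Userspace reaches the commands below through the "tcp_metrics"
 * generic netlink family; for example, iproute2's "ip tcp_metrics show"
 * and "ip tcp_metrics delete" are thin wrappers around
 * TCP_METRICS_CMD_GET and TCP_METRICS_CMD_DEL.
 */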
/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    tm->tcpm_daddr.addr.a4) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    tm->tcpm_saddr.addr.a4) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     &tm->tcpm_daddr.addr.in6) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     &tm->tcpm_saddr.addr.in6) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				 tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				   jiffies - tfom->last_syn_loss) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}
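/* Dumps are resumable: cb->args[0] and cb->args[1] carry the current
 * row and column across netlink recvmsg() calls, so a dump interrupted
 * by a full skb resumes at the entry where tcp_metrics_dump_info()
 * last failed.
 */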
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		addr->family = AF_INET;
		addr->addr.a4 = nla_get_in_addr(a);
		if (hash)
			*hash = (__force unsigned int) addr->addr.a4;
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		addr->family = AF_INET6;
		addr->addr.in6 = nla_get_in6_addr(a);
		if (hash)
			*hash = ipv6_addr_hash(&addr->addr.in6);
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}
static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}
static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}
static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}
static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			if (net_eq(tm_net(tm), net)) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}
static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
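/* The table size is fixed at boot: booting with e.g.
 * "tcpmhash_entries=4096" on the kernel command line requests 4096
 * slots; otherwise tcp_net_metrics_init() below picks 16K or 8K slots
 * based on available RAM.
 */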
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!tcp_metrics_hash)
		tcp_metrics_hash = vzalloc(size);

	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	tcp_metrics_flush_all(net);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init = tcp_net_metrics_init,
	.exit = tcp_net_metrics_exit,
};
void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}