// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL	(TCP_METRIC_MAX - 2)
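
/* Layout note (hedged against the uapi enum in linux/tcp_metrics.h): the
 * last two metric indices, TCP_METRIC_RTT_US and TCP_METRIC_RTTVAR_US, are
 * userspace-only, so the kernel-side array sized TCP_METRIC_MAX_KERNEL + 1
 * holds just TCP_METRIC_RTT..TCP_METRIC_REORDERING; the usec attributes are
 * synthesized at dump time in tcp_metrics_fill_info().
 */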

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}
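
/* Example of the lock bitmap: if a route pins a metric, e.g.
 * "ip route add 10.0.0.0/8 via 10.0.0.1 rtt lock 100" (syntax assumed from
 * iproute2), tcpm_suck_dst() sets the TCP_METRIC_RTT bit in tcpm_lock and
 * every writer first checks tcp_metric_locked(), so the pinned value is
 * never overwritten.
 */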

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return inetpeer_addr_cmp(a, b) == 0;
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);

static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}
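
/* Unit note: dst_metric_raw() yields RTAX_RTT/RTAX_RTTVAR in milliseconds,
 * while this cache keeps microseconds, hence the USEC_PER_MSEC scaling
 * above (a 120 ms route metric is cached as 120000 us).
 */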

#define TCP_METRICS_TIMEOUT	(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}
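
/* The sentinel bounds chain growth: a lookup that walks more than
 * TCP_METRICS_RECLAIM_DEPTH entries without a match returns
 * TCP_METRICS_RECLAIM_PTR instead of NULL, which makes tcpm_new()
 * recycle the bucket's oldest entry rather than extend the chain.
 */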

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}
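
/* Bucket selection, step by step: hash the peer address, XOR in
 * net_hash_mix(net) so the same address hashes differently per netns,
 * then hash_32() folds the result down to tcp_metrics_hash_log bits,
 * giving an index in [0, 1 << tcp_metrics_hash_log).
 */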

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session. This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	sk_dst_confirm(sk);
	if (sysctl_tcp_nometrics_save || !dst)
		return;

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time. Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If the newly calculated rtt is larger than the stored one, store the
	 * new one. Otherwise, use EWMA. Remember, rtt overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is non-sense,
		 * ssthresh may be also invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}
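
/* Worked example of the RTT merge above (usec, srtt scaled by 8): with a
 * cached rtt of 100000 and a new tp->srtt_us of 80000, m = 20000 and the
 * cache decays to 100000 - (20000 >> 3) = 97500; a sample larger than the
 * cache (m <= 0) replaces it outright, biasing toward overestimation.
 */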

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	sk_dst_confirm(sk);
	if (!dst)
		goto reset;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is the time that passes after a "normal" sized
	 * packet is sent until it is ACKed. In normal circumstances sending
	 * small packets forces the peer to delay ACKs and the calculation is
	 * still correct. The algorithm is adaptive and, provided we follow
	 * specs, it NEVER underestimates RTT. BUT! If the peer tries clever
	 * tricks, sort of "quick acks" for long enough to decrease RTT to a
	 * low value, and then abruptly stops doing so and starts delaying
	 * ACKs, expect trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_jiffies32;
}
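
/* Arithmetic check for the RTO seeding above, assuming HZ=1000: the divisor
 * 8 * USEC_PER_SEC / HZ = 8000 converts a usec value scaled by 8 into
 * jiffies, so a cached 200 ms RTT (stored as 1600000) yields crtt = 200 and
 * icsk_rto = 200 + max(400, tcp_rto_min(sk)) jiffies.
 */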

bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
		ret = true;
	else
		ret = false;
	rcu_read_unlock();

	return ret;
}

static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}
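
/* Pairing note: tcp_fastopen_cache_get() loops on read_seqbegin()/
 * read_seqretry() against this write_seqlock_bh() section, so a reader
 * that races with an update simply retries and never observes a torn
 * mss/cookie/syn_loss combination.
 */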

static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp,
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				 tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				   jiffies - tfom->last_syn_loss,
				   TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
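
/* Dump format note: within TCP_METRICS_ATTR_VALS the RTT and RTTVAR slots
 * are emitted twice; e.g. an RTT cached as 97500 us goes out unchanged as
 * the TCP_METRIC_RTT_US + 1 attribute and, after val = max(val / 1000, 1U),
 * as 97 in the legacy millisecond attribute (clamped so it is never 0).
 */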

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			if (net_eq(tm_net(tm), net)) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= tcp_metrics_nl_ops,
	.n_ops		= ARRAY_SIZE(tcp_metrics_nl_ops),
};

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}
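
/* Sizing example (assuming 4 KiB pages): 128 * 1024 pages is 512 MiB of
 * RAM, so bigger machines default to 16K buckets and smaller ones to 8K
 * unless tcpmhash_entries= overrides it; order_base_2() rounds the slot
 * count up to a power of two so hash_32() can fold into it.
 */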

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	tcp_metrics_flush_all(net);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init = tcp_net_metrics_init,
	.exit = tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family(&tcp_metrics_nl_family);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}