// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);
struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};
/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL	(TCP_METRIC_MAX - 2)
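/* Note: TCP_METRIC_MAX_KERNEL + 1 covers the five indices RTT, RTTVAR,
 * SSTHRESH, CWND and REORDERING from enum tcp_metric_index (see
 * include/uapi/linux/tcp_metrics.h); the trailing *_US indices exist only
 * so the netlink dump can report RTT/RTTVAR in microseconds alongside the
 * legacy millisecond attributes.
 */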
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct net			*tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};
static inline struct net *tm_net(const struct tcp_metrics_block *tm)
{
	/* Paired with the WRITE_ONCE() in tcpm_new() */
	return READ_ONCE(tm->tcpm_net);
}
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	/* Paired with WRITE_ONCE() in tcpm_suck_dst() */
	return READ_ONCE(tm->tcpm_lock) & (1 << idx);
}
static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	/* Paired with WRITE_ONCE() in tcp_metric_set() */
	return READ_ONCE(tm->tcpm_vals[idx]);
}
static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	/* Paired with READ_ONCE() in tcp_metric_get() */
	WRITE_ONCE(tm->tcpm_vals[idx], val);
}
static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return (a->family == b->family) && !inetpeer_addr_cmp(a, b);
}
struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);
static DEFINE_SEQLOCK(fastopen_seqlock);
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	WRITE_ONCE(tm->tcpm_stamp, jiffies);

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	/* Paired with READ_ONCE() in tcp_metric_locked() */
	WRITE_ONCE(tm->tcpm_lock, val);

	msval = dst_metric_raw(dst, RTAX_RTT);
	tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
	tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
		       dst_metric_raw(dst, RTAX_SSTHRESH));
	tcp_metric_set(tm, TCP_METRIC_CWND,
		       dst_metric_raw(dst, RTAX_CWND));
	tcp_metric_set(tm, TCP_METRIC_REORDERING,
		       dst_metric_raw(dst, RTAX_REORDERING));
	if (fastopen_clear) {
		write_seqlock(&fastopen_seqlock);
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
		write_sequnlock(&fastopen_seqlock);
	}
}
#define TCP_METRICS_TIMEOUT	(60 * 60 * HZ)
static void tcpm_check_stamp(struct tcp_metrics_block *tm,
			     const struct dst_entry *dst)
{
	unsigned long limit;

	if (!tm)
		return;
	limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
	if (unlikely(time_after(jiffies, limit)))
		tcpm_suck_dst(tm, dst, false);
}
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))
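/* Reclaim scheme: __tcp_get_metrics() returns the TCP_METRICS_RECLAIM_PTR
 * sentinel (a non-NULL cookie that is never dereferenced) once a chain is
 * deeper than TCP_METRICS_RECLAIM_DEPTH, and tcpm_new() then recycles the
 * stalest entry on that chain instead of allocating, bounding both chain
 * length and memory use.
 */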
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(READ_ONCE(tm->tcpm_stamp),
					READ_ONCE(oldest->tcpm_stamp)))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kzalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	/* Paired with the READ_ONCE() in tm_net() */
	WRITE_ONCE(tm->tcpm_net, net);

	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, reclaim);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}
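/* Note: callers of tcp_get_metrics() already hold rcu_read_lock(); with
 * create == true a missing entry is allocated by tcpm_new() (GFP_ATOMIC,
 * under tcp_metrics_lock), with create == false the lookup only refreshes
 * a stale entry from the dst and may return NULL.
 */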
/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	sk_dst_confirm(sk);
	if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
		return;

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;
	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If newly calculated rtt larger than stored one, store new
	 * one. Otherwise, use EWMA. Remember, rtt overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}
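	/* Illustrative numbers for the EWMA above: with a cached rtt of
	 * 80000 usec and tp->srtt_us of 40000 usec, m = 40000, so rtt
	 * becomes 80000 - (40000 >> 3) = 75000 usec, i.e. the cache moves
	 * 1/8th of the way toward the smaller fresh sample, while a larger
	 * sample (m <= 0) replaces the cached value outright.
	 */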
	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}
	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tcp_snd_cwnd(tp) >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tcp_snd_cwnd(tp) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tcp_snd_cwnd(tp) > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tcp_snd_cwnd(tp));
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tcp_snd_cwnd(tp)) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is non-sense,
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering !=
			    READ_ONCE(net->ipv4.sysctl_tcp_reordering))
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	WRITE_ONCE(tm->tcpm_stamp, jiffies);
out_unlock:
	rcu_read_unlock();
}
/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	sk_dst_confirm(sk);
	/* ssthresh may have been reduced unnecessarily during
	 * 3WHS. Restore it back to its initial default.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	if (!dst)
		goto reset;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, false);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
	      0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	}

	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val)
		tp->reordering = val;

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is time passed after "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets force peer to delay ACKs and calculation is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimates RTT. BUT! If peer tries to make some clever
	 * tricks sort of "quick acks" for time long enough to decrease RTT
	 * to low value, and then abruptly stops doing it and starts to delay
	 * ACKs, expect trouble.
	 */
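	/* Illustrative conversion, assuming HZ == 1000: a cached RTT of
	 * 200 ms is stored as 8 * 200000 usec = 1600000; the divisor
	 * 8 * USEC_PER_SEC / HZ is 8000, so crtt becomes 200 jiffies and
	 * the seeded RTO below is 200 + max(400, tcp_rto_min(sk)) jiffies.
	 */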
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
}
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
		ret = true;
	else
		ret = false;
	rcu_read_unlock();

	return ret;
}
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}
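/* The read_seqbegin()/read_seqretry() loop in tcp_fastopen_cache_get()
 * above is the usual seqlock reader pattern: the mss and cookie are
 * copied out speculatively and the copy is retried if a writer
 * (tcp_fastopen_cache_set() or tcpm_suck_dst()) raced with the reader.
 */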
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}
static struct genl_family tcp_metrics_nl_family;
static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	=
		NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),

	[TCP_METRICS_ATTR_SADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_SADDR_IPV6]	=
		NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),

	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]			= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]		= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]		= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]			= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]		= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]		= { .type = NLA_BINARY,
						    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};
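/* For reference: this generic netlink family (TCP_METRICS_GENL_NAME is
 * "tcp_metrics") is what iproute2's "ip tcp_metrics show" and
 * "ip tcp_metrics delete" commands talk to.
 */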
/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - READ_ONCE(tm->tcpm_stamp),
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;

	{
		int n = 0;

		nest = nla_nest_start_noflag(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tcp_metric_get(tm, i);

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss,
				TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;
	int res = 0;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			res = tcp_metrics_dump_info(skb, cb, tm);
			if (res < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return res;
}
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}
static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}
static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}
static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}
static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp = &hb->chain;
		bool match;

		if (!rcu_access_pointer(*pp))
			continue;

		spin_lock_bh(&tcp_metrics_lock);
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			match = net ? net_eq(tm_net(tm), net) :
				!refcount_read(&tm_net(tm)->ns.count);
			if (match) {
				rcu_assign_pointer(*pp, tm->tcpm_next);
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}
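/* When called with net == NULL (from the pernet exit path below),
 * tcp_metrics_flush_all() drops entries whose netns refcount has already
 * reached zero; a non-NULL net restricts the flush to that namespace, as
 * done by the TCP_METRICS_CMD_DEL handler when no destination address is
 * supplied.
 */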
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			rcu_assign_pointer(*pp, tm->tcpm_next);
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}
static const struct genl_small_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = tcp_metrics_nl_cmd_del,
		.flags = GENL_ADMIN_PERM,
	},
};
static struct genl_family tcp_metrics_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.policy		= tcp_metrics_nl_policy,
	.netnsok	= true,
	.parallel_ops	= true,
	.module		= THIS_MODULE,
	.small_ops	= tcp_metrics_nl_ops,
	.n_small_ops	= ARRAY_SIZE(tcp_metrics_nl_ops),
	.resv_start_op	= TCP_METRICS_CMD_DEL + 1,
};
static unsigned int tcpmhash_entries __initdata;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
static void __init tcp_metrics_hash_alloc(void)
{
	unsigned int slots = tcpmhash_entries;
	size_t size;

	if (!slots) {
		if (totalram_pages() >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
	if (!tcp_metrics_hash)
		panic("Could not allocate the tcp_metrics hash table\n");
}
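/* Sizing example, assuming 4 KiB pages and no tcpmhash_entries= boot
 * parameter: a machine with at least 128 * 1024 pages (512 MiB) gets
 * 16384 slots, so tcp_metrics_hash_log is 14 and the table occupies
 * 16384 * sizeof(struct tcpm_hash_bucket) = 128 KiB on 64-bit.
 */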
static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
{
	tcp_metrics_flush_all(NULL);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.exit_batch	= tcp_net_metrics_exit_batch,
};
void __init tcp_metrics_init(void)
{
	int ret;

	tcp_metrics_hash_alloc();

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not register tcp_net_metrics_ops\n");

	ret = genl_register_family(&tcp_metrics_nl_family);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}