#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
int sysctl_tcp_nometrics_save __read_mostly;
enum tcp_metric_index {
        TCP_METRIC_RTT,
        TCP_METRIC_RTTVAR,
        TCP_METRIC_SSTHRESH,
        TCP_METRIC_CWND,
        TCP_METRIC_REORDERING,

        /* Always last. */
        TCP_METRIC_MAX,
};
struct tcp_fastopen_metrics {
        u16     mss;
        u16     syn_loss:10;            /* Recurring Fast Open SYN losses */
        unsigned long   last_syn_loss;  /* Last Fast Open SYN loss */
        struct  tcp_fastopen_cookie     cookie;
};
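/* One cache entry per remote peer.  tcpm_vals[] is indexed by enum
 * tcp_metric_index; tcpm_lock carries one bit per index, set when the
 * corresponding dst metric is locked (see tcpm_suck_dst() and
 * tcp_metric_locked()).  tcpm_ts/tcpm_ts_stamp hold the last timestamp seen
 * from the peer for PAWS-style checks, and tcpm_fastopen caches the TCP
 * Fast Open state.
 */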
struct tcp_metrics_block {
        struct tcp_metrics_block __rcu  *tcpm_next;
        struct inetpeer_addr            tcpm_addr;
        unsigned long                   tcpm_stamp;
        u32                             tcpm_ts;
        u32                             tcpm_ts_stamp;
        u32                             tcpm_lock;
        u32                             tcpm_vals[TCP_METRIC_MAX];
        struct tcp_fastopen_metrics     tcpm_fastopen;
};
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
{
        return tm->tcpm_lock & (1 << idx);
}
static u32 tcp_metric_get(struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
{
        return tm->tcpm_vals[idx];
}
static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
                                  enum tcp_metric_index idx)
{
        return msecs_to_jiffies(tm->tcpm_vals[idx]);
}
static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
{
        tm->tcpm_vals[idx] = val;
}
static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
                                 enum tcp_metric_index idx,
                                 u32 val)
{
        tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}
static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
{
        const struct in6_addr *a6, *b6;

        if (a->family != b->family)
                return false;
        if (a->family == AF_INET)
                return a->addr.a4 == b->addr.a4;

        a6 = (const struct in6_addr *) &a->addr.a6[0];
        b6 = (const struct in6_addr *) &b->addr.a6[0];

        return ipv6_addr_equal(a6, b6);
}
struct tcpm_hash_bucket {
        struct tcp_metrics_block __rcu  *chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);
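/* Refresh a cache entry from the route: record which metrics are locked on
 * the dst (RTAX_* lock bits become tcpm_lock bits, one per tcp_metric_index),
 * copy the raw RTT/RTTVAR/SSTHRESH/CWND/REORDERING values, and clear the
 * timestamp and Fast Open state.
 */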
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        u32 val;

        tm->tcpm_stamp = jiffies;

        val = 0;
        if (dst_metric_locked(dst, RTAX_RTT))
                val |= 1 << TCP_METRIC_RTT;
        if (dst_metric_locked(dst, RTAX_RTTVAR))
                val |= 1 << TCP_METRIC_RTTVAR;
        if (dst_metric_locked(dst, RTAX_SSTHRESH))
                val |= 1 << TCP_METRIC_SSTHRESH;
        if (dst_metric_locked(dst, RTAX_CWND))
                val |= 1 << TCP_METRIC_CWND;
        if (dst_metric_locked(dst, RTAX_REORDERING))
                val |= 1 << TCP_METRIC_REORDERING;
        tm->tcpm_lock = val;

        tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
        tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
        tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
        tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
        tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
        tm->tcpm_ts = 0;
        tm->tcpm_ts_stamp = 0;
        tm->tcpm_fastopen.mss = 0;
        tm->tcpm_fastopen.syn_loss = 0;
        tm->tcpm_fastopen.cookie.len = 0;
}
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *addr,
                                          unsigned int hash,
                                          bool reclaim)
{
        struct tcp_metrics_block *tm;
        struct net *net;

        spin_lock_bh(&tcp_metrics_lock);
        net = dev_net(dst->dev);
        if (unlikely(reclaim)) {
                struct tcp_metrics_block *oldest;

                oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
                for (tm = rcu_dereference(oldest->tcpm_next); tm;
                     tm = rcu_dereference(tm->tcpm_next)) {
                        if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
                                oldest = tm;
                }
                tm = oldest;
        } else {
                tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
                if (!tm)
                        goto out_unlock;
        }
        tm->tcpm_addr = *addr;

        tcpm_suck_dst(tm, dst);

        if (likely(!reclaim)) {
                tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
                rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
        }

out_unlock:
        spin_unlock_bh(&tcp_metrics_lock);
        return tm;
}
#define TCP_METRICS_TIMEOUT     (60 * 60 * HZ)
static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
                tcpm_suck_dst(tm, dst);
}
#define TCP_METRICS_RECLAIM_DEPTH       5
#define TCP_METRICS_RECLAIM_PTR         (struct tcp_metrics_block *) 0x1UL

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
        if (tm)
                return tm;
        if (depth > TCP_METRICS_RECLAIM_DEPTH)
                return TCP_METRICS_RECLAIM_PTR;
        return NULL;
}
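/* tcp_get_encode() gives its callers three possible results: the matching
 * entry when the chain walk found one, NULL when nothing matched and the
 * chain is still short, or the TCP_METRICS_RECLAIM_PTR sentinel when nothing
 * matched but the bucket is already TCP_METRICS_RECLAIM_DEPTH deep.  The
 * sentinel tells tcp_get_metrics() to reuse the oldest entry in the bucket
 * (the reclaim path of tcpm_new()) instead of growing the chain.
 */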
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
                                                   struct net *net, unsigned int hash)
{
        struct tcp_metrics_block *tm;
        int depth = 0;

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, addr))
                        break;
                depth++;
        }
        return tcp_get_encode(tm, depth);
}
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                                                       struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = req->rsk_ops->family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_rsk(req)->rmt_addr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
                hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
                break;
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        tcpm_check_stamp(tm, dst);
        return tm;
}
static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
        struct inet6_timewait_sock *tw6;
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = tw->tw_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = tw->tw_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                tw6 = inet6_twsk((struct sock *)tw);
                *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
                hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
                break;
        default:
                return NULL;
        }

        net = twsk_net(tw);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        return tm;
}
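/* tcp_get_metrics() below is the common lookup used by the rest of this
 * file: it hashes the peer address with hash_32() over
 * tcp_metrics_hash_log bits, walks the bucket via __tcp_get_metrics(), and,
 * when "create" is true, allocates (or, on a TCP_METRICS_RECLAIM_PTR
 * result, reclaims) an entry through tcpm_new().  Existing entries older
 * than TCP_METRICS_TIMEOUT are refreshed by tcpm_check_stamp().
 */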
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;
        bool reclaim;

        addr.family = sk->sk_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_sk(sk)->inet_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
                hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
                break;
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        tm = __tcp_get_metrics(&addr, net, hash);
        reclaim = false;
        if (tm == TCP_METRICS_RECLAIM_PTR) {
                reclaim = true;
                tm = NULL;
        }
        if (!tm && create)
                tm = tcpm_new(dst, &addr, hash, reclaim);
        else
                tcpm_check_stamp(tm, dst);

        return tm;
}
/* Save metrics learned by this TCP session.  This function is called only
 * when TCP finishes successfully, i.e. when it enters TIME-WAIT or goes
 * from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
        int m;

        if (sysctl_tcp_nometrics_save || !dst)
                return;

        if (dst->flags & DST_HOST)
                dst_confirm(dst);

        rcu_read_lock();
        if (icsk->icsk_backoff || !tp->srtt) {
                /* This session failed to estimate rtt.  Why?  Probably,
                 * no packets returned in time.  Reset our results.
                 */
                tm = tcp_get_metrics(sk, dst, false);
                if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
                        tcp_metric_set(tm, TCP_METRIC_RTT, 0);
                goto out_unlock;
        } else
                tm = tcp_get_metrics(sk, dst, true);

        if (!tm)
                goto out_unlock;

        rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
        m = rtt - tp->srtt;

        /* If the newly calculated rtt is larger than the stored one, store
         * the new one.  Otherwise, use EWMA.  Remember, rtt overestimation
         * is always better than underestimation.  (A standalone sketch of
         * this update follows the function.)
         */
        if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
                if (m <= 0)
                        rtt = tp->srtt;
                else
                        rtt -= (m >> 3);
                tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
        }

        if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
                unsigned long var;

                if (m < 0)
                        m = -m;

                /* Scale deviation to rttvar fixed point */
                m >>= 1;
                if (m < tp->mdev)
                        m = tp->mdev;

                var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
                if (m >= var)
                        var = m;
                else
                        var -= (var - m) >> 2;

                tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
        }

        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tp->snd_cwnd >> 1) > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_cwnd >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        if (tp->snd_cwnd > val)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
        } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
                }
        } else {
                /* Else slow start did not finish, cwnd is non-sensical and
                 * ssthresh may also be invalid.
                 */
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_ssthresh);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
        }
        tm->tcpm_stamp = jiffies;
out_unlock:
        rcu_read_unlock();
}
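/* Illustrative sketch only, not part of this file's API: the cached-RTT
 * update performed above by tcp_update_metrics(), pulled out into a
 * hypothetical helper.  Both arguments are assumed to be in jiffies.  A
 * sample at or above the cached value replaces it outright; a smaller
 * sample only drags the cache down by 1/8 of the gap, so the cache errs
 * toward overestimating the RTT.
 */
static inline unsigned long tcp_metrics_rtt_ewma_example(unsigned long cached,
                                                         unsigned long sample)
{
        long m = (long)cached - (long)sample;

        if (m <= 0)                     /* sample >= cached: take it as-is */
                return sample;
        return cached - (m >> 3);       /* else decay 1/8 of the way down */
}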
/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        u32 val;

        if (dst == NULL)
                goto reset;

        dst_confirm(dst);

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (!tm) {
                rcu_read_unlock();
                goto reset;
        }

        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
        } else {
                /* ssthresh may have been reduced unnecessarily during the
                 * 3WHS.  Restore it back to its initial default.
                 */
                tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        }
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val) {
                tcp_disable_fack(tp);
                tcp_disable_early_retrans(tp);
                tp->reordering = val;
        }

        val = tcp_metric_get(tm, TCP_METRIC_RTT);
        if (val == 0 || tp->srtt == 0) {
                rcu_read_unlock();
                goto reset;
        }
        /* Initial rtt is determined from SYN,SYN-ACK.  The segment is small
         * and rtt may appear much less than the real one.  Use per-dst
         * memory to make it more realistic.
         *
         * A bit of theory.  RTT is the time passed after a "normal" sized
         * packet is sent until it is ACKed.  In normal circumstances
         * sending small packets forces the peer to delay ACKs and the
         * calculation is correct too.  The algorithm is adaptive and,
         * provided we follow the specs, it never underestimates RTT.  BUT!
         * If the peer tries clever tricks such as "quick acks" for long
         * enough to decrease the RTT to a low value, and then abruptly
         * stops and starts delaying ACKs, expect trouble.
         */
        val = msecs_to_jiffies(val);
        if (val > tp->srtt) {
                tp->srtt = val;
                tp->rtt_seq = tp->snd_nxt;
        }
        val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
        if (val > tp->mdev) {
                tp->mdev = val;
                tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
        }
        rcu_read_unlock();

        tcp_set_rto(sk);
reset:
        if (tp->srtt == 0) {
                /* RFC6298: 5.7 We've failed to get a valid RTT sample from
                 * the 3WHS.  This is most likely due to retransmission,
                 * including a spurious one.  Reset the RTO back to 3secs
                 * from the more aggressive 1sec to avoid more spurious
                 * retransmission.
                 */
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
        /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
         * retransmitted.  In light of the RFC6298 more aggressive 1sec
         * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
         * retransmission has occurred.
         */
        if (tp->total_retrans > 1)
                tp->snd_cwnd = 1;
        else
                tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
}
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
        struct tcp_metrics_block *tm;
        bool ret;

        if (!dst)
                return false;

        rcu_read_lock();
        tm = __tcp_get_metrics_req(req, dst);
        if (paws_check) {
                if (tm &&
                    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
                    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
                        ret = false;
                else
                        ret = true;
        } else {
                if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
                        ret = true;
                else
                        ret = false;
        }
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_sock *tp = tcp_sk(sk);

                if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
                        tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
                        tp->rx_opt.ts_recent = tm->tcpm_ts;
                }
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
/* VJ's idea.  Save the last timestamp seen from this destination and hold
 * it at least for the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        bool ret = false;

        if (dst) {
                struct tcp_metrics_block *tm;

                rcu_read_lock();
                tm = tcp_get_metrics(sk, dst, true);
                if (tm) {
                        struct tcp_sock *tp = tcp_sk(sk);

                        if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
                            ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                             tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
                                tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
                                tm->tcpm_ts = tp->rx_opt.ts_recent;
                        }
                        ret = true;
                }
                rcu_read_unlock();
        }
        return ret;
}
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
        struct tcp_metrics_block *tm;
        bool ret = false;

        rcu_read_lock();
        tm = __tcp_get_metrics_tw(tw);
        if (tm) {
                const struct tcp_timewait_sock *tcptw;
                struct sock *sk = (struct sock *) tw;

                tcptw = tcp_twsk(sk);
                if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
                    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
                        tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
                        tm->tcpm_ts = tcptw->tw_ts_recent;
                }
                ret = true;
        }
        rcu_read_unlock();

        return ret;
}
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie,
                            int *syn_loss, unsigned long *last_syn_loss)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        if (tfom->mss)
                                *mss = tfom->mss;
                        *cookie = tfom->cookie;
                        *syn_loss = tfom->syn_loss;
                        *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
}
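/* fastopen_seqlock makes the multi-word Fast Open state appear atomic to
 * readers: tcp_fastopen_cache_get() above rereads under read_seqbegin()/
 * read_seqretry() until no writer raced with it, while
 * tcp_fastopen_cache_set() below updates the fields under
 * write_seqlock_bh().
 */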
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

                write_seqlock_bh(&fastopen_seqlock);
                tfom->mss = mss;
                if (cookie->len > 0)
                        tfom->cookie = *cookie;
                if (syn_lost) {
                        ++tfom->syn_loss;
                        tfom->last_syn_loss = jiffies;
                } else
                        tfom->syn_loss = 0;
                write_sequnlock_bh(&fastopen_seqlock);
        }
        rcu_read_unlock();
}
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
        int ret;

        if (!str)
                return 0;

        ret = kstrtouint(str, 0, &tcpmhash_entries);
        if (ret)
                return 0;

        return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
static int __net_init tcp_net_metrics_init(struct net *net)
{
        size_t size;
        unsigned int slots;

        slots = tcpmhash_entries;
        if (!slots) {
                if (totalram_pages >= 128 * 1024)
                        slots = 16 * 1024;
                else
                        slots = 8 * 1024;
        }

        net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
        size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

        net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
        if (!net->ipv4.tcp_metrics_hash)
                return -ENOMEM;

        return 0;
}
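/* Sizing example (illustrative, assuming 4 KiB pages and 8-byte pointers):
 * with tcpmhash_entries unset on a machine with at least 128 * 1024 pages
 * (512 MiB), slots = 16 * 1024, so tcp_metrics_hash_log = 14 and the bucket
 * array above is 8 << 14 = 128 KiB of tcpm_hash_bucket pointers.
 */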
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
        unsigned int i;

        for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
                struct tcp_metrics_block *tm, *next;

                tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
                while (tm) {
                        next = rcu_dereference_protected(tm->tcpm_next, 1);
                        kfree(tm);
                        tm = next;
                }
        }
        kfree(net->ipv4.tcp_metrics_hash);
}
static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
        .init   =       tcp_net_metrics_init,
        .exit   =       tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
        register_pernet_subsys(&tcp_net_metrics_ops);
}