#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
 * Kernel only stores RTT and RTTVAR in usec resolution
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};
static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;
	return ipv6_addr_equal(&a->addr.in6, &b->addr.in6);
}
struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);
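
/* (Re)initialize a metrics block from the route: copy the locked-metric
 * bits and the current RTAX_* values out of the dst, convert RTT/RTTVAR
 * from msec to usec, refresh the stamp and optionally clear the cached
 * Fast Open state.
 */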
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}
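
/* Entries older than an hour are considered stale and are re-seeded from
 * the route metrics on the next lookup.
 */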
#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))
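
/* Insert a fresh metrics block for saddr/daddr into the hash chain or, when
 * the chain has grown past TCP_METRICS_RECLAIM_DEPTH, recycle the oldest
 * entry in place.  Runs under tcp_metrics_lock.
 */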
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}
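
/* Encode the result of a chain walk: a hit returns the entry, a miss on a
 * chain longer than TCP_METRICS_RECLAIM_DEPTH returns TCP_METRICS_RECLAIM_PTR
 * so the caller knows it should recycle the oldest entry, and a miss on a
 * short chain returns NULL so a new entry can simply be linked in.
 */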
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		saddr.addr.a4 = inet_rsk(req)->ir_loc_addr;
		daddr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
		hash = (__force unsigned int) daddr.addr.a4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		saddr.addr.in6 = inet_rsk(req)->ir_v6_loc_addr;
		daddr.addr.in6 = inet_rsk(req)->ir_v6_rmt_addr;
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}
static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (tw->tw_family == AF_INET) {
		saddr.family = AF_INET;
		saddr.addr.a4 = tw->tw_rcv_saddr;
		daddr.family = AF_INET;
		daddr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) daddr.addr.a4;
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (tw->tw_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
			saddr.family = AF_INET;
			saddr.addr.a4 = tw->tw_rcv_saddr;
			daddr.family = AF_INET;
			daddr.addr.a4 = tw->tw_daddr;
			hash = (__force unsigned int) daddr.addr.a4;
		} else {
			saddr.family = AF_INET6;
			saddr.addr.in6 = tw->tw_v6_rcv_saddr;
			daddr.family = AF_INET6;
			daddr.addr.in6 = tw->tw_v6_daddr;
			hash = ipv6_addr_hash(&tw->tw_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = twsk_net(tw);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	return tm;
}
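
/* Main lookup used on full sockets: hash on the destination address (using
 * the IPv4 form for v4-mapped IPv6 peers), then either create a new entry
 * when @create is set and none exists, or refresh a stale existing one.
 */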
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		saddr.family = AF_INET;
		saddr.addr.a4 = inet_sk(sk)->inet_saddr;
		daddr.family = AF_INET;
		daddr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) daddr.addr.a4;
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			saddr.family = AF_INET;
			saddr.addr.a4 = inet_sk(sk)->inet_saddr;
			daddr.family = AF_INET;
			daddr.addr.a4 = inet_sk(sk)->inet_daddr;
			hash = (__force unsigned int) daddr.addr.a4;
		} else {
			saddr.family = AF_INET6;
			saddr.addr.in6 = sk->sk_v6_rcv_saddr;
			daddr.family = AF_INET6;
			daddr.addr.in6 = sk->sk_v6_daddr;
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}
/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If the newly calculated rtt is larger than the stored one, store the
	 * new one. Otherwise, use EWMA. Remember, rtt overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is non-sense,
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}
/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	if (!dst)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is the time passed after a "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets forces the peer to delay ACKs and the calculation is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimates RTT. BUT! If the peer tries some clever
	 * trick along the lines of "quick acks" for long enough to decrease RTT
	 * to a low value, and then abruptly stops and starts to delay
	 * ACKs, wait for trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
			bool paws_check, bool timestamps)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW ||
		     !timestamps))
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
/* VJ's idea. Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}

	return ret;
}
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}
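
/* The Fast Open fields of a metrics block are read and updated outside of
 * tcp_metrics_lock; a seqlock lets readers retry instead of blocking on the
 * (rare) writers.
 */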
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}
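
/* Generic netlink interface (family TCP_METRICS_GENL_NAME, "tcp_metrics"):
 * GET looks up or dumps cached entries, DEL removes a single entry or
 * flushes the whole cache.  This is the interface "ip tcp_metrics" uses.
 */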
static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};
static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};
/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    tm->tcpm_daddr.addr.a4) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    tm->tcpm_saddr.addr.a4) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     &tm->tcpm_daddr.addr.in6) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     &tm->tcpm_saddr.addr.in6) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				 tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				   jiffies - tfom->last_syn_loss) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		addr->family = AF_INET;
		addr->addr.a4 = nla_get_in_addr(a);
		if (hash)
			*hash = (__force unsigned int) addr->addr.a4;
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		addr->family = AF_INET6;
		addr->addr.in6 = nla_get_in6_addr(a);
		if (hash)
			*hash = ipv6_addr_hash(&addr->addr.in6);
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}
static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}
static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}
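
/* Remove every entry that belongs to @net, walking the table bucket by
 * bucket under tcp_metrics_lock and freeing the blocks via RCU.
 */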
static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			if (net_eq(tm_net(tm), net)) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}
static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
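
/* The hash table is global and sized once for init_net: either from the
 * "tcpmhash_entries=" boot parameter or from a RAM-based default (16K slots
 * when totalram_pages >= 128K pages, i.e. 512MB with 4KB pages, else 8K),
 * rounded up to a power of two.
 */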
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!tcp_metrics_hash)
		tcp_metrics_hash = vzalloc(size);

	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	tcp_metrics_flush_all(net);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init = tcp_net_metrics_init,
	.exit = tcp_net_metrics_exit,
};
void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}