/* net/ipv4/tcp_metrics.c - Linux 3.11-rc3 */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;
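/* Note: this knob is exposed as /proc/sys/net/ipv4/tcp_no_metrics_save
 * (registered in sysctl_net_ipv4.c); when non-zero, tcp_update_metrics()
 * below returns early and nothing is cached when a connection closes.
 */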
struct tcp_fastopen_metrics {
        u16     mss;
        u16     syn_loss:10;            /* Recurring Fast Open SYN losses */
        unsigned long   last_syn_loss;  /* Last Fast Open SYN loss */
        struct  tcp_fastopen_cookie     cookie;
};

struct tcp_metrics_block {
        struct tcp_metrics_block __rcu  *tcpm_next;
        struct inetpeer_addr            tcpm_addr;
        unsigned long                   tcpm_stamp;
        u32                             tcpm_ts;
        u32                             tcpm_ts_stamp;
        u32                             tcpm_lock;
        u32                             tcpm_vals[TCP_METRIC_MAX + 1];
        struct tcp_fastopen_metrics     tcpm_fastopen;

        struct rcu_head                 rcu_head;
};
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
{
        return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
{
        return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
                                  enum tcp_metric_index idx)
{
        return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
{
        tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
                                 enum tcp_metric_index idx,
                                 u32 val)
{
        tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}
static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
{
        const struct in6_addr *a6, *b6;

        if (a->family != b->family)
                return false;
        if (a->family == AF_INET)
                return a->addr.a4 == b->addr.a4;

        a6 = (const struct in6_addr *) &a->addr.a6[0];
        b6 = (const struct in6_addr *) &b->addr.a6[0];

        return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
        struct tcp_metrics_block __rcu *chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
                          bool fastopen_clear)
{
        u32 val;

        tm->tcpm_stamp = jiffies;

        val = 0;
        if (dst_metric_locked(dst, RTAX_RTT))
                val |= 1 << TCP_METRIC_RTT;
        if (dst_metric_locked(dst, RTAX_RTTVAR))
                val |= 1 << TCP_METRIC_RTTVAR;
        if (dst_metric_locked(dst, RTAX_SSTHRESH))
                val |= 1 << TCP_METRIC_SSTHRESH;
        if (dst_metric_locked(dst, RTAX_CWND))
                val |= 1 << TCP_METRIC_CWND;
        if (dst_metric_locked(dst, RTAX_REORDERING))
                val |= 1 << TCP_METRIC_REORDERING;
        tm->tcpm_lock = val;

        tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
        tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
        tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
        tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
        tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
        tm->tcpm_ts = 0;
        tm->tcpm_ts_stamp = 0;
        if (fastopen_clear) {
                tm->tcpm_fastopen.mss = 0;
                tm->tcpm_fastopen.syn_loss = 0;
                tm->tcpm_fastopen.cookie.len = 0;
        }
}
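/* The tcpm_lock bits built above mirror per-route "locked" metrics, i.e.
 * values an administrator pinned on the route; locked metrics are read but
 * never overwritten by tcp_update_metrics().  For example (iproute2 syntax,
 * given purely for illustration):
 *
 *     ip route change 10.0.0.0/24 via 192.168.1.1 rtt lock 200ms
 *
 * would cause TCP_METRIC_RTT to be treated as read-only here.
 */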
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *addr,
                                          unsigned int hash,
                                          bool reclaim)
{
        struct tcp_metrics_block *tm;
        struct net *net;

        spin_lock_bh(&tcp_metrics_lock);
        net = dev_net(dst->dev);
        if (unlikely(reclaim)) {
                struct tcp_metrics_block *oldest;

                oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
                for (tm = rcu_dereference(oldest->tcpm_next); tm;
                     tm = rcu_dereference(tm->tcpm_next)) {
                        if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
                                oldest = tm;
                }
                tm = oldest;
        } else {
                tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
                if (!tm)
                        goto out_unlock;
        }
        tm->tcpm_addr = *addr;

        tcpm_suck_dst(tm, dst, true);

        if (likely(!reclaim)) {
                tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
                rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
        }

out_unlock:
        spin_unlock_bh(&tcp_metrics_lock);
        return tm;
}
#define TCP_METRICS_TIMEOUT             (60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
                tcpm_suck_dst(tm, dst, false);
}
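/* Example: with HZ == 1000, TCP_METRICS_TIMEOUT is 3,600,000 jiffies, so an
 * entry not written to for an hour is considered stale and is re-seeded from
 * the current route metrics.  Fast Open state is preserved, since
 * tcpm_suck_dst() is called with fastopen_clear == false here.
 */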
#define TCP_METRICS_RECLAIM_DEPTH       5
#define TCP_METRICS_RECLAIM_PTR         (struct tcp_metrics_block *) 0x1UL

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
        if (tm)
                return tm;
        if (depth > TCP_METRICS_RECLAIM_DEPTH)
                return TCP_METRICS_RECLAIM_PTR;
        return NULL;
}
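/* A worked example of this encoding: a lookup that walks six chain entries
 * without a match returns TCP_METRICS_RECLAIM_PTR (the non-NULL sentinel
 * 0x1), telling tcp_get_metrics() to recycle the oldest entry in the bucket
 * via tcpm_new(..., reclaim = true) instead of growing the chain further.
 * A shorter miss returns NULL and a fresh block is simply allocated.
 */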
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
                                                   struct net *net, unsigned int hash)
{
        struct tcp_metrics_block *tm;
        int depth = 0;

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, addr))
                        break;
                depth++;
        }
        return tcp_get_encode(tm, depth);
}
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                                                       struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = req->rsk_ops->family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_rsk(req)->rmt_addr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
                hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
                break;
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        tcpm_check_stamp(tm, dst);
        return tm;
}
static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
        struct inet6_timewait_sock *tw6;
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = tw->tw_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = tw->tw_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                tw6 = inet6_twsk((struct sock *)tw);
                *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
                hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
                break;
        default:
                return NULL;
        }

        net = twsk_net(tw);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        return tm;
}
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;
        bool reclaim;

        addr.family = sk->sk_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_sk(sk)->inet_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
                hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
                break;
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        tm = __tcp_get_metrics(&addr, net, hash);
        reclaim = false;
        if (tm == TCP_METRICS_RECLAIM_PTR) {
                reclaim = true;
                tm = NULL;
        }
        if (!tm && create)
                tm = tcpm_new(dst, &addr, hash, reclaim);
        else
                tcpm_check_stamp(tm, dst);

        return tm;
}
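/* Lookup summary: callers run under rcu_read_lock().  A hit returns the
 * existing block (refreshed if stale); a miss with create == true allocates
 * a new block, or recycles the oldest one when the bucket chain is already
 * deeper than TCP_METRICS_RECLAIM_DEPTH.
 */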
/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
        int m;

        if (sysctl_tcp_nometrics_save || !dst)
                return;

        if (dst->flags & DST_HOST)
                dst_confirm(dst);

        rcu_read_lock();
        if (icsk->icsk_backoff || !tp->srtt) {
                /* This session failed to estimate rtt. Why?
                 * Probably, no packets returned in time.  Reset our
                 * results.
                 */
                tm = tcp_get_metrics(sk, dst, false);
                if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
                        tcp_metric_set(tm, TCP_METRIC_RTT, 0);
                goto out_unlock;
        } else
                tm = tcp_get_metrics(sk, dst, true);

        if (!tm)
                goto out_unlock;
        rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
        m = rtt - tp->srtt;

        /* If the newly calculated rtt is larger than the stored one,
         * store the new one.  Otherwise, use EWMA.  Remember, rtt
         * overestimation is always better than underestimation.
         */
        if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
                if (m <= 0)
                        rtt = tp->srtt;
                else
                        rtt -= (m >> 3);
                tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
        }
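        /* Worked example (values in jiffies, same <<3 fixed point as
         * tp->srtt): cached rtt = 80 and measured srtt = 40 give m = 40,
         * so the cache decays to 80 - 40/8 = 75, i.e. the EWMA
         * new = 7/8 * cached + 1/8 * measured.  A measurement at or above
         * the cached value (m <= 0) replaces the cache outright.
         */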
        if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
                unsigned long var;

                if (m < 0)
                        m = -m;

                /* Scale deviation to rttvar fixed point */
                m >>= 1;
                if (m < tp->mdev)
                        m = tp->mdev;

                var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
                if (m >= var)
                        var = m;
                else
                        var -= (var - m) >> 2;

                tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
        }
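        /* The variance cache is the mirror image of the rtt rule above: an
         * increase is taken immediately (var = m), while a decrease is
         * smoothed as var = 3/4 * var + 1/4 * m; e.g. var = 40 and m = 20
         * yield 40 - 20/4 = 35.
         */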
        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tp->snd_cwnd >> 1) > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_cwnd >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        if (tp->snd_cwnd > val)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
        } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
                }
        } else {
                /* Else slow start did not finish, cwnd is nonsense and
                 * ssthresh may also be invalid.
                 */
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_ssthresh);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
        }
        tm->tcpm_stamp = jiffies;
out_unlock:
        rcu_read_unlock();
}
/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        u32 val;

        if (dst == NULL)
                goto reset;

        dst_confirm(dst);

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (!tm) {
                rcu_read_unlock();
                goto reset;
        }
        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
        } else {
                /* ssthresh may have been reduced unnecessarily during
                 * 3WHS.  Restore it back to its initial default.
                 */
                tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        }
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val) {
                tcp_disable_fack(tp);
                tcp_disable_early_retrans(tp);
                tp->reordering = val;
        }
        val = tcp_metric_get(tm, TCP_METRIC_RTT);
        if (val == 0 || tp->srtt == 0) {
                rcu_read_unlock();
                goto reset;
        }
        /* The initial rtt is determined from the SYN,SYN-ACK exchange.
         * That segment is small and the rtt may appear much less than
         * the real one.  Use per-dst memory to make it more realistic.
         *
         * A bit of theory.  RTT is the time that passes after a "normal"
         * sized packet is sent until it is ACKed.  In normal circumstances
         * sending small packets forces the peer to delay ACKs, so the
         * calculation stays correct there too.  The algorithm is adaptive
         * and, provided we follow the specs, it NEVER underestimates RTT.
         * BUT!  If the peer plays clever tricks, sending "quick acks" long
         * enough to drive the RTT estimate down to a low value and then
         * abruptly switching to delayed ACKs, expect trouble.
         */
        val = msecs_to_jiffies(val);
        if (val > tp->srtt) {
                tp->srtt = val;
                tp->rtt_seq = tp->snd_nxt;
        }
        val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
        if (val > tp->mdev) {
                tp->mdev = val;
                tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
        }
        rcu_read_unlock();

        tcp_set_rto(sk);
reset:
        if (tp->srtt == 0) {
                /* RFC6298 5.7: We've failed to get a valid RTT sample from
                 * the 3WHS.  This is most likely due to retransmission,
                 * including a spurious one.  Reset the RTO back to 3 secs
                 * from the more aggressive 1 sec to avoid more spurious
                 * retransmission.
                 */
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
        /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
         * retransmitted.  In light of RFC6298's more aggressive 1 sec
         * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
         * retransmission has occurred.
         */
        if (tp->total_retrans > 1)
                tp->snd_cwnd = 1;
        else
                tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
}
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
        struct tcp_metrics_block *tm;
        bool ret;

        if (!dst)
                return false;

        rcu_read_lock();
        tm = __tcp_get_metrics_req(req, dst);
        if (paws_check) {
                if (tm &&
                    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
                    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
                        ret = false;
                else
                        ret = true;
        } else {
                if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
                        ret = true;
                else
                        ret = false;
        }
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_sock *tp = tcp_sk(sk);

                if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
                        tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
                        tp->rx_opt.ts_recent = tm->tcpm_ts;
                }
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
/* VJ's idea.  Save the last timestamp seen from this destination and hold
 * it for at least the normal time-wait interval, so it can be used for
 * duplicate segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        bool ret = false;

        if (dst) {
                struct tcp_metrics_block *tm;

                rcu_read_lock();
                tm = tcp_get_metrics(sk, dst, true);
                if (tm) {
                        struct tcp_sock *tp = tcp_sk(sk);

                        if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
                            ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                             tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
                                tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
                                tm->tcpm_ts = tp->rx_opt.ts_recent;
                        }
                        ret = true;
                }
                rcu_read_unlock();
        }

        return ret;
}
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
        struct tcp_metrics_block *tm;
        bool ret = false;

        rcu_read_lock();
        tm = __tcp_get_metrics_tw(tw);
        if (tm) {
                const struct tcp_timewait_sock *tcptw;
                struct sock *sk = (struct sock *) tw;

                tcptw = tcp_twsk(sk);
                if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
                    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
                        tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
                        tm->tcpm_ts = tcptw->tw_ts_recent;
                }
                ret = true;
        }
        rcu_read_unlock();

        return ret;
}
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie,
                            int *syn_loss, unsigned long *last_syn_loss)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        if (tfom->mss)
                                *mss = tfom->mss;
                        *cookie = tfom->cookie;
                        *syn_loss = tfom->syn_loss;
                        *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
}
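/* Reader pattern above: the read_seqbegin()/read_seqretry() loop repeats
 * until the sequence count is stable, so a concurrent
 * tcp_fastopen_cache_set() (which bumps the count under write_seqlock_bh())
 * simply causes the reader to retry; no reader-side lock is taken.
 */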
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

                write_seqlock_bh(&fastopen_seqlock);
                tfom->mss = mss;
                if (cookie->len > 0)
                        tfom->cookie = *cookie;
                if (syn_lost) {
                        ++tfom->syn_loss;
                        tfom->last_syn_loss = jiffies;
                } else
                        tfom->syn_loss = 0;
                write_sequnlock_bh(&fastopen_seqlock);
        }
        rcu_read_unlock();
}
static struct genl_family tcp_metrics_nl_family = {
        .id             = GENL_ID_GENERATE,
        .hdrsize        = 0,
        .name           = TCP_METRICS_GENL_NAME,
        .version        = TCP_METRICS_GENL_VERSION,
        .maxattr        = TCP_METRICS_ATTR_MAX,
        .netnsok        = true,
};
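/* TCP_METRICS_GENL_NAME is "tcp_metrics"; userspace talks to this family
 * over generic netlink.  For instance, iproute2's "ip tcp_metrics show"
 * issues TCP_METRICS_CMD_GET dumps and "ip tcp_metrics delete <addr>"
 * issues TCP_METRICS_CMD_DEL (client commands named here for illustration).
 */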
static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
        [TCP_METRICS_ATTR_ADDR_IPV4]    = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_ADDR_IPV6]    = { .type = NLA_BINARY,
                                            .len = sizeof(struct in6_addr), },
        /* The following attributes are not received for GET/DEL;
         * we keep them for reference.
         */
#if 0
        [TCP_METRICS_ATTR_AGE]          = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_TW_TSVAL]     = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_TW_TS_STAMP]  = { .type = NLA_S32, },
        [TCP_METRICS_ATTR_VALS]         = { .type = NLA_NESTED, },
        [TCP_METRICS_ATTR_FOPEN_MSS]    = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROPS]      = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]    = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY,
                                            .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};
/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
                                 struct tcp_metrics_block *tm)
{
        struct nlattr *nest;
        int i;

        switch (tm->tcpm_addr.family) {
        case AF_INET:
                if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
                                 tm->tcpm_addr.addr.a4) < 0)
                        goto nla_put_failure;
                break;
        case AF_INET6:
                if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
                            tm->tcpm_addr.addr.a6) < 0)
                        goto nla_put_failure;
                break;
        default:
                return -EAFNOSUPPORT;
        }

        if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
                          jiffies - tm->tcpm_stamp) < 0)
                goto nla_put_failure;
        if (tm->tcpm_ts_stamp) {
                if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
                                (s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
                        goto nla_put_failure;
                if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
                                tm->tcpm_ts) < 0)
                        goto nla_put_failure;
        }
        {
                int n = 0;

                nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
                if (!nest)
                        goto nla_put_failure;
                for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
                        if (!tm->tcpm_vals[i])
                                continue;
                        if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
                                goto nla_put_failure;
                        n++;
                }
                if (n)
                        nla_nest_end(msg, nest);
                else
                        nla_nest_cancel(msg, nest);
        }

        {
                struct tcp_fastopen_metrics tfom_copy[1], *tfom;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        tfom_copy[0] = tm->tcpm_fastopen;
                } while (read_seqretry(&fastopen_seqlock, seq));

                tfom = tfom_copy;
                if (tfom->mss &&
                    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
                                tfom->mss) < 0)
                        goto nla_put_failure;
                if (tfom->syn_loss &&
                    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
                                 tfom->syn_loss) < 0 ||
                     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
                                   jiffies - tfom->last_syn_loss) < 0))
                        goto nla_put_failure;
                if (tfom->cookie.len > 0 &&
                    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
                            tfom->cookie.len, tfom->cookie.val) < 0)
                        goto nla_put_failure;
        }
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
static int tcp_metrics_dump_info(struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 struct tcp_metrics_block *tm)
{
        void *hdr;

        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &tcp_metrics_nl_family, NLM_F_MULTI,
                          TCP_METRICS_CMD_GET);
        if (!hdr)
                return -EMSGSIZE;

        if (tcp_metrics_fill_info(skb, tm) < 0)
                goto nla_put_failure;

        return genlmsg_end(skb, hdr);

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}
static int tcp_metrics_nl_dump(struct sk_buff *skb,
                               struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
        unsigned int row, s_row = cb->args[0];
        int s_col = cb->args[1], col = s_col;

        for (row = s_row; row < max_rows; row++, s_col = 0) {
                struct tcp_metrics_block *tm;
                struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

                rcu_read_lock();
                for (col = 0, tm = rcu_dereference(hb->chain); tm;
                     tm = rcu_dereference(tm->tcpm_next), col++) {
                        if (col < s_col)
                                continue;
                        if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
                                rcu_read_unlock();
                                goto done;
                        }
                }
                rcu_read_unlock();
        }

done:
        cb->args[0] = row;
        cb->args[1] = col;
        return skb->len;
}
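/* Dump resumption: when the skb fills up, the current (row, col) position
 * is saved in cb->args[] and control returns to netlink; the next
 * invocation of the dump callback restarts from that bucket and chain
 * offset, so a large table is streamed to userspace across several reads.
 */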
static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                         unsigned int *hash, int optional)
{
        struct nlattr *a;

        a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
        if (a) {
                addr->family = AF_INET;
                addr->addr.a4 = nla_get_be32(a);
                *hash = (__force unsigned int) addr->addr.a4;
                return 0;
        }
        a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
        if (a) {
                if (nla_len(a) != sizeof(struct in6_addr))
                        return -EINVAL;
                addr->family = AF_INET6;
                memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
                *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
                return 0;
        }
        return optional ? 1 : -EAFNOSUPPORT;
}
static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct sk_buff *msg;
        struct net *net = genl_info_net(info);
        void *reply;
        int ret;

        ret = parse_nl_addr(info, &addr, &hash, 0);
        if (ret < 0)
                return ret;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
                                  info->genlhdr->cmd);
        if (!reply)
                goto nla_put_failure;

        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
        ret = -ESRCH;
        rcu_read_lock();
        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr)) {
                        ret = tcp_metrics_fill_info(msg, tm);
                        break;
                }
        }
        rcu_read_unlock();
        if (ret < 0)
                goto out_free;

        genlmsg_end(msg, reply);
        return genlmsg_reply(msg, info);

nla_put_failure:
        ret = -EMSGSIZE;

out_free:
        nlmsg_free(msg);
        return ret;
}
#define deref_locked_genl(p)    \
        rcu_dereference_protected(p, lockdep_genl_is_held() && \
                                     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)   rcu_dereference_protected(p, lockdep_genl_is_held())
static int tcp_metrics_flush_all(struct net *net)
{
        unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
        struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
        struct tcp_metrics_block *tm;
        unsigned int row;

        for (row = 0; row < max_rows; row++, hb++) {
                spin_lock_bh(&tcp_metrics_lock);
                tm = deref_locked_genl(hb->chain);
                if (tm)
                        hb->chain = NULL;
                spin_unlock_bh(&tcp_metrics_lock);
                while (tm) {
                        struct tcp_metrics_block *next;

                        next = deref_genl(tm->tcpm_next);
                        kfree_rcu(tm, rcu_head);
                        tm = next;
                }
        }
        return 0;
}
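/* Unlink-then-free pattern: each chain is detached under the spinlock and
 * then freed with kfree_rcu() outside it, so concurrent RCU readers that
 * already hold a pointer into the old chain can finish walking it before
 * the memory is reclaimed at the end of the grace period.
 */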
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct tcpm_hash_bucket *hb;
        struct tcp_metrics_block *tm;
        struct tcp_metrics_block __rcu **pp;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net = genl_info_net(info);
        int ret;

        ret = parse_nl_addr(info, &addr, &hash, 1);
        if (ret < 0)
                return ret;
        if (ret > 0)
                return tcp_metrics_flush_all(net);

        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
        hb = net->ipv4.tcp_metrics_hash + hash;
        pp = &hb->chain;
        spin_lock_bh(&tcp_metrics_lock);
        for (tm = deref_locked_genl(*pp); tm;
             pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
                if (addr_same(&tm->tcpm_addr, &addr)) {
                        *pp = tm->tcpm_next;
                        break;
                }
        }
        spin_unlock_bh(&tcp_metrics_lock);
        if (!tm)
                return -ESRCH;
        kfree_rcu(tm, rcu_head);
        return 0;
}
static struct genl_ops tcp_metrics_nl_ops[] = {
        {
                .cmd = TCP_METRICS_CMD_GET,
                .doit = tcp_metrics_nl_cmd_get,
                .dumpit = tcp_metrics_nl_dump,
                .policy = tcp_metrics_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = TCP_METRICS_CMD_DEL,
                .doit = tcp_metrics_nl_cmd_del,
                .policy = tcp_metrics_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
};
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
        ssize_t ret;

        if (!str)
                return 0;

        ret = kstrtouint(str, 0, &tcpmhash_entries);
        if (ret)
                return 0;

        return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
static int __net_init tcp_net_metrics_init(struct net *net)
{
        size_t size;
        unsigned int slots;

        slots = tcpmhash_entries;
        if (!slots) {
                if (totalram_pages >= 128 * 1024)
                        slots = 16 * 1024;
                else
                        slots = 8 * 1024;
        }

        net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
        size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

        net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!net->ipv4.tcp_metrics_hash)
                net->ipv4.tcp_metrics_hash = vzalloc(size);

        if (!net->ipv4.tcp_metrics_hash)
                return -ENOMEM;

        return 0;
}
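/* Sizing example: on a machine with at least 512 MB of RAM (128 * 1024
 * pages, assuming 4 KB pages), the default is 16384 slots, so
 * tcp_metrics_hash_log is 14 and the table occupies
 * 16384 * sizeof(struct tcpm_hash_bucket) bytes, i.e. 128 KB with 8-byte
 * pointers.  kzalloc() is tried first; vzalloc() is the fallback for when
 * that much physically contiguous memory is unavailable.
 */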
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
        unsigned int i;

        for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
                struct tcp_metrics_block *tm, *next;

                tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
                while (tm) {
                        next = rcu_dereference_protected(tm->tcpm_next, 1);
                        kfree(tm);
                        tm = next;
                }
        }
        if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
                vfree(net->ipv4.tcp_metrics_hash);
        else
                kfree(net->ipv4.tcp_metrics_hash);
}
static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
        .init = tcp_net_metrics_init,
        .exit = tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
        int ret;

        ret = register_pernet_subsys(&tcp_net_metrics_ops);
        if (ret < 0)
                goto cleanup;
        ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
                                            tcp_metrics_nl_ops,
                                            ARRAY_SIZE(tcp_metrics_nl_ops));
        if (ret < 0)
                goto cleanup_subsys;
        return;

cleanup_subsys:
        unregister_pernet_subsys(&tcp_net_metrics_ops);

cleanup:
        return;
}