net/ipv4/tcp_metrics.c
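/* Per-destination TCP metrics cache.
 *
 * Stores the RTT/RTTVAR, ssthresh, cwnd and reordering estimates learned
 * from finished TCP sessions, plus timewait timestamps (for PAWS) and
 * TCP Fast Open state, keyed by peer address in a per-netns RCU hash
 * table.  A generic netlink family exposes get/dump/del operations.
 */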
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>
int sysctl_tcp_nometrics_save __read_mostly;
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
						   struct net *net, unsigned int hash);
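/* Per-destination TCP Fast Open state: the MSS and cookie to use on the
 * next attempt, plus a count and timestamp of recurring Fast Open SYN
 * losses.
 */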
struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};
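/* One cache entry.  Entries hang off a tcpm_hash_bucket chain via
 * tcpm_next and are looked up by tcpm_addr.  tcpm_lock is a bitmask of
 * TCP_METRIC_* indices whose values are locked by the route and must not
 * be overwritten; tcpm_stamp is the last refresh time in jiffies.
 */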
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct inetpeer_addr		tcpm_addr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
				  enum tcp_metric_index idx)
{
	return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
				 enum tcp_metric_index idx,
				 u32 val)
{
	tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}
static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	const struct in6_addr *a6, *b6;

	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;

	a6 = (const struct in6_addr *) &a->addr.a6[0];
	b6 = (const struct in6_addr *) &b->addr.a6[0];

	return ipv6_addr_equal(a6, b6);
}
struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);
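/* Refresh a cache entry from the route: copy the raw RTAX_* metrics and
 * their "locked" bits out of the dst, reset the timewait timestamps,
 * and (when fastopen_clear is set) forget any Fast Open state.  Also
 * restarts the entry's staleness clock (tcpm_stamp).
 */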
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}
#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}
#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL
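/* Create (or reclaim) a cache entry for @addr under tcp_metrics_lock.
 * If the lookup walked past TCP_METRICS_RECLAIM_DEPTH entries, the
 * chain is considered full and the stalest entry in the bucket is
 * recycled in place instead of allocating a new block.
 */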
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *addr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(addr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_addr = *addr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}
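/* RCU lookup by peer address.  Returns the matching entry, NULL when
 * there is no match and the chain is still short, or
 * TCP_METRICS_RECLAIM_PTR when no match was found but the chain is
 * already at reclaim depth.
 */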
static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, addr))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = req->rsk_ops->family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_rsk(req)->rmt_addr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
		hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
		break;
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}
static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct inet6_timewait_sock *tw6;
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = tw->tw_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		tw6 = inet6_twsk((struct sock *)tw);
		*(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
		hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
		break;
	default:
		return NULL;
	}

	net = twsk_net(tw);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	return tm;
}
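/* Look up the cache entry for a connected socket's peer, optionally
 * creating one.  Called under rcu_read_lock(); with create == false a
 * missing entry simply returns NULL.
 */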
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = sk->sk_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
		hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
		break;
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&addr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &addr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}
/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);
	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;
	rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt;

	/* If newly calculated rtt larger than stored one, store new
	 * one. Otherwise, use EWMA. Remember, rtt overestimation is
	 * always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt;
		else
			rtt -= (m >> 3);
		tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev)
			m = tp->mdev;

		var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
	}
	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is nonsense,
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}
/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}
	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is time passed after "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets force peer to delay ACKs and calculation is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimates RTT. BUT! If peer tries to make some clever
	 * tricks sort of "quick acks" for time long enough to decrease RTT
	 * to low value, and then abruptly stops to do it and starts to delay
	 * ACKs, wait for troubles.
	 */
	if (crtt > tp->srtt) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt >>= 3;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
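/* Decide whether a SYN from this peer may bypass defenses: with
 * paws_check set, the cached timewait timestamp must not contradict the
 * timestamp echoed in the request (PAWS); otherwise the peer counts as
 * proven only if we already hold an RTT sample and a timestamp for it.
 */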
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
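/* Seed a new socket's ts_recent/ts_recent_stamp from the cache so that
 * PAWS protection carries over from an earlier connection to the same
 * destination, as long as the cached stamp is younger than TCP_PAWS_MSL.
 */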
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
/* VJ's idea. Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}

	return ret;
}
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}
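/* tcpm_fastopen is updated outside tcp_metrics_lock; this seqlock lets
 * readers snapshot mss/cookie/syn_loss consistently without blocking.
 */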
static DEFINE_SEQLOCK(fastopen_seqlock);
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		tfom->mss = mss;
		if (cookie->len > 0)
			tfom->cookie = *cookie;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}
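/* Generic netlink interface (TCP_METRICS_GENL_NAME family): userspace
 * can get or dump entries and delete one entry or flush the whole cache.
 */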
static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};
static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};
/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_addr.family) {
	case AF_INET:
		if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				 tm->tcpm_addr.addr.a4) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
			    tm->tcpm_addr.addr.a6) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}
	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
			if (!tm->tcpm_vals[i])
				continue;
			if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}
	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				 tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				   jiffies - tfom->last_syn_loss) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}
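/* Extract the peer address (v4 or v6) from a netlink request and
 * compute its raw hash.  Returns 1 when no address attribute is present
 * and @optional is set, which cmd_del uses to mean "flush all".
 */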
static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	struct nlattr *a;

	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
	if (a) {
		addr->family = AF_INET;
		addr->addr.a4 = nla_get_be32(a);
		*hash = (__force unsigned int) addr->addr.a4;
		return 0;
	}
	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
	if (a) {
		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		addr->family = AF_INET6;
		memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
		*hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}
static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 0);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}
#define deref_locked_genl(p)	\
	rcu_dereference_protected(p, lockdep_genl_is_held() && \
				     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)	rcu_dereference_protected(p, lockdep_genl_is_held())
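/* Unhook every chain under the spinlock, then free the blocks after a
 * grace period via kfree_rcu() so that concurrent RCU readers walking a
 * chain never see freed memory.
 */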
static int tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		spin_lock_bh(&tcp_metrics_lock);
		tm = deref_locked_genl(hb->chain);
		if (tm)
			hb->chain = NULL;
		spin_unlock_bh(&tcp_metrics_lock);
		while (tm) {
			struct tcp_metrics_block *next;

			next = deref_genl(tm->tcpm_next);
			kfree_rcu(tm, rcu_head);
			tm = next;
		}
	}
	return 0;
}
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return tcp_metrics_flush_all(net);

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	hb = net->ipv4.tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked_genl(*pp); tm;
	     pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			*pp = tm->tcpm_next;
			break;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!tm)
		return -ESRCH;
	kfree_rcu(tm, rcu_head);
	return 0;
}
static struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
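/* Per-netns setup: size the hash from the tcpmhash_entries= boot
 * parameter, or from available RAM (8K or 16K slots), rounded up to a
 * power of two; fall back from kzalloc() to vzalloc() for big tables.
 */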
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

	net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!net->ipv4.tcp_metrics_hash)
		net->ipv4.tcp_metrics_hash = vzalloc(size);

	if (!net->ipv4.tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	unsigned int i;

	for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
		struct tcp_metrics_block *tm, *next;

		tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
		while (tm) {
			next = rcu_dereference_protected(tm->tcpm_next, 1);
			kfree(tm);
			tm = next;
		}
	}
	if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
		vfree(net->ipv4.tcp_metrics_hash);
	else
		kfree(net->ipv4.tcp_metrics_hash);
}
static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init = tcp_net_metrics_init,
	.exit = tcp_net_metrics_exit,
};
void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		goto cleanup;
	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops,
					    ARRAY_SIZE(tcp_metrics_nl_ops));
	if (ret < 0)
		goto cleanup_subsys;
	return;

cleanup_subsys:
	unregister_pernet_subsys(&tcp_net_metrics_ops);

cleanup:
	return;
}