/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002	Andre Oppermann, Internet Business Solutions AG
 * Copyright (c) 2021 Gleb Smirnoff <glebius@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The tcp_hostcache moves the tcp-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, max MTU,
 * congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet forwarding.
 *
 * tcp_hostcache is designed for multiple concurrent access in SMP
 * environments and high contention.  It is a straight hash.  Each bucket
 * row is protected by its own lock for modification.  Readers are protected
 * by SMR.  This puts certain restrictions on writers, e.g. a writer shall
 * only insert a fully populated entry into a row.  A writer can't reuse the
 * least used entry if a hash row is full.  Value updates for an entry shall
 * be atomic.
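 *
 * As a minimal sketch of that last rule (new_rtt is a hypothetical
 * caller-side value, not a name used in this file), a writer refreshing a
 * field of a live entry must use an atomic store, because readers may be
 * traversing the same entry inside an SMR section at that moment:
 *
 *	atomic_store_32(&hc_entry->hc_rtt, new_rtt);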
 *
 * TCP stack(s) communication with tcp_hostcache is done via KBI functions
 * tcp_hc_*() and the hc_metrics_lite structure.
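 *
 * As an illustrative sketch (not verbatim code from any consumer of this
 * KBI), a TCP stack reads cached metrics while setting up a connection and
 * writes its final estimates back when the connection closes, roughly:
 *
 *	struct hc_metrics_lite metrics;
 *
 *	tcp_hc_get(&inp->inp_inc, &metrics);	   (zeroed if nothing cached)
 *	if (metrics.hc_mtu != 0)
 *		(bound the MSS by the cached path MTU)
 *	...
 *	tcp_hc_update(&inp->inp_inc, &metrics);	   (at connection teardown)
 *
 * inp and the MSS handling above are hypothetical caller-side details.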
 *
 * Since tcp_hostcache is only caching information, there are no fatal
 * consequences if we either can't allocate a new entry or have to drop
 * an existing entry, or return somewhat stale information.
 *
 * Many thanks to jlemon for the basic structure of tcp_syncache, which is
 * being used also for this code.
 */

#include <sys/cdefs.h>
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smr.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet6/ip6_var.h>

#include <vm/uma.h>

/* Hash bucket row */
struct hc_head {
	CK_SLIST_HEAD(hc_qhead, hc_metrics) hch_bucket;
	struct mtx	hch_mtx;	/* lock for this bucket row */
	u_int		hch_length;	/* number of entries in bucket row */
};

/* Hostcache entry */
struct hc_metrics {
	/* housekeeping */
	CK_SLIST_ENTRY(hc_metrics) hc_q;
	struct in_addr	ip4;		/* IP address */
	struct in6_addr	ip6;		/* IP6 address */
	uint32_t	ip6_zoneid;	/* IPv6 scope zone id */
	/* endpoint specific values for tcp */
	uint32_t	hc_mtu;		/* MTU for this path */
	uint32_t	hc_ssthresh;	/* outbound gateway buffer limit */
	uint32_t	hc_rtt;		/* estimated round trip time */
	uint32_t	hc_rttvar;	/* estimated rtt variance */
	uint32_t	hc_cwnd;	/* congestion window */
	uint32_t	hc_sendpipe;	/* outbound delay-bandwidth product */
	uint32_t	hc_recvpipe;	/* inbound delay-bandwidth product */
	/* TCP hostcache internal data */
	int		hc_expire;	/* lifetime for object */
#ifdef	TCP_HC_COUNTERS
	u_long		hc_hits;	/* number of hits */
	u_long		hc_updates;	/* number of updates */
#endif
};

struct tcp_hostcache {
	struct hc_head	*hashbase;
	uma_zone_t	zone;
	smr_t		smr;
	u_int		hashsize;
	u_int		hashmask;
	u_int		hashsalt;
	u_int		bucket_limit;
	u_int		cache_limit;
	u_int		cache_count;
	u_int		expire;		/* lifetime of entries, seconds */
	u_int		prune;		/* time between purge runs, seconds */
	u_int		purgeall;
};

/* Arbitrary values */
#define	TCP_HOSTCACHE_HASHSIZE		512
#define	TCP_HOSTCACHE_BUCKETLIMIT	30
#define	TCP_HOSTCACHE_EXPIRE		60*60	/* one hour */
#define	TCP_HOSTCACHE_PRUNE		5*60	/* every 5 minutes */
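
/*
 * With the defaults above, tcp_hc_init() below derives an overall cache
 * limit of TCP_HOSTCACHE_HASHSIZE * TCP_HOSTCACHE_BUCKETLIMIT =
 * 512 * 30 = 15360 entries, unless overridden by the
 * net.inet.tcp.hostcache.cachelimit loader tunable.
 */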

VNET_DEFINE_STATIC(struct tcp_hostcache, tcp_hostcache);
#define	V_tcp_hostcache		VNET(tcp_hostcache)

VNET_DEFINE_STATIC(struct callout, tcp_hc_callout);
#define	V_tcp_hc_callout	VNET(tcp_hc_callout)

static struct hc_metrics *tcp_hc_lookup(const struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_histo(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge_internal(int);
static void tcp_hc_purge(void *);

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Host cache");

VNET_DEFINE(int, tcp_use_hostcache) = 1;
#define	V_tcp_use_hostcache	VNET(tcp_use_hostcache)
SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, enable,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_use_hostcache), 0,
    "Enable the TCP hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.cache_limit), 0,
    "Overall entry limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, hashsize,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.hashsize), 0,
    "Size of TCP hostcache hashtable");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
    "Per-bucket hash limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count,
    CTLFLAG_VNET | CTLFLAG_RD, &VNET_NAME(tcp_hostcache.cache_count), 0,
    "Current number of entries in hostcache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_hostcache.expire), 0,
    "Expire time of TCP hostcache entries");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, prune,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_hostcache.prune), 0,
    "Time between purge runs");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, purge,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_hostcache.purgeall), 0,
    "Expire all entries on next purge run");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hc_list, "A",
    "List of all hostcache entries");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, histo,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hc_histo, "A",
    "Print a histogram of hostcache hashbucket utilization");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, purgenow,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_tcp_hc_purgenow, "I",
    "Immediately purge all entries");

static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");

/* Use jenkins_hash32(), as in other parts of the tcp stack */
#define	HOSTCACHE_HASH(inc)						\
	(((inc)->inc_flags & INC_ISIPV6) ?				\
	    (jenkins_hash32((inc)->inc6_faddr.s6_addr32, 4,		\
	    V_tcp_hostcache.hashsalt) & V_tcp_hostcache.hashmask)	\
	:								\
	    (jenkins_hash32(&(inc)->inc_faddr.s_addr, 1,		\
	    V_tcp_hostcache.hashsalt) & V_tcp_hostcache.hashmask))

#define	THC_LOCK(h)	mtx_lock(&(h)->hch_mtx)
#define	THC_UNLOCK(h)	mtx_unlock(&(h)->hch_mtx)

/*
 * Initialize hostcache structures.
 */
void
tcp_hc_init(void)
{
	u_int cache_limit;
	int i;

	atomic_store_int(&V_tcp_hostcache.cache_count, 0);
	V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
	V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
	V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
	V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;
	V_tcp_hostcache.hashsalt = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
	    &V_tcp_hostcache.hashsize);
	if (!powerof2(V_tcp_hostcache.hashsize)) {
		printf("WARNING: hostcache hash size is not a power of 2.\n");
		V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
	}
	V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
	    &V_tcp_hostcache.bucket_limit);

	cache_limit = V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
	V_tcp_hostcache.cache_limit = cache_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
	    &V_tcp_hostcache.cache_limit);
	if (V_tcp_hostcache.cache_limit > cache_limit)
		V_tcp_hostcache.cache_limit = cache_limit;

	/*
	 * Allocate the hash table.
	 */
	V_tcp_hostcache.hashbase = (struct hc_head *)
	    malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
	    M_HOSTCACHE, M_WAITOK | M_ZERO);

	/*
	 * Initialize the hash buckets.
	 */
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		CK_SLIST_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
		V_tcp_hostcache.hashbase[i].hch_length = 0;
		mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
		    NULL, MTX_DEF);
	}

	/*
	 * Allocate the hostcache entries.
	 */
	V_tcp_hostcache.zone =
	    uma_zcreate("hostcache", sizeof(struct hc_metrics),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_SMR);
	uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);
	V_tcp_hostcache.smr = uma_zone_get_smr(V_tcp_hostcache.zone);

	/*
	 * Set up periodic cache cleanup.
	 */
	callout_init(&V_tcp_hc_callout, 1);
	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
}

void
tcp_hc_destroy(void)
{
	int i;

	callout_drain(&V_tcp_hc_callout);

	/* Purge all hc entries. */
	tcp_hc_purge_internal(1);

	/* Free the uma zone and the allocated hash table. */
	uma_zdestroy(V_tcp_hostcache.zone);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++)
		mtx_destroy(&V_tcp_hostcache.hashbase[i].hch_mtx);
	free(V_tcp_hostcache.hashbase, M_HOSTCACHE);
}

/*
 * Internal function: compare cache entry to a connection.
 */
static bool
tcp_hc_cmp(struct hc_metrics *hc_entry, const struct in_conninfo *inc)
{

	if (inc->inc_flags & INC_ISIPV6) {
		/* XXX: check ip6_zoneid */
		if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
		    sizeof(inc->inc6_faddr)) == 0)
			return (true);
	} else {
		if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
		    sizeof(inc->inc_faddr)) == 0)
			return (true);
	}

	return (false);
}

/*
 * Internal function: look up an entry in the hostcache for read.
 * On success returns in SMR section; the caller must then smr_exit()
 * once done with the entry, as tcp_hc_get() and tcp_hc_getmtu() do.
 */
static struct hc_metrics *
tcp_hc_lookup(const struct in_conninfo *inc)
{
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	KASSERT(inc != NULL, ("%s: NULL in_conninfo", __func__));

	hc_head = &V_tcp_hostcache.hashbase[HOSTCACHE_HASH(inc)];

	/*
	 * Iterate through entries in the bucket row looking for a match.
	 */
	smr_enter(V_tcp_hostcache.smr);
	CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, hc_q)
		if (tcp_hc_cmp(hc_entry, inc))
			break;

	if (hc_entry != NULL) {
		if (atomic_load_int(&hc_entry->hc_expire) !=
		    V_tcp_hostcache.expire)
			atomic_store_int(&hc_entry->hc_expire,
			    V_tcp_hostcache.expire);
#ifdef	TCP_HC_COUNTERS
		hc_entry->hc_hits++;
#endif
	} else
		smr_exit(V_tcp_hostcache.smr);

	return (hc_entry);
}

/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  Fills in zeroes when no entry was found
 * or a value is not set.
 */
void
tcp_hc_get(const struct in_conninfo *inc,
    struct hc_metrics_lite *hc_metrics_lite)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object, zero out the metrics.
	 */
	if (hc_entry == NULL) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}

	hc_metrics_lite->hc_mtu = atomic_load_32(&hc_entry->hc_mtu);
	hc_metrics_lite->hc_ssthresh = atomic_load_32(&hc_entry->hc_ssthresh);
	hc_metrics_lite->hc_rtt = atomic_load_32(&hc_entry->hc_rtt);
	hc_metrics_lite->hc_rttvar = atomic_load_32(&hc_entry->hc_rttvar);
	hc_metrics_lite->hc_cwnd = atomic_load_32(&hc_entry->hc_cwnd);
	hc_metrics_lite->hc_sendpipe = atomic_load_32(&hc_entry->hc_sendpipe);
	hc_metrics_lite->hc_recvpipe = atomic_load_32(&hc_entry->hc_recvpipe);

	smr_exit(V_tcp_hostcache.smr);
}

/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns 0 if no entry is found or the value is not
 * set.
 */
uint32_t
tcp_hc_getmtu(const struct in_conninfo *inc)
{
	struct hc_metrics *hc_entry;
	uint32_t mtu;

	if (!V_tcp_use_hostcache)
		return (0);

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL)
		return (0);

	mtu = atomic_load_32(&hc_entry->hc_mtu);
	smr_exit(V_tcp_hostcache.smr);

	return (mtu);
}

/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(const struct in_conninfo *inc, uint32_t mtu)
{
	struct hc_metrics_lite hcml = { .hc_mtu = mtu };

	tcp_hc_update(inc, &hcml);
}
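
/*
 * Illustrative example (not code from this file): a path MTU discovery
 * handler that has just learned a smaller MTU for a destination could
 * record it for future connections with:
 *
 *	tcp_hc_updatemtu(&inp->inp_inc, mtu);
 *
 * where inp and mtu are hypothetical caller-side variables.
 */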

/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(const struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry, *hc_prev;
	uint32_t v;
	bool new;

	if (!V_tcp_use_hostcache)
		return;

	hc_head = &V_tcp_hostcache.hashbase[HOSTCACHE_HASH(inc)];
	hc_prev = NULL;

	THC_LOCK(hc_head);
	CK_SLIST_FOREACH(hc_entry, &hc_head->hch_bucket, hc_q) {
		if (tcp_hc_cmp(hc_entry, inc))
			break;
		if (CK_SLIST_NEXT(hc_entry, hc_q) != NULL)
			hc_prev = hc_entry;
	}

	if (hc_entry != NULL) {
		if (atomic_load_int(&hc_entry->hc_expire) !=
		    V_tcp_hostcache.expire)
			atomic_store_int(&hc_entry->hc_expire,
			    V_tcp_hostcache.expire);
#ifdef	TCP_HC_COUNTERS
		hc_entry->hc_updates++;
#endif
		new = false;
	} else {
		/*
		 * Try to allocate a new entry.  If the bucket limit is
		 * reached, delete the least-used element, located at the end
		 * of the CK_SLIST.  During lookup we saved the pointer to
		 * the second-to-last element, in case the list has at least
		 * two elements.  This allows us to delete the last element
		 * without an extra list traversal.
		 *
		 * Give up if the row is empty.
		 */
		if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
		    atomic_load_int(&V_tcp_hostcache.cache_count) >=
		    V_tcp_hostcache.cache_limit) {
			if (hc_prev != NULL) {
				hc_entry = CK_SLIST_NEXT(hc_prev, hc_q);
				KASSERT(CK_SLIST_NEXT(hc_entry, hc_q) == NULL,
				    ("%s: %p is not one to last",
				    __func__, hc_entry));
				CK_SLIST_REMOVE_AFTER(hc_prev, hc_q);
			} else if ((hc_entry =
			    CK_SLIST_FIRST(&hc_head->hch_bucket)) != NULL) {
				KASSERT(CK_SLIST_NEXT(hc_entry, hc_q) == NULL,
				    ("%s: %p is not the only element",
				    __func__, hc_entry));
				CK_SLIST_REMOVE_HEAD(&hc_head->hch_bucket,
				    hc_q);
			} else {
				THC_UNLOCK(hc_head);
				return;
			}
			KASSERT(hc_head->hch_length > 0 &&
			    hc_head->hch_length <=
			    V_tcp_hostcache.bucket_limit,
			    ("tcp_hostcache: bucket length violated at %p",
			    hc_head));
			hc_head->hch_length--;
			atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
			TCPSTAT_INC(tcps_hc_bucketoverflow);
			uma_zfree_smr(V_tcp_hostcache.zone, hc_entry);
		}

		/*
		 * Allocate a new entry, or balk if not possible.
		 */
		hc_entry = uma_zalloc_smr(V_tcp_hostcache.zone, M_NOWAIT);
		if (hc_entry == NULL) {
			THC_UNLOCK(hc_head);
			return;
		}

		/*
		 * Initialize basic information of hostcache entry.
		 */
		bzero(hc_entry, sizeof(*hc_entry));
		if (inc->inc_flags & INC_ISIPV6) {
			hc_entry->ip6 = inc->inc6_faddr;
			hc_entry->ip6_zoneid = inc->inc6_zoneid;
		} else
			hc_entry->ip4 = inc->inc_faddr;
		hc_entry->hc_expire = V_tcp_hostcache.expire;
		new = true;
	}

	/*
	 * Fill in data.  Use atomics, since an existing entry is
	 * accessible by readers in SMR section.  New values are averaged
	 * with the old ones to smooth out measurement noise.
	 */
	if (hcml->hc_mtu != 0) {
		atomic_store_32(&hc_entry->hc_mtu, hcml->hc_mtu);
	}
	if (hcml->hc_rtt != 0) {
		if (hc_entry->hc_rtt == 0)
			v = hcml->hc_rtt;
		else
			v = ((uint64_t)hc_entry->hc_rtt +
			    (uint64_t)hcml->hc_rtt) / 2;
		atomic_store_32(&hc_entry->hc_rtt, v);
		TCPSTAT_INC(tcps_cachedrtt);
	}
	if (hcml->hc_rttvar != 0) {
		if (hc_entry->hc_rttvar == 0)
			v = hcml->hc_rttvar;
		else
			v = ((uint64_t)hc_entry->hc_rttvar +
			    (uint64_t)hcml->hc_rttvar) / 2;
		atomic_store_32(&hc_entry->hc_rttvar, v);
		TCPSTAT_INC(tcps_cachedrttvar);
	}
	if (hcml->hc_ssthresh != 0) {
		if (hc_entry->hc_ssthresh == 0)
			v = hcml->hc_ssthresh;
		else
			v = (hc_entry->hc_ssthresh + hcml->hc_ssthresh) / 2;
		atomic_store_32(&hc_entry->hc_ssthresh, v);
		TCPSTAT_INC(tcps_cachedssthresh);
	}
	if (hcml->hc_cwnd != 0) {
		if (hc_entry->hc_cwnd == 0)
			v = hcml->hc_cwnd;
		else
			v = ((uint64_t)hc_entry->hc_cwnd +
			    (uint64_t)hcml->hc_cwnd) / 2;
		atomic_store_32(&hc_entry->hc_cwnd, v);
		/* TCPSTAT_INC(tcps_cachedcwnd); */
	}
	if (hcml->hc_sendpipe != 0) {
		if (hc_entry->hc_sendpipe == 0)
			v = hcml->hc_sendpipe;
		else
			v = ((uint64_t)hc_entry->hc_sendpipe +
			    (uint64_t)hcml->hc_sendpipe) / 2;
		atomic_store_32(&hc_entry->hc_sendpipe, v);
		/* TCPSTAT_INC(tcps_cachedsendpipe); */
	}
	if (hcml->hc_recvpipe != 0) {
		if (hc_entry->hc_recvpipe == 0)
			v = hcml->hc_recvpipe;
		else
			v = ((uint64_t)hc_entry->hc_recvpipe +
			    (uint64_t)hcml->hc_recvpipe) / 2;
		atomic_store_32(&hc_entry->hc_recvpipe, v);
		/* TCPSTAT_INC(tcps_cachedrecvpipe); */
	}

	/*
	 * Put it upfront.
	 */
	if (new) {
		CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, hc_q);
		hc_head->hch_length++;
		KASSERT(hc_head->hch_length <= V_tcp_hostcache.bucket_limit,
		    ("tcp_hostcache: bucket length too high at %p", hc_head));
		atomic_add_int(&V_tcp_hostcache.cache_count, 1);
		TCPSTAT_INC(tcps_hc_added);
	} else if (hc_entry != CK_SLIST_FIRST(&hc_head->hch_bucket)) {
		KASSERT(CK_SLIST_NEXT(hc_prev, hc_q) == hc_entry,
		    ("%s: %p next is not %p", __func__, hc_prev, hc_entry));
		CK_SLIST_REMOVE_AFTER(hc_prev, hc_q);
		CK_SLIST_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, hc_q);
	}
	THC_UNLOCK(hc_head);
}

/*
 * Sysctl function: prints the list and values of all hostcache entries in
 * unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
	const int linesize = 128;
	struct sbuf sb;
	int i, error, len;
	struct hc_metrics *hc_entry;
	char ip4buf[INET_ADDRSTRLEN];
	char ip6buf[INET6_ADDRSTRLEN];

	if (jailed_without_vnet(curthread->td_ucred) != 0)
		return (EPERM);

	/* Optimize Buffer length query by sbin/sysctl */
	if (req->oldptr == NULL) {
		len = (atomic_load_int(&V_tcp_hostcache.cache_count) + 1) *
		    linesize;
		return (SYSCTL_OUT(req, NULL, len));
	}

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);

	/* Use a buffer sized for one full bucket */
	sbuf_new_for_sysctl(&sb, NULL, V_tcp_hostcache.bucket_limit *
	    linesize, req);

	sbuf_printf(&sb,
	    "\nIP address        MTU SSTHRESH      RTT   RTTVAR "
	    "    CWND SENDPIPE RECVPIPE "
#ifdef	TCP_HC_COUNTERS
	    "HITS  UPD  "
#endif
	    "EXP\n");

#define	msec(u) (((u) + 500) / 1000)
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i]);
		CK_SLIST_FOREACH(hc_entry,
		    &V_tcp_hostcache.hashbase[i].hch_bucket, hc_q) {
			sbuf_printf(&sb,
			    "%-15s %5u %8u %6lums %6lums %8u %8u %8u "
#ifdef	TCP_HC_COUNTERS
			    "%4lu %4lu "
#endif
			    "%4i\n",
			    hc_entry->ip4.s_addr ?
			    inet_ntoa_r(hc_entry->ip4, ip4buf) :
			    ip6_sprintf(ip6buf, &hc_entry->ip6),
			    hc_entry->hc_mtu,
			    hc_entry->hc_ssthresh,
			    msec((u_long)hc_entry->hc_rtt *
			    (RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
			    msec((u_long)hc_entry->hc_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
			    hc_entry->hc_cwnd,
			    hc_entry->hc_sendpipe,
			    hc_entry->hc_recvpipe,
#ifdef	TCP_HC_COUNTERS
			    hc_entry->hc_hits,
			    hc_entry->hc_updates,
#endif
			    hc_entry->hc_expire);
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i]);
	}
#undef msec
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	return (error);
}

/*
 * Sysctl function: prints a histogram of the hostcache hashbucket
 * utilization.
 */
static int
sysctl_tcp_hc_histo(SYSCTL_HANDLER_ARGS)
{
	const int linesize = 50;
	struct sbuf sb;
	int i, error;
	int *histo;
	u_int hch_length;

	if (jailed_without_vnet(curthread->td_ucred) != 0)
		return (EPERM);

	histo = (int *)malloc(sizeof(int) * (V_tcp_hostcache.bucket_limit + 1),
	    M_TEMP, M_NOWAIT | M_ZERO);
	if (histo == NULL)
		return (ENOMEM);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		hch_length = V_tcp_hostcache.hashbase[i].hch_length;
		KASSERT(hch_length <= V_tcp_hostcache.bucket_limit,
		    ("tcp_hostcache: bucket limit exceeded at %u: %u",
		    i, hch_length));
		histo[hch_length]++;
	}

	/* Use a buffer for 16 lines */
	sbuf_new_for_sysctl(&sb, NULL, 16 * linesize, req);

	sbuf_printf(&sb, "\nLength\tCount\n");
	for (i = 0; i <= V_tcp_hostcache.bucket_limit; i++)
		sbuf_printf(&sb, "%u\t%u\n", i, histo[i]);
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	free(histo, M_TEMP);

	return (error);
}

/*
 * Caller has to make sure the curvnet is set properly.
 */
static void
tcp_hc_purge_internal(int all)
{
	struct hc_head *head;
	struct hc_metrics *hc_entry, *hc_next, *hc_prev;
	int i;

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		head = &V_tcp_hostcache.hashbase[i];
		hc_prev = NULL;
		THC_LOCK(head);
		CK_SLIST_FOREACH_SAFE(hc_entry, &head->hch_bucket, hc_q,
		    hc_next) {
			KASSERT(head->hch_length > 0 && head->hch_length <=
			    V_tcp_hostcache.bucket_limit, ("tcp_hostcache: "
			    "bucket length out of range at %u: %u", i,
			    head->hch_length));
			if (all ||
			    atomic_load_int(&hc_entry->hc_expire) <= 0) {
				if (hc_prev != NULL) {
					KASSERT(hc_entry ==
					    CK_SLIST_NEXT(hc_prev, hc_q),
					    ("%s: %p is not next to %p",
					    __func__, hc_entry, hc_prev));
					CK_SLIST_REMOVE_AFTER(hc_prev, hc_q);
				} else {
					KASSERT(hc_entry ==
					    CK_SLIST_FIRST(&head->hch_bucket),
					    ("%s: %p is not first",
					    __func__, hc_entry));
					CK_SLIST_REMOVE_HEAD(&head->hch_bucket,
					    hc_q);
				}
				uma_zfree_smr(V_tcp_hostcache.zone, hc_entry);
				head->hch_length--;
				atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
			} else {
				atomic_subtract_int(&hc_entry->hc_expire,
				    V_tcp_hostcache.prune);
				hc_prev = hc_entry;
			}
		}
		THC_UNLOCK(head);
	}
}

/*
 * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
	int all = 0;

	CURVNET_SET((struct vnet *)arg);

	if (V_tcp_hostcache.purgeall) {
		if (V_tcp_hostcache.purgeall == 2)
			V_tcp_hostcache.hashsalt = arc4random();
		all = 1;
		V_tcp_hostcache.purgeall = 0;
	}

	tcp_hc_purge_internal(all);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
	CURVNET_RESTORE();
}

/*
 * Expire and purge all entries in hostcache immediately.
 */
static int
sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS)
{
	int error, val = 0;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (val == 2)
		V_tcp_hostcache.hashsalt = arc4random();
	tcp_hc_purge_internal(1);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);

	return (0);
}
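
/*
 * Usage note (illustrative): "sysctl net.inet.tcp.hostcache.purgenow=1"
 * flushes the cache immediately; a value of 2 additionally regenerates the
 * hash salt, mirroring the purgeall handling in tcp_hc_purge() above.
 */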
);