// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC remote transport endpoint record management
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/module.h>
11 #include <linux/net.h>
12 #include <linux/skbuff.h>
13 #include <linux/udp.h>
15 #include <linux/in6.h>
16 #include <linux/slab.h>
17 #include <linux/hashtable.h>
19 #include <net/af_rxrpc.h>
21 #include <net/route.h>
22 #include <net/ip6_route.h>
23 #include "ar-internal.h"
/* All-zero address handed back when a NULL peer is queried. */
static const struct sockaddr_rxrpc rxrpc_null_addr;
30 static unsigned long rxrpc_peer_hash_key(struct rxrpc_local
*local
,
31 const struct sockaddr_rxrpc
*srx
)
35 unsigned long hash_key
;
39 hash_key
= (unsigned long)local
/ __alignof__(*local
);
40 hash_key
+= srx
->transport_type
;
41 hash_key
+= srx
->transport_len
;
42 hash_key
+= srx
->transport
.family
;
44 switch (srx
->transport
.family
) {
46 hash_key
+= (u16 __force
)srx
->transport
.sin
.sin_port
;
47 size
= sizeof(srx
->transport
.sin
.sin_addr
);
48 p
= (u16
*)&srx
->transport
.sin
.sin_addr
;
50 #ifdef CONFIG_AF_RXRPC_IPV6
52 hash_key
+= (u16 __force
)srx
->transport
.sin
.sin_port
;
53 size
= sizeof(srx
->transport
.sin6
.sin6_addr
);
54 p
= (u16
*)&srx
->transport
.sin6
.sin6_addr
;
58 WARN(1, "AF_RXRPC: Unsupported transport address family\n");
62 /* Step through the peer address in 16-bit portions for speed */
63 for (i
= 0; i
< size
; i
+= sizeof(*p
), p
++)
66 _leave(" 0x%lx", hash_key
);
71 * Compare a peer to a key. Return -ve, 0 or +ve to indicate less than, same
74 * Unfortunately, the primitives in linux/hashtable.h don't allow for sorted
75 * buckets and mid-bucket insertion, so we don't make full use of this
76 * information at this point.
78 static long rxrpc_peer_cmp_key(const struct rxrpc_peer
*peer
,
79 struct rxrpc_local
*local
,
80 const struct sockaddr_rxrpc
*srx
,
81 unsigned long hash_key
)
85 diff
= ((peer
->hash_key
- hash_key
) ?:
86 ((unsigned long)peer
->local
- (unsigned long)local
) ?:
87 (peer
->srx
.transport_type
- srx
->transport_type
) ?:
88 (peer
->srx
.transport_len
- srx
->transport_len
) ?:
89 (peer
->srx
.transport
.family
- srx
->transport
.family
));
93 switch (srx
->transport
.family
) {
95 return ((u16 __force
)peer
->srx
.transport
.sin
.sin_port
-
96 (u16 __force
)srx
->transport
.sin
.sin_port
) ?:
97 memcmp(&peer
->srx
.transport
.sin
.sin_addr
,
98 &srx
->transport
.sin
.sin_addr
,
99 sizeof(struct in_addr
));
100 #ifdef CONFIG_AF_RXRPC_IPV6
102 return ((u16 __force
)peer
->srx
.transport
.sin6
.sin6_port
-
103 (u16 __force
)srx
->transport
.sin6
.sin6_port
) ?:
104 memcmp(&peer
->srx
.transport
.sin6
.sin6_addr
,
105 &srx
->transport
.sin6
.sin6_addr
,
106 sizeof(struct in6_addr
));
114 * Look up a remote transport endpoint for the specified address using RCU.
116 static struct rxrpc_peer
*__rxrpc_lookup_peer_rcu(
117 struct rxrpc_local
*local
,
118 const struct sockaddr_rxrpc
*srx
,
119 unsigned long hash_key
)
121 struct rxrpc_peer
*peer
;
122 struct rxrpc_net
*rxnet
= local
->rxnet
;
124 hash_for_each_possible_rcu(rxnet
->peer_hash
, peer
, hash_link
, hash_key
) {
125 if (rxrpc_peer_cmp_key(peer
, local
, srx
, hash_key
) == 0 &&
126 refcount_read(&peer
->ref
) > 0)
134 * Look up a remote transport endpoint for the specified address using RCU.
136 struct rxrpc_peer
*rxrpc_lookup_peer_rcu(struct rxrpc_local
*local
,
137 const struct sockaddr_rxrpc
*srx
)
139 struct rxrpc_peer
*peer
;
140 unsigned long hash_key
= rxrpc_peer_hash_key(local
, srx
);
142 peer
= __rxrpc_lookup_peer_rcu(local
, srx
, hash_key
);
144 _leave(" = %p {u=%d}", peer
, refcount_read(&peer
->ref
));
149 * assess the MTU size for the network interface through which this peer is
152 static void rxrpc_assess_MTU_size(struct rxrpc_local
*local
,
153 struct rxrpc_peer
*peer
)
155 struct net
*net
= local
->net
;
156 struct dst_entry
*dst
;
159 struct flowi4
*fl4
= &fl
.u
.ip4
;
160 #ifdef CONFIG_AF_RXRPC_IPV6
161 struct flowi6
*fl6
= &fl
.u
.ip6
;
166 memset(&fl
, 0, sizeof(fl
));
167 switch (peer
->srx
.transport
.family
) {
169 rt
= ip_route_output_ports(
171 peer
->srx
.transport
.sin
.sin_addr
.s_addr
, 0,
172 htons(7000), htons(7001), IPPROTO_UDP
, 0, 0);
174 _leave(" [route err %ld]", PTR_ERR(rt
));
180 #ifdef CONFIG_AF_RXRPC_IPV6
182 fl6
->flowi6_iif
= LOOPBACK_IFINDEX
;
183 fl6
->flowi6_scope
= RT_SCOPE_UNIVERSE
;
184 fl6
->flowi6_proto
= IPPROTO_UDP
;
185 memcpy(&fl6
->daddr
, &peer
->srx
.transport
.sin6
.sin6_addr
,
186 sizeof(struct in6_addr
));
187 fl6
->fl6_dport
= htons(7001);
188 fl6
->fl6_sport
= htons(7000);
189 dst
= ip6_route_output(net
, NULL
, fl6
);
191 _leave(" [route err %d]", dst
->error
);
201 peer
->if_mtu
= dst_mtu(dst
);
204 _leave(" [if_mtu %u]", peer
->if_mtu
);
210 struct rxrpc_peer
*rxrpc_alloc_peer(struct rxrpc_local
*local
, gfp_t gfp
,
211 enum rxrpc_peer_trace why
)
213 struct rxrpc_peer
*peer
;
217 peer
= kzalloc(sizeof(struct rxrpc_peer
), gfp
);
219 refcount_set(&peer
->ref
, 1);
220 peer
->local
= rxrpc_get_local(local
, rxrpc_local_get_peer
);
221 INIT_HLIST_HEAD(&peer
->error_targets
);
222 peer
->service_conns
= RB_ROOT
;
223 seqlock_init(&peer
->service_conn_lock
);
224 spin_lock_init(&peer
->lock
);
225 spin_lock_init(&peer
->rtt_input_lock
);
226 peer
->debug_id
= atomic_inc_return(&rxrpc_debug_id
);
228 rxrpc_peer_init_rtt(peer
);
230 peer
->cong_ssthresh
= RXRPC_TX_MAX_WINDOW
;
231 trace_rxrpc_peer(peer
->debug_id
, 1, why
);
234 _leave(" = %p", peer
);
239 * Initialise peer record.
241 static void rxrpc_init_peer(struct rxrpc_local
*local
, struct rxrpc_peer
*peer
,
242 unsigned long hash_key
)
244 peer
->hash_key
= hash_key
;
245 rxrpc_assess_MTU_size(local
, peer
);
246 peer
->mtu
= peer
->if_mtu
;
247 peer
->rtt_last_req
= ktime_get_real();
249 switch (peer
->srx
.transport
.family
) {
251 peer
->hdrsize
= sizeof(struct iphdr
);
253 #ifdef CONFIG_AF_RXRPC_IPV6
255 peer
->hdrsize
= sizeof(struct ipv6hdr
);
262 switch (peer
->srx
.transport_type
) {
264 peer
->hdrsize
+= sizeof(struct udphdr
);
270 peer
->hdrsize
+= sizeof(struct rxrpc_wire_header
);
271 peer
->maxdata
= peer
->mtu
- peer
->hdrsize
;
277 static struct rxrpc_peer
*rxrpc_create_peer(struct rxrpc_local
*local
,
278 struct sockaddr_rxrpc
*srx
,
279 unsigned long hash_key
,
282 struct rxrpc_peer
*peer
;
286 peer
= rxrpc_alloc_peer(local
, gfp
, rxrpc_peer_new_client
);
288 memcpy(&peer
->srx
, srx
, sizeof(*srx
));
289 rxrpc_init_peer(local
, peer
, hash_key
);
292 _leave(" = %p", peer
);
296 static void rxrpc_free_peer(struct rxrpc_peer
*peer
)
298 trace_rxrpc_peer(peer
->debug_id
, 0, rxrpc_peer_free
);
299 rxrpc_put_local(peer
->local
, rxrpc_local_put_peer
);
300 kfree_rcu(peer
, rcu
);
304 * Set up a new incoming peer. There shouldn't be any other matching peers
305 * since we've already done a search in the list from the non-reentrant context
306 * (the data_ready handler) that is the only place we can add new peers.
308 void rxrpc_new_incoming_peer(struct rxrpc_local
*local
, struct rxrpc_peer
*peer
)
310 struct rxrpc_net
*rxnet
= local
->rxnet
;
311 unsigned long hash_key
;
313 hash_key
= rxrpc_peer_hash_key(local
, &peer
->srx
);
314 rxrpc_init_peer(local
, peer
, hash_key
);
316 spin_lock(&rxnet
->peer_hash_lock
);
317 hash_add_rcu(rxnet
->peer_hash
, &peer
->hash_link
, hash_key
);
318 list_add_tail(&peer
->keepalive_link
, &rxnet
->peer_keepalive_new
);
319 spin_unlock(&rxnet
->peer_hash_lock
);
323 * obtain a remote transport endpoint for the specified address
325 struct rxrpc_peer
*rxrpc_lookup_peer(struct rxrpc_local
*local
,
326 struct sockaddr_rxrpc
*srx
, gfp_t gfp
)
328 struct rxrpc_peer
*peer
, *candidate
;
329 struct rxrpc_net
*rxnet
= local
->rxnet
;
330 unsigned long hash_key
= rxrpc_peer_hash_key(local
, srx
);
332 _enter("{%pISp}", &srx
->transport
);
334 /* search the peer list first */
336 peer
= __rxrpc_lookup_peer_rcu(local
, srx
, hash_key
);
337 if (peer
&& !rxrpc_get_peer_maybe(peer
, rxrpc_peer_get_lookup_client
))
342 /* The peer is not yet present in hash - create a candidate
343 * for a new record and then redo the search.
345 candidate
= rxrpc_create_peer(local
, srx
, hash_key
, gfp
);
347 _leave(" = NULL [nomem]");
351 spin_lock(&rxnet
->peer_hash_lock
);
353 /* Need to check that we aren't racing with someone else */
354 peer
= __rxrpc_lookup_peer_rcu(local
, srx
, hash_key
);
355 if (peer
&& !rxrpc_get_peer_maybe(peer
, rxrpc_peer_get_lookup_client
))
358 hash_add_rcu(rxnet
->peer_hash
,
359 &candidate
->hash_link
, hash_key
);
360 list_add_tail(&candidate
->keepalive_link
,
361 &rxnet
->peer_keepalive_new
);
364 spin_unlock(&rxnet
->peer_hash_lock
);
367 rxrpc_free_peer(candidate
);
372 _leave(" = %p {u=%d}", peer
, refcount_read(&peer
->ref
));
377 * Get a ref on a peer record.
379 struct rxrpc_peer
*rxrpc_get_peer(struct rxrpc_peer
*peer
, enum rxrpc_peer_trace why
)
383 __refcount_inc(&peer
->ref
, &r
);
384 trace_rxrpc_peer(peer
->debug_id
, r
+ 1, why
);
389 * Get a ref on a peer record unless its usage has already reached 0.
391 struct rxrpc_peer
*rxrpc_get_peer_maybe(struct rxrpc_peer
*peer
,
392 enum rxrpc_peer_trace why
)
397 if (__refcount_inc_not_zero(&peer
->ref
, &r
))
398 trace_rxrpc_peer(peer
->debug_id
, r
+ 1, why
);
406 * Discard a peer record.
408 static void __rxrpc_put_peer(struct rxrpc_peer
*peer
)
410 struct rxrpc_net
*rxnet
= peer
->local
->rxnet
;
412 ASSERT(hlist_empty(&peer
->error_targets
));
414 spin_lock(&rxnet
->peer_hash_lock
);
415 hash_del_rcu(&peer
->hash_link
);
416 list_del_init(&peer
->keepalive_link
);
417 spin_unlock(&rxnet
->peer_hash_lock
);
419 rxrpc_free_peer(peer
);
423 * Drop a ref on a peer record.
425 void rxrpc_put_peer(struct rxrpc_peer
*peer
, enum rxrpc_peer_trace why
)
427 unsigned int debug_id
;
432 debug_id
= peer
->debug_id
;
433 dead
= __refcount_dec_and_test(&peer
->ref
, &r
);
434 trace_rxrpc_peer(debug_id
, r
- 1, why
);
436 __rxrpc_put_peer(peer
);
441 * Make sure all peer records have been discarded.
443 void rxrpc_destroy_all_peers(struct rxrpc_net
*rxnet
)
445 struct rxrpc_peer
*peer
;
448 for (i
= 0; i
< HASH_SIZE(rxnet
->peer_hash
); i
++) {
449 if (hlist_empty(&rxnet
->peer_hash
[i
]))
452 hlist_for_each_entry(peer
, &rxnet
->peer_hash
[i
], hash_link
) {
453 pr_err("Leaked peer %u {%u} %pISp\n",
455 refcount_read(&peer
->ref
),
456 &peer
->srx
.transport
);
462 * rxrpc_kernel_get_call_peer - Get the peer address of a call
463 * @sock: The socket on which the call is in progress.
464 * @call: The call to query
466 * Get a record for the remote peer in a call.
468 struct rxrpc_peer
*rxrpc_kernel_get_call_peer(struct socket
*sock
, struct rxrpc_call
*call
)
472 EXPORT_SYMBOL(rxrpc_kernel_get_call_peer
);
475 * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
476 * @peer: The peer to query
478 * Get the call's peer smoothed RTT in uS or UINT_MAX if we have no samples.
480 unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer
*peer
)
482 return peer
->rtt_count
> 0 ? peer
->srtt_us
>> 3 : UINT_MAX
;
484 EXPORT_SYMBOL(rxrpc_kernel_get_srtt
);
487 * rxrpc_kernel_remote_srx - Get the address of a peer
488 * @peer: The peer to query
490 * Get a pointer to the address from a peer record. The caller is responsible
491 * for making sure that the address is not deallocated.
493 const struct sockaddr_rxrpc
*rxrpc_kernel_remote_srx(const struct rxrpc_peer
*peer
)
495 return peer
? &peer
->srx
: &rxrpc_null_addr
;
497 EXPORT_SYMBOL(rxrpc_kernel_remote_srx
);
500 * rxrpc_kernel_remote_addr - Get the peer transport address of a call
501 * @peer: The peer to query
503 * Get a pointer to the transport address from a peer record. The caller is
504 * responsible for making sure that the address is not deallocated.
506 const struct sockaddr
*rxrpc_kernel_remote_addr(const struct rxrpc_peer
*peer
)
508 return (const struct sockaddr
*)
509 (peer
? &peer
->srx
.transport
: &rxrpc_null_addr
.transport
);
511 EXPORT_SYMBOL(rxrpc_kernel_remote_addr
);