// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC remote transport endpoint record management
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 #include <linux/module.h>
11 #include <linux/net.h>
12 #include <linux/skbuff.h>
13 #include <linux/udp.h>
15 #include <linux/in6.h>
16 #include <linux/slab.h>
17 #include <linux/hashtable.h>
19 #include <net/af_rxrpc.h>
21 #include <net/route.h>
22 #include <net/ip6_route.h>
23 #include "ar-internal.h"
28 static unsigned long rxrpc_peer_hash_key(struct rxrpc_local
*local
,
29 const struct sockaddr_rxrpc
*srx
)
33 unsigned long hash_key
;
37 hash_key
= (unsigned long)local
/ __alignof__(*local
);
38 hash_key
+= srx
->transport_type
;
39 hash_key
+= srx
->transport_len
;
40 hash_key
+= srx
->transport
.family
;
42 switch (srx
->transport
.family
) {
44 hash_key
+= (u16 __force
)srx
->transport
.sin
.sin_port
;
45 size
= sizeof(srx
->transport
.sin
.sin_addr
);
46 p
= (u16
*)&srx
->transport
.sin
.sin_addr
;
48 #ifdef CONFIG_AF_RXRPC_IPV6
50 hash_key
+= (u16 __force
)srx
->transport
.sin
.sin_port
;
51 size
= sizeof(srx
->transport
.sin6
.sin6_addr
);
52 p
= (u16
*)&srx
->transport
.sin6
.sin6_addr
;
56 WARN(1, "AF_RXRPC: Unsupported transport address family\n");
60 /* Step through the peer address in 16-bit portions for speed */
61 for (i
= 0; i
< size
; i
+= sizeof(*p
), p
++)
64 _leave(" 0x%lx", hash_key
);
69 * Compare a peer to a key. Return -ve, 0 or +ve to indicate less than, same
72 * Unfortunately, the primitives in linux/hashtable.h don't allow for sorted
73 * buckets and mid-bucket insertion, so we don't make full use of this
74 * information at this point.
76 static long rxrpc_peer_cmp_key(const struct rxrpc_peer
*peer
,
77 struct rxrpc_local
*local
,
78 const struct sockaddr_rxrpc
*srx
,
79 unsigned long hash_key
)
83 diff
= ((peer
->hash_key
- hash_key
) ?:
84 ((unsigned long)peer
->local
- (unsigned long)local
) ?:
85 (peer
->srx
.transport_type
- srx
->transport_type
) ?:
86 (peer
->srx
.transport_len
- srx
->transport_len
) ?:
87 (peer
->srx
.transport
.family
- srx
->transport
.family
));
91 switch (srx
->transport
.family
) {
93 return ((u16 __force
)peer
->srx
.transport
.sin
.sin_port
-
94 (u16 __force
)srx
->transport
.sin
.sin_port
) ?:
95 memcmp(&peer
->srx
.transport
.sin
.sin_addr
,
96 &srx
->transport
.sin
.sin_addr
,
97 sizeof(struct in_addr
));
98 #ifdef CONFIG_AF_RXRPC_IPV6
100 return ((u16 __force
)peer
->srx
.transport
.sin6
.sin6_port
-
101 (u16 __force
)srx
->transport
.sin6
.sin6_port
) ?:
102 memcmp(&peer
->srx
.transport
.sin6
.sin6_addr
,
103 &srx
->transport
.sin6
.sin6_addr
,
104 sizeof(struct in6_addr
));
112 * Look up a remote transport endpoint for the specified address using RCU.
114 static struct rxrpc_peer
*__rxrpc_lookup_peer_rcu(
115 struct rxrpc_local
*local
,
116 const struct sockaddr_rxrpc
*srx
,
117 unsigned long hash_key
)
119 struct rxrpc_peer
*peer
;
120 struct rxrpc_net
*rxnet
= local
->rxnet
;
122 hash_for_each_possible_rcu(rxnet
->peer_hash
, peer
, hash_link
, hash_key
) {
123 if (rxrpc_peer_cmp_key(peer
, local
, srx
, hash_key
) == 0 &&
124 atomic_read(&peer
->usage
) > 0)
132 * Look up a remote transport endpoint for the specified address using RCU.
134 struct rxrpc_peer
*rxrpc_lookup_peer_rcu(struct rxrpc_local
*local
,
135 const struct sockaddr_rxrpc
*srx
)
137 struct rxrpc_peer
*peer
;
138 unsigned long hash_key
= rxrpc_peer_hash_key(local
, srx
);
140 peer
= __rxrpc_lookup_peer_rcu(local
, srx
, hash_key
);
142 _net("PEER %d {%pISp}", peer
->debug_id
, &peer
->srx
.transport
);
143 _leave(" = %p {u=%d}", peer
, atomic_read(&peer
->usage
));
149 * assess the MTU size for the network interface through which this peer is
152 static void rxrpc_assess_MTU_size(struct rxrpc_sock
*rx
,
153 struct rxrpc_peer
*peer
)
155 struct net
*net
= sock_net(&rx
->sk
);
156 struct dst_entry
*dst
;
159 struct flowi4
*fl4
= &fl
.u
.ip4
;
160 #ifdef CONFIG_AF_RXRPC_IPV6
161 struct flowi6
*fl6
= &fl
.u
.ip6
;
166 memset(&fl
, 0, sizeof(fl
));
167 switch (peer
->srx
.transport
.family
) {
169 rt
= ip_route_output_ports(
171 peer
->srx
.transport
.sin
.sin_addr
.s_addr
, 0,
172 htons(7000), htons(7001), IPPROTO_UDP
, 0, 0);
174 _leave(" [route err %ld]", PTR_ERR(rt
));
180 #ifdef CONFIG_AF_RXRPC_IPV6
182 fl6
->flowi6_iif
= LOOPBACK_IFINDEX
;
183 fl6
->flowi6_scope
= RT_SCOPE_UNIVERSE
;
184 fl6
->flowi6_proto
= IPPROTO_UDP
;
185 memcpy(&fl6
->daddr
, &peer
->srx
.transport
.sin6
.sin6_addr
,
186 sizeof(struct in6_addr
));
187 fl6
->fl6_dport
= htons(7001);
188 fl6
->fl6_sport
= htons(7000);
189 dst
= ip6_route_output(net
, NULL
, fl6
);
191 _leave(" [route err %d]", dst
->error
);
201 peer
->if_mtu
= dst_mtu(dst
);
204 _leave(" [if_mtu %u]", peer
->if_mtu
);
210 struct rxrpc_peer
*rxrpc_alloc_peer(struct rxrpc_local
*local
, gfp_t gfp
)
212 const void *here
= __builtin_return_address(0);
213 struct rxrpc_peer
*peer
;
217 peer
= kzalloc(sizeof(struct rxrpc_peer
), gfp
);
219 atomic_set(&peer
->usage
, 1);
220 peer
->local
= rxrpc_get_local(local
);
221 INIT_HLIST_HEAD(&peer
->error_targets
);
222 peer
->service_conns
= RB_ROOT
;
223 seqlock_init(&peer
->service_conn_lock
);
224 spin_lock_init(&peer
->lock
);
225 spin_lock_init(&peer
->rtt_input_lock
);
226 peer
->debug_id
= atomic_inc_return(&rxrpc_debug_id
);
228 rxrpc_peer_init_rtt(peer
);
230 if (RXRPC_TX_SMSS
> 2190)
232 else if (RXRPC_TX_SMSS
> 1095)
236 trace_rxrpc_peer(peer
->debug_id
, rxrpc_peer_new
, 1, here
);
239 _leave(" = %p", peer
);
244 * Initialise peer record.
246 static void rxrpc_init_peer(struct rxrpc_sock
*rx
, struct rxrpc_peer
*peer
,
247 unsigned long hash_key
)
249 peer
->hash_key
= hash_key
;
250 rxrpc_assess_MTU_size(rx
, peer
);
251 peer
->mtu
= peer
->if_mtu
;
252 peer
->rtt_last_req
= ktime_get_real();
254 switch (peer
->srx
.transport
.family
) {
256 peer
->hdrsize
= sizeof(struct iphdr
);
258 #ifdef CONFIG_AF_RXRPC_IPV6
260 peer
->hdrsize
= sizeof(struct ipv6hdr
);
267 switch (peer
->srx
.transport_type
) {
269 peer
->hdrsize
+= sizeof(struct udphdr
);
275 peer
->hdrsize
+= sizeof(struct rxrpc_wire_header
);
276 peer
->maxdata
= peer
->mtu
- peer
->hdrsize
;
282 static struct rxrpc_peer
*rxrpc_create_peer(struct rxrpc_sock
*rx
,
283 struct rxrpc_local
*local
,
284 struct sockaddr_rxrpc
*srx
,
285 unsigned long hash_key
,
288 struct rxrpc_peer
*peer
;
292 peer
= rxrpc_alloc_peer(local
, gfp
);
294 memcpy(&peer
->srx
, srx
, sizeof(*srx
));
295 rxrpc_init_peer(rx
, peer
, hash_key
);
298 _leave(" = %p", peer
);
303 * Set up a new incoming peer. There shouldn't be any other matching peers
304 * since we've already done a search in the list from the non-reentrant context
305 * (the data_ready handler) that is the only place we can add new peers.
307 void rxrpc_new_incoming_peer(struct rxrpc_sock
*rx
, struct rxrpc_local
*local
,
308 struct rxrpc_peer
*peer
)
310 struct rxrpc_net
*rxnet
= local
->rxnet
;
311 unsigned long hash_key
;
313 hash_key
= rxrpc_peer_hash_key(local
, &peer
->srx
);
314 rxrpc_init_peer(rx
, peer
, hash_key
);
316 spin_lock(&rxnet
->peer_hash_lock
);
317 hash_add_rcu(rxnet
->peer_hash
, &peer
->hash_link
, hash_key
);
318 list_add_tail(&peer
->keepalive_link
, &rxnet
->peer_keepalive_new
);
319 spin_unlock(&rxnet
->peer_hash_lock
);
323 * obtain a remote transport endpoint for the specified address
325 struct rxrpc_peer
*rxrpc_lookup_peer(struct rxrpc_sock
*rx
,
326 struct rxrpc_local
*local
,
327 struct sockaddr_rxrpc
*srx
, gfp_t gfp
)
329 struct rxrpc_peer
*peer
, *candidate
;
330 struct rxrpc_net
*rxnet
= local
->rxnet
;
331 unsigned long hash_key
= rxrpc_peer_hash_key(local
, srx
);
333 _enter("{%pISp}", &srx
->transport
);
335 /* search the peer list first */
337 peer
= __rxrpc_lookup_peer_rcu(local
, srx
, hash_key
);
338 if (peer
&& !rxrpc_get_peer_maybe(peer
))
343 /* The peer is not yet present in hash - create a candidate
344 * for a new record and then redo the search.
346 candidate
= rxrpc_create_peer(rx
, local
, srx
, hash_key
, gfp
);
348 _leave(" = NULL [nomem]");
352 spin_lock_bh(&rxnet
->peer_hash_lock
);
354 /* Need to check that we aren't racing with someone else */
355 peer
= __rxrpc_lookup_peer_rcu(local
, srx
, hash_key
);
356 if (peer
&& !rxrpc_get_peer_maybe(peer
))
359 hash_add_rcu(rxnet
->peer_hash
,
360 &candidate
->hash_link
, hash_key
);
361 list_add_tail(&candidate
->keepalive_link
,
362 &rxnet
->peer_keepalive_new
);
365 spin_unlock_bh(&rxnet
->peer_hash_lock
);
373 _net("PEER %d {%pISp}", peer
->debug_id
, &peer
->srx
.transport
);
375 _leave(" = %p {u=%d}", peer
, atomic_read(&peer
->usage
));
380 * Get a ref on a peer record.
382 struct rxrpc_peer
*rxrpc_get_peer(struct rxrpc_peer
*peer
)
384 const void *here
= __builtin_return_address(0);
387 n
= atomic_inc_return(&peer
->usage
);
388 trace_rxrpc_peer(peer
->debug_id
, rxrpc_peer_got
, n
, here
);
393 * Get a ref on a peer record unless its usage has already reached 0.
395 struct rxrpc_peer
*rxrpc_get_peer_maybe(struct rxrpc_peer
*peer
)
397 const void *here
= __builtin_return_address(0);
400 int n
= atomic_fetch_add_unless(&peer
->usage
, 1, 0);
402 trace_rxrpc_peer(peer
->debug_id
, rxrpc_peer_got
, n
+ 1, here
);
410 * Discard a peer record.
412 static void __rxrpc_put_peer(struct rxrpc_peer
*peer
)
414 struct rxrpc_net
*rxnet
= peer
->local
->rxnet
;
416 ASSERT(hlist_empty(&peer
->error_targets
));
418 spin_lock_bh(&rxnet
->peer_hash_lock
);
419 hash_del_rcu(&peer
->hash_link
);
420 list_del_init(&peer
->keepalive_link
);
421 spin_unlock_bh(&rxnet
->peer_hash_lock
);
423 rxrpc_put_local(peer
->local
);
424 kfree_rcu(peer
, rcu
);
428 * Drop a ref on a peer record.
430 void rxrpc_put_peer(struct rxrpc_peer
*peer
)
432 const void *here
= __builtin_return_address(0);
433 unsigned int debug_id
;
437 debug_id
= peer
->debug_id
;
438 n
= atomic_dec_return(&peer
->usage
);
439 trace_rxrpc_peer(debug_id
, rxrpc_peer_put
, n
, here
);
441 __rxrpc_put_peer(peer
);
446 * Drop a ref on a peer record where the caller already holds the
449 void rxrpc_put_peer_locked(struct rxrpc_peer
*peer
)
451 const void *here
= __builtin_return_address(0);
452 unsigned int debug_id
= peer
->debug_id
;
455 n
= atomic_dec_return(&peer
->usage
);
456 trace_rxrpc_peer(debug_id
, rxrpc_peer_put
, n
, here
);
458 hash_del_rcu(&peer
->hash_link
);
459 list_del_init(&peer
->keepalive_link
);
460 rxrpc_put_local(peer
->local
);
461 kfree_rcu(peer
, rcu
);
466 * Make sure all peer records have been discarded.
468 void rxrpc_destroy_all_peers(struct rxrpc_net
*rxnet
)
470 struct rxrpc_peer
*peer
;
473 for (i
= 0; i
< HASH_SIZE(rxnet
->peer_hash
); i
++) {
474 if (hlist_empty(&rxnet
->peer_hash
[i
]))
477 hlist_for_each_entry(peer
, &rxnet
->peer_hash
[i
], hash_link
) {
478 pr_err("Leaked peer %u {%u} %pISp\n",
480 atomic_read(&peer
->usage
),
481 &peer
->srx
.transport
);
487 * rxrpc_kernel_get_peer - Get the peer address of a call
488 * @sock: The socket on which the call is in progress.
489 * @call: The call to query
490 * @_srx: Where to place the result
492 * Get the address of the remote peer in a call.
494 void rxrpc_kernel_get_peer(struct socket
*sock
, struct rxrpc_call
*call
,
495 struct sockaddr_rxrpc
*_srx
)
497 *_srx
= call
->peer
->srx
;
499 EXPORT_SYMBOL(rxrpc_kernel_get_peer
);
502 * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
503 * @sock: The socket on which the call is in progress.
504 * @call: The call to query
506 * Get the call's peer smoothed RTT.
508 u32
rxrpc_kernel_get_srtt(struct socket
*sock
, struct rxrpc_call
*call
)
510 return call
->peer
->srtt_us
>> 3;
512 EXPORT_SYMBOL(rxrpc_kernel_get_srtt
);