// SPDX-License-Identifier: GPL-2.0-or-later
/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/udp.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);

/*
 * Compare a local to an address. Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services. Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
                                const struct sockaddr_rxrpc *srx)
{
        long diff;

        diff = ((local->srx.transport_type - srx->transport_type) ?:
                (local->srx.transport_len - srx->transport_len) ?:
                (local->srx.transport.family - srx->transport.family));
        if (diff != 0)
                return diff;

        switch (srx->transport.family) {
        case AF_INET:
                /* If the choice of UDP port is left up to the transport, then
                 * the endpoint record doesn't match.
                 */
                return ((u16 __force)local->srx.transport.sin.sin_port -
                        (u16 __force)srx->transport.sin.sin_port) ?:
                        memcmp(&local->srx.transport.sin.sin_addr,
                               &srx->transport.sin.sin_addr,
                               sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
                /* If the choice of UDP6 port is left up to the transport, then
                 * the endpoint record doesn't match.
                 */
                return ((u16 __force)local->srx.transport.sin6.sin6_port -
                        (u16 __force)srx->transport.sin6.sin6_port) ?:
                        memcmp(&local->srx.transport.sin6.sin6_addr,
                               &srx->transport.sin6.sin6_addr,
                               sizeof(struct in6_addr));
#endif
        default:
                BUG();
        }
}

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
                                             const struct sockaddr_rxrpc *srx)
{
        struct rxrpc_local *local;

        local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
        if (local) {
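                /* The caller gets one ref and counts as one active user:
                 * "usage" pins the object in memory and "active_users" keeps
                 * the transport socket open.
                 */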
                atomic_set(&local->usage, 1);
                atomic_set(&local->active_users, 1);
                local->rxnet = rxnet;
                INIT_LIST_HEAD(&local->link);
                INIT_WORK(&local->processor, rxrpc_local_processor);
                init_rwsem(&local->defrag_sem);
                skb_queue_head_init(&local->reject_queue);
                skb_queue_head_init(&local->event_queue);
                local->client_bundles = RB_ROOT;
                spin_lock_init(&local->client_bundles_lock);
                spin_lock_init(&local->lock);
                rwlock_init(&local->services_lock);
                local->debug_id = atomic_inc_return(&rxrpc_debug_id);
                memcpy(&local->srx, srx, sizeof(*srx));
                local->srx.srx_service = 0;
                trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
        }

        _leave(" = %p", local);
        return local;
}

/*
 * create the local socket
 * - must be called with rxrpc_local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
        struct sock *usk;
        int ret;

        _enter("%p{%d,%d}",
               local, local->srx.transport_type, local->srx.transport.family);

        /* create a socket to represent the local endpoint */
        ret = sock_create_kern(net, local->srx.transport.family,
                               local->srx.transport_type, 0, &local->socket);
        if (ret < 0) {
                _leave(" = %d [socket]", ret);
                return ret;
        }

        /* set the socket up */
        usk = local->socket->sk;
        inet_sk(usk)->mc_loop = 0;

        /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
        inet_inc_convert_csum(usk);

        rcu_assign_sk_user_data(usk, local);
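
        /* Incoming packets are handed directly to rxrpc_input_packet() by the
         * UDP encap_rcv hook set up below rather than going through the
         * normal recvmsg() path.
         */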
        udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC;
        udp_sk(usk)->encap_rcv = rxrpc_input_packet;
        udp_sk(usk)->encap_destroy = NULL;
        udp_sk(usk)->gro_receive = NULL;
        udp_sk(usk)->gro_complete = NULL;
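
        /* UDP encapsulation handling must be switched on for the encap_rcv
         * hook above to be invoked for packets arriving on this socket.
         */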
        udp_encap_enable();
#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
        if (local->srx.transport.family == AF_INET6)
                udpv6_encap_enable();
#endif
        usk->sk_error_report = rxrpc_error_report;

        /* if a local address was supplied then bind it */
        if (local->srx.transport_len > sizeof(sa_family_t)) {
                _debug("bind");
                ret = kernel_bind(local->socket,
                                  (struct sockaddr *)&local->srx.transport,
                                  local->srx.transport_len);
                if (ret < 0) {
                        _debug("bind failed %d", ret);
                        goto error;
                }
        }

        switch (local->srx.transport.family) {
        case AF_INET6:
                /* we want to receive ICMPv6 errors */
                ip6_sock_set_recverr(local->socket->sk);

                /* Fall through and set IPv4 options too otherwise we don't get
                 * errors from IPv4 packets sent through the IPv6 socket.
                 */
                fallthrough;
        case AF_INET:
                /* we want to receive ICMP errors */
                ip_sock_set_recverr(local->socket->sk);

                /* we want to set the don't fragment bit */
                ip_sock_set_mtu_discover(local->socket->sk, IP_PMTUDISC_DO);

                /* We want receive timestamps. */
                sock_enable_timestamps(local->socket->sk);
                break;

        default:
                BUG();
        }

        _leave(" = 0");
        return 0;

error:
        kernel_sock_shutdown(local->socket, SHUT_RDWR);
        local->socket->sk->sk_user_data = NULL;
        sock_release(local->socket);
        local->socket = NULL;

        _leave(" = %d", ret);
        return ret;
}

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
                                       const struct sockaddr_rxrpc *srx)
{
        struct rxrpc_local *local;
        struct rxrpc_net *rxnet = rxrpc_net(net);
        struct list_head *cursor;
        const char *age;
        long diff;
        int ret;

        _enter("{%d,%d,%pISp}",
               srx->transport_type, srx->transport.family, &srx->transport);

        mutex_lock(&rxnet->local_mutex);

        for (cursor = rxnet->local_endpoints.next;
             cursor != &rxnet->local_endpoints;
             cursor = cursor->next) {
                local = list_entry(cursor, struct rxrpc_local, link);

                diff = rxrpc_local_cmp_key(local, srx);
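
                /* The endpoint list is kept sorted by rxrpc_local_cmp_key(),
                 * so entries that sort before the wanted address can be
                 * skipped and the walk stopped at the first entry that sorts
                 * after it.
                 */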
                if (diff < 0)
                        continue;
                if (diff > 0)
                        break;

                /* Services aren't allowed to share transport sockets, so
                 * reject that here. It is possible that the object is dying -
                 * but it may also still have the local transport address that
                 * we want bound.
                 */
                if (srx->srx_service) {
                        local = NULL;
                        goto addr_in_use;
                }

                /* Found a match. We replace a dying object. Attempting to
                 * bind the transport socket may still fail if we're attempting
                 * to use a local address that the dying object is still using.
                 */
                if (!rxrpc_use_local(local))
                        break;

                age = "old";
                goto found;
        }

        local = rxrpc_alloc_local(rxnet, srx);
        if (!local)
                goto nomem;

        ret = rxrpc_open_socket(local, net);
        if (ret < 0)
                goto sock_error;
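
        /* If the walk stopped at an existing entry, the new endpoint takes
         * over that slot in the list; otherwise it is added at the end.
         */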
        if (cursor != &rxnet->local_endpoints)
                list_replace_init(cursor, &local->link);
        else
                list_add_tail(&local->link, cursor);
        age = "new";

found:
        mutex_unlock(&rxnet->local_mutex);

        _net("LOCAL %s %d {%pISp}",
             age, local->debug_id, &local->srx.transport);

        _leave(" = %p", local);
        return local;

nomem:
        ret = -ENOMEM;
sock_error:
        mutex_unlock(&rxnet->local_mutex);
        if (local)
                call_rcu(&local->rcu, rxrpc_local_rcu);
        _leave(" = %d", ret);
        return ERR_PTR(ret);

addr_in_use:
        mutex_unlock(&rxnet->local_mutex);
        _leave(" = -EADDRINUSE");
        return ERR_PTR(-EADDRINUSE);
}

/*
 * Get a ref on a local endpoint.
 */
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
{
        const void *here = __builtin_return_address(0);
        int n;

        n = atomic_inc_return(&local->usage);
        trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
        return local;
}

/*
 * Get a ref on a local endpoint unless its usage has already reached 0.
 */
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
        const void *here = __builtin_return_address(0);

        if (local) {
                int n = atomic_fetch_add_unless(&local->usage, 1, 0);
                if (n > 0)
                        trace_rxrpc_local(local->debug_id, rxrpc_local_got,
                                          n + 1, here);
                else
                        local = NULL;
        }
        return local;
}

/*
 * Queue a local endpoint and pass the caller's reference to the work item.
 */
void rxrpc_queue_local(struct rxrpc_local *local)
{
        const void *here = __builtin_return_address(0);
        unsigned int debug_id = local->debug_id;
        int n = atomic_read(&local->usage);

        if (rxrpc_queue_work(&local->processor))
                trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
        else
                rxrpc_put_local(local);
}

/*
 * Drop a ref on a local endpoint.
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
        const void *here = __builtin_return_address(0);
        unsigned int debug_id;
        int n;

        if (local) {
                debug_id = local->debug_id;

                n = atomic_dec_return(&local->usage);
                trace_rxrpc_local(debug_id, rxrpc_local_put, n, here);
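
                /* Packet reception and error reporting find this object via
                 * the socket's user data under RCU, so the final free has to
                 * wait for an RCU grace period.
                 */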
                if (n == 0)
                        call_rcu(&local->rcu, rxrpc_local_rcu);
        }
}

/*
 * Start using a local endpoint.
 */
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
{
        local = rxrpc_get_local_maybe(local);
        if (!local)
                return NULL;

        if (!__rxrpc_use_local(local)) {
                rxrpc_put_local(local);
                return NULL;
        }

        return local;
}

/*
 * Cease using a local endpoint. Once the number of active users reaches 0, we
 * start the closure of the transport in the work processor.
 */
void rxrpc_unuse_local(struct rxrpc_local *local)
{
        if (local) {
                if (__rxrpc_unuse_local(local)) {
                        rxrpc_get_local(local);
                        rxrpc_queue_local(local);
                }
        }
}

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
        struct socket *socket = local->socket;
        struct rxrpc_net *rxnet = local->rxnet;

        _enter("%d", local->debug_id);

        mutex_lock(&rxnet->local_mutex);
        list_del_init(&local->link);
        mutex_unlock(&rxnet->local_mutex);
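
        /* The endpoint can no longer be found by rxrpc_lookup_local(), so
         * clear out its remaining connections before shutting the socket
         * down.
         */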
        rxrpc_clean_up_local_conns(local);
        rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
        ASSERT(!local->service);

        if (socket) {
                local->socket = NULL;
                kernel_sock_shutdown(socket, SHUT_RDWR);
                socket->sk->sk_user_data = NULL;
                sock_release(socket);
        }

        /* At this point, there should be no more packets coming in to the
         * local endpoint.
         */
        rxrpc_purge_queue(&local->reject_queue);
        rxrpc_purge_queue(&local->event_queue);
}

/*
 * Process events on an endpoint. The work item carries a ref which
 * we need to release.
 */
static void rxrpc_local_processor(struct work_struct *work)
{
        struct rxrpc_local *local =
                container_of(work, struct rxrpc_local, processor);
        bool again;

        trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
                          atomic_read(&local->usage), NULL);
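
        /* Loop until a pass finds nothing left to do. If the endpoint has
         * lost its last active user, tear down the transport instead of
         * processing packets.
         */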
        do {
                again = false;
                if (!__rxrpc_use_local(local)) {
                        rxrpc_local_destroyer(local);
                        break;
                }

                if (!skb_queue_empty(&local->reject_queue)) {
                        rxrpc_reject_packets(local);
                        again = true;
                }

                if (!skb_queue_empty(&local->event_queue)) {
                        rxrpc_process_local_events(local);
                        again = true;
                }

                __rxrpc_unuse_local(local);
        } while (again);

        rxrpc_put_local(local);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
        struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

        _enter("%d", local->debug_id);

        ASSERT(!work_pending(&local->processor));

        _net("DESTROY LOCAL %d", local->debug_id);
        kfree(local);
        _leave("");
}

/*
 * Verify the local endpoint list is empty by this point.
 */
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
        struct rxrpc_local *local;

        flush_workqueue(rxrpc_workqueue);
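
        /* Any queued endpoint work, including destroyers, has now completed,
         * so anything still on the list has been leaked.
         */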
        if (!list_empty(&rxnet->local_endpoints)) {
                mutex_lock(&rxnet->local_mutex);
                list_for_each_entry(local, &rxnet->local_endpoints, link) {
                        pr_err("AF_RXRPC: Leaked local %p {%d}\n",
                               local, atomic_read(&local->usage));
                }
                mutex_unlock(&rxnet->local_mutex);
        }
}