/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);

static DEFINE_MUTEX(rxrpc_local_mutex);
static LIST_HEAD(rxrpc_local_endpoints);
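
/* rxrpc_local_mutex guards additions to and removals from
 * rxrpc_local_endpoints.  The list is kept in rxrpc_local_cmp_key() order so
 * that the lookup loop in rxrpc_lookup_local() can stop scanning early.
 */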

/*
 * Compare a local to an address.  Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services.  Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
				const struct sockaddr_rxrpc *srx)
{
	long diff;

	diff = ((local->srx.transport_type - srx->transport_type) ?:
		(local->srx.transport_len - srx->transport_len) ?:
		(local->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		/* If the choice of UDP port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&local->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
	default:
		BUG();
	}
}
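
/* Note on rxrpc_local_cmp_key(): the GNU "a ?: b" extension evaluates to the
 * first non-zero operand, so the chained comparison above behaves like a
 * lexicographic compare - roughly equivalent to the following expansion
 * (illustrative only; the variable names are invented):
 *
 *	if (type_diff)
 *		return type_diff;
 *	if (len_diff)
 *		return len_diff;
 *	return family_diff;
 */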

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
		atomic_set(&local->usage, 1);
		INIT_LIST_HEAD(&local->link);
		INIT_WORK(&local->processor, rxrpc_local_processor);
		INIT_LIST_HEAD(&local->services);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->accept_queue);
		skb_queue_head_init(&local->reject_queue);
		skb_queue_head_init(&local->event_queue);
		local->client_conns = RB_ROOT;
		spin_lock_init(&local->client_conns_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
	}

	_leave(" = %p", local);
	return local;
}
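
/* Note: a freshly allocated endpoint starts with a usage count of 1, owned by
 * the caller; the transport socket itself is opened separately by
 * rxrpc_open_socket().
 */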

/*
 * create the local socket
 * - must be called with rxrpc_local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local)
{
	struct sock *sock;
	int ret, opt;

	_enter("%p{%d}", local, local->srx.transport_type);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(&init_net, PF_INET, local->srx.transport_type,
			       IPPROTO_UDP, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *)&local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed %d", ret);
			goto error;
		}
	}

	/* we want to receive ICMP errors */
	opt = 1;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* we want to set the don't fragment bit */
	opt = IP_PMTUDISC_DO;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* set the socket up */
	sock = local->socket->sk;
	sock->sk_user_data	= local;
	sock->sk_data_ready	= rxrpc_data_ready;
	sock->sk_error_report	= rxrpc_error_report;
	_leave(" = 0");
	return 0;

error:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}
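
/* The callbacks installed by rxrpc_open_socket() run in softirq context and
 * find their way back to the owning endpoint through sk->sk_user_data.  Very
 * roughly (sketch only - see the real handlers elsewhere in AF_RXRPC):
 *
 *	void rxrpc_data_ready(struct sock *sk)
 *	{
 *		struct rxrpc_local *local = sk->sk_user_data;
 *		...
 *	}
 */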

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct list_head *cursor;
	const char *age;
	long diff;
	int ret;

	if (srx->transport.family == AF_INET) {
		_enter("{%d,%u,%pI4+%hu}",
		       srx->transport_type,
		       srx->transport.family,
		       &srx->transport.sin.sin_addr,
		       ntohs(srx->transport.sin.sin_port));
	} else {
		_enter("{%d,%u}",
		       srx->transport_type,
		       srx->transport.family);
		return ERR_PTR(-EAFNOSUPPORT);
	}

	mutex_lock(&rxrpc_local_mutex);

	for (cursor = rxrpc_local_endpoints.next;
	     cursor != &rxrpc_local_endpoints;
	     cursor = cursor->next) {
		local = list_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff < 0)
			continue;
		if (diff > 0)
			break;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here.  It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match.  We replace a dying object.  Attempting to
		 * bind the transport socket may still fail if we're attempting
		 * to use a local address that the dying object is still using.
		 */
		if (!rxrpc_get_local_maybe(local)) {
			cursor = cursor->next;
			list_del_init(&local->link);
			break;
		}

		age = "old";
		goto found;
	}

	local = rxrpc_alloc_local(srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local);
	if (ret < 0)
		goto sock_error;

	list_add_tail(&local->link, cursor);
	age = "new";

found:
	mutex_unlock(&rxrpc_local_mutex);

	_net("LOCAL %s %d {%d,%u,%pI4+%hu}",
	     age,
	     local->debug_id,
	     local->srx.transport_type,
	     local->srx.transport.family,
	     &local->srx.transport.sin.sin_addr,
	     ntohs(local->srx.transport.sin.sin_port));

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxrpc_local_mutex);
	kfree(local);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxrpc_local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}
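
/* Illustrative caller of the lookup above (hypothetical snippet; it assumes
 * the usual rxrpc_put_local() refcounting helper declared in ar-internal.h):
 *
 *	local = rxrpc_lookup_local(&srx);
 *	if (IS_ERR(local))
 *		return PTR_ERR(local);
 *	...
 *	rxrpc_put_local(local);
 */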

/*
 * A local endpoint reached its end of life.
 */
void __rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%d", local->debug_id);
	rxrpc_queue_work(&local->processor);
}
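
/* Note: __rxrpc_put_local() only schedules the processor work item; the
 * actual teardown happens in rxrpc_local_destroyer() below, in process
 * context, because closing the socket may sleep.
 */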

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;

	_enter("%d", local->debug_id);

	/* We can get a race between an incoming call packet queueing the
	 * processor again and the work processor starting the destruction
	 * process which will shut down the UDP socket.
	 */
	if (local->dead) {
		_leave(" [already dead]");
		return;
	}
	local->dead = true;

	mutex_lock(&rxrpc_local_mutex);
	list_del_init(&local->link);
	mutex_unlock(&rxrpc_local_mutex);

	ASSERT(RB_EMPTY_ROOT(&local->client_conns));
	ASSERT(list_empty(&local->services));

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->accept_queue);
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);

	_debug("rcu local %d", local->debug_id);
	call_rcu(&local->rcu, rxrpc_local_rcu);
}
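
/* Teardown is split in two: the destroyer above closes the socket in process
 * context, then rxrpc_local_rcu() frees the memory only after a grace period,
 * so any remaining RCU-protected accesses to the endpoint see valid memory.
 */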

/*
 * Process events on an endpoint
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	_enter("%d", local->debug_id);

	do {
		again = false;
		if (atomic_read(&local->usage) == 0)
			return rxrpc_local_destroyer(local);

		if (!skb_queue_empty(&local->accept_queue)) {
			rxrpc_accept_incoming_calls(local);
			again = true;
		}

		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}
	} while (again);
}
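
/* The loop in rxrpc_local_processor() re-checks the queues before finishing
 * so that events queued while a pass is already in progress are handled
 * promptly, without waiting for the work item to be requeued.
 */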

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}

/*
 * Verify the local endpoint list is empty by this point.
 */
void __exit rxrpc_destroy_all_locals(void)
{
	struct rxrpc_local *local;

	_enter("");

	flush_workqueue(rxrpc_workqueue);

	if (!list_empty(&rxrpc_local_endpoints)) {
		mutex_lock(&rxrpc_local_mutex);
		list_for_each_entry(local, &rxrpc_local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, atomic_read(&local->usage));
		}
		mutex_unlock(&rxrpc_local_mutex);
		BUG();
	}

	rcu_barrier();
}