// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"
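
/* Note on the backlog rings: the peer, conn and call backlogs in struct
 * rxrpc_backlog are power-of-two sized circular buffers.  The preallocator
 * advances the head indices (serialised by the socket lock) and the softirq
 * consumer in rxrpc_alloc_incoming_call() advances the tails (serialised by
 * rx->incoming_lock), so each index has a single writer and the rings need no
 * further locking: slots are published with smp_store_release() and observed
 * with smp_load_acquire().
 */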

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max = tmp + 1;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;
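
	/* CIRC_CNT(head, tail, size) reduces to (head - tail) & (size - 1);
	 * e.g. with RXRPC_BACKLOG_MAX (32), head 3 and tail 30 it gives
	 * (3 - 30) & 31 = 5 entries in the ring, the power-of-two size making
	 * wrap-around a simple mask.
	 */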
	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}
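
	/* The release store above publishes the fully-initialised peer before
	 * the advanced head index becomes visible; it pairs with the
	 * smp_load_acquire() of the head in rxrpc_alloc_incoming_call().
	 */
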
	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
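
	/* A kernel-preallocated call now carries three refs: the kernel
	 * service's, the user_call_ID tree's and the one the backlog ring
	 * will own; calls preallocated for userspace services carry only the
	 * backlog ring's ref.
	 */
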
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;

	return 0;
}
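
/* rxrpc_service_prealloc() is driven from listen(2) on an AF_RXRPC socket;
 * kernel services (which set ->discard_new_call) charge the backlog one call
 * at a time through rxrpc_kernel_charge_accept() instead.
 */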

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);
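
	/* The empty lock/unlock pair above is deliberate: acquiring
	 * rx->incoming_lock waits out any softirq handler still inside
	 * rxrpc_new_incoming_call(), so nothing can be consuming the rings
	 * while we tear them down below.
	 */
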
	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

	if (call->peer->rtt_usage < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    const struct rxrpc_security *sec,
						    struct key *key,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);
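
	/* The acquire loads of the head indices above pair with the
	 * smp_store_release() calls in rxrpc_service_prealloc_one(): once a
	 * new head value is seen, the slot it exposes is fully initialised.
	 */
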
	if (call_count == 0)
		return NULL;

	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->security = conn->security;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_sock *rx,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	const struct rxrpc_security *sec = NULL;
	struct rxrpc_connection *conn;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_call *call = NULL;
	struct key *key = NULL;

	_enter("");

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		goto no_call;
	}

	/* The peer, connection and call may all have sprung into existence due
	 * to a duplicate packet being handled on another CPU in parallel, so
	 * we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
		goto no_call;

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
	key_put(key);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		goto no_call;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			if (rx->discard_new_call)
				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
			else
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
		}
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);
	spin_unlock(&rx->incoming_lock);

	rxrpc_send_ping(call, skb);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
	return call;

no_call:
	spin_unlock(&rx->incoming_lock);
	_leave(" = NULL [%u]", skb->mark);
	return NULL;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point. */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* Fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
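
/* Example (illustrative only, not part of this file): a kernel service keeps
 * the backlog charged with its own attach callback, roughly:
 *
 *	for (;;) {
 *		if (rxrpc_kernel_charge_accept(sock, my_notify_rx,
 *					       my_attach_call,
 *					       (unsigned long)my_cookie,
 *					       GFP_KERNEL,
 *					       atomic_inc_return(&rxrpc_debug_id)) < 0)
 *			break;	(ring full or socket closing)
 *	}
 *
 * where my_notify_rx, my_attach_call and my_cookie stand in for the service's
 * own callbacks and tag.  AFS, for instance, recharges the backlog like this
 * each time a preallocated call is taken into use.
 */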