// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
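
/*
 * A dummy notification handler: installed on a preallocated call when its
 * kernel service discards it, so that any late notifications are quietly
 * ignored (see rxrpc_discard_prealloc()).
 */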
static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
			       unsigned long user_call_ID)
{
}

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
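
	/* Each backlog ring is a power-of-two sized circular buffer.  This
	 * function is the producer: it publishes a new entry and then advances
	 * the head with smp_store_release(), which pairs with the
	 * smp_load_acquire() done by the consumer, rxrpc_alloc_incoming_call().
	 * The "& (size - 1)" masks simply wrap the ring indices.
	 */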
	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);
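
	/* If this is being preallocated for a kernel service, the user ID has
	 * been supplied up front, so the call must be inserted into the
	 * socket's user_call_ID tree whilst it is still in the prealloc state.
	 */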
	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);
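
	/* Make the call visible on the per-network-namespace call list (which
	 * is what /proc/net/rxrpc/calls walks).
	 */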
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);
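
	/* Free any peers that were preallocated but never used for an incoming
	 * call.
	 */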
	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}
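
	/* Preallocated calls still hold the backlog's ref plus, for a kernel
	 * service, the user-ID and kernel refs taken in
	 * rxrpc_service_prealloc_one(), so let the service detach its user ID
	 * and then complete and release each call.
	 */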
	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			if (call->notify_rx)
				call->notify_rx = rxrpc_dummy_notify;
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

	if (call->peer->rtt_count < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    const struct rxrpc_security *sec,
						    struct key *key,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;
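
	/* If the packet didn't match an existing connection, consume a
	 * preallocated peer (unless routing already found one) and a
	 * preallocated connection, keying the peer by the sender's address.
	 */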
	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->security = conn->security;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_sock *rx,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	const struct rxrpc_security *sec = NULL;
	struct rxrpc_connection *conn;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_call *call = NULL;
	struct key *key = NULL;

	_enter("");

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		goto no_call;
	}

	/* The peer, connection and call may all have sprung into existence due
	 * to a duplicate packet being handled on another CPU in parallel, so
	 * we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
		goto no_call;

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
	key_put(key);
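
	/* If the backlog couldn't supply a call, tell the caller to respond
	 * with a BUSY packet.
	 */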
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		goto no_call;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);
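
	/* The connection's security state determines what happens next: an
	 * unsecured connection must first complete the challenge/response
	 * exchange, whereas an already-aborted connection completes the call
	 * immediately.
	 */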
	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			if (rx->discard_new_call)
				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
			else
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
		}
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);
	spin_unlock(&rx->incoming_lock);

	rxrpc_send_ping(call, skb);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
	return call;

no_call:
	spin_unlock(&rx->incoming_lock);
	_leave(" = NULL [%u]", skb->mark);
	return NULL;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}
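
	/* The rbtree may have changed whilst rx->call_lock was dropped to take
	 * the mutex, so the insertion point has to be found again.
	 */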
	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point. */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		fallthrough;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
);