/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

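/*
 * Illustrative aside (not part of the original file): both tunables above
 * are expressed in jiffies, so a count of seconds is scaled by HZ.  A
 * hypothetical helper for setting the lifetime from seconds might look like
 * this.
 */
#if 0 /* example sketch only */
static inline void rxrpc_set_max_call_lifetime(unsigned int secs)
{
	/* 60 * HZ above means 60 seconds regardless of the tick rate */
	rxrpc_max_call_lifetime = secs * HZ;
}
#endif
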
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

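/*
 * Illustrative aside (not part of the original file): the lookup above is
 * the stock rbtree search idiom -- descend from rb_node branching on the key
 * comparison, and use rb_entry() to get back to the containing object.  The
 * same pattern over a hypothetical keyed structure:
 */
#if 0 /* example sketch only */
struct my_node {
	struct rb_node	node;
	unsigned long	key;
};

static struct my_node *my_search(struct rb_root *root, unsigned long key)
{
	struct rb_node *p = root->rb_node;

	while (p) {
		struct my_node *n = rb_entry(p, struct my_node, node);

		if (key < n->key)
			p = p->rb_left;
		else if (key > n->key)
			p = p->rb_right;
		else
			return n;
	}
	return NULL;
}
#endif
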
/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

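/*
 * Illustrative aside (not part of the original file): setup_timer() as used
 * above is the classic timer API (replaced by timer_setup() in later
 * kernels), in which the callback receives an unsigned long cookie -- here
 * the call pointer -- and casts it back itself.  Hypothetical handler and
 * arming sequence:
 */
#if 0 /* example sketch only */
static void my_timer_fn(unsigned long data)
{
	struct rxrpc_call *call = (struct rxrpc_call *) data;

	/* ... raise an event on the call, queue its processor, etc. ... */
}

	/* arm it for one lifetime period from now */
	setup_timer(&call->lifetimer, &my_timer_fn, (unsigned long) call);
	mod_timer(&call->lifetimer, jiffies + rxrpc_max_call_lifetime);
#endif
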
/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	call->local = rx->local;
	call->service_id = srx->srx_service;
	call->in_clientflag = 0;

	_leave(" = %p", call);
	return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link,
		       &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}

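/*
 * Illustrative aside (not part of the original file): the function above
 * pairs the rbtree search idiom with the matching insertion idiom -- walk to
 * a leaf slot while remembering the parent, then link and rebalance.
 * Condensed sketch, reusing the hypothetical my_node from the earlier
 * example (the equal-key case is elided here; above it jumps to
 * found_user_ID_now_present instead):
 */
#if 0 /* example sketch only */
	struct rb_node *parent = NULL, **pp = &root->rb_node;

	while (*pp) {
		parent = *pp;
		if (key < rb_entry(parent, struct my_node, node)->key)
			pp = &(*pp)->rb_left;
		else
			pp = &(*pp)->rb_right;
	}
	rb_link_node(&new->node, parent, pp);	/* attach at the leaf slot */
	rb_insert_color(&new->node, root);	/* rebalance and recolour */
#endif
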
/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = sp->hdr.cid;
	candidate->call_id = sp->hdr.callNumber;
	candidate->channel = chan;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->local = conn->params.local;
	call->epoch = conn->proto.epoch;
	call->service_id = conn->params.service_id;
	call->in_clientflag = RXRPC_CLIENT_INITIATED;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

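/*
 * Illustrative aside (not part of the original file): the per-channel call
 * pointer above is RCU-published under conn->channel_lock.  Writers use
 * rcu_assign_pointer() under the lock; a reader on the receive path would
 * pair that with rcu_dereference() inside a read-side critical section,
 * roughly:
 */
#if 0 /* example sketch only */
	rcu_read_lock();
	call = rcu_dereference(conn->channels[chan].call);
	if (call && call->call_id == sp->hdr.callNumber)
		/* ... hand the packet to this call ... */;
	rcu_read_unlock();
#endif
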
/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);
	}

	ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

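/*
 * Illustrative aside (not part of the original file): the socket's ref on
 * the call is not dropped above; it is handed to the deadspan timer, whose
 * handler performs the final put.  Schematically:
 */
#if 0 /* example sketch only */
	/* rxrpc_release_call(): the ref travels into the timer */
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	/* rxrpc_dead_call_expired(): the ref is finally released */
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
#endif
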
/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}

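/*
 * Illustrative aside (not part of the original file): the test_and_set_bit()
 * pattern above makes event raising idempotent -- only the caller that flips
 * a bit from 0 to 1 asks for the call processor to run, so repeated raises
 * queue the work at most once:
 */
#if 0 /* example sketch only */
	if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
		rxrpc_queue_call(call);	/* first setter queues; the rest skip */
#endif
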
/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		WARN_ON(atomic_read(&call->skb_count) != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *) _skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *) _skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

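/*
 * Illustrative aside (not part of the original file): the Tx window drained
 * above is a power-of-two ring managed with the linux/circ_buf.h helpers.
 * CIRC_CNT() yields the number of occupied slots and the consumer advances
 * the tail index with a mask, as in this generic sketch (consume() and the
 * ring variables are hypothetical):
 */
#if 0 /* example sketch only */
	while (CIRC_CNT(head, tail, size) > 0) {	/* size must be 2^n */
		consume(ring[tail]);			/* drain one slot */
		tail = (tail + 1) & (size - 1);		/* advance and wrap */
	}
#endif
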
/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
			/* fall through */
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}