/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};
struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);
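/* Calls are indexed in rxrpc_call_hash by a key derived from the connection
 * parameters (epoch, cid, call number, service ID) and the peer address.
 * Additions and removals are serialised by rxrpc_call_hash_lock; lookups walk
 * the bucket with the RCU iterator (see rxrpc_find_call_hash() below).
 */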
/*
 * Hash function for rxrpc_call_hash
 */
static unsigned long rxrpc_call_hashfunc(
	u8		in_clientflag,
	u32		cid,
	u32		call_id,
	u32		epoch,
	u16		service_id,
	sa_family_t	proto,
	void		*localptr,
	unsigned int	addr_size,
	const u8	*peer_addr)
{
	const u16 *p;
	unsigned int i;
	unsigned long key;

	key = (unsigned long)localptr;
	/* We just want to add up the __be32 values, so forcing the
	 * cast should be okay.
	 */
	key += epoch;
	key += service_id;
	key += call_id;
	key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
	key += cid & RXRPC_CHANNELMASK;
	key += in_clientflag;
	key += proto;
	/* Step through the peer address in 16-bit portions for speed */
	for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
		key += *p;
	_leave(" key = 0x%lx", key);
	return key;
}
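/* Note that the key is a simple sum of the identifying fields rather than a
 * cryptographic hash; collisions are tolerated because rxrpc_find_call_hash()
 * re-checks every field before returning a match.
 */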
/*
 * Add a call to the hashtable
 */
static void rxrpc_call_hash_add(struct rxrpc_call *call)
{
	unsigned long key;
	unsigned int addr_size = 0;

	switch (call->proto) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}
	key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
				  call->call_id, call->epoch,
				  call->service_id, call->proto,
				  call->conn->trans->local, addr_size,
				  call->peer_ip.ipv6_addr);
	/* Store the full key in the call */
	call->hash_key = key;
	spin_lock(&rxrpc_call_hash_lock);
	hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
	spin_unlock(&rxrpc_call_hash_lock);
}
/*
 * Remove a call from the hashtable
 */
static void rxrpc_call_hash_del(struct rxrpc_call *call)
{
	spin_lock(&rxrpc_call_hash_lock);
	hash_del_rcu(&call->hash_node);
	spin_unlock(&rxrpc_call_hash_lock);
}
/*
 * Find a call in the hashtable and return it, or NULL if it
 * isn't there.
 */
struct rxrpc_call *rxrpc_find_call_hash(
	struct rxrpc_host_header *hdr,
	void *localptr,
	sa_family_t proto,
	const void *peer_addr)
{
	unsigned long key;
	unsigned int addr_size = 0;
	struct rxrpc_call *call = NULL;
	struct rxrpc_call *ret = NULL;
	u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;

	switch (proto) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}

	key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
				  hdr->epoch, hdr->serviceId,
				  proto, localptr, addr_size,
				  peer_addr);
	hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
		if (call->hash_key == key &&
		    call->call_id == hdr->callNumber &&
		    call->cid == hdr->cid &&
		    call->in_clientflag == in_clientflag &&
		    call->service_id == hdr->serviceId &&
		    call->proto == proto &&
		    call->local == localptr &&
		    memcmp(call->peer_ip.ipv6_addr, peer_addr,
			   addr_size) == 0 &&
		    call->epoch == hdr->epoch) {
			ret = call;
			break;
		}
	}
	_leave(" = %p", ret);
	return ret;
}
/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}
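/* rxrpc_alloc_call() only produces a bare, unconnected call record: the
 * connection binding, the hashtable entry and the sock/conn rbtree links are
 * filled in later by rxrpc_alloc_client_call() or rxrpc_incoming_call().
 */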
/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(
	struct rxrpc_sock *rx,
	struct rxrpc_transport *trans,
	struct rxrpc_conn_bundle *bundle,
	gfp_t gfp)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(rx != NULL);
	ASSERT(trans != NULL);
	ASSERT(bundle != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->socket = rx;
	call->rx_data_post = 1;

	ret = rxrpc_connect_call(rx, trans, bundle, call, gfp);
	if (ret < 0) {
		kmem_cache_free(rxrpc_call_jar, call);
		return ERR_PTR(ret);
	}

	/* Record copies of information for hashtable lookup */
	call->proto = rx->proto;
	call->local = trans->local;
	switch (call->proto) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	}
	call->epoch = call->conn->epoch;
	call->service_id = call->conn->service_id;
	call->in_clientflag = call->conn->in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	spin_lock(&call->conn->trans->peer->lock);
	list_add(&call->error_link, &call->conn->trans->peer->error_targets);
	spin_unlock(&call->conn->trans->peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);

	_leave(" = %p", call);
	return call;
}
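/* The hashtable identity of a client call is copied from the connection that
 * rxrpc_connect_call() bound it to, and the lifetime timer is armed
 * immediately, capping the call at rxrpc_max_call_lifetime jiffies.
 */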
/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 int create,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p,%d,%d,%lx,%d",
	       rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
	       user_call_ID, create);

	/* search the extant calls first for one that matches the specified
	 * user ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);

	if (!create || !trans)
		return ERR_PTR(-EBADSLT);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	candidate->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new call */
	call = candidate;
	candidate = NULL;
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [extant %d]", call, atomic_read(&call->usage));
	return call;

	/* we found the call on the second time through the list */
found_extant_second:
	rxrpc_get_call(call);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(candidate);
	_leave(" = %p [second %d]", call, atomic_read(&call->usage));
	return call;
}
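/* rxrpc_get_client_call() uses the classic optimistic-insert pattern: search
 * the socket's call tree under the read lock, drop the lock to allocate a
 * candidate, then repeat the search under the write lock before linking the
 * candidate in, discarding it if another thread got there first.
 */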
/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct rxrpc_host_header *hdr,
				       gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	u32 call_id;

	_enter(",%d,,%x", conn->debug_id, gfp);

	candidate = rxrpc_alloc_call(gfp);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = hdr->cid;
	candidate->call_id = hdr->callNumber;
	candidate->channel = hdr->cid & RXRPC_CHANNELMASK;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == hdr->callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			conn->channels[call->channel] = NULL;
		} else {
			write_unlock_bh(&conn->lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	call_id = hdr->callNumber;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		call = rb_entry(parent, struct rxrpc_call, conn_node);

		/* The tree is sorted in order of the __be32 value without
		 * turning it into host order.
		 */
		if (call_id < call->call_id)
			p = &(*p)->rb_left;
		else if (call_id > call->call_id)
			p = &(*p)->rb_right;
		else
			goto old_call;
	}

	/* make the call available */
	call = candidate;
	candidate = NULL;
	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);
	conn->channels[call->channel] = call;
	sock_hold(&rx->sk);
	atomic_inc(&conn->usage);
	write_unlock_bh(&conn->lock);

	spin_lock(&conn->trans->peer->lock);
	list_add(&call->error_link, &conn->trans->peer->error_targets);
	spin_unlock(&conn->trans->peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	/* Record copies of information for hashtable lookup */
	call->proto = rx->proto;
	call->local = conn->trans->local;
	switch (call->proto) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			conn->trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       conn->trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	default:
		break;
	}
	call->epoch = conn->epoch;
	call->service_id = conn->service_id;
	call->in_clientflag = conn->in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}
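/* An incoming packet may legitimately refer to a call that already occupies
 * the connection channel (a retransmission) or to a previous, completed call
 * on the same channel; the paths above distinguish those from a channel still
 * busy with an incomplete call (-EBUSY) and from a reused call number
 * (-ECONNRESET).
 */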
/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
					  unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	/* search the extant calls for one that matches the specified user
	 * ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	spin_lock(&conn->trans->client_lock);
	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);

	if (conn->channels[call->channel] == call)
		conn->channels[call->channel] = NULL;

	if (conn->out_clientflag && conn->bundle) {
		conn->avail_calls++;
		switch (conn->avail_calls) {
		case 1:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->avail_conns);
			/* fall through */
		case 2 ... RXRPC_MAXCALLS - 1:
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			break;
		case RXRPC_MAXCALLS:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->unused_conns);
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			break;
		default:
			printk(KERN_ERR "RxRPC: conn->avail_calls=%d\n",
			       conn->avail_calls);
			BUG();
		}
	}

	spin_unlock(&conn->trans->client_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = RX_CALL_DEAD;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
	write_unlock_bh(&conn->lock);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}
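/* Note that rxrpc_release_call() does not drop the socket's reference on the
 * call; the deadspan timer inherits it and rxrpc_dead_call_expired() performs
 * the final rxrpc_put_call() once the grace period has elapsed.
 */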
/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}
/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->abort_code = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}
/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}
/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}
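/* Dropping the last reference does not free the call directly; it queues the
 * destroyer work item so that the actual cleanup always runs from the work
 * queue in process context.
 */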
/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	if (call->conn) {
		spin_lock(&call->conn->trans->peer->lock);
		list_del(&call->error_link);
		spin_unlock(&call->conn->trans->peer->lock);

		write_lock_bh(&call->conn->lock);
		rb_erase(&call->conn_node, &call->conn->calls);
		write_unlock_bh(&call->conn->lock);
		rxrpc_put_connection(call->conn);
	}

	/* Remove the call from the hash */
	rxrpc_call_hash_del(call);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	kmem_cache_free(rxrpc_call_jar, call);
}
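/* The Tx window drained above is a power-of-two ring of sk_buff pointers
 * (hence the "& (call->acks_winsz - 1)" wrap); the bottom bit of each slot
 * appears to be used as a flag, which is why it is masked off with "& ~1"
 * before the pointer is freed.
 */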
/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}
/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
			/* fall through */
		default:
			printk(KERN_ERR "RXRPC:"
			       " Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				printk(KERN_ERR "RXRPC: Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				printk(KERN_ERR "RXRPC: OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}
/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}
/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}
/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}
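/* All of the timer callbacks above follow the same pattern: they do no real
 * work in timer context, but merely set an event bit on the call and queue
 * the call processor, which then handles the event with the proper locks
 * held.
 */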