/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};
struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);
/*
 * Hash function for rxrpc_call_hash
 */
static unsigned long rxrpc_call_hashfunc(
	u8		in_clientflag,
	u32		cid,
	u32		call_id,
	u32		epoch,
	u16		service_id,
	sa_family_t	proto,
	void		*localptr,
	unsigned int	addr_size,
	const u8	*peer_addr)
{
	const u16 *p;
	unsigned int i;
	unsigned long key;

	_enter("");

	key = (unsigned long)localptr;
	/* We just want to add up the __be32 values, so forcing the
	 * cast should be okay.
	 */
	key += epoch;
	key += service_id;
	key += call_id;
	key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
	key += cid & RXRPC_CHANNELMASK;
	key += in_clientflag;
	key += proto;
	/* Step through the peer address in 16-bit portions for speed */
	for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
		key += *p;
	_leave(" key = 0x%lx", key);
	return key;
}
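
/*
 * Note on the scheme above: the key is a simple additive accumulation
 * rather than a collision-resistant hash, so it is cheap but collides
 * readily.  rxrpc_find_call_hash() below therefore re-checks every
 * component field on a bucket hit instead of trusting the key alone.
 */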
/*
 * Add a call to the hashtable
 */
static void rxrpc_call_hash_add(struct rxrpc_call *call)
{
	unsigned long key;
	unsigned int addr_size = 0;

	_enter("");
	switch (call->proto) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}
	key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
				  call->call_id, call->epoch,
				  call->service_id, call->proto,
				  call->conn->trans->local, addr_size,
				  call->peer_ip.ipv6_addr);
	/* Store the full key in the call */
	call->hash_key = key;
	spin_lock(&rxrpc_call_hash_lock);
	hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}
/*
 * Remove a call from the hashtable
 */
static void rxrpc_call_hash_del(struct rxrpc_call *call)
{
	_enter("");
	spin_lock(&rxrpc_call_hash_lock);
	hash_del_rcu(&call->hash_node);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}
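
/*
 * Locking summary for the table, as far as this file shows: insertions
 * and removals are serialised by rxrpc_call_hash_lock and use the _rcu
 * helpers, which suggests lookups are meant to traverse the chains under
 * rcu_read_lock() (e.g. from the packet-receive path) without taking the
 * spinlock.
 */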
/*
 * Find a call in the hashtable and return it, or NULL if it
 * isn't there.
 */
struct rxrpc_call *rxrpc_find_call_hash(
	struct rxrpc_host_header *hdr,
	void		*localptr,
	sa_family_t	proto,
	const void	*peer_addr)
{
	unsigned long key;
	unsigned int addr_size = 0;
	struct rxrpc_call *call = NULL;
	struct rxrpc_call *ret = NULL;
	u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;

	_enter("");
	switch (proto) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}

	key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
				  hdr->epoch, hdr->serviceId,
				  proto, localptr, addr_size,
				  peer_addr);
	hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
		if (call->hash_key == key &&
		    call->call_id == hdr->callNumber &&
		    call->cid == hdr->cid &&
		    call->in_clientflag == in_clientflag &&
		    call->service_id == hdr->serviceId &&
		    call->proto == proto &&
		    call->local == localptr &&
		    memcmp(call->peer_ip.ipv6_addr, peer_addr,
			   addr_size) == 0 &&
		    call->epoch == hdr->epoch) {
			ret = call;
			break;
		}
	}
	_leave(" = %p", ret);
	return ret;
}
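
/*
 * Usage sketch (illustrative only; "local", "srx" and the AF_INET choice
 * are assumptions, not taken from this file):
 *
 *	call = rxrpc_find_call_hash(&sp->hdr, local, AF_INET,
 *				    &srx.transport.sin.sin_addr);
 *	if (!call)
 *		...no extant call for this packet...
 *
 * Note that the comparison always goes through peer_ip.ipv6_addr; for
 * AF_INET only the first addr_size (4) bytes take part in the memcmp().
 */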
/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}
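
/*
 * A call returned from rxrpc_alloc_call() holds one reference (usage is
 * set to 1) and has its sock_node poisoned with 0xed; release and cleanup
 * repoison it with 0xdd and 0xcd respectively, so a stale rbtree linkage
 * is recognisable in a memory dump by which pattern it carries.
 */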
/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(
	struct rxrpc_sock *rx,
	struct rxrpc_transport *trans,
	struct rxrpc_conn_bundle *bundle,
	gfp_t gfp)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(rx != NULL);
	ASSERT(trans != NULL);
	ASSERT(bundle != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->socket = rx;
	call->rx_data_post = 1;

	ret = rxrpc_connect_call(rx, trans, bundle, call, gfp);
	if (ret < 0) {
		kmem_cache_free(rxrpc_call_jar, call);
		return ERR_PTR(ret);
	}

	/* Record copies of information for hashtable lookup */
	call->proto = rx->proto;
	call->local = trans->local;
	switch (call->proto) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	}
	call->epoch = call->conn->epoch;
	call->service_id = call->conn->service_id;
	call->in_clientflag = call->conn->in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	spin_lock(&call->conn->trans->peer->lock);
	list_add(&call->error_link, &call->conn->trans->peer->error_targets);
	spin_unlock(&call->conn->trans->peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);

	_leave(" = %p", call);
	return call;
}
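
/*
 * Note: rxrpc_connect_call() is what binds the call to a connection and
 * sets call->conn, so the conn-derived fields above are copied only after
 * it succeeds; on failure the bare call can be freed straight back to the
 * jar because nothing else has a pointer to it yet.
 */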
/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 int create,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p,%d,%d,%lx,%d",
	       rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
	       user_call_ID, create);

	/* search the extant calls first for one that matches the specified
	 * user ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);

	if (!create || !trans)
		return ERR_PTR(-EBADSLT);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	candidate->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new call */
	call = candidate;
	candidate = NULL;
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [extant %d]", call, atomic_read(&call->usage));
	return call;

	/* we found the call on the second time through the list */
found_extant_second:
	rxrpc_get_call(call);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(candidate);
	_leave(" = %p [second %d]", call, atomic_read(&call->usage));
	return call;
}
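
/*
 * The double search above is the usual optimistic-allocation pattern:
 * probe the tree under the read lock, and only on a miss allocate a
 * candidate and search again under the write lock, in case another
 * thread inserted the same user_call_ID between the two passes.
 *
 * Usage sketch (assumed caller, for illustration only):
 *
 *	call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID,
 *				     true, GFP_KERNEL);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 *	...
 *	rxrpc_put_call(call);
 *
 * The lookup takes a reference on the returned call, which the caller is
 * expected to drop with rxrpc_put_call() when done.
 */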
/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct rxrpc_host_header *hdr)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	u32 call_id;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = hdr->cid;
	candidate->call_id = hdr->callNumber;
	candidate->channel = hdr->cid & RXRPC_CHANNELMASK;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == hdr->callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			conn->channels[call->channel] = NULL;
		} else {
			write_unlock_bh(&conn->lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = hdr->callNumber;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		call = rb_entry(parent, struct rxrpc_call, conn_node);

		/* The tree is sorted in order of the __be32 value without
		 * turning it into host order.
		 */
		if (call_id < call->call_id)
			p = &(*p)->rb_left;
		else if (call_id > call->call_id)
			p = &(*p)->rb_right;
		else
			goto old_call;
	}

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);
	conn->channels[call->channel] = call;
	sock_hold(&rx->sk);
	atomic_inc(&conn->usage);
	write_unlock_bh(&conn->lock);

	spin_lock(&conn->trans->peer->lock);
	list_add(&call->error_link, &conn->trans->peer->error_targets);
	spin_unlock(&conn->trans->peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	/* Record copies of information for hashtable lookup */
	call->proto = rx->proto;
	call->local = conn->trans->local;
	switch (call->proto) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			conn->trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       conn->trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	default:
		break;
	}
	call->epoch = conn->epoch;
	call->service_id = conn->service_id;
	call->in_clientflag = conn->in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}
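
/*
 * Failure modes above, for reference: -EBUSY covers both candidate
 * allocation failure and a channel still bound to an incomplete earlier
 * call; -ECONNABORTED means the extant call on the channel was aborted;
 * -ECONNRESET means the peer reused a call number that is still in this
 * connection's call tree.
 */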
/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
					  unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	/* search the extant calls for one that matches the specified user
	 * ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	spin_lock(&conn->trans->client_lock);
	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);

	if (conn->channels[call->channel] == call)
		conn->channels[call->channel] = NULL;

	if (conn->out_clientflag && conn->bundle) {
		conn->avail_calls++;
		switch (conn->avail_calls) {
		case 1:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->avail_conns);
			/* fall through */
		case 2 ... RXRPC_MAXCALLS - 1:
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			break;
		case RXRPC_MAXCALLS:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->unused_conns);
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			break;
		default:
			printk(KERN_ERR "RxRPC: conn->avail_calls=%d\n",
			       conn->avail_calls);
			BUG();
		}
	}

	spin_unlock(&conn->trans->client_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
	write_unlock_bh(&conn->lock);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}
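
/*
 * Note the reference handoff above: rxrpc_release_call() does not drop
 * the socket's ref on the call.  Instead it arms the deadspan timer, and
 * rxrpc_dead_call_expired() below consumes that ref via rxrpc_put_call()
 * once the grace period (rxrpc_dead_call_expiry) has elapsed.
 */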
/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}
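
/*
 * This runs in timer (softirq) context, hence the _bh lock variants; the
 * rxrpc_put_call() here is typically the final put, which then queues the
 * destroyer work item from __rxrpc_put_call().
 */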
/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}
/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%d", rx->debug_id);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}
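
/*
 * Only rx->call_lock is taken above; rxrpc_mark_call_released() supplies
 * the per-call locking itself (call->state_lock), and the actual teardown
 * is deferred to the call processor via the RXRPC_CALL_EV_RELEASE event.
 */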
/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}
/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	if (call->conn) {
		spin_lock(&call->conn->trans->peer->lock);
		list_del(&call->error_link);
		spin_unlock(&call->conn->trans->peer->lock);

		write_lock_bh(&call->conn->lock);
		rb_erase(&call->conn_node, &call->conn->calls);
		write_unlock_bh(&call->conn->lock);
		rxrpc_put_connection(call->conn);
	}

	/* Remove the call from the hash */
	rxrpc_call_hash_del(call);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	kmem_cache_free(rxrpc_call_jar, call);
}
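
/*
 * The "& ~1" when draining the Tx window above strips the bottom bit of
 * each stored entry, which appears to be used as a per-packet flag
 * elsewhere; what remains is the sk_buff pointer proper, so each skb is
 * freed and then the window array itself is kfree()'d.
 */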
/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}
/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
			/* fall through */
		default:
			printk(KERN_ERR "RXRPC:"
			       " Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				printk(KERN_ERR "RXRPC: Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				printk(KERN_ERR "RXRPC: OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}
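
/*
 * Being an __exit path, this expects no new calls to be created while it
 * runs; the lock is dropped and retaken around each iteration (with a
 * cond_resched()) so a long rxrpc_calls list cannot pin the CPU with
 * bottom halves disabled for the whole traversal.
 */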
/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}
/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}
/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}