/* connection.c: Rx connection routines
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rxrpc/rxrpc.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/connection.h>
#include <rxrpc/call.h>
#include <rxrpc/message.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include "internal.h"
__RXACCT_DECL(atomic_t rxrpc_connection_count);

LIST_HEAD(rxrpc_conns);
DECLARE_RWSEM(rxrpc_conns_sem);
unsigned long rxrpc_conn_timeout = 60 * 60;

static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn);

static void __rxrpc_conn_timeout(rxrpc_timer_t *timer)
{
	struct rxrpc_connection *conn =
		list_entry(timer, struct rxrpc_connection, timeout);

	_debug("Rx CONN TIMEOUT [%p{u=%d}]", conn, atomic_read(&conn->usage));

	rxrpc_conn_do_timeout(conn);
}

static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = {
	.timed_out	= __rxrpc_conn_timeout,
};
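
/* A connection whose usage count drops to zero is not freed at once: it is
 * parked on its peer's graveyard list with this timer armed for
 * rxrpc_conn_timeout seconds, and may be resurrected from there by a later
 * lookup before rxrpc_conn_do_timeout() finally destroys it.
 */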

/*****************************************************************************/
/*
 * create a new connection record
 */
static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
					    struct rxrpc_connection **_conn)
{
	struct rxrpc_connection *conn;

	_enter("%p", peer);

	/* allocate and initialise a connection record */
	conn = kmalloc(sizeof(struct rxrpc_connection), GFP_KERNEL);
	if (!conn) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	memset(conn, 0, sizeof(struct rxrpc_connection));
	atomic_set(&conn->usage, 1);

	INIT_LIST_HEAD(&conn->link);
	INIT_LIST_HEAD(&conn->id_link);
	init_waitqueue_head(&conn->chanwait);
	spin_lock_init(&conn->lock);
	rxrpc_timer_init(&conn->timeout, &rxrpc_conn_timer_ops);

	do_gettimeofday(&conn->atime);
	conn->mtu_size = 1024;
	conn->peer = peer;
	conn->trans = peer->trans;

	__RXACCT(atomic_inc(&rxrpc_connection_count));
	*_conn = conn;
	_leave(" = 0 (%p)", conn);

	return 0;
} /* end __rxrpc_create_connection() */
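
/* The caller receives the sole reference on the new record (usage == 1); at
 * this point it is not on any peer list and has no connection ID - those
 * specifics are filled in by rxrpc_create_connection() and
 * rxrpc_connection_lookup() below.
 */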

/*****************************************************************************/
/*
 * create a new connection record for outgoing connections
 */
int rxrpc_create_connection(struct rxrpc_transport *trans,
			    __be16 port,
			    __be32 addr,
			    uint16_t service_id,
			    void *security,
			    struct rxrpc_connection **_conn)
{
	struct rxrpc_connection *candidate, *conn;
	struct rxrpc_peer *peer;
	struct list_head *_p;
	__be32 connid;
	int ret;

	_enter("%p{%hu},%u,%hu", trans, trans->port, ntohs(port), service_id);

	/* get a peer record */
	ret = rxrpc_peer_lookup(trans, addr, &peer);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	/* allocate and initialise a connection record */
	ret = __rxrpc_create_connection(peer, &candidate);
	if (ret < 0) {
		rxrpc_put_peer(peer);
		_leave(" = %d", ret);
		return ret;
	}

	/* fill in the specific bits */
	candidate->addr.sin_family = AF_INET;
	candidate->addr.sin_port = port;
	candidate->addr.sin_addr.s_addr = addr;

	candidate->in_epoch = rxrpc_epoch;
	candidate->out_epoch = rxrpc_epoch;
	candidate->in_clientflag = 0;
	candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->service_id = htons(service_id);

	/* invent a unique connection ID */
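	/* note: IDs are handed out RXRPC_MAXCALLS apart so that the bottom
	 * bits of the CID seen on the wire can carry the channel (call)
	 * index - see rxrpc_conn_newmsg() and
	 * rxrpc_conn_receive_call_packet() below
	 */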
	write_lock(&peer->conn_idlock);

 try_next_id:
	connid = htonl(peer->conn_idcounter & RXRPC_CIDMASK);
	peer->conn_idcounter += RXRPC_MAXCALLS;

	list_for_each(_p, &peer->conn_idlist) {
		conn = list_entry(_p, struct rxrpc_connection, id_link);
		if (connid == conn->conn_id)
			goto try_next_id;
		if (connid > conn->conn_id)
			break;
	}

	_debug("selected candidate conn ID %x.%u",
	       ntohl(peer->addr.s_addr), ntohl(connid));

	candidate->conn_id = connid;
	list_add_tail(&candidate->id_link, _p);

	write_unlock(&peer->conn_idlock);

	/* attach to peer */
	candidate->peer = peer;

	write_lock(&peer->conn_lock);

	/* search the peer's transport graveyard list */
	spin_lock(&peer->conn_gylock);
	list_for_each(_p, &peer->conn_graveyard) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == candidate->addr.sin_port &&
		    conn->security_ix == candidate->security_ix &&
		    conn->service_id == candidate->service_id &&
		    conn->in_clientflag == 0)
			goto found_in_graveyard;
	}
	spin_unlock(&peer->conn_gylock);

	/* pick the new candidate */
	_debug("created connection: {%08x} [out]", ntohl(candidate->conn_id));
	atomic_inc(&peer->conn_count);
	conn = candidate;
	candidate = NULL;

 make_active:
	list_add_tail(&conn->link, &peer->conn_active);
	write_unlock(&peer->conn_lock);

	if (candidate) {
		write_lock(&peer->conn_idlock);
		list_del(&candidate->id_link);
		write_unlock(&peer->conn_idlock);

		__RXACCT(atomic_dec(&rxrpc_connection_count));
		kfree(candidate);
	}
	else {
		down_write(&rxrpc_conns_sem);
		list_add_tail(&conn->proc_link, &rxrpc_conns);
		up_write(&rxrpc_conns_sem);
	}

	*_conn = conn;
	_leave(" = 0 (%p)", conn);

	return 0;

	/* handle resurrecting a connection from the graveyard */
 found_in_graveyard:
	_debug("resurrecting connection: {%08x} [out]", ntohl(conn->conn_id));
	rxrpc_get_connection(conn);
	rxrpc_krxtimod_del_timer(&conn->timeout);
	list_del_init(&conn->link);
	spin_unlock(&peer->conn_gylock);
	goto make_active;
} /* end rxrpc_create_connection() */

/*****************************************************************************/
/*
 * lookup the connection for an incoming packet
 * - create a new connection record for unrecorded incoming connections
 */
int rxrpc_connection_lookup(struct rxrpc_peer *peer,
			    struct rxrpc_message *msg,
			    struct rxrpc_connection **_conn)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct list_head *_p;
	int ret, fresh = 0;
	__be32 x_epoch, x_connid;
	__be16 x_port, x_servid;
	__u32 x_secix;
	u8 x_clflag;

	_enter("%p{{%hu}},%u,%hu",
	       peer,
	       peer->trans->port,
	       ntohs(msg->pkt->h.uh->source),
	       ntohs(msg->hdr.serviceId));

	x_port = msg->pkt->h.uh->source;
	x_epoch = msg->hdr.epoch;
	x_clflag = msg->hdr.flags & RXRPC_CLIENT_INITIATED;
	x_connid = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK);
	x_servid = msg->hdr.serviceId;
	x_secix = msg->hdr.securityIndex;

	/* [common case] search the transport's active list first */
	read_lock(&peer->conn_lock);
	list_for_each(_p, &peer->conn_active) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == x_port &&
		    conn->in_epoch == x_epoch &&
		    conn->conn_id == x_connid &&
		    conn->security_ix == x_secix &&
		    conn->service_id == x_servid &&
		    conn->in_clientflag == x_clflag)
			goto found_active;
	}
	read_unlock(&peer->conn_lock);

	/* [uncommon case] not active
	 * - create a candidate for a new record if an inbound connection
	 * - only examine the graveyard for an outbound connection
	 */
	if (x_clflag) {
		ret = __rxrpc_create_connection(peer, &candidate);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		/* fill in the specifics */
		candidate->addr.sin_family = AF_INET;
		candidate->addr.sin_port = x_port;
		candidate->addr.sin_addr.s_addr = msg->pkt->nh.iph->saddr;
		candidate->in_epoch = x_epoch;
		candidate->out_epoch = x_epoch;
		candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
		candidate->out_clientflag = 0;
		candidate->conn_id = x_connid;
		candidate->service_id = x_servid;
		candidate->security_ix = x_secix;
	}

	/* search the active list again, just in case it appeared whilst we
	 * were busy */
	write_lock(&peer->conn_lock);
	list_for_each(_p, &peer->conn_active) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == x_port &&
		    conn->in_epoch == x_epoch &&
		    conn->conn_id == x_connid &&
		    conn->security_ix == x_secix &&
		    conn->service_id == x_servid &&
		    conn->in_clientflag == x_clflag)
			goto found_active_second_chance;
	}

	/* search the transport's graveyard list */
	spin_lock(&peer->conn_gylock);
	list_for_each(_p, &peer->conn_graveyard) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == x_port &&
		    conn->in_epoch == x_epoch &&
		    conn->conn_id == x_connid &&
		    conn->security_ix == x_secix &&
		    conn->service_id == x_servid &&
		    conn->in_clientflag == x_clflag)
			goto found_in_graveyard;
	}
	spin_unlock(&peer->conn_gylock);

	/* outbound connections aren't created here */
	if (!x_clflag) {
		write_unlock(&peer->conn_lock);
		_leave(" = -ENOENT");
		return -ENOENT;
	}

	/* we can now add the new candidate to the list */
	_debug("created connection: {%08x} [in]", ntohl(candidate->conn_id));
	rxrpc_get_peer(peer);
	conn = candidate;
	candidate = NULL;
	atomic_inc(&peer->conn_count);
	fresh = 1;

 make_active:
	list_add_tail(&conn->link, &peer->conn_active);

 success_uwfree:
	write_unlock(&peer->conn_lock);

	if (candidate) {
		write_lock(&peer->conn_idlock);
		list_del(&candidate->id_link);
		write_unlock(&peer->conn_idlock);

		__RXACCT(atomic_dec(&rxrpc_connection_count));
		kfree(candidate);
	}

	if (fresh) {
		down_write(&rxrpc_conns_sem);
		list_add_tail(&conn->proc_link, &rxrpc_conns);
		up_write(&rxrpc_conns_sem);
	}

 success:
	*_conn = conn;
	_leave(" = 0 (%p)", conn);
	return 0;

	/* handle the connection being found in the active list straight off */
 found_active:
	rxrpc_get_connection(conn);
	read_unlock(&peer->conn_lock);
	goto success;

	/* handle resurrecting a connection from the graveyard */
 found_in_graveyard:
	_debug("resurrecting connection: {%08x} [in]", ntohl(conn->conn_id));
	rxrpc_get_peer(peer);
	rxrpc_get_connection(conn);
	rxrpc_krxtimod_del_timer(&conn->timeout);
	list_del_init(&conn->link);
	spin_unlock(&peer->conn_gylock);
	goto make_active;

	/* handle finding the connection on the second time through the active
	 * list */
 found_active_second_chance:
	rxrpc_get_connection(conn);
	goto success_uwfree;

} /* end rxrpc_connection_lookup() */
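
/* There are three ways out of the lookup above: the connection was on the
 * active list (the common case, taken under the read lock), it was
 * resurrected from the graveyard, or - for inbound (client-initiated)
 * packets only - a freshly created candidate was promoted to the active
 * list.  Outbound connections are never created on this path; they come
 * from rxrpc_create_connection().
 */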

/*****************************************************************************/
/*
 * finish using a connection record
 * - it will be transferred to the peer's connection graveyard when refcount
 *   reaches 0
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer;

	if (!conn)
		return;

	_enter("%p{u=%d p=%hu}",
	       conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));

	peer = conn->peer;
	spin_lock(&peer->conn_gylock);

	/* sanity check */
	if (atomic_read(&conn->usage) <= 0)
		BUG();

	if (likely(!atomic_dec_and_test(&conn->usage))) {
		spin_unlock(&peer->conn_gylock);
		_leave("");
		return;
	}

	/* move to graveyard queue */
	_debug("burying connection: {%08x}", ntohl(conn->conn_id));
	list_del(&conn->link);
	list_add_tail(&conn->link, &peer->conn_graveyard);

	rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ);

	spin_unlock(&peer->conn_gylock);

	rxrpc_put_peer(conn->peer);

	_leave(" [killed]");
} /* end rxrpc_put_connection() */
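
/* note that "killed" here only means buried: the final kfree() happens in
 * rxrpc_conn_do_timeout() once the graveyard timer expires, unless the
 * connection is resurrected by another lookup first
 */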

/*****************************************************************************/
/*
 * free a connection record
 */
static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer;

	_enter("%p{u=%d p=%hu}",
	       conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));

	peer = conn->peer;

	if (atomic_read(&conn->usage) < 0)
		BUG();

	/* remove from graveyard if still dead */
	spin_lock(&peer->conn_gylock);
	if (atomic_read(&conn->usage) == 0) {
		list_del_init(&conn->link);
	}
	else {
		conn = NULL;
	}
	spin_unlock(&peer->conn_gylock);

	if (!conn) {
		_leave("");
		return; /* resurrected */
	}

	_debug("--- Destroying Connection %p{%08x} ---",
	       conn, ntohl(conn->conn_id));

	down_write(&rxrpc_conns_sem);
	list_del(&conn->proc_link);
	up_write(&rxrpc_conns_sem);

	write_lock(&peer->conn_idlock);
	list_del(&conn->id_link);
	write_unlock(&peer->conn_idlock);

	__RXACCT(atomic_dec(&rxrpc_connection_count));
	kfree(conn);

	/* if the graveyard is now empty, wake up anyone waiting for that */
	if (atomic_dec_and_test(&peer->conn_count))
		wake_up(&peer->conn_gy_waitq);

	_leave(" [destroyed]");
} /* end rxrpc_conn_do_timeout() */

/*****************************************************************************/
/*
 * clear all connection records from a peer endpoint
 */
void rxrpc_conn_clearall(struct rxrpc_peer *peer)
{
	DECLARE_WAITQUEUE(myself, current);

	struct rxrpc_connection *conn;
	int err;

	_enter("%p", peer);

	/* there shouldn't be any active conns remaining */
	if (!list_empty(&peer->conn_active))
		BUG();

	/* manually timeout all conns in the graveyard */
	spin_lock(&peer->conn_gylock);
	while (!list_empty(&peer->conn_graveyard)) {
		conn = list_entry(peer->conn_graveyard.next,
				  struct rxrpc_connection, link);
		err = rxrpc_krxtimod_del_timer(&conn->timeout);
		spin_unlock(&peer->conn_gylock);

		if (err == 0)
			rxrpc_conn_do_timeout(conn);

		spin_lock(&peer->conn_gylock);
	}
	spin_unlock(&peer->conn_gylock);

	/* wait for the conn graveyard to be completely cleared */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&peer->conn_gy_waitq, &myself);

	while (atomic_read(&peer->conn_count) != 0) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}

	remove_wait_queue(&peer->conn_gy_waitq, &myself);
	set_current_state(TASK_RUNNING);

	_leave("");
} /* end rxrpc_conn_clearall() */

/*****************************************************************************/
/*
 * allocate and prepare a message for sending out through the transport
 * endpoint
 */
int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
		      struct rxrpc_call *call,
		      uint8_t type,
		      int dcount,
		      struct kvec diov[],
		      int alloc_flags,
		      struct rxrpc_message **_msg)
{
	struct rxrpc_message *msg;
	int loop;

	_enter("%p{%d},%p,%u", conn, ntohs(conn->addr.sin_port), call, type);

	if (dcount > 3) {
		_leave(" = -EINVAL");
		return -EINVAL;
	}

	msg = kmalloc(sizeof(struct rxrpc_message), alloc_flags);
	if (!msg) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	memset(msg, 0, sizeof(*msg));
	atomic_set(&msg->usage, 1);

	INIT_LIST_HEAD(&msg->link);

	msg->state = RXRPC_MSG_PREPARED;

	msg->hdr.epoch = conn->out_epoch;
	msg->hdr.cid = conn->conn_id | (call ? call->chan_ix : 0);
	msg->hdr.callNumber = call ? call->call_id : 0;
	msg->hdr.type = type;
	msg->hdr.flags = conn->out_clientflag;
	msg->hdr.securityIndex = conn->security_ix;
	msg->hdr.serviceId = conn->service_id;

	/* generate sequence numbers for data packets */
	if (call) {
		switch (type) {
		case RXRPC_PACKET_TYPE_DATA:
			msg->seq = ++call->snd_seq_count;
			msg->hdr.seq = htonl(msg->seq);
			break;
		case RXRPC_PACKET_TYPE_ACK:
			/* ACK sequence numbers are complicated. The following
			 * may be wrong:
			 * - jumbo packet ACKs should have a seq number
			 * - normal ACKs should not
			 */
		default:
			break;
		}
	}

	msg->dcount = dcount + 1;
	msg->dsize = sizeof(msg->hdr);
	msg->data[0].iov_len = sizeof(msg->hdr);
	msg->data[0].iov_base = &msg->hdr;

	for (loop = 0; loop < dcount; loop++) {
		msg->dsize += diov[loop].iov_len;
		msg->data[loop + 1].iov_len = diov[loop].iov_len;
		msg->data[loop + 1].iov_base = diov[loop].iov_base;
	}

	__RXACCT(atomic_inc(&rxrpc_message_count));
	*_msg = msg;
	_leave(" = 0 (%p) #%d", msg, atomic_read(&rxrpc_message_count));
	return 0;
} /* end rxrpc_conn_newmsg() */
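
/* msg->data[0] always carries the wire header; the caller may supply at most
 * three further kvecs, which are referenced (not copied) into data[1..3].
 * __rxrpc_put_message() will kfree() any of those buffers whose bit is set
 * in msg->dfree.
 */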

/*****************************************************************************/
/*
 * free a message
 */
void __rxrpc_put_message(struct rxrpc_message *msg)
{
	int loop;

	_enter("%p #%d", msg, atomic_read(&rxrpc_message_count));

	if (msg->pkt)
		kfree_skb(msg->pkt);
	rxrpc_put_connection(msg->conn);

	for (loop = 0; loop < 8; loop++)
		if (test_bit(loop, &msg->dfree))
			kfree(msg->data[loop].iov_base);

	__RXACCT(atomic_dec(&rxrpc_message_count));
	kfree(msg);

	_leave("");
} /* end __rxrpc_put_message() */

/*****************************************************************************/
/*
 * send a message out through the transport endpoint
 */
int rxrpc_conn_sendmsg(struct rxrpc_connection *conn,
		       struct rxrpc_message *msg)
{
	struct msghdr msghdr;
	int ret;

	_enter("%p{%d}", conn, ntohs(conn->addr.sin_port));

	/* fill in some fields in the header */
	spin_lock(&conn->lock);
	msg->hdr.serial = htonl(++conn->serial_counter);
	msg->rttdone = 0;
	spin_unlock(&conn->lock);

	/* set up the message to be transmitted */
	msghdr.msg_name = &conn->addr;
	msghdr.msg_namelen = sizeof(conn->addr);
	msghdr.msg_control = NULL;
	msghdr.msg_controllen = 0;
	msghdr.msg_flags = MSG_CONFIRM | MSG_DONTWAIT;

	_net("Sending message type %d of %Zd bytes to %08x:%d",
	     msg->hdr.type,
	     msg->dsize,
	     ntohl(conn->addr.sin_addr.s_addr),
	     ntohs(conn->addr.sin_port));

	/* send the message */
	ret = kernel_sendmsg(conn->trans->socket, &msghdr,
			     msg->data, msg->dcount, msg->dsize);
	if (ret < 0) {
		msg->state = RXRPC_MSG_ERROR;
	} else {
		msg->state = RXRPC_MSG_SENT;
		ret = 0;

		spin_lock(&conn->lock);
		do_gettimeofday(&conn->atime);
		msg->stamp = conn->atime;
		spin_unlock(&conn->lock);
	}

	_leave(" = %d", ret);

	return ret;
} /* end rxrpc_conn_sendmsg() */

/*****************************************************************************/
/*
 * deal with a subsequent call packet
 */
int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
				   struct rxrpc_call *call,
				   struct rxrpc_message *msg)
{
	struct rxrpc_message *pmsg;
	struct list_head *_p;
	unsigned cix, seq;
	int ret = 0;

	_enter("%p,%p,%p", conn, call, msg);

	if (!call) {
		cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;

		spin_lock(&conn->lock);
		call = conn->channels[cix];

		if (!call || call->call_id != msg->hdr.callNumber) {
			spin_unlock(&conn->lock);
			rxrpc_trans_immediate_abort(conn->trans, msg, -ENOENT);
			goto out;
		}
		else {
			rxrpc_get_call(call);
			spin_unlock(&conn->lock);
		}
	}
	else {
		rxrpc_get_call(call);
	}

	_proto("Received packet %%%u [%u] on call %hu:%u:%u",
	       ntohl(msg->hdr.serial),
	       ntohl(msg->hdr.seq),
	       ntohs(msg->hdr.serviceId),
	       ntohl(conn->conn_id),
	       ntohl(call->call_id));

	call->pkt_rcv_count++;

	if (msg->pkt->dst && msg->pkt->dst->dev)
		conn->peer->if_mtu =
			msg->pkt->dst->dev->mtu -
			msg->pkt->dst->dev->hard_header_len;

	/* queue on the call in seq order */
	rxrpc_get_message(msg);
	seq = msg->seq;

	spin_lock(&call->lock);
	list_for_each(_p, &call->rcv_receiveq) {
		pmsg = list_entry(_p, struct rxrpc_message, link);
		if (pmsg->seq > seq)
			break;
	}
	list_add_tail(&msg->link, _p);

	/* reset the activity timeout */
	call->flags |= RXRPC_CALL_RCV_PKT;
	mod_timer(&call->rcv_timeout, jiffies + rxrpc_call_rcv_timeout * HZ);

	spin_unlock(&call->lock);

	rxrpc_krxiod_queue_call(call);

	rxrpc_put_call(call);
 out:
	_leave(" = %d", ret);
	return ret;
} /* end rxrpc_conn_receive_call_packet() */
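
/* The channel index recovered from the CID above selects one of the four
 * call slots in conn->channels[]; a packet whose call number does not match
 * the call currently occupying that slot is aborted immediately.
 */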

/*****************************************************************************/
/*
 * handle an ICMP error being applied to a connection
 */
void rxrpc_conn_handle_error(struct rxrpc_connection *conn,
			     int local, int errno)
{
	struct rxrpc_call *calls[4];
	int loop;

	_enter("%p{%d},%d", conn, ntohs(conn->addr.sin_port), errno);

	/* get a ref to all my calls in one go */
	memset(calls, 0, sizeof(calls));
	spin_lock(&conn->lock);

	for (loop = 3; loop >= 0; loop--) {
		if (conn->channels[loop]) {
			calls[loop] = conn->channels[loop];
			rxrpc_get_call(calls[loop]);
		}
	}

	spin_unlock(&conn->lock);

	/* now kick them all */
	for (loop = 3; loop >= 0; loop--) {
		if (calls[loop]) {
			rxrpc_call_handle_error(calls[loop], local, errno);
			rxrpc_put_call(calls[loop]);
		}
	}

	_leave("");
} /* end rxrpc_conn_handle_error() */