1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Client connection-specific management code.
4 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
7 * Client connections need to be cached for a little while after they've made a
8 * call so as to handle retransmitted DATA packets in case the server didn't
9 * receive the final ACK or terminating ABORT we sent it.
11 * There are flags of relevance to the cache:
13 * (2) DONT_REUSE - The connection should be discarded as soon as possible and
14 * should not be reused. This is set when an exclusive connection is used
15 * or a call ID counter overflows.
17 * The caching state may only be changed if the cache lock is held.
19 * There are two idle client connection expiry durations. If the total number
20 * of connections is below the reap threshold, we use the normal duration; if
21 * it's above, we use the fast duration.
24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26 #include <linux/slab.h>
27 #include <linux/idr.h>
28 #include <linux/timer.h>
29 #include <linux/sched/signal.h>
31 #include "ar-internal.h"
33 __read_mostly
unsigned int rxrpc_reap_client_connections
= 900;
34 __read_mostly
unsigned long rxrpc_conn_idle_client_expiry
= 2 * 60 * HZ
;
35 __read_mostly
unsigned long rxrpc_conn_idle_client_fast_expiry
= 2 * HZ
;
37 static void rxrpc_activate_bundle(struct rxrpc_bundle
*bundle
)
39 atomic_inc(&bundle
->active
);
43 * Release a connection ID for a client connection.
45 static void rxrpc_put_client_connection_id(struct rxrpc_local
*local
,
46 struct rxrpc_connection
*conn
)
48 idr_remove(&local
->conn_ids
, conn
->proto
.cid
>> RXRPC_CIDSHIFT
);
52 * Destroy the client connection ID tree.
54 static void rxrpc_destroy_client_conn_ids(struct rxrpc_local
*local
)
56 struct rxrpc_connection
*conn
;
59 if (!idr_is_empty(&local
->conn_ids
)) {
60 idr_for_each_entry(&local
->conn_ids
, conn
, id
) {
61 pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
62 conn
, refcount_read(&conn
->ref
));
67 idr_destroy(&local
->conn_ids
);
71 * Allocate a connection bundle.
73 static struct rxrpc_bundle
*rxrpc_alloc_bundle(struct rxrpc_call
*call
,
76 static atomic_t rxrpc_bundle_id
;
77 struct rxrpc_bundle
*bundle
;
79 bundle
= kzalloc(sizeof(*bundle
), gfp
);
81 bundle
->local
= call
->local
;
82 bundle
->peer
= rxrpc_get_peer(call
->peer
, rxrpc_peer_get_bundle
);
83 bundle
->key
= key_get(call
->key
);
84 bundle
->security
= call
->security
;
85 bundle
->exclusive
= test_bit(RXRPC_CALL_EXCLUSIVE
, &call
->flags
);
86 bundle
->upgrade
= test_bit(RXRPC_CALL_UPGRADE
, &call
->flags
);
87 bundle
->service_id
= call
->dest_srx
.srx_service
;
88 bundle
->security_level
= call
->security_level
;
89 bundle
->debug_id
= atomic_inc_return(&rxrpc_bundle_id
);
90 refcount_set(&bundle
->ref
, 1);
91 atomic_set(&bundle
->active
, 1);
92 INIT_LIST_HEAD(&bundle
->waiting_calls
);
93 trace_rxrpc_bundle(bundle
->debug_id
, 1, rxrpc_bundle_new
);
95 write_lock(&bundle
->local
->rxnet
->conn_lock
);
96 list_add_tail(&bundle
->proc_link
, &bundle
->local
->rxnet
->bundle_proc_list
);
97 write_unlock(&bundle
->local
->rxnet
->conn_lock
);
102 struct rxrpc_bundle
*rxrpc_get_bundle(struct rxrpc_bundle
*bundle
,
103 enum rxrpc_bundle_trace why
)
107 __refcount_inc(&bundle
->ref
, &r
);
108 trace_rxrpc_bundle(bundle
->debug_id
, r
+ 1, why
);
112 static void rxrpc_free_bundle(struct rxrpc_bundle
*bundle
)
114 trace_rxrpc_bundle(bundle
->debug_id
, refcount_read(&bundle
->ref
),
116 write_lock(&bundle
->local
->rxnet
->conn_lock
);
117 list_del(&bundle
->proc_link
);
118 write_unlock(&bundle
->local
->rxnet
->conn_lock
);
119 rxrpc_put_peer(bundle
->peer
, rxrpc_peer_put_bundle
);
120 key_put(bundle
->key
);
124 void rxrpc_put_bundle(struct rxrpc_bundle
*bundle
, enum rxrpc_bundle_trace why
)
131 id
= bundle
->debug_id
;
132 dead
= __refcount_dec_and_test(&bundle
->ref
, &r
);
133 trace_rxrpc_bundle(id
, r
- 1, why
);
135 rxrpc_free_bundle(bundle
);
/*
 * Get rid of outstanding client connection preallocations when a local
 * endpoint is destroyed.
 */
void rxrpc_purge_client_connections(struct rxrpc_local *local)
{
	rxrpc_destroy_client_conn_ids(local);
}
149 * Allocate a client connection.
151 static struct rxrpc_connection
*
152 rxrpc_alloc_client_connection(struct rxrpc_bundle
*bundle
)
154 struct rxrpc_connection
*conn
;
155 struct rxrpc_local
*local
= bundle
->local
;
156 struct rxrpc_net
*rxnet
= local
->rxnet
;
161 conn
= rxrpc_alloc_connection(rxnet
, GFP_ATOMIC
| __GFP_NOWARN
);
163 return ERR_PTR(-ENOMEM
);
165 id
= idr_alloc_cyclic(&local
->conn_ids
, conn
, 1, 0x40000000,
166 GFP_ATOMIC
| __GFP_NOWARN
);
172 refcount_set(&conn
->ref
, 1);
173 conn
->proto
.cid
= id
<< RXRPC_CIDSHIFT
;
174 conn
->proto
.epoch
= local
->rxnet
->epoch
;
175 conn
->out_clientflag
= RXRPC_CLIENT_INITIATED
;
176 conn
->bundle
= rxrpc_get_bundle(bundle
, rxrpc_bundle_get_client_conn
);
177 conn
->local
= rxrpc_get_local(bundle
->local
, rxrpc_local_get_client_conn
);
178 conn
->peer
= rxrpc_get_peer(bundle
->peer
, rxrpc_peer_get_client_conn
);
179 conn
->key
= key_get(bundle
->key
);
180 conn
->security
= bundle
->security
;
181 conn
->exclusive
= bundle
->exclusive
;
182 conn
->upgrade
= bundle
->upgrade
;
183 conn
->orig_service_id
= bundle
->service_id
;
184 conn
->security_level
= bundle
->security_level
;
185 conn
->state
= RXRPC_CONN_CLIENT_UNSECURED
;
186 conn
->service_id
= conn
->orig_service_id
;
188 if (conn
->security
== &rxrpc_no_security
)
189 conn
->state
= RXRPC_CONN_CLIENT
;
191 atomic_inc(&rxnet
->nr_conns
);
192 write_lock(&rxnet
->conn_lock
);
193 list_add_tail(&conn
->proc_link
, &rxnet
->conn_proc_list
);
194 write_unlock(&rxnet
->conn_lock
);
196 rxrpc_see_connection(conn
, rxrpc_conn_new_client
);
198 atomic_inc(&rxnet
->nr_client_conns
);
199 trace_rxrpc_client(conn
, -1, rxrpc_client_alloc
);
204 * Determine if a connection may be reused.
206 static bool rxrpc_may_reuse_conn(struct rxrpc_connection
*conn
)
208 struct rxrpc_net
*rxnet
;
209 int id_cursor
, id
, distance
, limit
;
215 if (test_bit(RXRPC_CONN_DONT_REUSE
, &conn
->flags
))
218 if ((conn
->state
!= RXRPC_CONN_CLIENT_UNSECURED
&&
219 conn
->state
!= RXRPC_CONN_CLIENT
) ||
220 conn
->proto
.epoch
!= rxnet
->epoch
)
221 goto mark_dont_reuse
;
223 /* The IDR tree gets very expensive on memory if the connection IDs are
224 * widely scattered throughout the number space, so we shall want to
225 * kill off connections that, say, have an ID more than about four
226 * times the maximum number of client conns away from the current
227 * allocation point to try and keep the IDs concentrated.
229 id_cursor
= idr_get_cursor(&conn
->local
->conn_ids
);
230 id
= conn
->proto
.cid
>> RXRPC_CIDSHIFT
;
231 distance
= id
- id_cursor
;
233 distance
= -distance
;
234 limit
= max_t(unsigned long, atomic_read(&rxnet
->nr_conns
) * 4, 1024);
235 if (distance
> limit
)
236 goto mark_dont_reuse
;
241 set_bit(RXRPC_CONN_DONT_REUSE
, &conn
->flags
);
247 * Look up the conn bundle that matches the connection parameters, adding it if
248 * it doesn't yet exist.
250 int rxrpc_look_up_bundle(struct rxrpc_call
*call
, gfp_t gfp
)
252 struct rxrpc_bundle
*bundle
, *candidate
;
253 struct rxrpc_local
*local
= call
->local
;
254 struct rb_node
*p
, **pp
, *parent
;
256 bool upgrade
= test_bit(RXRPC_CALL_UPGRADE
, &call
->flags
);
258 _enter("{%px,%x,%u,%u}",
259 call
->peer
, key_serial(call
->key
), call
->security_level
,
262 if (test_bit(RXRPC_CALL_EXCLUSIVE
, &call
->flags
)) {
263 call
->bundle
= rxrpc_alloc_bundle(call
, gfp
);
264 return call
->bundle
? 0 : -ENOMEM
;
267 /* First, see if the bundle is already there. */
269 spin_lock(&local
->client_bundles_lock
);
270 p
= local
->client_bundles
.rb_node
;
272 bundle
= rb_entry(p
, struct rxrpc_bundle
, local_node
);
274 #define cmp(X, Y) ((long)(X) - (long)(Y))
275 diff
= (cmp(bundle
->peer
, call
->peer
) ?:
276 cmp(bundle
->key
, call
->key
) ?:
277 cmp(bundle
->security_level
, call
->security_level
) ?:
278 cmp(bundle
->upgrade
, upgrade
));
287 spin_unlock(&local
->client_bundles_lock
);
290 /* It wasn't. We need to add one. */
291 candidate
= rxrpc_alloc_bundle(call
, gfp
);
296 spin_lock(&local
->client_bundles_lock
);
297 pp
= &local
->client_bundles
.rb_node
;
301 bundle
= rb_entry(parent
, struct rxrpc_bundle
, local_node
);
303 #define cmp(X, Y) ((long)(X) - (long)(Y))
304 diff
= (cmp(bundle
->peer
, call
->peer
) ?:
305 cmp(bundle
->key
, call
->key
) ?:
306 cmp(bundle
->security_level
, call
->security_level
) ?:
307 cmp(bundle
->upgrade
, upgrade
));
310 pp
= &(*pp
)->rb_left
;
312 pp
= &(*pp
)->rb_right
;
314 goto found_bundle_free
;
317 _debug("new bundle");
318 rb_link_node(&candidate
->local_node
, parent
, pp
);
319 rb_insert_color(&candidate
->local_node
, &local
->client_bundles
);
320 call
->bundle
= rxrpc_get_bundle(candidate
, rxrpc_bundle_get_client_call
);
321 spin_unlock(&local
->client_bundles_lock
);
322 _leave(" = B=%u [new]", call
->bundle
->debug_id
);
326 rxrpc_free_bundle(candidate
);
328 call
->bundle
= rxrpc_get_bundle(bundle
, rxrpc_bundle_get_client_call
);
329 rxrpc_activate_bundle(bundle
);
330 spin_unlock(&local
->client_bundles_lock
);
331 _leave(" = B=%u [found]", call
->bundle
->debug_id
);
336 * Allocate a new connection and add it into a bundle.
338 static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle
*bundle
,
341 struct rxrpc_connection
*conn
, *old
;
342 unsigned int shift
= slot
* RXRPC_MAXCALLS
;
345 old
= bundle
->conns
[slot
];
347 bundle
->conns
[slot
] = NULL
;
348 bundle
->conn_ids
[slot
] = 0;
349 trace_rxrpc_client(old
, -1, rxrpc_client_replace
);
350 rxrpc_put_connection(old
, rxrpc_conn_put_noreuse
);
353 conn
= rxrpc_alloc_client_connection(bundle
);
355 bundle
->alloc_error
= PTR_ERR(conn
);
359 rxrpc_activate_bundle(bundle
);
360 conn
->bundle_shift
= shift
;
361 bundle
->conns
[slot
] = conn
;
362 bundle
->conn_ids
[slot
] = conn
->debug_id
;
363 for (i
= 0; i
< RXRPC_MAXCALLS
; i
++)
364 set_bit(shift
+ i
, &bundle
->avail_chans
);
369 * Add a connection to a bundle if there are no usable connections or we have
370 * connections waiting for extra capacity.
372 static bool rxrpc_bundle_has_space(struct rxrpc_bundle
*bundle
)
374 int slot
= -1, i
, usable
;
378 bundle
->alloc_error
= 0;
380 /* See if there are any usable connections. */
382 for (i
= 0; i
< ARRAY_SIZE(bundle
->conns
); i
++) {
383 if (rxrpc_may_reuse_conn(bundle
->conns
[i
]))
389 if (!usable
&& bundle
->upgrade
)
390 bundle
->try_upgrade
= true;
395 if (!bundle
->avail_chans
&&
396 !bundle
->try_upgrade
&&
397 usable
< ARRAY_SIZE(bundle
->conns
))
404 return slot
>= 0 ? rxrpc_add_conn_to_bundle(bundle
, slot
) : false;
408 * Assign a channel to the call at the front of the queue and wake the call up.
409 * We don't increment the callNumber counter until this number has been exposed
412 static void rxrpc_activate_one_channel(struct rxrpc_connection
*conn
,
413 unsigned int channel
)
415 struct rxrpc_channel
*chan
= &conn
->channels
[channel
];
416 struct rxrpc_bundle
*bundle
= conn
->bundle
;
417 struct rxrpc_call
*call
= list_entry(bundle
->waiting_calls
.next
,
418 struct rxrpc_call
, wait_link
);
419 u32 call_id
= chan
->call_counter
+ 1;
421 _enter("C=%x,%u", conn
->debug_id
, channel
);
423 list_del_init(&call
->wait_link
);
425 trace_rxrpc_client(conn
, channel
, rxrpc_client_chan_activate
);
427 /* Cancel the final ACK on the previous call if it hasn't been sent yet
428 * as the DATA packet will implicitly ACK it.
430 clear_bit(RXRPC_CONN_FINAL_ACK_0
+ channel
, &conn
->flags
);
431 clear_bit(conn
->bundle_shift
+ channel
, &bundle
->avail_chans
);
433 rxrpc_see_call(call
, rxrpc_call_see_activate_client
);
434 call
->conn
= rxrpc_get_connection(conn
, rxrpc_conn_get_activate_call
);
435 call
->cid
= conn
->proto
.cid
| channel
;
436 call
->call_id
= call_id
;
437 call
->dest_srx
.srx_service
= conn
->service_id
;
438 call
->cong_ssthresh
= call
->peer
->cong_ssthresh
;
439 if (call
->cong_cwnd
>= call
->cong_ssthresh
)
440 call
->cong_mode
= RXRPC_CALL_CONGEST_AVOIDANCE
;
442 call
->cong_mode
= RXRPC_CALL_SLOW_START
;
444 chan
->call_id
= call_id
;
445 chan
->call_debug_id
= call
->debug_id
;
448 rxrpc_see_call(call
, rxrpc_call_see_connected
);
449 trace_rxrpc_connect_call(call
);
450 call
->tx_last_sent
= ktime_get_real();
451 rxrpc_start_call_timer(call
);
452 rxrpc_set_call_state(call
, RXRPC_CALL_CLIENT_SEND_REQUEST
);
453 wake_up(&call
->waitq
);
457 * Remove a connection from the idle list if it's on it.
459 static void rxrpc_unidle_conn(struct rxrpc_connection
*conn
)
461 if (!list_empty(&conn
->cache_link
)) {
462 list_del_init(&conn
->cache_link
);
463 rxrpc_put_connection(conn
, rxrpc_conn_put_unidle
);
468 * Assign channels and callNumbers to waiting calls.
470 static void rxrpc_activate_channels(struct rxrpc_bundle
*bundle
)
472 struct rxrpc_connection
*conn
;
473 unsigned long avail
, mask
;
474 unsigned int channel
, slot
;
476 trace_rxrpc_client(NULL
, -1, rxrpc_client_activate_chans
);
478 if (bundle
->try_upgrade
)
483 while (!list_empty(&bundle
->waiting_calls
)) {
484 avail
= bundle
->avail_chans
& mask
;
487 channel
= __ffs(avail
);
488 clear_bit(channel
, &bundle
->avail_chans
);
490 slot
= channel
/ RXRPC_MAXCALLS
;
491 conn
= bundle
->conns
[slot
];
495 if (bundle
->try_upgrade
)
496 set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE
, &conn
->flags
);
497 rxrpc_unidle_conn(conn
);
499 channel
&= (RXRPC_MAXCALLS
- 1);
500 conn
->act_chans
|= 1 << channel
;
501 rxrpc_activate_one_channel(conn
, channel
);
506 * Connect waiting channels (called from the I/O thread).
508 void rxrpc_connect_client_calls(struct rxrpc_local
*local
)
510 struct rxrpc_call
*call
;
512 while ((call
= list_first_entry_or_null(&local
->new_client_calls
,
513 struct rxrpc_call
, wait_link
))
515 struct rxrpc_bundle
*bundle
= call
->bundle
;
517 spin_lock(&local
->client_call_lock
);
518 list_move_tail(&call
->wait_link
, &bundle
->waiting_calls
);
519 rxrpc_see_call(call
, rxrpc_call_see_waiting_call
);
520 spin_unlock(&local
->client_call_lock
);
522 if (rxrpc_bundle_has_space(bundle
))
523 rxrpc_activate_channels(bundle
);
528 * Note that a call, and thus a connection, is about to be exposed to the
531 void rxrpc_expose_client_call(struct rxrpc_call
*call
)
533 unsigned int channel
= call
->cid
& RXRPC_CHANNELMASK
;
534 struct rxrpc_connection
*conn
= call
->conn
;
535 struct rxrpc_channel
*chan
= &conn
->channels
[channel
];
537 if (!test_and_set_bit(RXRPC_CALL_EXPOSED
, &call
->flags
)) {
538 /* Mark the call ID as being used. If the callNumber counter
539 * exceeds ~2 billion, we kill the connection after its
540 * outstanding calls have finished so that the counter doesn't
543 chan
->call_counter
++;
544 if (chan
->call_counter
>= INT_MAX
)
545 set_bit(RXRPC_CONN_DONT_REUSE
, &conn
->flags
);
546 trace_rxrpc_client(conn
, channel
, rxrpc_client_exposed
);
548 spin_lock(&call
->peer
->lock
);
549 hlist_add_head(&call
->error_link
, &call
->peer
->error_targets
);
550 spin_unlock(&call
->peer
->lock
);
555 * Set the reap timer.
557 static void rxrpc_set_client_reap_timer(struct rxrpc_local
*local
)
559 if (!local
->kill_all_client_conns
) {
560 unsigned long now
= jiffies
;
561 unsigned long reap_at
= now
+ rxrpc_conn_idle_client_expiry
;
563 if (local
->rxnet
->live
)
564 timer_reduce(&local
->client_conn_reap_timer
, reap_at
);
569 * Disconnect a client call.
571 void rxrpc_disconnect_client_call(struct rxrpc_bundle
*bundle
, struct rxrpc_call
*call
)
573 struct rxrpc_connection
*conn
;
574 struct rxrpc_channel
*chan
= NULL
;
575 struct rxrpc_local
*local
= bundle
->local
;
576 unsigned int channel
;
580 _enter("c=%x", call
->debug_id
);
582 /* Calls that have never actually been assigned a channel can simply be
587 _debug("call is waiting");
588 ASSERTCMP(call
->call_id
, ==, 0);
589 ASSERT(!test_bit(RXRPC_CALL_EXPOSED
, &call
->flags
));
590 /* May still be on ->new_client_calls. */
591 spin_lock(&local
->client_call_lock
);
592 list_del_init(&call
->wait_link
);
593 spin_unlock(&local
->client_call_lock
);
598 channel
= cid
& RXRPC_CHANNELMASK
;
599 chan
= &conn
->channels
[channel
];
600 trace_rxrpc_client(conn
, channel
, rxrpc_client_chan_disconnect
);
602 if (WARN_ON(chan
->call
!= call
))
605 may_reuse
= rxrpc_may_reuse_conn(conn
);
607 /* If a client call was exposed to the world, we save the result for
610 * We use a barrier here so that the call number and abort code can be
611 * read without needing to take a lock.
613 * TODO: Make the incoming packet handler check this and handle
614 * terminal retransmission without requiring access to the call.
616 if (test_bit(RXRPC_CALL_EXPOSED
, &call
->flags
)) {
617 _debug("exposed %u,%u", call
->call_id
, call
->abort_code
);
618 __rxrpc_disconnect_call(conn
, call
);
620 if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE
, &conn
->flags
)) {
621 trace_rxrpc_client(conn
, channel
, rxrpc_client_to_active
);
622 bundle
->try_upgrade
= false;
624 rxrpc_activate_channels(bundle
);
628 /* See if we can pass the channel directly to another call. */
629 if (may_reuse
&& !list_empty(&bundle
->waiting_calls
)) {
630 trace_rxrpc_client(conn
, channel
, rxrpc_client_chan_pass
);
631 rxrpc_activate_one_channel(conn
, channel
);
635 /* Schedule the final ACK to be transmitted in a short while so that it
636 * can be skipped if we find a follow-on call. The first DATA packet
637 * of the follow on call will implicitly ACK this call.
639 if (call
->completion
== RXRPC_CALL_SUCCEEDED
&&
640 test_bit(RXRPC_CALL_EXPOSED
, &call
->flags
)) {
641 unsigned long final_ack_at
= jiffies
+ 2;
643 chan
->final_ack_at
= final_ack_at
;
644 smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
645 set_bit(RXRPC_CONN_FINAL_ACK_0
+ channel
, &conn
->flags
);
646 rxrpc_reduce_conn_timer(conn
, final_ack_at
);
649 /* Deactivate the channel. */
651 set_bit(conn
->bundle_shift
+ channel
, &conn
->bundle
->avail_chans
);
652 conn
->act_chans
&= ~(1 << channel
);
654 /* If no channels remain active, then put the connection on the idle
655 * list for a short while. Give it a ref to stop it going away if it
658 if (!conn
->act_chans
) {
659 trace_rxrpc_client(conn
, channel
, rxrpc_client_to_idle
);
660 conn
->idle_timestamp
= jiffies
;
662 rxrpc_get_connection(conn
, rxrpc_conn_get_idle
);
663 list_move_tail(&conn
->cache_link
, &local
->idle_client_conns
);
665 rxrpc_set_client_reap_timer(local
);
670 * Remove a connection from a bundle.
672 static void rxrpc_unbundle_conn(struct rxrpc_connection
*conn
)
674 struct rxrpc_bundle
*bundle
= conn
->bundle
;
678 _enter("C=%x", conn
->debug_id
);
680 if (conn
->flags
& RXRPC_CONN_FINAL_ACK_MASK
)
681 rxrpc_process_delayed_final_acks(conn
, true);
683 bindex
= conn
->bundle_shift
/ RXRPC_MAXCALLS
;
684 if (bundle
->conns
[bindex
] == conn
) {
685 _debug("clear slot %u", bindex
);
686 bundle
->conns
[bindex
] = NULL
;
687 bundle
->conn_ids
[bindex
] = 0;
688 for (i
= 0; i
< RXRPC_MAXCALLS
; i
++)
689 clear_bit(conn
->bundle_shift
+ i
, &bundle
->avail_chans
);
690 rxrpc_put_client_connection_id(bundle
->local
, conn
);
691 rxrpc_deactivate_bundle(bundle
);
692 rxrpc_put_connection(conn
, rxrpc_conn_put_unbundle
);
697 * Drop the active count on a bundle.
699 void rxrpc_deactivate_bundle(struct rxrpc_bundle
*bundle
)
701 struct rxrpc_local
*local
;
702 bool need_put
= false;
707 local
= bundle
->local
;
708 if (atomic_dec_and_lock(&bundle
->active
, &local
->client_bundles_lock
)) {
709 if (!bundle
->exclusive
) {
710 _debug("erase bundle");
711 rb_erase(&bundle
->local_node
, &local
->client_bundles
);
715 spin_unlock(&local
->client_bundles_lock
);
717 rxrpc_put_bundle(bundle
, rxrpc_bundle_put_discard
);
722 * Clean up a dead client connection.
724 void rxrpc_kill_client_conn(struct rxrpc_connection
*conn
)
726 struct rxrpc_local
*local
= conn
->local
;
727 struct rxrpc_net
*rxnet
= local
->rxnet
;
729 _enter("C=%x", conn
->debug_id
);
731 trace_rxrpc_client(conn
, -1, rxrpc_client_cleanup
);
732 atomic_dec(&rxnet
->nr_client_conns
);
734 rxrpc_put_client_connection_id(local
, conn
);
738 * Discard expired client connections from the idle list. Each conn in the
739 * idle list has been exposed and holds an extra ref because of that.
741 * This may be called from conn setup or from a work item so cannot be
742 * considered non-reentrant.
744 void rxrpc_discard_expired_client_conns(struct rxrpc_local
*local
)
746 struct rxrpc_connection
*conn
;
747 unsigned long expiry
, conn_expires_at
, now
;
748 unsigned int nr_conns
;
752 /* We keep an estimate of what the number of conns ought to be after
753 * we've discarded some so that we don't overdo the discarding.
755 nr_conns
= atomic_read(&local
->rxnet
->nr_client_conns
);
758 conn
= list_first_entry_or_null(&local
->idle_client_conns
,
759 struct rxrpc_connection
, cache_link
);
763 if (!local
->kill_all_client_conns
) {
764 /* If the number of connections is over the reap limit, we
765 * expedite discard by reducing the expiry timeout. We must,
766 * however, have at least a short grace period to be able to do
767 * final-ACK or ABORT retransmission.
769 expiry
= rxrpc_conn_idle_client_expiry
;
770 if (nr_conns
> rxrpc_reap_client_connections
)
771 expiry
= rxrpc_conn_idle_client_fast_expiry
;
772 if (conn
->local
->service_closed
)
773 expiry
= rxrpc_closed_conn_expiry
* HZ
;
775 conn_expires_at
= conn
->idle_timestamp
+ expiry
;
778 if (time_after(conn_expires_at
, now
))
779 goto not_yet_expired
;
782 atomic_dec(&conn
->active
);
783 trace_rxrpc_client(conn
, -1, rxrpc_client_discard
);
784 list_del_init(&conn
->cache_link
);
786 rxrpc_unbundle_conn(conn
);
787 /* Drop the ->cache_link ref */
788 rxrpc_put_connection(conn
, rxrpc_conn_put_discard_idle
);
794 /* The connection at the front of the queue hasn't yet expired, so
795 * schedule the work item for that point if we discarded something.
797 * We don't worry if the work item is already scheduled - it can look
798 * after rescheduling itself at a later time. We could cancel it, but
799 * then things get messier.
802 if (!local
->kill_all_client_conns
)
803 timer_reduce(&local
->client_conn_reap_timer
, conn_expires_at
);
809 * Clean up the client connections on a local endpoint.
811 void rxrpc_clean_up_local_conns(struct rxrpc_local
*local
)
813 struct rxrpc_connection
*conn
;
817 local
->kill_all_client_conns
= true;
819 del_timer_sync(&local
->client_conn_reap_timer
);
821 while ((conn
= list_first_entry_or_null(&local
->idle_client_conns
,
822 struct rxrpc_connection
, cache_link
))) {
823 list_del_init(&conn
->cache_link
);
824 atomic_dec(&conn
->active
);
825 trace_rxrpc_client(conn
, -1, rxrpc_client_discard
);
826 rxrpc_unbundle_conn(conn
);
827 rxrpc_put_connection(conn
, rxrpc_conn_put_local_dead
);