/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * Client connections can be in one of a number of cache states:
 *
 *  (1) INACTIVE - The connection is not held in any list and may not have been
 *      exposed to the world.  If it has been previously exposed, it was
 *      discarded from the idle list after expiring.
 *
 *  (2) WAITING - The connection is waiting for the number of client conns to
 *      drop below the maximum capacity.  Calls may be in progress upon it from
 *      when it was active and got culled.
 *
 *      The connection is on the rxrpc_waiting_client_conns list which is kept
 *      in to-be-granted order.  Culled conns with waiters go to the back of
 *      the queue just like new conns.
 *
 *  (3) ACTIVE - The connection has at least one call in progress upon it, it
 *      may freely grant available channels to new calls and calls may be
 *      waiting on it for channels to become available.
 *
 *      The connection is on the rxrpc_active_client_conns list which is kept
 *      in activation order for culling purposes.
 *
 *      rxrpc_nr_active_client_conns is held incremented also.
 *
 *  (4) CULLED - The connection got summarily culled to try and free up
 *      capacity.  Calls currently in progress on the connection are allowed to
 *      continue, but new calls will have to wait.  There can be no waiters in
 *      this state - the conn would have to go to the WAITING state instead.
 *
 *  (5) IDLE - The connection has no calls in progress upon it and must have
 *      been exposed to the world (ie. the EXPOSED flag must be set).  When it
 *      expires, the EXPOSED flag is cleared and the connection transitions to
 *      the INACTIVE state.
 *
 *      The connection is on the rxrpc_idle_client_conns list which is kept in
 *      order of how soon they'll expire.
 *
 * There are flags of relevance to the cache:
 *
 *  (1) EXPOSED - The connection ID got exposed to the world.  If this flag is
 *      set, an extra ref is added to the connection preventing it from being
 *      reaped when it has no calls outstanding.  This flag is cleared and the
 *      ref dropped when a conn is discarded from the idle list.
 *
 *      This allows us to move terminal call state retransmission to the
 *      connection and to discard the call immediately we think it is done
 *      with.  It also gives us a chance to reuse the connection.
 *
 *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */
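
/*
 * A rough summary of the transitions implemented below (see the state
 * descriptions above):
 *
 *	INACTIVE -> ACTIVE or WAITING	new call; WAITING if we're at capacity
 *	WAITING  -> ACTIVE		capacity freed by a conn going away
 *	ACTIVE   -> CULLED or WAITING	summarily culled to make room
 *	ACTIVE   -> IDLE or INACTIVE	last call gone (IDLE only if EXPOSED)
 *	CULLED   -> IDLE or INACTIVE	last call gone (IDLE only if EXPOSED)
 *	IDLE     -> INACTIVE		expired and discarded by the reaper
 *	IDLE, CULLED, INACTIVE -> ACTIVE or WAITING	re-animated by a new call
 */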

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;

static unsigned int rxrpc_nr_client_conns;
static unsigned int rxrpc_nr_active_client_conns;
static __read_mostly bool rxrpc_kill_all_client_conns;

static DEFINE_SPINLOCK(rxrpc_client_conn_cache_lock);
static DEFINE_SPINLOCK(rxrpc_client_conn_discard_mutex);
static LIST_HEAD(rxrpc_waiting_client_conns);
static LIST_HEAD(rxrpc_active_client_conns);
static LIST_HEAD(rxrpc_idle_client_conns);

/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

static void rxrpc_cull_active_client_conns(void);
static void rxrpc_discard_expired_client_conns(struct work_struct *);

static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap,
			    rxrpc_discard_expired_client_conns);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 */
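/*
 * Note on the CID layout: the ID allocated from the IDR forms the upper bits
 * of the connection ID (id << RXRPC_CIDSHIFT); the low-order bits (masked by
 * RXRPC_CHANNELMASK) select a channel within the connection, as can be seen in
 * rxrpc_activate_one_channel() where call->cid = conn->proto.cid | channel.
 */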
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxrpc_epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}

/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}

/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, atomic_read(&conn->usage));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conn->usage, 1);
	if (cp->exclusive)
		__set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);

	conn->params		= *cp;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	write_lock(&rxrpc_connection_lock);
	list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
	write_unlock(&rxrpc_connection_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
			 __builtin_return_address(0));
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	int id_cursor, id, distance, limit;

	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->proto.epoch != rxrpc_epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
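	/* For instance, with the default rxrpc_max_client_connections of 1000,
	 * the limit below works out as max(1000 * 4, 1024U) = 4000 IDs either
	 * side of the allocation cursor.
	 */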
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max(rxrpc_max_client_connections * 4, 1024U);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}

/*
 * Create or find a client connection to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 */
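/*
 * There are three successful exits from this function: a brand new exclusive
 * connection, a newly allocated connection published in the local endpoint's
 * tree, or an extant connection found in that tree.  In each case the call is
 * queued on the connection's waiting_calls list before we return 0.
 */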
static int rxrpc_get_client_conn(struct rxrpc_call *call,
				 struct rxrpc_conn_parameters *cp,
				 struct sockaddr_rxrpc *srx,
				 gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	/* If the connection is not meant to be exclusive, search the available
	 * connections to see if the connection we want to use already exists.
	 */
	if (!cp->exclusive) {
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)cp->X)
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level));
#undef cmp
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				if (rxrpc_may_reuse_conn(conn) &&
				    rxrpc_get_connection_maybe(conn))
					goto found_extant_conn;
				/* The connection needs replacing.  It's better
				 * to effect that when we have something to
				 * replace it with so that we don't have to
				 * rebalance the tree twice.
				 */
				break;
			}
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* There wasn't a connection yet or we need an exclusive connection.
	 * We need to create a candidate and then potentially redo the search
	 * in case we're racing with another thread also trying to connect on a
	 * shareable connection.
	 */
	candidate = rxrpc_alloc_client_connection(cp, gfp);
	if (IS_ERR(candidate)) {
		ret = PTR_ERR(candidate);
		goto error_peer;
	}

	/* Add the call to the new connection's waiting list in case we're
	 * going to have to wait for the connection to come live.  It's our
	 * connection, so we want first dibs on the channel slots.  We would
	 * normally have to take channel_lock but we do this before anyone else
	 * can see the connection.
	 */
	list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);

	if (cp->exclusive) {
		call->conn = candidate;
		call->security_ix = candidate->security_ix;
		_leave(" = 0 [exclusive %d]", candidate->debug_id);
		return 0;
	}

	/* Publish the new connection for userspace to find.  We need to redo
	 * the search before doing this lest we race with someone else adding a
	 * conflicting instance.
	 */
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level));
#undef cmp
		if (diff < 0) {
			pp = &(*pp)->rb_left;
		} else if (diff > 0) {
			pp = &(*pp)->rb_right;
		} else {
			if (rxrpc_may_reuse_conn(conn) &&
			    rxrpc_get_connection_maybe(conn))
				goto found_extant_conn;
			/* The old connection is from an outdated epoch. */
			_debug("replace conn");
			clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
			rb_replace_node(&conn->client_node,
					&candidate->client_node,
					&local->client_conns);
			trace_rxrpc_client(conn, -1, rxrpc_client_replace);
			goto candidate_published;
		}
	}

	rb_link_node(&candidate->client_node, parent, pp);
	rb_insert_color(&candidate->client_node, &local->client_conns);

candidate_published:
	set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
	call->conn = candidate;
	call->security_ix = candidate->security_ix;
	spin_unlock(&local->client_conns_lock);
	_leave(" = 0 [new %d]", candidate->debug_id);
	return 0;

	/* We come here if we found a suitable connection already in existence.
	 * Discard any candidate we may have allocated, and try to get a
	 * channel on this one.
	 */
found_extant_conn:
	_debug("found conn");
	spin_unlock(&local->client_conns_lock);

	if (candidate) {
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	spin_lock(&conn->channel_lock);
	call->conn = conn;
	call->security_ix = conn->security_ix;
	list_add(&call->chan_wait_link, &conn->waiting_calls);
	spin_unlock(&conn->channel_lock);
	_leave(" = 0 [extant %d]", conn->debug_id);
	return 0;

error_peer:
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * Activate a connection.
 */
static void rxrpc_activate_conn(struct rxrpc_connection *conn)
{
	trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
	conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
	rxrpc_nr_active_client_conns++;
	list_move_tail(&conn->cache_link, &rxrpc_active_client_conns);
}

/*
 * Attempt to animate a connection for a new call.
 *
 * If it's not exclusive, the connection is in the endpoint tree, and we're in
 * the conn's list of those waiting to grab a channel.  There is, however, a
 * limit on the number of live connections allowed at any one time, so we may
 * have to wait for capacity to become available.
 *
 * Note that a connection on the waiting queue might *also* have active
 * channels if it has been culled to make space and then re-requested by a new
 * call.
 */
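/*
 * A connection is counted against rxrpc_max_client_connections exactly once,
 * when the RXRPC_CONN_COUNTED flag is first set below; the count is only
 * dropped again when the connection is finally cleaned up by
 * rxrpc_put_one_client_conn().
 */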
static void rxrpc_animate_client_conn(struct rxrpc_connection *conn)
{
	unsigned int nr_conns;

	_enter("%d,%d", conn->debug_id, conn->cache_state);

	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE)
		goto out;

	spin_lock(&rxrpc_client_conn_cache_lock);

	nr_conns = rxrpc_nr_client_conns;
	if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_count);
		rxrpc_nr_client_conns = nr_conns + 1;
	}

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
	case RXRPC_CONN_CLIENT_WAITING:
		break;

	case RXRPC_CONN_CLIENT_INACTIVE:
	case RXRPC_CONN_CLIENT_CULLED:
	case RXRPC_CONN_CLIENT_IDLE:
		if (nr_conns >= rxrpc_max_client_connections)
			goto wait_for_capacity;
		goto activate_conn;

	default:
		BUG();
	}

out_unlock:
	spin_unlock(&rxrpc_client_conn_cache_lock);
out:
	_leave(" [%d]", conn->cache_state);
	return;

activate_conn:
	rxrpc_activate_conn(conn);
	goto out_unlock;

wait_for_capacity:
	trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
	conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
	list_move_tail(&conn->cache_link, &rxrpc_waiting_client_conns);
	goto out_unlock;
}

/*
 * Deactivate a channel.
 */
static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn,
					 unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];

	rcu_assign_pointer(chan->call, NULL);
	conn->active_chans &= ~(1 << channel);
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
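/*
 * Note that the call gets a provisional call number (chan->call_counter + 1)
 * here, but chan->call_counter itself is only advanced in
 * rxrpc_expose_client_call(), so a call that goes away before it is exposed
 * does not consume a call number on this channel.
 */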
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	write_unlock_bh(&call->state_lock);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	conn->active_chans |= 1 << channel;
	call->peer	= rxrpc_get_peer(conn->params.peer);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;

	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	/* Paired with the read barrier in rxrpc_wait_for_channel().  This
	 * orders cid and epoch in the connection wrt to call_id without the
	 * need to take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();
	chan->call_id	= call_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}

/*
 * Assign channels and callNumbers to waiting calls with channel_lock
 * held by caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn)
{
	u8 avail, mask;

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
		mask = RXRPC_ACTIVE_CHANS_MASK;
		break;
	default:
		return;
	}

	while (!list_empty(&conn->waiting_calls) &&
	       (avail = ~conn->active_chans,
		avail &= mask,
		avail != 0))
		rxrpc_activate_one_channel(conn, __ffs(avail));
}

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_connection *conn)
{
	_enter("%d", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);

	if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
		return;

	spin_lock(&conn->channel_lock);
	rxrpc_activate_channels_locked(conn);
	spin_unlock(&conn->channel_lock);
	_leave("");
}

/*
 * Wait for a callNumber and a channel to be granted to a call.
 */
static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
{
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!call->call_id) {
		DECLARE_WAITQUEUE(myself, current);

		if (!gfpflags_allow_blocking(gfp)) {
			ret = -EAGAIN;
			goto out;
		}

		add_wait_queue_exclusive(&call->waitq, &myself);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (call->call_id)
				break;
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		remove_wait_queue(&call->waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out:
	_leave(" = %d", ret);
	return ret;
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	int ret;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	rxrpc_discard_expired_client_conns(NULL);
	rxrpc_cull_active_client_conns();

	ret = rxrpc_get_client_conn(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	rxrpc_animate_client_conn(call->conn);
	rxrpc_activate_channels(call->conn);

	ret = rxrpc_wait_for_channel(call, gfp);
	if (ret < 0)
		rxrpc_disconnect_client_call(call);

	_leave(" = %d", ret);
	return ret;
}

/*
 * Note that a connection is about to be exposed to the world.  Once it is
 * exposed, we maintain an extra ref on it that stops it from being summarily
 * discarded before it's (a) had a chance to deal with retransmission and (b)
 * had a chance at re-use (the per-connection security negotiation is
 * expensive).
 */
static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
				     unsigned int channel)
{
	if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
		rxrpc_get_connection(conn);
	}
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		rxrpc_expose_client_conn(conn, channel);
	}
}

/*
 * Disconnect a client call.
 */
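/*
 * Roughly, the cases handled below are: (1) the call never got a channel and
 * is simply removed from the connection's waiting list; (2) the call was
 * exposed, so its terminal state is parked on the channel for retransmission;
 * (3) the channel can be handed straight to the next waiting call; or (4) the
 * channel is deactivated and, if no channels remain active, the connection is
 * idled or deactivated.
 */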
void rxrpc_disconnect_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
	call->conn = NULL;

	spin_lock(&conn->channel_lock);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.  If the conn didn't get used either, it will follow
	 * immediately unless someone else grabs it in the meantime.
	 */
	if (!list_empty(&call->chan_wait_link)) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);

		/* We must deactivate or idle the connection if it's now
		 * waiting for nothing.
		 */
		spin_lock(&rxrpc_client_conn_cache_lock);
		if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
		    list_empty(&conn->waiting_calls) &&
		    !conn->active_chans)
			goto idle_connection;
		goto out;
	}

	ASSERTCMP(rcu_access_pointer(chan->call), ==, call);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);
	}

	/* See if we can pass the channel directly to another call. */
	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
	    !list_empty(&conn->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out_2;
	}

	/* Things are more complex and we need the cache lock.  We might be
	 * able to simply idle the conn or it might now be lurking on the wait
	 * list.  It might even get moved back to the active list whilst we're
	 * waiting for the lock.
	 */
	spin_lock(&rxrpc_client_conn_cache_lock);

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
		if (list_empty(&conn->waiting_calls)) {
			rxrpc_deactivate_one_channel(conn, channel);
			if (!conn->active_chans) {
				rxrpc_nr_active_client_conns--;
				goto idle_connection;
			}
			goto out;
		}

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;

	case RXRPC_CONN_CLIENT_CULLED:
		rxrpc_deactivate_one_channel(conn, channel);
		ASSERT(list_empty(&conn->waiting_calls));
		if (!conn->active_chans)
			goto idle_connection;
		goto out;

	case RXRPC_CONN_CLIENT_WAITING:
		rxrpc_deactivate_one_channel(conn, channel);
		goto out;

	default:
		BUG();
	}

out:
	spin_unlock(&rxrpc_client_conn_cache_lock);
out_2:
	spin_unlock(&conn->channel_lock);
	rxrpc_put_connection(conn);
	_leave("");
	return;

idle_connection:
	/* As no channels remain active, the connection gets deactivated
	 * immediately or moved to the idle list for a short while.
	 */
	if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;
		conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
		list_move_tail(&conn->cache_link, &rxrpc_idle_client_conns);
		if (rxrpc_idle_client_conns.next == &conn->cache_link &&
		    !rxrpc_kill_all_client_conns)
			queue_delayed_work(rxrpc_workqueue,
					   &rxrpc_client_conn_reap,
					   rxrpc_conn_idle_client_expiry);
	} else {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
		conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
		list_del_init(&conn->cache_link);
	}
	goto out;
}

/*
 * Clean up a dead client connection.
 */
static struct rxrpc_connection *
rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *next = NULL;
	struct rxrpc_local *local = conn->params.local;
	unsigned int nr_conns;

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);

	if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
		spin_lock(&local->client_conns_lock);
		if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
				       &conn->flags))
			rb_erase(&conn->client_node, &local->client_conns);
		spin_unlock(&local->client_conns_lock);
	}

	rxrpc_put_client_connection_id(conn);

	ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);

	if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
		spin_lock(&rxrpc_client_conn_cache_lock);
		nr_conns = --rxrpc_nr_client_conns;

		if (nr_conns < rxrpc_max_client_connections &&
		    !list_empty(&rxrpc_waiting_client_conns)) {
			next = list_entry(rxrpc_waiting_client_conns.next,
					  struct rxrpc_connection, cache_link);
			rxrpc_get_connection(next);
			rxrpc_activate_conn(next);
		}

		spin_unlock(&rxrpc_client_conn_cache_lock);
	}

	rxrpc_kill_connection(conn);
	if (next)
		rxrpc_activate_channels(next);

	/* We need to get rid of the temporary ref we took upon next, but we
	 * can't call rxrpc_put_connection() recursively.
	 */
	return next;
}

/*
 * Clean up dead client connections.
 */
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n;

	do {
		n = atomic_dec_return(&conn->usage);
		trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
		if (n > 0)
			return;
		ASSERTCMP(n, >=, 0);

		conn = rxrpc_put_one_client_conn(conn);
	} while (conn);
}

/*
 * Kill the longest-active client connections to make room for new ones.
 */
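/*
 * Culling only happens once rxrpc_nr_client_conns has reached
 * rxrpc_max_client_connections; active connections are then demoted, oldest
 * first, until no more than rxrpc_reap_client_connections of them remain
 * active.  Connections that still have waiting calls go back on the waiting
 * queue rather than being culled outright.
 */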
static void rxrpc_cull_active_client_conns(void)
{
	struct rxrpc_connection *conn;
	unsigned int nr_conns = rxrpc_nr_client_conns;
	unsigned int nr_active, limit;

	_enter("");

	ASSERTCMP(nr_conns, >=, 0);
	if (nr_conns < rxrpc_max_client_connections) {
		_leave(" [ok]");
		return;
	}
	limit = rxrpc_reap_client_connections;

	spin_lock(&rxrpc_client_conn_cache_lock);
	nr_active = rxrpc_nr_active_client_conns;

	while (nr_active > limit) {
		ASSERT(!list_empty(&rxrpc_active_client_conns));
		conn = list_entry(rxrpc_active_client_conns.next,
				  struct rxrpc_connection, cache_link);
		ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_ACTIVE);

		if (list_empty(&conn->waiting_calls)) {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
			conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
			list_del_init(&conn->cache_link);
		} else {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
			conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
			list_move_tail(&conn->cache_link,
				       &rxrpc_waiting_client_conns);
		}

		nr_active--;
	}

	rxrpc_nr_active_client_conns = nr_active;
	spin_unlock(&rxrpc_client_conn_cache_lock);
	ASSERTCMP(nr_active, >=, 0);
	_leave(" [culled]");
}

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
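/*
 * With the default tunables above, an idle connection normally lingers for two
 * minutes (rxrpc_conn_idle_client_expiry); if the total number of client conns
 * is over the reap threshold, the grace period drops to two seconds
 * (rxrpc_conn_idle_client_fast_expiry) so that capacity is freed more quickly.
 */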
static void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;
	bool did_discard = false;

	_enter("%c", work ? 'w' : 'n');

	if (list_empty(&rxrpc_idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding */
	if (!spin_trylock(&rxrpc_client_conn_discard_mutex)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = rxrpc_nr_client_conns;

next:
	spin_lock(&rxrpc_client_conn_cache_lock);

	if (list_empty(&rxrpc_idle_client_conns))
		goto out;

	conn = list_entry(rxrpc_idle_client_conns.next,
			  struct rxrpc_connection, cache_link);
	ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));

	if (!rxrpc_kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
		BUG();
	conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
	list_del_init(&conn->cache_link);

	spin_unlock(&rxrpc_client_conn_cache_lock);

	/* When we cleared the EXPOSED flag, we took on responsibility for the
	 * reference that that had on the usage count.  We deal with that here.
	 * If someone re-sets the flag and re-gets the ref, that's fine.
	 */
	rxrpc_put_connection(conn);
	did_discard = true;
	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	if (!rxrpc_kill_all_client_conns)
		queue_delayed_work(rxrpc_workqueue,
				   &rxrpc_client_conn_reap,
				   conn_expires_at - now);

out:
	spin_unlock(&rxrpc_client_conn_cache_lock);
	spin_unlock(&rxrpc_client_conn_discard_mutex);
	_leave("");
}

/*
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out
 */
void __exit rxrpc_destroy_all_client_connections(void)
{
	_enter("");

	spin_lock(&rxrpc_client_conn_cache_lock);
	rxrpc_kill_all_client_conns = true;
	spin_unlock(&rxrpc_client_conn_cache_lock);

	cancel_delayed_work(&rxrpc_client_conn_reap);

	if (!queue_delayed_work(rxrpc_workqueue, &rxrpc_client_conn_reap, 0))
		_debug("destroy: queue failed");

	_leave("");
}