/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * Client connections can be in one of a number of cache states:
 *
 *  (1) INACTIVE - The connection is not held in any list and may not have been
 *      exposed to the world.  If it has been previously exposed, it was
 *      discarded from the idle list after expiring.
 *
 *  (2) WAITING - The connection is waiting for the number of client conns to
 *      drop below the maximum capacity.  Calls may be in progress upon it from
 *      when it was active and got culled.
 *
 *      The connection is on the rxrpc_waiting_client_conns list which is kept
 *      in to-be-granted order.  Culled conns with waiters go to the back of
 *      the queue just like new conns.
 *
 *  (3) ACTIVE - The connection has at least one call in progress upon it, it
 *      may freely grant available channels to new calls and calls may be
 *      waiting on it for channels to become available.
 *
 *      The connection is on the rxnet->active_client_conns list which is kept
 *      in activation order for culling purposes.
 *
 *      rxrpc_nr_active_client_conns is held incremented also.
 *
 *  (4) UPGRADE - As for ACTIVE, but only one call may be in progress and is
 *      being used to probe for service upgrade.
 *
 *  (5) CULLED - The connection got summarily culled to try and free up
 *      capacity.  Calls currently in progress on the connection are allowed to
 *      continue, but new calls will have to wait.  There can be no waiters in
 *      this state - the conn would have to go to the WAITING state instead.
 *
 *  (6) IDLE - The connection has no calls in progress upon it and must have
 *      been exposed to the world (ie. the EXPOSED flag must be set).  When it
 *      expires, the EXPOSED flag is cleared and the connection transitions to
 *      the INACTIVE state.
 *
 *      The connection is on the rxnet->idle_client_conns list which is kept in
 *      order of how soon they'll expire.
 *
 * There are flags of relevance to the cache:
 *
 *  (1) EXPOSED - The connection ID got exposed to the world.  If this flag is
 *      set, an extra ref is added to the connection preventing it from being
 *      reaped when it has no calls outstanding.  This flag is cleared and the
 *      ref dropped when a conn is discarded from the idle list.
 *
 *      This allows us to move terminal call state retransmission to the
 *      connection and to discard the call immediately we think it is done
 *      with.  It also gives us a chance to reuse the connection.
 *
 *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;

/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

static void rxrpc_cull_active_client_conns(struct rxrpc_net *);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);
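
	/* Allocate IDs cyclically, starting at 1 and staying below 1 << 30 so
	 * that the allocated ID still fits within the 32-bit CID once it has
	 * been shifted up past the channel-number bits.
	 */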
	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}

/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}

/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, atomic_read(&conn->usage));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conn->usage, 1);
	if (cp->exclusive)
		__set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
	if (cp->upgrade)
		__set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);

	conn->params = *cp;
	conn->out_clientflag = RXRPC_CLIENT_INITIATED;
	conn->state = RXRPC_CONN_CLIENT;
	conn->service_id = cp->service_id;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
			 atomic_read(&conn->usage),
			 __builtin_return_address(0));
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id_cursor, id, distance, limit;

	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max(rxrpc_max_client_connections * 4, 1024U);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}

/*
 * Create or find a client connection to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 */
static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
				 struct rxrpc_call *call,
				 struct rxrpc_conn_parameters *cp,
				 struct sockaddr_rxrpc *srx,
				 gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
	if (!cp->peer)
		goto error;
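
	/* Seed the call's congestion state from what we last learned about
	 * this peer: adopt its cached cwnd and pick slow start or congestion
	 * avoidance accordingly.
	 */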
	call->cong_cwnd = cp->peer->cong_cwnd;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;

	/* If the connection is not meant to be exclusive, search the available
	 * connections to see if the connection we want to use already exists.
	 */
	if (!cp->exclusive) {
		_debug("search 1");
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
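		/* The tree is ordered on the tuple of connection parameters
		 * {peer, key, security_level, upgrade}, so walking left or
		 * right on the first differing field finds any exact match
		 * that might be reusable.
		 */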
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)cp->X)
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level) ?:
				cmp(upgrade));
#undef cmp
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				if (rxrpc_may_reuse_conn(conn) &&
				    rxrpc_get_connection_maybe(conn))
					goto found_extant_conn;
				/* The connection needs replacing.  It's better
				 * to effect that when we have something to
				 * replace it with so that we don't have to
				 * rebalance the tree twice.
				 */
				break;
			}
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* There wasn't a connection yet or we need an exclusive connection.
	 * We need to create a candidate and then potentially redo the search
	 * in case we're racing with another thread also trying to connect on a
	 * shareable connection.
	 */
	_debug("new conn");
	candidate = rxrpc_alloc_client_connection(cp, gfp);
	if (IS_ERR(candidate)) {
		ret = PTR_ERR(candidate);
		goto error_peer;
	}

	/* Add the call to the new connection's waiting list in case we're
	 * going to have to wait for the connection to come live.  It's our
	 * connection, so we want first dibs on the channel slots.  We would
	 * normally have to take channel_lock but we do this before anyone else
	 * can see the connection.
	 */
	list_add(&call->chan_wait_link, &candidate->waiting_calls);

	if (cp->exclusive) {
		call->conn = candidate;
		call->security_ix = candidate->security_ix;
		call->service_id = candidate->service_id;
		_leave(" = 0 [exclusive %d]", candidate->debug_id);
		return 0;
	}

	/* Publish the new connection for userspace to find.  We need to redo
	 * the search before doing this lest we race with someone else adding a
	 * conflicting instance.
	 */
	_debug("search 2");
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0) {
			pp = &(*pp)->rb_left;
		} else if (diff > 0) {
			pp = &(*pp)->rb_right;
		} else {
			if (rxrpc_may_reuse_conn(conn) &&
			    rxrpc_get_connection_maybe(conn))
				goto found_extant_conn;
			/* The old connection is from an outdated epoch. */
			_debug("replace conn");
			clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
			rb_replace_node(&conn->client_node,
					&candidate->client_node,
					&local->client_conns);
			trace_rxrpc_client(conn, -1, rxrpc_client_replace);
			goto candidate_published;
		}
	}

	_debug("new conn");
	rb_link_node(&candidate->client_node, parent, pp);
	rb_insert_color(&candidate->client_node, &local->client_conns);

candidate_published:
	set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
	call->conn = candidate;
	call->security_ix = candidate->security_ix;
	call->service_id = candidate->service_id;
	spin_unlock(&local->client_conns_lock);
	_leave(" = 0 [new %d]", candidate->debug_id);
	return 0;

	/* We come here if we found a suitable connection already in existence.
	 * Discard any candidate we may have allocated, and try to get a
	 * channel on this one.
	 */
found_extant_conn:
	_debug("found conn");
	spin_unlock(&local->client_conns_lock);

	if (candidate) {
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	spin_lock(&conn->channel_lock);
	call->conn = conn;
	call->security_ix = conn->security_ix;
	call->service_id = conn->service_id;
	list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
	spin_unlock(&conn->channel_lock);
	_leave(" = 0 [extant %d]", conn->debug_id);
	return 0;

error_peer:
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * Activate a connection.
 */
static void rxrpc_activate_conn(struct rxrpc_net *rxnet,
				struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_upgrade);
		conn->cache_state = RXRPC_CONN_CLIENT_UPGRADE;
	} else {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
		conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
	}
	rxnet->nr_active_client_conns++;
	list_move_tail(&conn->cache_link, &rxnet->active_client_conns);
}

/*
 * Attempt to animate a connection for a new call.
 *
 * If it's not exclusive, the connection is in the endpoint tree, and we're in
 * the conn's list of those waiting to grab a channel.  There is, however, a
 * limit on the number of live connections allowed at any one time, so we may
 * have to wait for capacity to become available.
 *
 * Note that a connection on the waiting queue might *also* have active
 * channels if it has been culled to make space and then re-requested by a new
 * call.
 */
static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet,
				      struct rxrpc_connection *conn)
{
	unsigned int nr_conns;

	_enter("%d,%d", conn->debug_id, conn->cache_state);

	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE ||
	    conn->cache_state == RXRPC_CONN_CLIENT_UPGRADE)
		goto out;

	spin_lock(&rxnet->client_conn_cache_lock);

	nr_conns = rxnet->nr_client_conns;
	if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_count);
		rxnet->nr_client_conns = nr_conns + 1;
	}

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
	case RXRPC_CONN_CLIENT_UPGRADE:
	case RXRPC_CONN_CLIENT_WAITING:
		break;

	case RXRPC_CONN_CLIENT_INACTIVE:
	case RXRPC_CONN_CLIENT_CULLED:
	case RXRPC_CONN_CLIENT_IDLE:
		if (nr_conns >= rxrpc_max_client_connections)
			goto wait_for_capacity;
		goto activate_conn;

	default:
		BUG();
	}

out_unlock:
	spin_unlock(&rxnet->client_conn_cache_lock);
out:
	_leave(" [%d]", conn->cache_state);
	return;

activate_conn:
	_debug("activate");
	rxrpc_activate_conn(rxnet, conn);
	goto out_unlock;

wait_for_capacity:
	_debug("wait");
	trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
	conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
	list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns);
	goto out_unlock;
}

/*
 * Deactivate a channel.
 */
static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn,
					 unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];

	rcu_assign_pointer(chan->call, NULL);
	conn->active_chans &= ~(1 << channel);
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);

	write_lock_bh(&call->state_lock);
	if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
		call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	else
		call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
	write_unlock_bh(&call->state_lock);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	conn->active_chans |= 1 << channel;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cid = conn->proto.cid | channel;
	call->call_id = call_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	/* Paired with the read barrier in rxrpc_wait_for_channel().  This
	 * orders cid and epoch in the connection wrt to call_id without the
	 * need to take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();
	chan->call_id = call_id;
	chan->call_debug_id = call->debug_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}

/*
 * Assign channels and callNumbers to waiting calls with channel_lock
 * held by caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn)
{
	u8 avail, mask;

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
		mask = RXRPC_ACTIVE_CHANS_MASK;
		break;
	case RXRPC_CONN_CLIENT_UPGRADE:
		mask = 0x01;
		break;
	default:
		return;
	}
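
	/* Hand out each channel that is both free (bit clear in active_chans)
	 * and permitted by the mask for the current cache state.
	 */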
	while (!list_empty(&conn->waiting_calls) &&
	       (avail = ~conn->active_chans,
		avail &= mask,
		avail != 0))
		rxrpc_activate_one_channel(conn, __ffs(avail));
}

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_connection *conn)
{
	_enter("%d", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);

	if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
		return;

	spin_lock(&conn->channel_lock);
	rxrpc_activate_channels_locked(conn);
	spin_unlock(&conn->channel_lock);
	_leave("");
}

/*
 * Wait for a callNumber and a channel to be granted to a call.
 */
static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
{
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!call->call_id) {
		DECLARE_WAITQUEUE(myself, current);

		if (!gfpflags_allow_blocking(gfp)) {
			ret = -EAGAIN;
			goto out;
		}

		add_wait_queue_exclusive(&call->waitq, &myself);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (call->call_id)
				break;
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		remove_wait_queue(&call->waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out:
	_leave(" = %d", ret);
	return ret;
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
	rxrpc_cull_active_client_conns(rxnet);

	ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
	if (ret < 0)
		goto out;

	rxrpc_animate_client_conn(rxnet, call->conn);
	rxrpc_activate_channels(call->conn);

	ret = rxrpc_wait_for_channel(call, gfp);
	if (ret < 0) {
		trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
		rxrpc_disconnect_client_call(call);
		goto out;
	}

	spin_lock_bh(&call->conn->params.peer->lock);
	hlist_add_head_rcu(&call->error_link,
			   &call->conn->params.peer->error_targets);
	spin_unlock_bh(&call->conn->params.peer->lock);

out:
	_leave(" = %d", ret);
	return ret;
}

/*
 * Note that a connection is about to be exposed to the world.  Once it is
 * exposed, we maintain an extra ref on it that stops it from being summarily
 * discarded before it's (a) had a chance to deal with retransmission and (b)
 * had a chance at re-use (the per-connection security negotiation is
 * expensive).
 */
static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
				     unsigned int channel)
{
	if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
		rxrpc_get_connection(conn);
	}
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		rxrpc_expose_client_conn(conn, channel);
	}
}

/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
{
	unsigned long now = jiffies;
	unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
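
	/* timer_reduce() only ever brings the timer forward; if a sooner reap
	 * is already pending, it is left untouched.
	 */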
	if (rxnet->live)
		timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
}

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	unsigned int channel = -1;
	u32 cid;

	spin_lock(&conn->channel_lock);
	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);

	cid = call->cid;
	if (cid) {
		channel = cid & RXRPC_CHANNELMASK;
		chan = &conn->channels[channel];
	}
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.  If the conn didn't get used either, it will follow
	 * immediately unless someone else grabs it in the meantime.
	 */
	if (!list_empty(&call->chan_wait_link)) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);

		/* We must deactivate or idle the connection if it's now
		 * waiting for nothing.
		 */
		spin_lock(&rxnet->client_conn_cache_lock);
		if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
		    list_empty(&conn->waiting_calls) &&
		    !conn->active_chans)
			goto idle_connection;
		goto out;
	}

	if (rcu_access_pointer(chan->call) != call) {
		spin_unlock(&conn->channel_lock);
		BUG();
	}

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);
	}

	/* See if we can pass the channel directly to another call. */
	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
	    !list_empty(&conn->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out_2;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call.  The first DATA packet
	 * of the follow on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		WRITE_ONCE(chan->final_ack_at, final_ack_at);
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Things are more complex and we need the cache lock.  We might be
	 * able to simply idle the conn or it might now be lurking on the wait
	 * list.  It might even get moved back to the active list whilst we're
	 * waiting for the lock.
	 */
	spin_lock(&rxnet->client_conn_cache_lock);

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_UPGRADE:
		/* Deal with termination of a service upgrade probe. */
		if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
			clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
			rxrpc_activate_channels_locked(conn);
		}
		/* fall through */
	case RXRPC_CONN_CLIENT_ACTIVE:
		if (list_empty(&conn->waiting_calls)) {
			rxrpc_deactivate_one_channel(conn, channel);
			if (!conn->active_chans) {
				rxnet->nr_active_client_conns--;
				goto idle_connection;
			}
			goto out;
		}

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;

	case RXRPC_CONN_CLIENT_CULLED:
		rxrpc_deactivate_one_channel(conn, channel);
		ASSERT(list_empty(&conn->waiting_calls));
		if (!conn->active_chans)
			goto idle_connection;
		goto out;

	case RXRPC_CONN_CLIENT_WAITING:
		rxrpc_deactivate_one_channel(conn, channel);
		goto out;

	default:
		BUG();
	}

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
out_2:
	spin_unlock(&conn->channel_lock);
	_leave("");
	return;

idle_connection:
	/* As no channels remain active, the connection gets deactivated
	 * immediately or moved to the idle list for a short while.
	 */
	if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;
		conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
		if (rxnet->idle_client_conns.next == &conn->cache_link &&
		    !rxnet->kill_all_client_conns)
			rxrpc_set_client_reap_timer(rxnet);
	} else {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
		conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
		list_del_init(&conn->cache_link);
	}
	goto out;
}

/*
 * Clean up a dead client connection.
 */
static struct rxrpc_connection *
rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *next = NULL;
	struct rxrpc_local *local = conn->params.local;
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned int nr_conns;

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);

	if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
		spin_lock(&local->client_conns_lock);
		if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
				       &conn->flags))
			rb_erase(&conn->client_node, &local->client_conns);
		spin_unlock(&local->client_conns_lock);
	}

	rxrpc_put_client_connection_id(conn);

	ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);

	if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
		spin_lock(&rxnet->client_conn_cache_lock);
		nr_conns = --rxnet->nr_client_conns;

		if (nr_conns < rxrpc_max_client_connections &&
		    !list_empty(&rxnet->waiting_client_conns)) {
			next = list_entry(rxnet->waiting_client_conns.next,
					  struct rxrpc_connection, cache_link);
			rxrpc_get_connection(next);
			rxrpc_activate_conn(rxnet, next);
		}

		spin_unlock(&rxnet->client_conn_cache_lock);
	}

	rxrpc_kill_connection(conn);
	if (next)
		rxrpc_activate_channels(next);

	/* We need to get rid of the temporary ref we took upon next, but we
	 * can't call rxrpc_put_connection() recursively.
	 */
	return next;
}

/*
 * Clean up dead client connections.
 */
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = conn->debug_id;
	int n;
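
	/* Drop a ref; if it was the last one, tear the connection down.  Any
	 * "next" conn handed back by the teardown carries a temporary ref
	 * that is dropped by iterating rather than by recursing.
	 */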
	do {
		n = atomic_dec_return(&conn->usage);
		trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
		if (n > 0)
			return;
		ASSERTCMP(n, >=, 0);

		conn = rxrpc_put_one_client_conn(conn);
	} while (conn);
}

/*
 * Kill the longest-active client connections to make room for new ones.
 */
static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn;
	unsigned int nr_conns = rxnet->nr_client_conns;
	unsigned int nr_active, limit;

	_enter("");

	ASSERTCMP(nr_conns, >=, 0);
	if (nr_conns < rxrpc_max_client_connections) {
		_leave(" [ok]");
		return;
	}
	limit = rxrpc_reap_client_connections;

	spin_lock(&rxnet->client_conn_cache_lock);
	nr_active = rxnet->nr_active_client_conns;

	while (nr_active > limit) {
		ASSERT(!list_empty(&rxnet->active_client_conns));
		conn = list_entry(rxnet->active_client_conns.next,
				  struct rxrpc_connection, cache_link);
		ASSERTIFCMP(conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE,
			    conn->cache_state, ==, RXRPC_CONN_CLIENT_UPGRADE);

		if (list_empty(&conn->waiting_calls)) {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
			conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
			list_del_init(&conn->cache_link);
		} else {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
			conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
			list_move_tail(&conn->cache_link,
				       &rxnet->waiting_client_conns);
		}

		nr_active--;
	}

	rxnet->nr_active_client_conns = nr_active;
	spin_unlock(&rxnet->client_conn_cache_lock);
	ASSERTCMP(nr_active, >=, 0);
	_leave(" [culled]");
}

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, client_conn_reaper);
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	if (list_empty(&rxnet->idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding */
	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = rxnet->nr_client_conns;

next:
	spin_lock(&rxnet->client_conn_cache_lock);

	if (list_empty(&rxnet->idle_client_conns))
		goto out;

	conn = list_entry(rxnet->idle_client_conns.next,
			  struct rxrpc_connection, cache_link);
	ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));

	if (!rxnet->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->params.local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
		BUG();
	conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
	list_del_init(&conn->cache_link);

	spin_unlock(&rxnet->client_conn_cache_lock);

	/* When we cleared the EXPOSED flag, we took on responsibility for the
	 * reference that that had on the usage count.  We deal with that here.
	 * If someone re-sets the flag and re-gets the ref, that's fine.
	 */
	rxrpc_put_connection(conn);
	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!rxnet->kill_all_client_conns)
		timer_reduce(&rxnet->client_conn_reap_timer,
			     conn_expires_at);

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
	spin_unlock(&rxnet->client_conn_discard_lock);
	_leave("");
}

/*
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out
 */
void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	rxnet->kill_all_client_conns = true;
	spin_unlock(&rxnet->client_conn_cache_lock);

	del_timer_sync(&rxnet->client_conn_reap_timer);

	if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
		_debug("destroy: queue failed");

	_leave("");
}

/*
 * Clean up the client connections on a local endpoint.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn, *tmp;
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned int nr_active;
	LIST_HEAD(graveyard);

	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	nr_active = rxnet->nr_active_client_conns;

	list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
				 cache_link) {
		if (conn->params.local == local) {
			ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE);

			trace_rxrpc_client(conn, -1, rxrpc_client_discard);
			if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
				BUG();
			conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
			list_move(&conn->cache_link, &graveyard);
			nr_active--;
		}
	}

	rxnet->nr_active_client_conns = nr_active;
	spin_unlock(&rxnet->client_conn_cache_lock);
	ASSERTCMP(nr_active, >=, 0);

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next,
				  struct rxrpc_connection, cache_link);
		list_del_init(&conn->cache_link);

		rxrpc_put_connection(conn);
	}

	_leave(" [culled]");
}