/* net/rxrpc/conn_client.c */
/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * Client connections can be in one of a number of cache states:
 *
 *  (1) INACTIVE - The connection is not held in any list and may not have been
 *      exposed to the world. If it has been previously exposed, it was
 *      discarded from the idle list after expiring.
 *
 *  (2) WAITING - The connection is waiting for the number of client conns to
 *      drop below the maximum capacity. Calls may be in progress upon it from
 *      when it was active and got culled.
 *
 *      The connection is on the rxrpc_waiting_client_conns list which is kept
 *      in to-be-granted order. Culled conns with waiters go to the back of
 *      the queue just like new conns.
 *
 *  (3) ACTIVE - The connection has at least one call in progress upon it, it
 *      may freely grant available channels to new calls and calls may be
 *      waiting on it for channels to become available.
 *
 *      The connection is on the rxnet->active_client_conns list which is kept
 *      in activation order for culling purposes.
 *
 *      rxrpc_nr_active_client_conns is held incremented also.
 *
 *  (4) UPGRADE - As for ACTIVE, but only one call may be in progress and is
 *      being used to probe for service upgrade.
 *
 *  (5) CULLED - The connection got summarily culled to try and free up
 *      capacity. Calls currently in progress on the connection are allowed to
 *      continue, but new calls will have to wait. There can be no waiters in
 *      this state - the conn would have to go to the WAITING state instead.
 *
 *  (6) IDLE - The connection has no calls in progress upon it and must have
 *      been exposed to the world (ie. the EXPOSED flag must be set). When it
 *      expires, the EXPOSED flag is cleared and the connection transitions to
 *      the INACTIVE state.
 *
 *      The connection is on the rxnet->idle_client_conns list which is kept in
 *      order of how soon they'll expire.
 *
 * There are flags of relevance to the cache:
 *
 *  (1) EXPOSED - The connection ID got exposed to the world. If this flag is
 *      set, an extra ref is added to the connection preventing it from being
 *      reaped when it has no calls outstanding. This flag is cleared and the
 *      ref dropped when a conn is discarded from the idle list.
 *
 *      This allows us to move terminal call state retransmission to the
 *      connection and to discard the call immediately we think it is done
 *      with. It also gives us a chance to reuse the connection.
 *
 *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused. This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations. If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"
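/* Tunables: the cap on the number of client connections, the reap threshold,
 * and the normal and fast idle expiry periods described in the header comment
 * above.
 */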
__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

static void rxrpc_cull_active_client_conns(struct rxrpc_net *);
/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree. The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);
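	/* Allocate cyclically so that the ID cursor keeps moving forward and a
	 * just-released ID isn't immediately handed out again;
	 * rxrpc_may_reuse_conn() discards conns whose IDs drift too far from
	 * the cursor, which keeps the IDR tree compact.
	 */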
	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}
/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}
/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, atomic_read(&conn->usage));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}
/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conn->usage, 1);
	if (cp->exclusive)
		__set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
	if (cp->upgrade)
		__set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);

	conn->params = *cp;
	conn->out_clientflag = RXRPC_CLIENT_INITIATED;
	conn->state = RXRPC_CONN_CLIENT;
	conn->service_id = cp->service_id;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
			 __builtin_return_address(0));
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id_cursor, id, distance, limit;

	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max(rxrpc_max_client_connections * 4, 1024U);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}
/*
 * Create or find a client connection to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list. It's
 * left to the caller to assign a channel and wake up the call.
 */
static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
				 struct rxrpc_call *call,
				 struct rxrpc_conn_parameters *cp,
				 struct sockaddr_rxrpc *srx,
				 gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	call->cong_cwnd = cp->peer->cong_cwnd;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;

	/* If the connection is not meant to be exclusive, search the available
	 * connections to see if the connection we want to use already exists.
	 */
	if (!cp->exclusive) {
		_debug("search 1");
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);
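			/* Client conns are keyed on the tuple {peer, key,
			 * security level, upgrade}; the ?: chain takes the
			 * first non-zero comparison, giving a lexicographic
			 * ordering over that tuple.
			 */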
#define cmp(X) ((long)conn->params.X - (long)cp->X)
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level) ?:
				cmp(upgrade));
#undef cmp
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				if (rxrpc_may_reuse_conn(conn) &&
				    rxrpc_get_connection_maybe(conn))
					goto found_extant_conn;
				/* The connection needs replacing. It's better
				 * to effect that when we have something to
				 * replace it with so that we don't have to
				 * rebalance the tree twice.
				 */
				break;
			}
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* There wasn't a connection yet or we need an exclusive connection.
	 * We need to create a candidate and then potentially redo the search
	 * in case we're racing with another thread also trying to connect on a
	 * shareable connection.
	 */
	_debug("new conn");
	candidate = rxrpc_alloc_client_connection(cp, gfp);
	if (IS_ERR(candidate)) {
		ret = PTR_ERR(candidate);
		goto error_peer;
	}

	/* Add the call to the new connection's waiting list in case we're
	 * going to have to wait for the connection to come live. It's our
	 * connection, so we want first dibs on the channel slots. We would
	 * normally have to take channel_lock but we do this before anyone else
	 * can see the connection.
	 */
	list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);

	if (cp->exclusive) {
		call->conn = candidate;
		call->security_ix = candidate->security_ix;
		call->service_id = candidate->service_id;
		_leave(" = 0 [exclusive %d]", candidate->debug_id);
		return 0;
	}

	/* Publish the new connection for userspace to find. We need to redo
	 * the search before doing this lest we race with someone else adding a
	 * conflicting instance.
	 */
	_debug("search 2");
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0) {
			pp = &(*pp)->rb_left;
		} else if (diff > 0) {
			pp = &(*pp)->rb_right;
		} else {
			if (rxrpc_may_reuse_conn(conn) &&
			    rxrpc_get_connection_maybe(conn))
				goto found_extant_conn;
			/* The old connection is from an outdated epoch. */
			_debug("replace conn");
			clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
			rb_replace_node(&conn->client_node,
					&candidate->client_node,
					&local->client_conns);
			trace_rxrpc_client(conn, -1, rxrpc_client_replace);
			goto candidate_published;
		}
	}

	_debug("new conn");
	rb_link_node(&candidate->client_node, parent, pp);
	rb_insert_color(&candidate->client_node, &local->client_conns);

candidate_published:
	set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
	call->conn = candidate;
	call->security_ix = candidate->security_ix;
	call->service_id = candidate->service_id;
	spin_unlock(&local->client_conns_lock);
	_leave(" = 0 [new %d]", candidate->debug_id);
	return 0;

	/* We come here if we found a suitable connection already in existence.
	 * Discard any candidate we may have allocated, and try to get a
	 * channel on this one.
	 */
found_extant_conn:
	_debug("found conn");
	spin_unlock(&local->client_conns_lock);

	if (candidate) {
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	spin_lock(&conn->channel_lock);
	call->conn = conn;
	call->security_ix = conn->security_ix;
	call->service_id = conn->service_id;
	list_add(&call->chan_wait_link, &conn->waiting_calls);
	spin_unlock(&conn->channel_lock);
	_leave(" = 0 [extant %d]", conn->debug_id);
	return 0;

error_peer:
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
error:
	_leave(" = %d", ret);
	return ret;
}
/*
 * Activate a connection.
 */
static void rxrpc_activate_conn(struct rxrpc_net *rxnet,
				struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_upgrade);
		conn->cache_state = RXRPC_CONN_CLIENT_UPGRADE;
	} else {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
		conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
	}
	rxnet->nr_active_client_conns++;
	list_move_tail(&conn->cache_link, &rxnet->active_client_conns);
}
/*
 * Attempt to animate a connection for a new call.
 *
 * If it's not exclusive, the connection is in the endpoint tree, and we're in
 * the conn's list of those waiting to grab a channel. There is, however, a
 * limit on the number of live connections allowed at any one time, so we may
 * have to wait for capacity to become available.
 *
 * Note that a connection on the waiting queue might *also* have active
 * channels if it has been culled to make space and then re-requested by a new
 * call.
 */
static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet,
				      struct rxrpc_connection *conn)
{
	unsigned int nr_conns;

	_enter("%d,%d", conn->debug_id, conn->cache_state);

	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE ||
	    conn->cache_state == RXRPC_CONN_CLIENT_UPGRADE)
		goto out;

	spin_lock(&rxnet->client_conn_cache_lock);

	nr_conns = rxnet->nr_client_conns;
	if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_count);
		rxnet->nr_client_conns = nr_conns + 1;
	}

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
	case RXRPC_CONN_CLIENT_UPGRADE:
	case RXRPC_CONN_CLIENT_WAITING:
		break;

	case RXRPC_CONN_CLIENT_INACTIVE:
	case RXRPC_CONN_CLIENT_CULLED:
	case RXRPC_CONN_CLIENT_IDLE:
		if (nr_conns >= rxrpc_max_client_connections)
			goto wait_for_capacity;
		goto activate_conn;

	default:
		BUG();
	}

out_unlock:
	spin_unlock(&rxnet->client_conn_cache_lock);
out:
	_leave(" [%d]", conn->cache_state);
	return;

activate_conn:
	_debug("activate");
	rxrpc_activate_conn(rxnet, conn);
	goto out_unlock;

wait_for_capacity:
	_debug("wait");
	trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
	conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
	list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns);
	goto out_unlock;
}
/*
 * Deactivate a channel.
 */
static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn,
					 unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
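	/* Clear the published call pointer and mark this channel slot as free
	 * for the next waiting call.
	 */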
	rcu_assign_pointer(chan->call, NULL);
	conn->active_chans &= ~(1 << channel);
}
/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);

	write_lock_bh(&call->state_lock);
	if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
		call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	else
		call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
	write_unlock_bh(&call->state_lock);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	conn->active_chans |= 1 << channel;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cid = conn->proto.cid | channel;
	call->call_id = call_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	/* Paired with the read barrier in rxrpc_wait_for_channel(). This
	 * orders cid and epoch in the connection wrt to call_id without the
	 * need to take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();
	chan->call_id = call_id;
	chan->call_debug_id = call->debug_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}
/*
 * Assign channels and callNumbers to waiting calls with channel_lock
 * held by caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn)
{
	u8 avail, mask;

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
		mask = RXRPC_ACTIVE_CHANS_MASK;
		break;
	case RXRPC_CONN_CLIENT_UPGRADE:
		mask = 0x01;
		break;
	default:
		return;
	}
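	/* Hand out the lowest-numbered free channel each time round: avail is
	 * the bitmap of unused channels permitted by the mask and __ffs()
	 * picks its lowest set bit.
	 */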
	while (!list_empty(&conn->waiting_calls) &&
	       (avail = ~conn->active_chans,
		avail &= mask,
		avail != 0))
		rxrpc_activate_one_channel(conn, __ffs(avail));
}
/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_connection *conn)
{
	_enter("%d", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);

	if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
		return;

	spin_lock(&conn->channel_lock);
	rxrpc_activate_channels_locked(conn);
	spin_unlock(&conn->channel_lock);
	_leave("");
}
/*
 * Wait for a callNumber and a channel to be granted to a call.
 */
static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
{
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!call->call_id) {
		DECLARE_WAITQUEUE(myself, current);

		if (!gfpflags_allow_blocking(gfp)) {
			ret = -EAGAIN;
			goto out;
		}

		add_wait_queue_exclusive(&call->waitq, &myself);
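		/* Sleep until rxrpc_activate_one_channel() assigns us a call
		 * ID and wakes the queue, or until a signal arrives.
		 */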
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (call->call_id)
				break;
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		remove_wait_queue(&call->waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out:
	_leave(" = %d", ret);
	return ret;
}
/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
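	/* Trim the connection cache first: discard expired idle conns and cull
	 * excess active conns so that a new connection has room to go active.
	 */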
	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
	rxrpc_cull_active_client_conns(rxnet);

	ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
	if (ret < 0)
		goto out;

	rxrpc_animate_client_conn(rxnet, call->conn);
	rxrpc_activate_channels(call->conn);

	ret = rxrpc_wait_for_channel(call, gfp);
	if (ret < 0) {
		rxrpc_disconnect_client_call(call);
		goto out;
	}

	spin_lock_bh(&call->conn->params.peer->lock);
	hlist_add_head_rcu(&call->error_link,
			   &call->conn->params.peer->error_targets);
	spin_unlock_bh(&call->conn->params.peer->lock);

out:
	_leave(" = %d", ret);
	return ret;
}
/*
 * Note that a connection is about to be exposed to the world. Once it is
 * exposed, we maintain an extra ref on it that stops it from being summarily
 * discarded before it's (a) had a chance to deal with retransmission and (b)
 * had a chance at re-use (the per-connection security negotiation is
 * expensive).
 */
static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
				     unsigned int channel)
{
	if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
		rxrpc_get_connection(conn);
	}
}
/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used. If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		rxrpc_expose_client_conn(conn, channel);
	}
}
/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
{
	unsigned long now = jiffies;
	unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
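	/* timer_reduce() only ever brings the expiry forward; if the timer is
	 * already due to fire sooner, it is left alone.
	 */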
	if (rxnet->live)
		timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
}
/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_net *rxnet = conn->params.local->rxnet;

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
	call->conn = NULL;

	spin_lock(&conn->channel_lock);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded. If the conn didn't get used either, it will follow
	 * immediately unless someone else grabs it in the meantime.
	 */
	if (!list_empty(&call->chan_wait_link)) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);

		/* We must deactivate or idle the connection if it's now
		 * waiting for nothing.
		 */
		spin_lock(&rxnet->client_conn_cache_lock);
		if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
		    list_empty(&conn->waiting_calls) &&
		    !conn->active_chans)
			goto idle_connection;
		goto out;
	}

	ASSERTCMP(rcu_access_pointer(chan->call), ==, call);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);
	}

	/* See if we can pass the channel directly to another call. */
	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
	    !list_empty(&conn->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out_2;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call. The first DATA packet
	 * of the follow on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		WRITE_ONCE(chan->final_ack_at, final_ack_at);
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Things are more complex and we need the cache lock. We might be
	 * able to simply idle the conn or it might now be lurking on the wait
	 * list. It might even get moved back to the active list whilst we're
	 * waiting for the lock.
	 */
	spin_lock(&rxnet->client_conn_cache_lock);

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_UPGRADE:
		/* Deal with termination of a service upgrade probe. */
		if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
			clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
			rxrpc_activate_channels_locked(conn);
		}
		/* fall through */
	case RXRPC_CONN_CLIENT_ACTIVE:
		if (list_empty(&conn->waiting_calls)) {
			rxrpc_deactivate_one_channel(conn, channel);
			if (!conn->active_chans) {
				rxnet->nr_active_client_conns--;
				goto idle_connection;
			}
			goto out;
		}

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;

	case RXRPC_CONN_CLIENT_CULLED:
		rxrpc_deactivate_one_channel(conn, channel);
		ASSERT(list_empty(&conn->waiting_calls));
		if (!conn->active_chans)
			goto idle_connection;
		goto out;

	case RXRPC_CONN_CLIENT_WAITING:
		rxrpc_deactivate_one_channel(conn, channel);
		goto out;

	default:
		BUG();
	}

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
out_2:
	spin_unlock(&conn->channel_lock);
	rxrpc_put_connection(conn);
	_leave("");
	return;

idle_connection:
	/* As no channels remain active, the connection gets deactivated
	 * immediately or moved to the idle list for a short while.
	 */
	if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;
		conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
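		/* Only kick the reap timer if this conn went onto the head of
		 * the idle list, ie. it is now the first due to expire.
		 */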
		if (rxnet->idle_client_conns.next == &conn->cache_link &&
		    !rxnet->kill_all_client_conns)
			rxrpc_set_client_reap_timer(rxnet);
	} else {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
		conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
		list_del_init(&conn->cache_link);
	}
	goto out;
}
/*
 * Clean up a dead client connection.
 */
static struct rxrpc_connection *
rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *next = NULL;
	struct rxrpc_local *local = conn->params.local;
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned int nr_conns;

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);

	if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
		spin_lock(&local->client_conns_lock);
		if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
				       &conn->flags))
			rb_erase(&conn->client_node, &local->client_conns);
		spin_unlock(&local->client_conns_lock);
	}

	rxrpc_put_client_connection_id(conn);

	ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);

	if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
		spin_lock(&rxnet->client_conn_cache_lock);
		nr_conns = --rxnet->nr_client_conns;

		if (nr_conns < rxrpc_max_client_connections &&
		    !list_empty(&rxnet->waiting_client_conns)) {
			next = list_entry(rxnet->waiting_client_conns.next,
					  struct rxrpc_connection, cache_link);
			rxrpc_get_connection(next);
			rxrpc_activate_conn(rxnet, next);
		}

		spin_unlock(&rxnet->client_conn_cache_lock);
	}

	rxrpc_kill_connection(conn);
	if (next)
		rxrpc_activate_channels(next);

	/* We need to get rid of the temporary ref we took upon next, but we
	 * can't call rxrpc_put_connection() recursively.
	 */
	return next;
}
/*
 * Clean up dead client connections.
 */
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n;
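	/* Loop rather than recurse: dropping the last ref may activate a
	 * waiting connection, and rxrpc_put_one_client_conn() hands that
	 * connection back so the temporary ref it took can be dropped here
	 * too.
	 */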
	do {
		n = atomic_dec_return(&conn->usage);
		trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
		if (n > 0)
			return;
		ASSERTCMP(n, >=, 0);

		conn = rxrpc_put_one_client_conn(conn);
	} while (conn);
}
/*
 * Kill the longest-active client connections to make room for new ones.
 */
static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn;
	unsigned int nr_conns = rxnet->nr_client_conns;
	unsigned int nr_active, limit;

	_enter("");

	ASSERTCMP(nr_conns, >=, 0);
	if (nr_conns < rxrpc_max_client_connections) {
		_leave(" [ok]");
		return;
	}
	limit = rxrpc_reap_client_connections;

	spin_lock(&rxnet->client_conn_cache_lock);
	nr_active = rxnet->nr_active_client_conns;
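	/* The active list is kept in activation order, so the conn at the head
	 * has been active the longest and is culled first.
	 */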
	while (nr_active > limit) {
		ASSERT(!list_empty(&rxnet->active_client_conns));
		conn = list_entry(rxnet->active_client_conns.next,
				  struct rxrpc_connection, cache_link);
		ASSERTIFCMP(conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE,
			    conn->cache_state, ==, RXRPC_CONN_CLIENT_UPGRADE);

		if (list_empty(&conn->waiting_calls)) {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
			conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
			list_del_init(&conn->cache_link);
		} else {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
			conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
			list_move_tail(&conn->cache_link,
				       &rxnet->waiting_client_conns);
		}

		nr_active--;
	}

	rxnet->nr_active_client_conns = nr_active;
	spin_unlock(&rxnet->client_conn_cache_lock);
	ASSERTCMP(nr_active, >=, 0);
	_leave(" [culled]");
}
/*
 * Discard expired client connections from the idle list. Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, client_conn_reaper);
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	if (list_empty(&rxnet->idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding */
	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = rxnet->nr_client_conns;

next:
	spin_lock(&rxnet->client_conn_cache_lock);

	if (list_empty(&rxnet->idle_client_conns))
		goto out;

	conn = list_entry(rxnet->idle_client_conns.next,
			  struct rxrpc_connection, cache_link);
	ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));

	if (!rxnet->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout. We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->params.local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
		BUG();
	conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
	list_del_init(&conn->cache_link);

	spin_unlock(&rxnet->client_conn_cache_lock);

	/* When we cleared the EXPOSED flag, we took on responsibility for the
	 * reference that that had on the usage count. We deal with that here.
	 * If someone re-sets the flag and re-gets the ref, that's fine.
	 */
	rxrpc_put_connection(conn);
	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time. We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!rxnet->kill_all_client_conns)
		timer_reduce(&rxnet->client_conn_reap_timer,
			     conn_expires_at);

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
	spin_unlock(&rxnet->client_conn_discard_lock);
	_leave("");
}
/*
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out
 */
void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	rxnet->kill_all_client_conns = true;
	spin_unlock(&rxnet->client_conn_cache_lock);

	del_timer_sync(&rxnet->client_conn_reap_timer);

	if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
		_debug("destroy: queue failed");

	_leave("");
}