net/rxrpc/conn_client.c (Linux 4.8-rc8)

/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include "ar-internal.h"

/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch is changed if this wraps.
 *
 * TODO: The IDR tree gets very expensive on memory if the connection IDs are
 * widely scattered throughout the number space, so we shall need to retire
 * connections that have, say, an ID more than four times the maximum number of
 * client conns away from the current allocation point to try and keep the IDs
 * concentrated.  We will also need to retire connections from an old epoch.
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
                                          gfp_t gfp)
{
        u32 epoch;
        int id;

        _enter("");

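        /* Preallocate IDR memory whilst we can still sleep; the actual
         * allocation below happens under a spinlock and so must use
         * GFP_NOWAIT.
         */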
        idr_preload(gfp);
        spin_lock(&rxrpc_conn_id_lock);

        epoch = rxrpc_epoch;

        /* We could use idr_alloc_cyclic() here, but we really need to know
         * when the thing wraps so that we can advance the epoch.
         */
        if (rxrpc_client_conn_ids.cur == 0)
                rxrpc_client_conn_ids.cur = 1;
        id = idr_alloc(&rxrpc_client_conn_ids, conn,
                       rxrpc_client_conn_ids.cur, 0x40000000, GFP_NOWAIT);
        if (id < 0) {
                if (id != -ENOSPC)
                        goto error;
                id = idr_alloc(&rxrpc_client_conn_ids, conn,
                               1, 0x40000000, GFP_NOWAIT);
                if (id < 0)
                        goto error;
                epoch++;
                rxrpc_epoch = epoch;
        }
        rxrpc_client_conn_ids.cur = id + 1;

        spin_unlock(&rxrpc_conn_id_lock);
        idr_preload_end();

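        /* The allocated ID forms the connection number; the low-order bits of
         * the CID are left clear to carry the channel number of each call.
         */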
        conn->proto.epoch = epoch;
        conn->proto.cid = id << RXRPC_CIDSHIFT;
        set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
        _leave(" [CID %x:%x]", epoch, conn->proto.cid);
        return 0;

error:
        spin_unlock(&rxrpc_conn_id_lock);
        idr_preload_end();
        _leave(" = %d", id);
        return id;
}

/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
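        /* Only connections that actually obtained an ID (and hence have an
         * entry in the IDR) need to release one.
         */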
        if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
                spin_lock(&rxrpc_conn_id_lock);
                idr_remove(&rxrpc_client_conn_ids,
                           conn->proto.cid >> RXRPC_CIDSHIFT);
                spin_unlock(&rxrpc_conn_id_lock);
        }
}

/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
        struct rxrpc_connection *conn;
        int id;

        if (!idr_is_empty(&rxrpc_client_conn_ids)) {
                idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
                        pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
                               conn, atomic_read(&conn->usage));
                }
                BUG();
        }

        idr_destroy(&rxrpc_client_conn_ids);
}

/*
 * Allocate a client connection.  The caller must take care to clear any
 * padding bytes in *cp.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
        struct rxrpc_connection *conn;
        int ret;

        _enter("");

        conn = rxrpc_alloc_connection(gfp);
        if (!conn) {
                _leave(" = -ENOMEM");
                return ERR_PTR(-ENOMEM);
        }

        conn->params = *cp;
        conn->out_clientflag = RXRPC_CLIENT_INITIATED;
        conn->state = RXRPC_CONN_CLIENT;

        ret = rxrpc_get_client_connection_id(conn, gfp);
        if (ret < 0)
                goto error_0;

        ret = rxrpc_init_client_conn_security(conn);
        if (ret < 0)
                goto error_1;

        ret = conn->security->prime_packet_security(conn);
        if (ret < 0)
                goto error_2;

        write_lock(&rxrpc_connection_lock);
        list_add_tail(&conn->link, &rxrpc_connections);
        write_unlock(&rxrpc_connection_lock);

        /* We steal the caller's peer ref. */
        cp->peer = NULL;
        rxrpc_get_local(conn->params.local);
        key_get(conn->params.key);

        _leave(" = %p", conn);
        return conn;
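
/* Unwind in the reverse order of construction. */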
error_2:
        conn->security->clear(conn);
error_1:
        rxrpc_put_client_connection_id(conn);
error_0:
        kfree(conn);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_call *call,
                       struct rxrpc_conn_parameters *cp,
                       struct sockaddr_rxrpc *srx,
                       gfp_t gfp)
{
        struct rxrpc_connection *conn, *candidate = NULL;
        struct rxrpc_local *local = cp->local;
        struct rb_node *p, **pp, *parent;
        long diff;
        int chan;

        DECLARE_WAITQUEUE(myself, current);

        _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
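
        /* Look up or create the target peer record; a new client connection
         * will take over this ref (see rxrpc_alloc_client_connection()).
         */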
        cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
        if (!cp->peer)
                return -ENOMEM;

        if (!cp->exclusive) {
                /* Search for an existing client connection unless this is going
                 * to be a connection that's used exclusively for a single call.
                 */
                _debug("search 1");
                spin_lock(&local->client_conns_lock);
                p = local->client_conns.rb_node;
                while (p) {
                        conn = rb_entry(p, struct rxrpc_connection, client_node);
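
                        /* Connections are keyed on the peer, the security key
                         * and the security level; compare each in turn.
                         */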
#define cmp(X) ((long)conn->params.X - (long)cp->X)
                        diff = (cmp(peer) ?:
                                cmp(key) ?:
                                cmp(security_level));
                        if (diff < 0)
                                p = p->rb_left;
                        else if (diff > 0)
                                p = p->rb_right;
                        else
                                goto found_extant_conn;
                }
                spin_unlock(&local->client_conns_lock);
        }

        /* We didn't find a connection or we want an exclusive one. */
        _debug("get new conn");
        candidate = rxrpc_alloc_client_connection(cp, gfp);
        if (IS_ERR(candidate)) {
                _leave(" = %ld", PTR_ERR(candidate));
                return PTR_ERR(candidate);
        }

        if (cp->exclusive) {
                /* Assign the call on an exclusive connection to channel 0 and
                 * don't add the connection to the endpoint's shareable conn
                 * lookup tree.
                 */
                _debug("exclusive chan 0");
                conn = candidate;
                atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
                spin_lock(&conn->channel_lock);
                chan = 0;
                goto found_channel;
        }

        /* We need to redo the search before attempting to add a new connection
         * lest we race with someone else adding a conflicting instance.
         */
        _debug("search 2");
        spin_lock(&local->client_conns_lock);

        pp = &local->client_conns.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                conn = rb_entry(parent, struct rxrpc_connection, client_node);

                diff = (cmp(peer) ?:
                        cmp(key) ?:
                        cmp(security_level));
                if (diff < 0)
                        pp = &(*pp)->rb_left;
                else if (diff > 0)
                        pp = &(*pp)->rb_right;
                else
                        goto found_extant_conn;
        }

        /* The second search also failed; simply add the new connection with
         * the new call in channel 0.  Note that we need to take the channel
         * lock before dropping the client conn lock.
         */
        _debug("new conn");
        set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
        rb_link_node(&candidate->client_node, parent, pp);
        rb_insert_color(&candidate->client_node, &local->client_conns);
attached:
        conn = candidate;
        candidate = NULL;
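
        /* The new call occupies channel 0, so only the remaining channels are
         * counted as available.
         */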
        atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
        spin_lock(&conn->channel_lock);
        spin_unlock(&local->client_conns_lock);
        chan = 0;

found_channel:
        _debug("found chan");
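        /* Bind the call to the channel: the channel number goes in the bottom
         * bits of the CID and the channel's call counter supplies the call ID.
         */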
        call->conn = conn;
        call->channel = chan;
        call->epoch = conn->proto.epoch;
        call->cid = conn->proto.cid | chan;
        call->call_id = ++conn->channels[chan].call_counter;
        conn->channels[chan].call_id = call->call_id;
        rcu_assign_pointer(conn->channels[chan].call, call);

        _net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id);

        spin_unlock(&conn->channel_lock);
        rxrpc_put_peer(cp->peer);
        cp->peer = NULL;
        _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
        return 0;

        /* We found a potentially suitable connection already in existence.  If
         * we can reuse it (ie. its usage count hasn't been reduced to 0 by the
         * reaper), discard any candidate we may have allocated, and try to get
         * a channel on this one, otherwise we have to replace it.
         */
found_extant_conn:
        _debug("found conn");
        if (!rxrpc_get_connection_maybe(conn)) {
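                /* The old connection is being torn down; swap our candidate
                 * into its place in the lookup tree.
                 */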
                set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
                rb_replace_node(&conn->client_node,
                                &candidate->client_node,
                                &local->client_conns);
                clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
                goto attached;
        }

        spin_unlock(&local->client_conns_lock);

        rxrpc_put_connection(candidate);
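
        /* Try to claim a spare channel; if there are none and we are allowed
         * to sleep, wait for one to be vacated.
         */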
        if (!atomic_add_unless(&conn->avail_chans, -1, 0)) {
                if (!gfpflags_allow_blocking(gfp)) {
                        rxrpc_put_connection(conn);
                        _leave(" = -EAGAIN");
                        return -EAGAIN;
                }
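
                /* Standard wait-queue loop: recheck for a free channel after
                 * marking ourselves interruptible, and bail out on a signal.
                 */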
                add_wait_queue(&conn->channel_wq, &myself);
                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (atomic_add_unless(&conn->avail_chans, -1, 0))
                                break;
                        if (signal_pending(current))
                                goto interrupted;
                        schedule();
                }
                remove_wait_queue(&conn->channel_wq, &myself);
                __set_current_state(TASK_RUNNING);
        }

        /* The connection allegedly now has a free channel and we can now
         * attach the call to it.
         */
        spin_lock(&conn->channel_lock);

        for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
                if (!conn->channels[chan].call)
                        goto found_channel;
        BUG();

interrupted:
        remove_wait_queue(&conn->channel_wq, &myself);
        __set_current_state(TASK_RUNNING);
        rxrpc_put_connection(conn);
        rxrpc_put_peer(cp->peer);
        cp->peer = NULL;
        _leave(" = -ERESTARTSYS");
        return -ERESTARTSYS;
}

/*
 * Remove a client connection from the local endpoint's tree, thereby removing
 * it as a target for reuse for new client calls.
 */
void rxrpc_unpublish_client_conn(struct rxrpc_connection *conn)
{
        struct rxrpc_local *local = conn->params.local;

        spin_lock(&local->client_conns_lock);
        if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags))
                rb_erase(&conn->client_node, &local->client_conns);
        spin_unlock(&local->client_conns_lock);

        rxrpc_put_client_connection_id(conn);
}