// SPDX-License-Identifier: GPL-2.0-or-later
/* Service connection management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include "ar-internal.h"
12 * Find a service connection under RCU conditions.
14 * We could use a hash table, but that is subject to bucket stuffing by an
15 * attacker as the client gets to pick the epoch and cid values and would know
16 * the hash function. So, instead, we use a hash table for the peer and from
17 * that an rbtree to find the service connection. Under ordinary circumstances
18 * it might be slower than a large hash table, but it is at least limited in
21 struct rxrpc_connection
*rxrpc_find_service_conn_rcu(struct rxrpc_peer
*peer
,
24 struct rxrpc_connection
*conn
= NULL
;
25 struct rxrpc_conn_proto k
;
26 struct rxrpc_skb_priv
*sp
= rxrpc_skb(skb
);
30 k
.epoch
= sp
->hdr
.epoch
;
31 k
.cid
= sp
->hdr
.cid
& RXRPC_CIDMASK
;
34 /* Unfortunately, rbtree walking doesn't give reliable results
35 * under just the RCU read lock, so we have to check for
38 read_seqbegin_or_lock(&peer
->service_conn_lock
, &seq
);
40 p
= rcu_dereference_raw(peer
->service_conns
.rb_node
);
42 conn
= rb_entry(p
, struct rxrpc_connection
, service_node
);
44 if (conn
->proto
.index_key
< k
.index_key
)
45 p
= rcu_dereference_raw(p
->rb_left
);
46 else if (conn
->proto
.index_key
> k
.index_key
)
47 p
= rcu_dereference_raw(p
->rb_right
);
52 } while (need_seqretry(&peer
->service_conn_lock
, seq
));
54 done_seqretry(&peer
->service_conn_lock
, seq
);
55 _leave(" = %d", conn
? conn
->debug_id
: -1);
60 * Insert a service connection into a peer's tree, thereby making it a target
61 * for incoming packets.
63 static void rxrpc_publish_service_conn(struct rxrpc_peer
*peer
,
64 struct rxrpc_connection
*conn
)
66 struct rxrpc_connection
*cursor
= NULL
;
67 struct rxrpc_conn_proto k
= conn
->proto
;
68 struct rb_node
**pp
, *parent
;
70 write_seqlock_bh(&peer
->service_conn_lock
);
72 pp
= &peer
->service_conns
.rb_node
;
76 cursor
= rb_entry(parent
,
77 struct rxrpc_connection
, service_node
);
79 if (cursor
->proto
.index_key
< k
.index_key
)
81 else if (cursor
->proto
.index_key
> k
.index_key
)
82 pp
= &(*pp
)->rb_right
;
84 goto found_extant_conn
;
87 rb_link_node_rcu(&conn
->service_node
, parent
, pp
);
88 rb_insert_color(&conn
->service_node
, &peer
->service_conns
);
90 set_bit(RXRPC_CONN_IN_SERVICE_CONNS
, &conn
->flags
);
91 write_sequnlock_bh(&peer
->service_conn_lock
);
92 _leave(" = %d [new]", conn
->debug_id
);
96 if (atomic_read(&cursor
->usage
) == 0)
97 goto replace_old_connection
;
98 write_sequnlock_bh(&peer
->service_conn_lock
);
99 /* We should not be able to get here. rxrpc_incoming_connection() is
100 * called in a non-reentrant context, so there can't be a race to
101 * insert a new connection.
105 replace_old_connection
:
106 /* The old connection is from an outdated epoch. */
107 _debug("replace conn");
108 rb_replace_node_rcu(&cursor
->service_node
,
110 &peer
->service_conns
);
111 clear_bit(RXRPC_CONN_IN_SERVICE_CONNS
, &cursor
->flags
);
116 * Preallocate a service connection. The connection is placed on the proc and
117 * reap lists so that we don't have to get the lock from BH context.
119 struct rxrpc_connection
*rxrpc_prealloc_service_connection(struct rxrpc_net
*rxnet
,
122 struct rxrpc_connection
*conn
= rxrpc_alloc_connection(gfp
);
125 /* We maintain an extra ref on the connection whilst it is on
126 * the rxrpc_connections list.
128 conn
->state
= RXRPC_CONN_SERVICE_PREALLOC
;
129 atomic_set(&conn
->usage
, 2);
131 atomic_inc(&rxnet
->nr_conns
);
132 write_lock(&rxnet
->conn_lock
);
133 list_add_tail(&conn
->link
, &rxnet
->service_conns
);
134 list_add_tail(&conn
->proc_link
, &rxnet
->conn_proc_list
);
135 write_unlock(&rxnet
->conn_lock
);
137 trace_rxrpc_conn(conn
->debug_id
, rxrpc_conn_new_service
,
138 atomic_read(&conn
->usage
),
139 __builtin_return_address(0));
146 * Set up an incoming connection. This is called in BH context with the RCU
149 void rxrpc_new_incoming_connection(struct rxrpc_sock
*rx
,
150 struct rxrpc_connection
*conn
,
151 const struct rxrpc_security
*sec
,
155 struct rxrpc_skb_priv
*sp
= rxrpc_skb(skb
);
159 conn
->proto
.epoch
= sp
->hdr
.epoch
;
160 conn
->proto
.cid
= sp
->hdr
.cid
& RXRPC_CIDMASK
;
161 conn
->params
.service_id
= sp
->hdr
.serviceId
;
162 conn
->service_id
= sp
->hdr
.serviceId
;
163 conn
->security_ix
= sp
->hdr
.securityIndex
;
164 conn
->out_clientflag
= 0;
165 conn
->security
= sec
;
166 conn
->server_key
= key_get(key
);
167 if (conn
->security_ix
)
168 conn
->state
= RXRPC_CONN_SERVICE_UNSECURED
;
170 conn
->state
= RXRPC_CONN_SERVICE
;
172 /* See if we should upgrade the service. This can only happen on the
173 * first packet on a new connection. Once done, it applies to all
174 * subsequent calls on that connection.
176 if (sp
->hdr
.userStatus
== RXRPC_USERSTATUS_SERVICE_UPGRADE
&&
177 conn
->service_id
== rx
->service_upgrade
.from
)
178 conn
->service_id
= rx
->service_upgrade
.to
;
180 /* Make the connection a target for incoming packets. */
181 rxrpc_publish_service_conn(conn
->params
.peer
, conn
);
183 _net("CONNECTION new %d {%x}", conn
->debug_id
, conn
->proto
.cid
);
187 * Remove the service connection from the peer's tree, thereby removing it as a
188 * target for incoming packets.
190 void rxrpc_unpublish_service_conn(struct rxrpc_connection
*conn
)
192 struct rxrpc_peer
*peer
= conn
->params
.peer
;
194 write_seqlock_bh(&peer
->service_conn_lock
);
195 if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS
, &conn
->flags
))
196 rb_erase(&conn
->service_node
, &peer
->service_conns
);
197 write_sequnlock_bh(&peer
->service_conn_lock
);