/*
 * net/tipc/net.c: TIPC network routing code
 *
 * Copyright (c) 1995-2006, Ericsson AB
 * Copyright (c) 2005, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "bearer.h"
#include "net.h"
#include "zone.h"
#include "addr.h"
#include "name_table.h"
#include "name_distr.h"
#include "subscr.h"
#include "link.h"
#include "msg.h"
#include "port.h"
#include "bcast.h"
/*
 * The TIPC locking policy is designed to ensure a very fine locking
 * granularity, permitting complete parallel access to individual
 * port and node/link instances. The code consists of three major
 * locking domains, each protected by its own disjoint set of locks.
 *
 * 1: The routing hierarchy.
 *    Comprises the structures 'zone', 'cluster', 'node', 'link'
 *    and 'bearer'. The whole hierarchy is protected by a big
 *    read/write lock, tipc_net_lock, to ensure that nothing is added
 *    or removed while code is accessing any of these structures.
 *    This layer must not be called from the two others while they
 *    hold any of their own locks.
 *    Neither must it itself do any upcalls to the other two before
 *    it has released tipc_net_lock and other protective locks.
 *
 *    Within the tipc_net_lock domain there are two sub-domains, 'node' and
 *    'bearer', where local write operations are permitted,
 *    provided that those are protected by individual spin_locks
 *    per instance. Code holding tipc_net_lock(read) and a node spin_lock
 *    is permitted to poke around in both the node itself and its
 *    subordinate links, i.e., it can update link counters and queues,
 *    change link state, send protocol messages, and alter the
 *    "active_links" array in the node; but it can _not_ remove a link
 *    or a node from the overall structure.
 *    Correspondingly, individual bearers may change status within a
 *    tipc_net_lock(read), protected by an individual spin_lock per bearer
 *    instance, but tipc_net_lock(write) is needed to add or remove bearers.
 *    (See the illustrative sketch below.)
 */
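/*
 * A minimal sketch (not part of the original file) of the domain 1 access
 * pattern described above: read access to the hierarchy plus a per-node
 * spin_lock for local writes. The helpers tipc_node_find(), tipc_node_lock()
 * and tipc_node_unlock() are assumed from node.c; treat the fragment as an
 * illustration, not verbatim kernel API.
 *
 *        read_lock_bh(&tipc_net_lock);           // hierarchy cannot change now
 *        n_ptr = tipc_node_find(addr);
 *        if (n_ptr) {
 *                tipc_node_lock(n_ptr);          // per-instance spin_lock
 *                // OK: update link counters/queues, change link state,
 *                //     alter n_ptr->active_links[]
 *                // NOT OK: delete the node or one of its links
 *                tipc_node_unlock(n_ptr);
 *        }
 *        read_unlock_bh(&tipc_net_lock);
 */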
/*
 * 2: The transport level of the protocol.
 *    This consists of the structures port (and its user-level
 *    representations, such as user_port and tipc_sock), reference, and
 *    tipc_user (port.c, reg.c, socket.c).
 *
 *    This layer has four different locks:
 *    - The tipc_port spin_lock. This protects each port instance
 *      from parallel data access and removal. Since we can not place
 *      this lock in the port itself, it has been placed in the
 *      corresponding reference table entry, which has the same life
 *      cycle as the module. This entry is difficult to access from
 *      outside the TIPC core, however, so a pointer to the lock has
 *      been added to the port instance, to be used for unlocking only.
 *    - A read/write lock protecting the reference table itself (ref.c).
 *      (Nobody is using read-only access to this, so it could just as
 *      well be changed to a spin_lock.)
 *    - A spin_lock protecting the registry of kernel/driver users (reg.c).
 *    - A global spin_lock (tipc_port_lock), whose only task is to ensure
 *      consistency where more than one port is involved in an operation,
 *      i.e., when a port is part of a linked list of ports.
 *      There are two such lists: 'port_list', which is used for management,
 *      and 'wait_list', which is used to queue ports during congestion.
 *      (See the port locking sketch below.)
 */
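/*
 * A minimal sketch (not part of the original file) of the port locking
 * pattern in domain 2: since the spin_lock lives in the reference table
 * entry, lookup and locking are combined in a single step. tipc_port_lock()
 * and tipc_port_unlock() are assumed from port.c; the fragment is
 * illustrative only.
 *
 *        struct port *p_ptr;
 *
 *        p_ptr = tipc_port_lock(ref);    // NULL if the reference is stale
 *        if (!p_ptr)
 *                return;                 // port is gone; nothing to do
 *        // ... read or modify the port instance ...
 *        tipc_port_unlock(p_ptr);        // uses the lock pointer stored in the port
 */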
/*
 * 3: The name table (name_table.c, name_distr.c, subscr.c)
 *    - There is one big read/write-lock (tipc_nametbl_lock) protecting the
 *      overall name table structure. Nothing may be added to or removed
 *      from this structure without holding write access to it.
 *    - There is one local spin_lock per sub_sequence, which can be seen
 *      as a sub-domain of the tipc_nametbl_lock domain. It is used only
 *      for translation operations, and is needed because a translation
 *      steps the root of the 'publication' linked list between each lookup.
 *      It is always used within the scope of a tipc_nametbl_lock(read).
 *    - A local spin_lock protecting the queue of subscriber events.
 *      (See the translation sketch below.)
 */
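/*
 * A minimal sketch (not part of the original file) of a domain 3 lookup,
 * mirroring the call made in net_route_named_msg() below. The sub_sequence
 * spin_lock is taken inside tipc_nametbl_translate() itself, within the
 * scope of tipc_nametbl_lock(read), so callers see only this interface.
 *
 *        u32 dnode = addr_domain(scope);
 *        u32 dport = tipc_nametbl_translate(type, instance, &dnode);
 *
 *        if (dport) {
 *                // publication found: dnode/dport identify the destination
 *        } else {
 *                // no match: caller rejects with TIPC_ERR_NO_NAME
 *        }
 */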
rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED;
struct network tipc_net = { 0 };
struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
{
        return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)],
                                            addr, ref);
}
u32 tipc_net_select_router(u32 addr, u32 ref)
{
        return tipc_zone_select_router(tipc_net.zones[tipc_zone(addr)],
                                       addr, ref);
}
u32 tipc_net_next_node(u32 a)
{
        if (tipc_net.zones[tipc_zone(a)])
                return tipc_zone_next_node(a);
        return 0;
}
void tipc_net_remove_as_router(u32 router)
{
        u32 z_num;

        for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
                if (!tipc_net.zones[z_num])
                        continue;
                tipc_zone_remove_as_router(tipc_net.zones[z_num], router);
        }
}
void tipc_net_send_external_routes(u32 dest)
{
        u32 z_num;

        for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
                if (tipc_net.zones[z_num])
                        tipc_zone_send_external_routes(tipc_net.zones[z_num],
                                                       dest);
        }
}
static int net_init(void)
{
        u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1);

        memset(&tipc_net, 0, sizeof(tipc_net));
        tipc_net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC);
        if (!tipc_net.zones) {
                return -ENOMEM;
        }
        memset(tipc_net.zones, 0, sz);
        return TIPC_OK;
}
static void net_stop(void)
{
        u32 z_num;

        for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
                tipc_zone_delete(tipc_net.zones[z_num]);
        }
        kfree(tipc_net.zones);
        tipc_net.zones = NULL;
}
static void net_route_named_msg(struct sk_buff *buf)
{
        struct tipc_msg *msg = buf_msg(buf);
        u32 dnode;
        u32 dport;

        if (!msg_named(msg)) {
                msg_dbg(msg, "tipc_net->drop_nam:");
                buf_discard(buf);
                return;
        }

        dnode = addr_domain(msg_lookup_scope(msg));
        dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
        dbg("tipc_net->lookup<%u,%u>-><%u,%x>\n",
            msg_nametype(msg), msg_nameinst(msg), dport, dnode);
        if (dport) {
                msg_set_destnode(msg, dnode);
                msg_set_destport(msg, dport);
                tipc_net_route_msg(buf);
                return;
        }
        msg_dbg(msg, "tipc_net->rej:NO NAME: ");
        tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
}
void tipc_net_route_msg(struct sk_buff *buf)
{
        struct tipc_msg *msg;
        u32 dnode;

        if (!buf)
                return;
        msg = buf_msg(buf);

        msg_incr_reroute_cnt(msg);
        if (msg_reroute_cnt(msg) > 6) {
                if (msg_errcode(msg)) {
                        msg_dbg(msg, "NET>DISC>:");
                        buf_discard(buf);
                } else {
                        msg_dbg(msg, "NET>REJ>:");
                        tipc_reject_msg(buf, msg_destport(msg) ?
                                        TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
                }
                return;
        }

        msg_dbg(msg, "tipc_net->rout: ");

        /* Handle message for this node */
        dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
        if (in_scope(dnode, tipc_own_addr)) {
                if (msg_isdata(msg)) {
                        if (msg_mcast(msg))
                                tipc_port_recv_mcast(buf, NULL);
                        else if (msg_destport(msg))
                                tipc_port_recv_msg(buf);
                        else
                                net_route_named_msg(buf);
                        return;
                }
                switch (msg_user(msg)) {
                case ROUTE_DISTRIBUTOR:
                        tipc_cltr_recv_routing_table(buf);
                        break;
                case NAME_DISTRIBUTOR:
                        tipc_named_recv(buf);
                        break;
                case CONN_MANAGER:
                        tipc_port_recv_proto_msg(buf);
                        break;
                default:
                        msg_dbg(msg, "DROP/NET/<REC<");
                        buf_discard(buf);
                }
                return;
        }

        /* Handle message for another node */
        msg_dbg(msg, "NET>SEND>: ");
        tipc_link_send(buf, dnode, msg_link_selector(msg));
}
int tipc_net_start(void)
{
        char addr_string[16];
        int res;

        if (tipc_mode != TIPC_NODE_MODE)
                return -ENOPROTOOPT;

        tipc_mode = TIPC_NET_MODE;
        tipc_named_reinit();
        tipc_port_reinit();

        if ((res = tipc_bearer_init()) ||
            (res = net_init()) ||
            (res = tipc_cltr_init()) ||
            (res = tipc_bclink_init())) {
                return res;
        }

        tipc_k_signal((Handler)tipc_subscr_start, 0);
        tipc_k_signal((Handler)tipc_cfg_init, 0);
        info("Started in network mode\n");
        info("Own node address %s, network identity %u\n",
             addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
        return TIPC_OK;
}
void tipc_net_stop(void)
{
        if (tipc_mode != TIPC_NET_MODE)
                return;
        write_lock_bh(&tipc_net_lock);
        tipc_bearer_stop();
        tipc_mode = TIPC_NODE_MODE;
        tipc_bclink_stop();
        net_stop();
        write_unlock_bh(&tipc_net_lock);
        info("Left network mode\n");
}