/*
 * net/tipc/net.c: TIPC network routing code
 *
 * Copyright (c) 1995-2006, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "net.h"
#include "name_distr.h"
#include "subscr.h"
#include "port.h"
#include "socket.h"
#include "node.h"
#include "config.h"
/*
 * The TIPC locking policy is designed to ensure a very fine locking
 * granularity, permitting complete parallel access to individual
 * port and node/link instances. The code consists of four major
 * locking domains, each protected by its own disjoint set of locks.
 *
 * 1: The bearer level.
 *    The RTNL lock serializes bearer configuration on the update side,
 *    while the RCU read lock keeps bearer instances valid on the read
 *    side, i.e. on both the transmission and the reception path.
 *
 * 2: The node and link level.
 *    All node instances are kept in two lists, tipc_node_list and
 *    node_htable. Both lists are protected by node_list_lock on the
 *    write side and by RCU on the read side. A node instance is only
 *    destroyed when the TIPC module is removed, at which point no user
 *    can still be accessing it. Therefore, apart from iterating the two
 *    lists under RCU protection, there is no need to hold the RCU read
 *    lock when accessing a node instance elsewhere.
 *
 *    In addition, all members of the node structure, including its link
 *    instances, are protected by the node spin lock.
 *    (A short read-side sketch for domains 1 and 2 follows this comment.)
 *
 * 3: The transport level of the protocol.
 *    This consists of the structures port (and its user-level
 *    representations, such as user_port and tipc_sock), reference and
 *    tipc_user (port.c, reg.c, socket.c).
 *
 *    This layer has four different locks:
 *     - The tipc_port spin_lock. This protects each port instance
 *       from parallel data access and removal. Since we cannot place
 *       this lock in the port itself, it has been placed in the
 *       corresponding reference table entry, which has the same life
 *       cycle as the module. This entry is difficult to access from
 *       outside the TIPC core, however, so a pointer to the lock has
 *       been added in the port instance, to be used for unlocking only.
 *     - A read/write lock protecting the reference table itself (ref.c).
 *       (Nobody uses read-only access to this, so it could just as
 *       well be changed to a spin_lock.)
 *     - A spin lock protecting the registry of kernel/driver users (reg.c).
 *     - A global spin_lock (tipc_port_lock), whose only task is to ensure
 *       consistency where more than one port is involved in an operation,
 *       i.e., when a port is part of a linked list of ports.
 *       There are two such lists: 'port_list', which is used for management,
 *       and 'wait_list', which is used to queue ports during congestion.
 *
 * 4: The name table (name_table.c, name_distr.c, subscription.c)
 *     - One big read/write lock (tipc_nametbl_lock) protects the overall
 *       name table structure. Nothing may be added to or removed from
 *       this structure without holding write access to it.
 *     - One local spin_lock per sub_sequence, which can be seen as a
 *       sub-domain of the tipc_nametbl_lock domain. It is used only for
 *       translation operations, and is needed because a translation steps
 *       (advances) the root of the 'publication' linked list between each
 *       lookup. It is always taken within the scope of tipc_nametbl_lock(read).
 *     - A local spin_lock protecting the queue of subscriber events.
 */
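/*
 * Illustrative sketch (not part of the original TIPC code): a minimal
 * example of the read-side pattern described for locking domains 1 and 2
 * above. The helper below is hypothetical; bearer_list, MAX_BEARERS,
 * struct tipc_bearer and struct tipc_node refer to the real definitions
 * in bearer.h and node.h, but the exact fields touched here are only
 * assumptions made for illustration.
 *
 *	static void example_read_side(struct tipc_node *node)
 *	{
 *		struct tipc_bearer *b;
 *		u32 i;
 *
 *		// Domain 1: bearers may only be dereferenced under RCU
 *		rcu_read_lock();
 *		for (i = 0; i < MAX_BEARERS; i++) {
 *			b = rcu_dereference(bearer_list[i]);
 *			if (b)
 *				pr_info("bearer %s enabled\n", b->name);
 *		}
 *		rcu_read_unlock();
 *
 *		// Domain 2: node state, including links, changes under node->lock
 *		spin_lock_bh(&node->lock);
 *		node->working_links = 0;
 *		spin_unlock_bh(&node->lock);
 *	}
 */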
static void net_route_named_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 dnode;
	u32 dport;

	if (!msg_named(msg)) {
		kfree_skb(buf);
		return;
	}

	dnode = addr_domain(msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
	if (dport) {
		msg_set_destnode(msg, dnode);
		msg_set_destport(msg, dport);
		tipc_net_route_msg(buf);
		return;
	}
	tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
}
void tipc_net_route_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg;
	u32 dnode;

	if (!buf)
		return;
	msg = buf_msg(buf);

	/* Handle message for this node */
	dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
	if (tipc_in_scope(dnode, tipc_own_addr)) {
		if (msg_isdata(msg)) {
			if (msg_mcast(msg))
				tipc_port_mcast_rcv(buf, NULL);
			else if (msg_destport(msg))
				tipc_sk_rcv(buf);
			else
				net_route_named_msg(buf);
			return;
		}
		switch (msg_user(msg)) {
		case NAME_DISTRIBUTOR:
			tipc_named_rcv(buf);
			break;
		case CONN_MANAGER:
			tipc_port_proto_rcv(buf);
			break;
		default:
			kfree_skb(buf);
		}
		return;
	}

	/* Handle message for another node */
	skb_trim(buf, msg_size(msg));
	tipc_link_xmit(buf, dnode, msg_link_selector(msg));
}
int tipc_net_start(u32 addr)
{
	char addr_string[16];
	int res;

	tipc_own_addr = addr;
	tipc_named_reinit();
	tipc_port_reinit();
	res = tipc_bclink_init();
	if (res)
		return res;

	tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr,
			     TIPC_ZONE_SCOPE, 0, tipc_own_addr);

	pr_info("Started in network mode\n");
	pr_info("Own node address %s, network identity %u\n",
		tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
	return 0;
}
void tipc_net_stop(void)
{
	if (!tipc_own_addr)
		return;

	tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr);
	rtnl_lock();
	tipc_bearer_stop();
	tipc_bclink_stop();
	tipc_node_stop();
	rtnl_unlock();

	pr_info("Left network mode\n");
}