/*
 * net/tipc/net.c: TIPC network routing code
 *
 * Copyright (c) 1995-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "net.h"
#include "name_distr.h"
#include "subscr.h"
#include "socket.h"
#include "node.h"
#include "bcast.h"
#include "link.h"
#include "netlink.h"
#include "monitor.h"
/*
 * The TIPC locking policy is designed to ensure a very fine locking
 * granularity, permitting complete parallel access to individual
 * port and node/link instances. The code consists of four major
 * locking domains, each protected by its own disjoint set of locks.
 *
 * 1: The bearer level.
 *    The RTNL lock serializes bearer configuration on the update side,
 *    while the RCU read lock is taken on the read side to keep bearer
 *    instances valid on both the transmission and reception paths (see
 *    the illustrative sketch after this comment).
 *
 * 2: The node and link level.
 *    All node instances are kept in two lists, tipc_node_list and
 *    node_htable. Both lists are protected by node_list_lock on the
 *    write side and guarded by the RCU read lock on the read side.
 *    Notably, a node instance is destroyed only when the TIPC module
 *    is removed, at which point it is certain that nobody is still
 *    accessing it. Therefore, except when iterating the two lists
 *    under RCU protection, there is no need to hold the RCU lock while
 *    accessing a node instance elsewhere.
 *
 *    In addition, all members of the node structure, including the
 *    link instances, are protected by the node spin lock.
 *
 * 3: The transport level of the protocol.
 *    This consists of the structures port (and its user level
 *    representations, such as user_port and tipc_sock), reference and
 *    tipc_user (port.c, reg.c, socket.c).
 *
 *    This layer has four different locks:
 *     - The tipc_port spin_lock. This protects each port instance
 *       from parallel data access and removal. Since we cannot place
 *       this lock in the port itself, it has been placed in the
 *       corresponding reference table entry, which has the same life
 *       cycle as the module. This entry is difficult to access from
 *       outside the TIPC core, however, so a pointer to the lock has
 *       been added to the port instance, to be used for unlocking
 *       only.
 *     - A read/write lock protecting the reference table itself (reg.c).
 *       (Nobody uses read-only access to this, so it could just as
 *       well be changed to a spin_lock.)
 *     - A spin lock protecting the registry of kernel/driver users (reg.c).
 *     - A global spin_lock (tipc_port_lock), whose only task is to ensure
 *       consistency where more than one port is involved in an operation,
 *       i.e., when a port is part of a linked list of ports.
 *       There are two such lists: 'port_list', which is used for management,
 *       and 'wait_list', which is used to queue ports during congestion.
 *
 * 4: The name table (name_table.c, name_distr.c, subscription.c)
 *     - One big read/write lock (tipc_nametbl_lock) protects the
 *       overall name table structure. Nothing may be added to or
 *       removed from this structure without holding write access to it.
 *     - One local spin_lock per sub_sequence, which can be seen as a
 *       sub-domain of the tipc_nametbl_lock domain. It is used only
 *       for translation operations, and is needed because a translation
 *       steps the root of the 'publication' linked list between each
 *       lookup. It is always taken within the scope of a
 *       tipc_nametbl_lock(read).
 *     - A local spin_lock protecting the queue of subscriber events.
 */
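/*
 * Illustrative sketch (not part of the original file): the bearer-level
 * read-side pattern described in domain 1 above. Under the RCU read
 * lock, a bearer fetched from the per-net bearer_list stays valid for
 * the whole critical section, while configuration changes made under
 * RTNL only free the instance after a grace period. Treat the exact
 * helper names and the send_msg() signature here as assumptions for
 * illustration rather than the precise in-tree API:
 *
 *	rcu_read_lock();
 *	b = rcu_dereference(tn->bearer_list[bearer_id]);
 *	if (b)
 *		b->media->send_msg(net, skb, b, dest);
 *	rcu_read_unlock();
 */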
static void tipc_net_finalize(struct net *net, u32 addr);
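
/*
 * tipc_net_init - set the node identity and, if a non-zero legacy
 * address is supplied, enter network mode right away. The identity can
 * only be configured once; a second attempt is rejected.
 */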
int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
{
	if (tipc_own_id(net)) {
		pr_info("Cannot configure node identity twice\n");
		return -1;
	}
	pr_info("Started in network mode\n");

	if (node_id)
		tipc_set_node_id(net, node_id);
	if (addr)
		tipc_net_finalize(net, addr);
	return 0;
}
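
/*
 * tipc_net_finalize - activate the node address. The cmpxchg() on
 * tn->node_addr makes this a one-shot operation: only the first caller
 * to move the address away from zero proceeds, so concurrent or
 * repeated finalization attempts are harmless no-ops.
 */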
static void tipc_net_finalize(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);

	if (cmpxchg(&tn->node_addr, 0, addr))
		return;
	tipc_set_node_addr(net, addr);
	tipc_named_reinit(net);
	tipc_sk_reinit(net);
	tipc_mon_reinit_self(net);
	tipc_nametbl_publish(net, TIPC_NODE_STATE, addr, addr,
			     TIPC_CLUSTER_SCOPE, 0, addr);
}
void tipc_net_finalize_work(struct work_struct *work)
{
	struct tipc_net_work *fwork;

	fwork = container_of(work, struct tipc_net_work, work);
	tipc_net_finalize(fwork->net, fwork->addr);
}
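
/*
 * tipc_sched_net_finalize - defer address finalization to a workqueue
 * by stashing the (net, addr) pair in the per-net tipc_net_work item,
 * which is then processed by tipc_net_finalize_work() above.
 */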
void tipc_sched_net_finalize(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);

	tn->final_work.net = net;
	tn->final_work.addr = addr;
	schedule_work(&tn->final_work.work);
}
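
/*
 * tipc_net_stop - leave network mode. A no-op unless a node identity
 * has been configured; bearer and node shutdown happen under RTNL.
 */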
void tipc_net_stop(struct net *net)
{
	if (!tipc_own_id(net))
		return;

	rtnl_lock();
	tipc_bearer_stop(net);
	tipc_node_stop(net);
	rtnl_unlock();

	pr_info("Left network mode\n");
}
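
/*
 * __tipc_nl_add_net - fill a netlink message with the net id and the
 * two 64-bit words of the 128-bit node identity, nested under
 * TIPC_NLA_NET. On failure the partially built attribute set and the
 * message header are cancelled and -EMSGSIZE is returned.
 */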
static int __tipc_nl_add_net(struct net *net, struct tipc_nl_msg *msg)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u64 *w0 = (u64 *)&tn->node_id[0];
	u64 *w1 = (u64 *)&tn->node_id[8];
	struct nlattr *attrs;
	void *hdr;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NET_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NET);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id))
		goto attr_msg_full;
	if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID, *w0, 0))
		goto attr_msg_full;
	if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID_W1, *w1, 0))
		goto attr_msg_full;
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int err;
	int done = cb->args[0];
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	err = __tipc_nl_add_net(net, &msg);
	if (err)
		goto out;

	done = 1;
out:
	cb->args[0] = done;

	return skb->len;
}
int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	int err;

	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
					  info->attrs[TIPC_NLA_NET],
					  tipc_nl_net_policy, info->extack);
	if (err)
		return err;

	/* Can't change net id once TIPC has joined a network */
	if (tipc_own_addr(net))
		return -EPERM;

	if (attrs[TIPC_NLA_NET_ID]) {
		u32 val;

		val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
		if (val < 1 || val > 9999)
			return -EINVAL;

		tn->net_id = val;
	}

	if (attrs[TIPC_NLA_NET_ADDR]) {
		u32 addr;

		addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
		if (!addr)
			return -EINVAL;
		tn->legacy_addr_format = true;
		tipc_net_init(net, NULL, addr);
	}

	if (attrs[TIPC_NLA_NET_NODEID]) {
		u8 node_id[NODE_ID_LEN];
		u64 *w0 = (u64 *)&node_id[0];
		u64 *w1 = (u64 *)&node_id[8];

		if (!attrs[TIPC_NLA_NET_NODEID_W1])
			return -EINVAL;
		*w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
		*w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
		tipc_net_init(net, node_id, 0);
	}
	return 0;
}
int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_net_set(skb, info);
	rtnl_unlock();

	return err;
}
static int __tipc_nl_addr_legacy_get(struct net *net, struct tipc_nl_msg *msg)
{
	struct tipc_net *tn = tipc_net(net);
	struct nlattr *attrs;
	void *hdr;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  0, TIPC_NL_ADDR_LEGACY_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_NET);
	if (!attrs)
		goto msg_full;

	if (tn->legacy_addr_format)
		if (nla_put_flag(msg->skb, TIPC_NLA_NET_ADDR_LEGACY))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
int tipc_nl_net_addr_legacy_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;
	struct sk_buff *rep;
	int err;

	rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	msg.skb = rep;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	err = __tipc_nl_addr_legacy_get(net, &msg);
	if (err) {
		nlmsg_free(msg.skb);
		return err;
	}

	return genlmsg_reply(msg.skb, info);
}