/*
 * net/tipc/discover.c
 *
 * Copyright (c) 2003-2006, Ericsson AB
 * Copyright (c) 2005-2006, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "dbg.h"
#include "link.h"
#include "zone.h"
#include "discover.h"
#include "port.h"
#include "name_table.h"

#define TIPC_LINK_REQ_INIT	125	/* min delay during bearer start up */
#define TIPC_LINK_REQ_FAST	2000	/* normal delay if bearer has no links */
#define TIPC_LINK_REQ_SLOW	600000	/* normal delay if bearer has links */
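
/*
 * Discovery requests start out at TIPC_LINK_REQ_INIT ms intervals while a
 * bearer is coming up, then back off (see disc_timeout()) until they settle
 * at TIPC_LINK_REQ_FAST (bearer has no links) or TIPC_LINK_REQ_SLOW (bearer
 * has at least one link).
 */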

#if 0
#define  GET_NODE_INFO         300
#define  GET_NODE_INFO_RESULT  301
#define  FORWARD_LINK_PROBE    302
#define  LINK_REQUEST_REJECTED 303
#define  LINK_REQUEST_ACCEPTED 304
#define  DROP_LINK_REQUEST     305
#define  CHECK_LINK_COUNT      306
#endif

/*
 * TODO: Most of the inter-cluster setup stuff should be
 *       rewritten, and be made conformant with specification.
 */

/**
 * struct link_req - information about an ongoing link setup request
 * @bearer: bearer issuing requests
 * @dest: destination address for request messages
 * @buf: request message to be (repeatedly) sent
 * @timer: timer governing period between requests
 * @timer_intv: current interval between requests (in ms)
 */
struct link_req {
	struct bearer *bearer;
	struct tipc_media_addr dest;
	struct sk_buff *buf;
	struct timer_list timer;
	unsigned int timer_intv;
};

#if 0
int disc_create_link(const struct tipc_link_create *argv)
{
	/*
	 * Code for inter cluster link setup here
	 */
	return TIPC_OK;
}
#endif

/*
 * tipc_disc_link_event - a link to a neighboring node has come up or gone down
 */

void tipc_disc_link_event(u32 addr, char *name, int up)
{
	if (in_own_cluster(addr))
		return;
	/*
	 * Code for inter cluster link setup here
	 */
}

/**
 * tipc_disc_init_msg - initialize a link setup message
 * @type: message type (request or response)
 * @req_links: number of links associated with message
 * @dest_domain: network domain of node(s) which should respond to message
 * @b_ptr: ptr to bearer issuing message
 */

static struct sk_buff *tipc_disc_init_msg(u32 type,
					  u32 req_links,
					  u32 dest_domain,
					  struct bearer *b_ptr)
{
	struct sk_buff *buf = buf_acquire(DSC_H_SIZE);
	struct tipc_msg *msg;

	if (buf) {
		msg = buf_msg(buf);
		msg_init(msg, LINK_CONFIG, type, TIPC_OK, DSC_H_SIZE,
			 dest_domain);
		msg_set_non_seq(msg);
		msg_set_req_links(msg, req_links);
		msg_set_dest_domain(msg, dest_domain);
		msg_set_bc_netid(msg, tipc_net_id);
		msg_set_media_addr(msg, &b_ptr->publ.addr);
	}
	return buf;
}

/**
 * disc_dupl_alert - issue node address duplication alert
 * @b_ptr: pointer to bearer detecting duplication
 * @node_addr: duplicated node address
 * @media_addr: media address advertised by duplicated node
 */

static void disc_dupl_alert(struct bearer *b_ptr, u32 node_addr,
			    struct tipc_media_addr *media_addr)
{
	char node_addr_str[16];
	char media_addr_str[64];
	struct print_buf pb;

	addr_string_fill(node_addr_str, node_addr);
	tipc_printbuf_init(&pb, media_addr_str, sizeof(media_addr_str));
	tipc_media_addr_printf(&pb, media_addr);
	tipc_printbuf_validate(&pb);
	warn("Duplicate %s using %s seen on <%s>\n",
	     node_addr_str, media_addr_str, b_ptr->publ.name);
}

/**
 * tipc_disc_recv_msg - handle incoming link setup message (request or response)
 * @buf: buffer containing message
 */

void tipc_disc_recv_msg(struct sk_buff *buf)
{
	struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
	struct link *link;
	struct tipc_media_addr media_addr;
	struct tipc_msg *msg = buf_msg(buf);
	u32 dest = msg_dest_domain(msg);
	u32 orig = msg_prevnode(msg);
	u32 net_id = msg_bc_netid(msg);
	u32 type = msg_type(msg);

	msg_get_media_addr(msg, &media_addr);
	msg_dbg(msg, "RECV:");
	buf_discard(buf);
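
	/*
	 * Validate the message: ignore it if it comes from another TIPC
	 * network, carries invalid addresses, or falls outside the scope
	 * of this node; a message echoing our own address triggers a
	 * duplicate-address alert instead.
	 */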
	if (net_id != tipc_net_id)
		return;
	if (!tipc_addr_domain_valid(dest))
		return;
	if (!tipc_addr_node_valid(orig))
		return;
	if (orig == tipc_own_addr) {
		if (memcmp(&media_addr, &b_ptr->publ.addr, sizeof(media_addr)))
			disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
		return;
	}
	if (!in_scope(dest, tipc_own_addr))
		return;
	if (is_slave(tipc_own_addr) && is_slave(orig))
		return;
	if (is_slave(orig) && !in_own_cluster(orig))
		return;
	if (in_own_cluster(orig)) {
		/* Always accept link here */
		struct sk_buff *rbuf;
		struct tipc_media_addr *addr;
		struct node *n_ptr = tipc_node_find(orig);
		int link_fully_up;

		dbg(" in own cluster\n");
		if (n_ptr == NULL) {
			n_ptr = tipc_node_create(orig);
		}
		if (n_ptr == NULL) {
			return;
		}
		spin_lock_bh(&n_ptr->lock);
		link = n_ptr->links[b_ptr->identity];
		if (!link) {
			dbg("creating link\n");
			link = tipc_link_create(b_ptr, orig, &media_addr);
			if (!link) {
				spin_unlock_bh(&n_ptr->lock);
				return;
			}
		}
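
		/* React if the peer now advertises a different media address */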
		addr = &link->media_addr;
		if (memcmp(addr, &media_addr, sizeof(*addr))) {
			if (tipc_link_is_up(link) || (!link->started)) {
				disc_dupl_alert(b_ptr, orig, &media_addr);
				spin_unlock_bh(&n_ptr->lock);
				return;
			}
			warn("Resetting link <%s>, peer interface address changed\n",
			     link->name);
			memcpy(addr, &media_addr, sizeof(*addr));
			tipc_link_reset(link);
		}
		link_fully_up = (link->state == WORKING_WORKING);
		spin_unlock_bh(&n_ptr->lock);
		if ((type == DSC_RESP_MSG) || link_fully_up)
			return;
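
		/* Reply to the discovery request on the bearer it arrived on */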
		rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
		if (rbuf != NULL) {
			msg_dbg(buf_msg(rbuf), "SEND:");
			b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
			buf_discard(rbuf);
		}
	}
}

/**
 * tipc_disc_stop_link_req - stop sending periodic link setup requests
 * @req: ptr to link request structure
 */

void tipc_disc_stop_link_req(struct link_req *req)
{
	if (!req)
		return;

	k_cancel_timer(&req->timer);
	k_term_timer(&req->timer);
	buf_discard(req->buf);
	kfree(req);
}

/**
 * tipc_disc_update_link_req - update frequency of periodic link setup requests
 * @req: ptr to link request structure
 */

void tipc_disc_update_link_req(struct link_req *req)
{
	if (!req)
		return;

	if (req->timer_intv == TIPC_LINK_REQ_SLOW) {
		if (!req->bearer->nodes.count) {
			req->timer_intv = TIPC_LINK_REQ_FAST;
			k_start_timer(&req->timer, req->timer_intv);
		}
	} else if (req->timer_intv == TIPC_LINK_REQ_FAST) {
		if (req->bearer->nodes.count) {
			req->timer_intv = TIPC_LINK_REQ_SLOW;
			k_start_timer(&req->timer, req->timer_intv);
		}
	} else {
		/* leave timer "as is" if haven't yet reached a "normal" rate */
	}
}

/**
 * disc_timeout - send a periodic link setup request
 * @req: ptr to link request structure
 *
 * Called whenever a link setup request timer associated with a bearer expires.
 */

static void disc_timeout(struct link_req *req)
{
	spin_lock_bh(&req->bearer->publ.lock);

	req->bearer->media->send_msg(req->buf, &req->bearer->publ, &req->dest);
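
	/* Back off exponentially until a steady request rate is reached */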
	if ((req->timer_intv == TIPC_LINK_REQ_SLOW) ||
	    (req->timer_intv == TIPC_LINK_REQ_FAST)) {
		/* leave timer interval "as is" if already at a "normal" rate */
	} else {
		req->timer_intv *= 2;
		if (req->timer_intv > TIPC_LINK_REQ_FAST)
			req->timer_intv = TIPC_LINK_REQ_FAST;
		if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
		    (req->bearer->nodes.count))
			req->timer_intv = TIPC_LINK_REQ_SLOW;
	}
	k_start_timer(&req->timer, req->timer_intv);

	spin_unlock_bh(&req->bearer->publ.lock);
}

/**
 * tipc_disc_init_link_req - start sending periodic link setup requests
 * @b_ptr: ptr to bearer issuing requests
 * @dest: destination address for request messages
 * @dest_domain: network domain of node(s) which should respond to message
 * @req_links: max number of desired links
 *
 * Returns pointer to link request structure, or NULL if unable to create.
 */

struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
					 const struct tipc_media_addr *dest,
					 u32 dest_domain,
					 u32 req_links)
{
	struct link_req *req;

	req = kmalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return NULL;

	req->buf = tipc_disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
	if (!req->buf) {
		kfree(req);
		return NULL;
	}

	memcpy(&req->dest, dest, sizeof(*dest));
	req->bearer = b_ptr;
	req->timer_intv = TIPC_LINK_REQ_INIT;
	k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
	k_start_timer(&req->timer, req->timer_intv);
	return req;
}