kernel message sending bugfixes, connection specific timeouts
[cor_2_6_31.git] / net / cor / common.c
blobaf041be4fce3c268517f2a6ed6a83a706a3e2e5f
1 /*
2 * Connection oriented routing
3 * Copyright (C) 2007-2008 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
21 #include <linux/mutex.h>
23 #include "cor.h"
25 DEFINE_MUTEX(cor_bindnodes);
26 DEFINE_MUTEX(conn_free);
28 DEFINE_MUTEX(connid_gen);
30 LIST_HEAD(openports);
33 struct cell_hdr{
34 spinlock_t lock;
37 struct kmem_cache *conn_slab;
39 struct htable connid_table;
41 struct kmem_cache *bindnode_slab;
42 struct kmem_cache *connlistener_slab;
44 static inline int hdr_size(void)
46 return ((sizeof(struct cell_hdr) + sizeof(void *) - 1) / sizeof(void *)
47 ) * sizeof(void *);
/* Number of element slots that fit in one cell after the header. */
static inline int elements_per_cell(int cell_size)
{
	int usable = cell_size - hdr_size();

	return usable / sizeof(void *);
}
55 static inline struct cell_hdr *cell_addr(struct htable *ht, __u32 id)
57 int idx = (id%ht->htable_size) / (elements_per_cell(ht->cell_size));
58 return (struct cell_hdr *) (((char *)ht->htable) + ht->cell_size * idx);
61 static inline char **element_addr(struct htable *ht, __u32 id)
63 int idx = (id%ht->htable_size) % (elements_per_cell(ht->cell_size));
64 return (char **)
65 ( ((char *)cell_addr(ht, id)) +
66 hdr_size() + idx*sizeof(void *) );
70 static inline char **next_element(struct htable *ht, char *element)
72 return (char **)(element + ht->entry_offset);
75 static inline struct kref *element_kref(struct htable *ht, char *element)
77 return (struct kref *)(element + ht->kref_offset);
81 static inline void unlock_element(struct htable *ht, __u32 key)
83 struct cell_hdr *hdr = cell_addr(ht, key);
84 spin_unlock( &(hdr->lock) );
88 static char **get_element_nounlock(struct htable *ht, __u32 key,
89 void *searcheditem)
91 struct cell_hdr *hdr = cell_addr(ht, key);
92 char **element = element_addr(ht, key);
94 BUG_ON(0 == element);
96 spin_lock( &(hdr->lock) );
98 while (1) {
99 if (*element == 0)
100 break;
101 if (searcheditem != 0 && ht->matches(*element, searcheditem))
102 break;
103 element = next_element(ht, *element);
106 return element;
109 char *htable_get(struct htable *ht, __u32 key, void *searcheditem)
111 unsigned long iflags;
112 char *element;
114 if (ht->htable == 0)
115 return 0;
117 local_irq_save(iflags);
118 element = *(get_element_nounlock(ht, key, searcheditem));
119 if (element != 0)
120 kref_get(element_kref(ht, element));
121 unlock_element(ht, key);
122 local_irq_restore(iflags);
124 return element;
127 int htable_delete(struct htable *ht, __u32 key,
128 void *searcheditem, void (*free) (struct kref *ref))
130 unsigned long iflags;
131 char **element;
132 char **next;
133 int rc = 0;
135 if (ht->htable == 0)
136 return 1;
138 local_irq_save(iflags);
140 element = get_element_nounlock(ht, key, searcheditem);
141 BUG_ON(0 == element);
143 if (0 == *element) {
144 /* key not in table */
145 rc = 1;
146 goto out;
149 next = next_element(ht, *element);
150 kref_put(element_kref(ht, *element), free);
151 *element = *next;
153 out:
154 unlock_element(ht, key);
155 local_irq_restore(iflags);
157 return rc;
160 void htable_insert(struct htable *ht, char *newelement, __u32 key)
162 unsigned long iflags;
163 char **element;
165 if (ht->htable == 0)
166 return;
168 BUG_ON(*next_element(ht, newelement) != 0);
169 local_irq_save(iflags);
171 element = get_element_nounlock(ht, key, 0);
173 BUG_ON(element == 0);
174 BUG_ON(*element != 0);
176 *element = newelement;
177 kref_get(element_kref(ht, newelement));
179 unlock_element(ht, key);
180 local_irq_restore(iflags);
184 void htable_init(struct htable *ht, int (*matches)(void *htentry,
185 void *searcheditem), __u32 entry_offset, __u32 kref_offset)
187 int num_cells;
188 int j;
190 BUG_ON(0 == ht);
192 ht->htable = kmalloc(PAGE_SIZE, GFP_KERNEL);
193 if (ht->htable == 0) {
194 printk(KERN_CRIT "cor: error allocating htable (out of "
195 "memory?)");
196 return;
198 memset(ht->htable, 0, PAGE_SIZE);
199 ht->cell_size = 256;
201 num_cells = PAGE_SIZE/ht->cell_size;
203 for (j=0;j<num_cells;j++) {
204 struct cell_hdr *hdr = (struct cell_hdr *)
205 ( ((char *) ht->htable) + j * ht->cell_size );
206 spin_lock_init(&(hdr->lock));
209 ht->htable_size = num_cells * elements_per_cell(ht->cell_size);
210 ht->num_elements = 0;
212 ht->matches = matches;
213 ht->entry_offset = entry_offset;
214 ht->kref_offset = kref_offset;
217 struct conn *get_conn(__u32 conn_id)
219 return (struct conn *) htable_get(&connid_table, conn_id, &conn_id);
222 static int connid_alloc(struct conn *sconn)
224 __u32 conn_id;
225 int i;
227 BUG_ON(sconn->sourcetype != SOURCE_IN);
229 mutex_lock(&connid_gen);
230 for(i=0;i<16;i++) {
231 struct conn *tmp;
233 conn_id = 0;
234 get_random_bytes((char *) &conn_id, sizeof(conn_id));
236 if (conn_id == 0)
237 continue;
239 tmp = get_conn(conn_id);
240 if (tmp != 0) {
241 kref_put(&(tmp->ref), free_conn);
242 continue;
245 goto found;
247 mutex_unlock(&connid_gen);
249 return 1;
251 found:
252 sconn->source.in.conn_id = conn_id;
253 htable_insert(&connid_table, (char *) sconn, conn_id);
254 mutex_unlock(&connid_gen);
255 return 0;
258 void free_conn(struct kref *ref)
260 struct conn *conn = container_of(ref, struct conn, ref);
262 BUG_ON(conn->isreset == 0);
264 mutex_lock(&conn_free);
266 if (conn->isreset != 3) {
267 conn->reversedir->isreset = 3;
268 goto out;
271 if (conn->reversedir != 0) {
272 conn->reversedir->reversedir = 0;
273 free_conn(&(conn->reversedir->ref));
274 conn->reversedir = 0;
277 if (conn->sourcetype == SOURCE_IN) {
278 kref_put(&(conn->source.in.nb->ref), neighbor_free);
279 conn->source.in.nb = 0;
282 if (conn->targettype == TARGET_OUT) {
283 kref_put(&(conn->target.out.nb->ref), neighbor_free);
284 conn->target.out.nb = 0;
287 databuf_free(&(conn->buf));
289 kmem_cache_free(conn_slab, conn);
291 out:
292 mutex_unlock(&conn_free);
296 * rconn ==> the connection we received the commend from
297 * ==> init rconn->target.out + rconn->reversedir->source.in
298 * rc == 0 ==> ok
299 * rc == 1 ==> connid allocation failed
301 int conn_init_out(struct conn *rconn, struct neighbor *nb)
303 struct conn *sconn = rconn->reversedir;
305 __u32 stall_timeout_ms = rconn->target.unconnected.stall_timeout_ms;
307 BUG_ON(rconn->targettype != TARGET_UNCONNECTED);
308 BUG_ON(sconn == 0);
309 BUG_ON(sconn->sourcetype != SOURCE_NONE);
311 memset(&(rconn->target.out), 0, sizeof(rconn->target.out));
312 memset(&(sconn->source.in), 0, sizeof(sconn->source.in));
314 rconn->targettype = TARGET_OUT;
315 sconn->sourcetype = SOURCE_IN;
317 rconn->target.out.nb = nb;
318 sconn->source.in.nb = nb;
320 rconn->target.out.stall_timeout_ms = stall_timeout_ms;
321 skb_queue_head_init(&(sconn->source.in.reorder_queue));
324 * connid_alloc has to be called last, because packets may be received
325 * immediately after its execution
327 if (connid_alloc(sconn))
328 return 1;
330 mutex_lock(&(nb->conn_list_lock));
331 list_add_tail(&(sconn->source.in.nb_list), &(nb->rcv_conn_list));
332 list_add_tail(&(rconn->target.out.nb_list), &(nb->snd_conn_list));
333 mutex_unlock(&(nb->conn_list_lock));
335 /* neighbor lists */
336 kref_get(&(rconn->ref));
337 kref_get(&(sconn->ref));
339 return 0;
342 void conn_init_sock_source(struct conn *conn)
344 BUG_ON(conn == 0);
345 conn->sourcetype = SOURCE_SOCK;
346 memset(&(conn->source.sock), 0, sizeof(conn->source.sock));
347 init_waitqueue_head(&(conn->source.sock.wait));
350 void conn_init_sock_target(struct conn *conn)
352 BUG_ON(conn == 0);
353 conn->targettype = TARGET_SOCK;
354 memset(&(conn->target.sock), 0, sizeof(conn->target.sock));
355 init_waitqueue_head(&(conn->target.sock.wait));
358 struct conn* alloc_conn(gfp_t allocflags)
360 struct conn *rconn = 0;
361 struct conn *sconn = 0;
363 rconn = kmem_cache_alloc(conn_slab, allocflags);
364 if (unlikely(0 == rconn))
365 goto out_err0;
367 sconn = kmem_cache_alloc(conn_slab, allocflags);
368 if (unlikely(0 == sconn))
369 goto out_err1;
371 memset(rconn, 0, sizeof(struct conn));
372 memset(sconn, 0, sizeof(struct conn));
374 rconn->reversedir = sconn;
375 sconn->reversedir = rconn;
377 kref_init(&(rconn->ref));
378 kref_init(&(sconn->ref));
380 mutex_init(&(rconn->rcv_lock));
381 mutex_init(&(sconn->rcv_lock));
383 rconn->sockstate = SOCKSTATE_CONN;
384 sconn->sockstate = SOCKSTATE_CONN;
386 databuf_init(&(rconn->buf));
387 databuf_init(&(sconn->buf));
389 rconn->sourcetype = SOURCE_NONE;
390 sconn->sourcetype = SOURCE_NONE;
391 rconn->targettype = TARGET_UNCONNECTED;
392 sconn->targettype = TARGET_UNCONNECTED;
394 rconn->target.unconnected.stall_timeout_ms =
395 CONN_STALL_DEFAULT_TIMEOUT_MS;
396 sconn->target.unconnected.stall_timeout_ms =
397 CONN_STALL_DEFAULT_TIMEOUT_MS;
399 return rconn;
401 out_err1:
402 kmem_cache_free(conn_slab, rconn);
403 out_err0:
404 return 0;
407 static struct connlistener *get_connlistener(__be64 port)
409 struct list_head *curr = openports.next;
411 while (curr != &openports) {
412 struct bindnode *currnode = ((struct bindnode *)
413 (((char *)curr) - offsetof(struct bindnode, lh)));
414 if (currnode->port == port) {
415 BUG_ON(currnode->owner == 0);
416 return currnode->owner;
419 curr = curr->next;
422 return 0;
425 void close_port(struct connlistener *listener)
427 mutex_lock(&cor_bindnodes);
429 if (listener->bn != 0) {
430 list_del(&(listener->bn->lh));
431 kmem_cache_free(bindnode_slab, listener->bn);
432 listener->bn = 0;
435 while (list_empty(&(listener->conn_queue)) == 0) {
436 struct conn *rconn = container_of(listener->conn_queue.next,
437 struct conn, source.sock.cl_list);
438 rconn->reversedir->isreset = 1;
439 reset_conn(rconn);
440 list_del(&(rconn->source.sock.cl_list));
441 kref_put(&(rconn->ref), free_conn);
444 kmem_cache_free(connlistener_slab, listener);
446 mutex_unlock(&cor_bindnodes);
449 struct connlistener *open_port(__be64 port)
452 struct bindnode *bn = 0;
453 struct connlistener *listener = 0;
455 mutex_lock(&cor_bindnodes);
456 if (get_connlistener(port) != 0)
457 goto out;
460 bn = kmem_cache_alloc(bindnode_slab, GFP_KERNEL);
461 listener = kmem_cache_alloc(connlistener_slab, GFP_KERNEL);
463 memset(bn, 0, sizeof(struct bindnode));
464 memset(listener, 0, sizeof(struct connlistener));
466 bn->owner = listener;
467 bn->port = port;
469 /* kref is not actually used */
470 listener->sockstate = SOCKSTATE_LISTENER;
471 listener->bn = bn;
472 mutex_init(&(listener->lock));
473 INIT_LIST_HEAD(&(listener->conn_queue));
474 init_waitqueue_head(&(listener->wait));
476 list_add_tail((struct list_head *) &(bn->lh), &openports);
478 out:
479 mutex_unlock(&cor_bindnodes);
481 return listener;
485 * rc == 0 connected
486 * rc == 2 port not open
487 * rc == 3 listener queue full
489 int connect_port(struct conn *rconn, __be64 port)
492 struct connlistener *listener;
493 int rc = 0;
495 mutex_lock(&cor_bindnodes);
497 listener = get_connlistener(port);
498 if (listener == 0) {
499 rc = 2;
500 goto out;
503 mutex_lock(&(listener->lock));
505 if (listener->queue_len >= listener->queue_maxlen) {
506 if (listener->queue_maxlen <= 0)
507 rc = 2;
508 else
509 rc = 3;
511 goto out2;
514 kref_get(&(rconn->reversedir->ref));
516 conn_init_sock_target(rconn);
517 conn_init_sock_source(rconn->reversedir);
519 list_add_tail(&(rconn->reversedir->source.sock.cl_list),
520 &(listener->conn_queue));
521 listener->queue_len++;
522 wake_up_interruptible(&(listener->wait));
524 out2:
525 mutex_unlock(&(listener->lock));
527 out:
528 mutex_unlock(&cor_bindnodes);
529 return rc;
533 * rc == 0 connected
534 * rc == 2 addrtype not found
535 * rc == 3 addr not found
536 * rc == 4 ==> connid allocation failed
537 * rc == 5 ==> control msg alloc failed
539 int connect_neigh(struct conn *rconn,
540 __u16 addrtypelen, __u8 *addrtype,
541 __u16 addrlen, __u8 *addr)
543 int rc = 0;
544 struct control_msg_out *cm;
545 struct neighbor *nb = find_neigh(addrtypelen, addrtype, addrlen, addr);
546 if (nb == 0)
547 return 3;
548 if (conn_init_out(rconn, nb)) {
549 rc = 4;
550 goto neigh_kref;
553 cm = alloc_control_msg();
554 if (unlikely(cm == 0)) {
555 rc = 5;
556 goto neigh_kref;
559 send_connect_nb(cm, nb, rconn->reversedir->source.in.conn_id);
561 if (0) {
562 neigh_kref:
563 kref_put(&(nb->ref), neighbor_free);
566 return rc;
569 static void _reset_conn(struct conn *conn)
571 if (conn->isreset == 1)
572 goto free;
574 if (conn->isreset == 2 || conn->isreset == 3)
575 return;
577 if (conn->targettype == TARGET_OUT && conn->target.out.conn_id != 0) {
578 struct control_msg_out *cm = alloc_control_msg();
579 if (likely(cm != 0))
580 send_reset_conn(cm, conn->target.out.nb,
581 conn->target.out.conn_id);
584 free:
586 if (conn->sourcetype == SOURCE_IN) {
587 mutex_lock(&(conn->source.in.nb->conn_list_lock));
588 list_del(&(conn->source.in.nb_list));
589 mutex_unlock(&(conn->source.in.nb->conn_list_lock));
591 kref_put(&(conn->ref), free_conn);
593 if (conn->source.in.conn_id != 0)
594 htable_delete(&connid_table, conn->source.in.conn_id,
595 &(conn->source.in.conn_id), free_conn);
598 if (conn->targettype == TARGET_OUT) {
599 mutex_lock(&(conn->target.out.nb->conn_list_lock));
600 list_del(&(conn->target.out.nb_list));
601 mutex_unlock(&(conn->target.out.nb->conn_list_lock));
602 kref_put(&(conn->ref), free_conn);
605 conn->isreset = 2;
608 /* warning: do not hold the rcv_lock while calling this! */
609 void reset_conn(struct conn *conn)
611 printk(KERN_ERR "reset_conn");
612 _reset_conn(conn);
613 _reset_conn(conn->reversedir);
616 static int matches_connid_in(void *htentry, void *searcheditem)
618 struct conn *conn = (struct conn *) htentry;
619 __u32 conn_id = *((__u32 *) searcheditem);
620 BUG_ON(conn->sourcetype != SOURCE_IN);
621 return (conn->source.in.conn_id == conn_id);
624 static int __init cor_common_init(void)
626 int rc;
628 struct conn c;
630 printk(KERN_ERR "sizeof conn: %d", sizeof(c));
631 printk(KERN_ERR " conn.source: %d", sizeof(c.source));
632 printk(KERN_ERR " conn.target: %d", sizeof(c.target));
633 printk(KERN_ERR " conn.target.out: %d", sizeof(c.target.out));
634 printk(KERN_ERR " conn.buf: %d", sizeof(c.buf));
636 printk(KERN_ERR " mutex: %d", sizeof(struct mutex));
637 printk(KERN_ERR " spinlock: %d", sizeof(spinlock_t));
638 printk(KERN_ERR " kref: %d", sizeof(struct kref));
641 conn_slab = kmem_cache_create("cor_conn", sizeof(struct conn), 8, 0, 0);
642 htable_init(&connid_table, matches_connid_in,
643 offsetof(struct conn, source.in.htab_entry),
644 offsetof(struct conn, ref));
646 bindnode_slab = kmem_cache_create("cor_bindnode",
647 sizeof(struct bindnode), 8, 0, 0);
648 connlistener_slab = kmem_cache_create("cor_connlistener",
649 sizeof(struct connlistener), 8, 0, 0);
651 forward_init();
653 rc = cor_snd_init();
654 if (rc != 0)
655 return rc;
657 rc = cor_neighbor_init();
658 if (rc != 0)
659 return rc;
661 rc = cor_rcv_init();
662 if (rc != 0)
663 return rc;
665 return 0;
668 module_init(cor_common_init);
669 MODULE_LICENSE("GPL");