rcv reorder queue bugfix
[cor_2_6_31.git] / net / cor / common.c
blob 69b1cb1da877df1f9bad6cd06baf4e66c614e916
/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>

#include "cor.h"
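
/*
 * Lock roles, as inferred from their users in this file: cor_bindnodes
 * protects openports and the bindnodes, conn_free serializes free_conn(),
 * connid_gen serializes connection id allocation.
 */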

DEFINE_MUTEX(cor_bindnodes);
DEFINE_MUTEX(conn_free);

DEFINE_MUTEX(connid_gen);

LIST_HEAD(openports);

struct cell_hdr {
	spinlock_t lock;
};

struct kmem_cache *conn_slab;

struct htable connid_table;

struct kmem_cache *bindnode_slab;
struct kmem_cache *connlistener_slab;

void ref_counter_decr(struct ref_counter *cnt)
{
	unsigned long iflags;

	BUG_ON(0 == cnt);
	BUG_ON(0 == cnt->def);

	spin_lock_irqsave(&(cnt->lock), iflags);
	cnt->refs--;
	if (unlikely(cnt->refs == 0)) {
		/* nobody else holds a reference anymore, so it is safe to
		 * drop the embedded lock before the object is freed */
		spin_unlock_irqrestore(&(cnt->lock), iflags);
		cnt->def->free(cnt);
		return;
	}
	spin_unlock_irqrestore(&(cnt->lock), iflags);
}

int ref_counter_incr(struct ref_counter *cnt)
{
	unsigned long iflags;

	spin_lock_irqsave(&(cnt->lock), iflags);
	cnt->refs++;
	spin_unlock_irqrestore(&(cnt->lock), iflags);

	return 0;
}

void ref_counter_init(struct ref_counter *cnt, struct ref_counter_def *def)
{
	BUG_ON(0 == cnt);
	BUG_ON(0 == def);

	spin_lock_init(&(cnt->lock));
	cnt->refs = 1;
	cnt->def = def;
}
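
/*
 * Hash table layout: ht->htable is a single page divided into cells of
 * ht->cell_size bytes. Each cell starts with a struct cell_hdr (the
 * per-cell lock, padded up to pointer alignment by hdr_size()), followed
 * by an array of bucket head pointers. Entries of one bucket are chained
 * through a pointer embedded at ht->entry_offset inside each entry.
 * As an example (actual numbers depend on sizeof(spinlock_t) and
 * PAGE_SIZE): with 64-bit pointers, an 8-byte header and cell_size 256,
 * one cell holds (256 - 8) / 8 = 31 buckets, so a 4096-byte page gives
 * 16 * 31 = 496 buckets.
 */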

static inline int hdr_size(void)
{
	return ((sizeof(struct cell_hdr) + sizeof(void *) - 1) / sizeof(void *)
			) * sizeof(void *);
}

static inline int elements_per_cell(int cell_size)
{
	return (cell_size - hdr_size())/sizeof(void *);
}

static inline struct cell_hdr *cell_addr(struct htable *ht, __u32 id)
{
	int idx = (id%ht->htable_size) / (elements_per_cell(ht->cell_size));
	return (struct cell_hdr *) (((char *)ht->htable) + ht->cell_size * idx);
}

static inline char **element_addr(struct htable *ht, __u32 id)
{
	int idx = (id%ht->htable_size) % (elements_per_cell(ht->cell_size));
	return (char **)
			( ((char *)cell_addr(ht, id)) +
			hdr_size() + idx*sizeof(void *) );
}

static inline char **next_element(struct htable *ht, char *element)
{
	return (char **)(element + ht->entry_offset);
}

static inline struct ref_counter *element_refcnt(struct htable *ht,
		char *element)
{
	return (struct ref_counter *)(element + ht->ref_counter_offset);
}

static inline void unlock_element(struct htable *ht, __u32 key)
{
	struct cell_hdr *hdr = cell_addr(ht, key);
	spin_unlock( &(hdr->lock) );
}

/* returns the slot that holds (or would hold) the searched item; the
 * cell lock is taken and must be dropped with unlock_element() */
static char **get_element_nounlock(struct htable *ht, __u32 key,
		void *searcheditem)
{
	struct cell_hdr *hdr = cell_addr(ht, key);
	char **element = element_addr(ht, key);

	BUG_ON(0 == element);

	spin_lock( &(hdr->lock) );

	while (1) {
		if (*element == 0)
			break;
		if (searcheditem != 0 && ht->matches(*element, searcheditem))
			break;
		element = next_element(ht, *element);
	}

	return element;
}

char *htable_get(struct htable *ht, __u32 key, void *searcheditem)
{
	unsigned long iflags;
	char *element;

	printk(KERN_ERR "get %u\n", key);

	if (ht->htable == 0)
		return 0;

	local_irq_save(iflags);
	element = *(get_element_nounlock(ht, key, searcheditem));
	if (element != 0)
		ref_counter_incr(element_refcnt(ht, element));
	unlock_element(ht, key);
	local_irq_restore(iflags);

	return element;
}
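
/*
 * Note on reference counting: htable_get() takes a reference on the entry
 * it returns, which the caller must drop with ref_counter_decr() (see the
 * get_conn() callers below). htable_delete() drops the reference taken by
 * htable_insert().
 */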

int htable_delete(struct htable *ht, __u32 key,
		void *searcheditem)
{
	unsigned long iflags;
	char **element;
	char *olddata;
	int rc = 0;

	printk(KERN_ERR "des %u\n", key);

	if (ht->htable == 0)
		return 1;

	local_irq_save(iflags);

	element = get_element_nounlock(ht, key, searcheditem);
	BUG_ON(0 == element);

	if (0 == *element) {
		/* key not in table */
		rc = 1;
		goto out;
	}

	/* unlink the entry before dropping the reference - the decr may
	 * free the entry, after which its next pointer must not be read */
	olddata = *element;
	*element = *(next_element(ht, olddata));
	ref_counter_decr(element_refcnt(ht, olddata));

out:
	unlock_element(ht, key);
	local_irq_restore(iflags);

	return rc;
}

void htable_insert(struct htable *ht, char *newelement, __u32 key)
{
	unsigned long iflags;
	char **element;

	printk(KERN_ERR "insert %p %u\n", newelement, key);

	if (ht->htable == 0)
		return;

	BUG_ON(*next_element(ht, newelement) != 0);
	local_irq_save(iflags);

	element = get_element_nounlock(ht, key, 0);

	BUG_ON(element == 0);
	BUG_ON(*element != 0);

	*element = newelement;
	ref_counter_incr(element_refcnt(ht, newelement));

	unlock_element(ht, key);
	local_irq_restore(iflags);
}

void htable_init(struct htable *ht, int (*matches)(void *htentry,
		void *searcheditem), __u32 entry_offset, __u32 ref_counter_offset)
{
	int num_cells;
	int j;

	BUG_ON(0 == ht);

	ht->htable = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (ht->htable == 0) {
		printk(KERN_CRIT "cor: error allocating htable (out of "
				"memory?)\n");
		return;
	}
	memset(ht->htable, 0, PAGE_SIZE);
	ht->cell_size = 256;

	num_cells = PAGE_SIZE/ht->cell_size;

	for (j=0;j<num_cells;j++) {
		struct cell_hdr *hdr = (struct cell_hdr *)
				( ((char *) ht->htable) + j * ht->cell_size );
		spin_lock_init(&(hdr->lock));
	}

	ht->htable_size = num_cells * elements_per_cell(ht->cell_size);
	ht->num_elements = 0;

	ht->matches = matches;
	ht->entry_offset = entry_offset;
	ht->ref_counter_offset = ref_counter_offset;
}
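
/*
 * Usage sketch (this matches the connid_table setup in cor_common_init()
 * below): the entry embeds both the chain pointer and the refcounter, and
 * their offsets are passed to htable_init():
 *
 *	htable_init(&connid_table, matches_connid_in,
 *			offsetof(struct conn, source.in.htab_entry),
 *			offsetof(struct conn, refs));
 *	htable_insert(&connid_table, (char *) sconn, conn_id);
 */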

struct conn *get_conn(__u32 conn_id)
{
	return (struct conn *) htable_get(&connid_table, conn_id, &conn_id);
}
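
/*
 * Allocate a random, nonzero connection id that is not yet in
 * connid_table and insert sconn under it. Retries up to 16 times on
 * collision.
 * rc == 0 ==> ok
 * rc == 1 ==> no unused id found
 */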
static int connid_alloc(struct conn *sconn)
{
	__u32 conn_id;
	int i;

	BUG_ON(sconn->sourcetype != SOURCE_IN);

	mutex_lock(&connid_gen);
	for (i=0;i<16;i++) {
		struct conn *tmp;

		conn_id = 0;
		get_random_bytes((char *) &conn_id, sizeof(conn_id));

		if (conn_id == 0)
			continue;

		tmp = get_conn(conn_id);
		if (tmp != 0) {
			ref_counter_decr(&(tmp->refs));
			continue;
		}

		goto found;
	}
	mutex_unlock(&connid_gen);

	return 1;

found:
	sconn->source.in.conn_id = conn_id;
	htable_insert(&connid_table, (char *) sconn, conn_id);
	mutex_unlock(&connid_gen);
	return 0;
}
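
/*
 * Refcount release callback for conns (see conn_refcnt below). Both
 * directions of a connection are allocated as a pair by alloc_conn(), so
 * they are also freed as a pair: the side that reaches refcount zero
 * first only marks its reversedir with isreset == 3; all memory is
 * released once the second side is freed as well.
 */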
/* releases the resources of one direction; the caller holds conn_free */
static void _free_conn(struct conn *conn)
{
	if (conn->sourcetype == SOURCE_IN) {
		ref_counter_decr(&(conn->source.in.nb->refs));
		conn->source.in.nb = 0;
	}
	if (conn->targettype == TARGET_OUT) {
		ref_counter_decr(&(conn->target.out.nb->refs));
		conn->target.out.nb = 0;
	}
	databuf_free(&(conn->buf));
	kmem_cache_free(conn_slab, conn);
}

static void free_conn(struct ref_counter *cnt)
{
	struct conn *conn = container_of(cnt, struct conn, refs);

	BUG_ON(conn->isreset == 0);

	mutex_lock(&conn_free);

	if (conn->isreset != 3) {
		/* the other direction is still referenced; defer the actual
		 * free until its refcount drops too */
		conn->reversedir->isreset = 3;
		goto out;
	}

	if (conn->reversedir != 0) {
		conn->reversedir->reversedir = 0;
		/* calling free_conn() here would deadlock on conn_free */
		_free_conn(conn->reversedir);
		conn->reversedir = 0;
	}

	_free_conn(conn);

out:
	mutex_unlock(&conn_free);
}

static struct ref_counter_def conn_refcnt = {
	.free = free_conn
};

/*
 * rconn ==> the connection we received the command from
 * ==> init rconn->target.out + rconn->reversedir->source.in
 * rc == 0 ==> ok
 * rc == 1 ==> connid allocation failed
 */
int conn_init_out(struct conn *rconn, struct neighbor *nb)
{
	struct conn *sconn = rconn->reversedir;

	BUG_ON(TARGET_UNCONNECTED != rconn->targettype);
	BUG_ON(0 == sconn);
	BUG_ON(SOURCE_NONE != sconn->sourcetype);

	memset(&(rconn->target.out), 0, sizeof(rconn->target.out));
	memset(&(sconn->source.in), 0, sizeof(sconn->source.in));

	rconn->targettype = TARGET_OUT;
	sconn->sourcetype = SOURCE_IN;

	rconn->target.out.nb = nb;
	sconn->source.in.nb = nb;

	skb_queue_head_init(&(sconn->source.in.reorder_queue));

	/*
	 * connid_alloc has to be called last, because packets may be received
	 * immediately after its execution
	 */
	if (connid_alloc(sconn))
		return 1;

	mutex_lock(&(nb->conn_list_lock));
	list_add_tail(&(sconn->source.in.nb_list), &(nb->rcv_conn_list));
	list_add_tail(&(rconn->target.out.nb_list), &(nb->snd_conn_list));
	mutex_unlock(&(nb->conn_list_lock));

	/* neighbor lists */
	ref_counter_incr(&(rconn->refs));
	ref_counter_incr(&(sconn->refs));

	return 0;
}

void conn_init_sock_source(struct conn *conn)
{
	BUG_ON(conn == 0);

	conn->sourcetype = SOURCE_SOCK;

	memset(&(conn->source.sock), 0, sizeof(conn->source.sock));

	init_waitqueue_head(&(conn->source.sock.wait));
}

void conn_init_sock_target(struct conn *conn)
{
	BUG_ON(conn == 0);
	conn->targettype = TARGET_SOCK;
	init_waitqueue_head(&(conn->target.sock.wait));
}
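
/*
 * Allocate a connection together with its reverse direction. The returned
 * conn has its reversedir pointing at the second conn (and vice versa);
 * both start with a refcount of 1. Returns 0 on allocation failure.
 */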
struct conn *alloc_conn(gfp_t allocflags)
{
	struct conn *rconn = 0;
	struct conn *sconn = 0;

	rconn = kmem_cache_alloc(conn_slab, allocflags);
	if (unlikely(0 == rconn))
		goto out_err0;

	sconn = kmem_cache_alloc(conn_slab, allocflags);
	if (unlikely(0 == sconn))
		goto out_err1;

	memset(rconn, 0, sizeof(struct conn));
	memset(sconn, 0, sizeof(struct conn));

	rconn->reversedir = sconn;
	sconn->reversedir = rconn;

	ref_counter_init(&(rconn->refs), &conn_refcnt);
	ref_counter_init(&(sconn->refs), &conn_refcnt);

	mutex_init(&(rconn->rcv_lock));
	mutex_init(&(sconn->rcv_lock));

	rconn->sockstate = SOCKSTATE_CONN;
	sconn->sockstate = SOCKSTATE_CONN;

	databuf_init(&(rconn->buf));
	databuf_init(&(sconn->buf));

	return rconn;

out_err1:
	kmem_cache_free(conn_slab, rconn);
out_err0:
	return 0;
}

static struct connlistener *get_connlistener(__be64 port)
{
	struct list_head *curr = openports.next;

	while (curr != &openports) {
		struct bindnode *currnode = container_of(curr,
				struct bindnode, lh);
		if (currnode->port == port) {
			BUG_ON(currnode->owner == 0);
			return currnode->owner;
		}

		curr = curr->next;
	}

	return 0;
}
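
/*
 * Tear down a listener: unlink and free its bindnode, reset and
 * unreference all connections still waiting on the accept queue, then
 * free the listener itself.
 */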
void close_port(struct connlistener *listener)
{
	mutex_lock(&cor_bindnodes);

	if (listener->bn != 0) {
		list_del(&(listener->bn->lh));
		kmem_cache_free(bindnode_slab, listener->bn);
		listener->bn = 0;
	}

	while (list_empty(&(listener->conn_queue)) == 0) {
		struct conn *rconn = container_of(listener->conn_queue.next,
				struct conn, source.sock.cl_list);
		rconn->reversedir->isreset = 1;
		reset_conn(rconn);
		list_del(&(rconn->source.sock.cl_list));
		ref_counter_decr(&(rconn->refs));
	}

	kmem_cache_free(connlistener_slab, listener);

	mutex_unlock(&cor_bindnodes);
}

struct connlistener *open_port(__be64 port)
{
	struct bindnode *bn = 0;
	struct connlistener *listener = 0;

	mutex_lock(&cor_bindnodes);
	if (get_connlistener(port) != 0)
		goto out;

	bn = kmem_cache_alloc(bindnode_slab, GFP_KERNEL);
	if (unlikely(bn == 0))
		goto out;

	listener = kmem_cache_alloc(connlistener_slab, GFP_KERNEL);
	if (unlikely(listener == 0)) {
		kmem_cache_free(bindnode_slab, bn);
		goto out;
	}

	memset(bn, 0, sizeof(struct bindnode));
	memset(listener, 0, sizeof(struct connlistener));

	bn->owner = listener;
	bn->port = port;

	/* refcounter is not actually used */
	listener->sockstate = SOCKSTATE_LISTENER;
	listener->bn = bn;
	mutex_init(&(listener->lock));
	INIT_LIST_HEAD(&(listener->conn_queue));
	init_waitqueue_head(&(listener->wait));

	list_add_tail(&(bn->lh), &openports);

out:
	mutex_unlock(&cor_bindnodes);

	return listener;
}

/*
 * rc == 0 connected
 * rc == 2 port not open
 * rc == 3 listener queue full
 */
int connect_port(struct conn *rconn, __be64 port)
{
	struct connlistener *listener;
	int rc = 0;

	mutex_lock(&cor_bindnodes);

	listener = get_connlistener(port);
	if (listener == 0) {
		rc = 2;
		goto out;
	}

	mutex_lock(&(listener->lock));

	if (listener->queue_len >= listener->queue_maxlen) {
		if (listener->queue_maxlen <= 0)
			rc = 2;
		else
			rc = 3;

		goto out2;
	}

	if (ref_counter_incr(&(rconn->reversedir->refs)))
		BUG();

	conn_init_sock_target(rconn);
	conn_init_sock_source(rconn->reversedir);

	list_add_tail(&(rconn->reversedir->source.sock.cl_list),
			&(listener->conn_queue));
	listener->queue_len++;
	wake_up_interruptible(&(listener->wait));

out2:
	mutex_unlock(&(listener->lock));

out:
	mutex_unlock(&cor_bindnodes);
	return rc;
}

/*
 * rc == 0 connected
 * rc == 2 addrtype not found
 * rc == 3 addr not found
 * rc == 4 connid allocation failed
 * rc == 5 control msg alloc failed
 */
int connect_neigh(struct conn *rconn,
		__u16 addrtypelen, __u8 *addrtype,
		__u16 addrlen, __u8 *addr)
{
	int rc = 0;
	struct control_msg_out *cm;
	struct neighbor *nb = find_neigh(addrtypelen, addrtype, addrlen, addr);
	if (nb == 0)
		return 3;

	if (conn_init_out(rconn, nb)) {
		rc = 4;
		goto neigh_refcnt;
	}

	cm = alloc_control_msg();
	if (unlikely(cm == 0)) {
		rc = 5;
		goto neigh_refcnt;
	}

	send_connect_nb(cm, nb, rconn->reversedir->source.in.conn_id);

	if (0) {
		/* reached on error paths only: drop the neighbor reference
		 * taken by find_neigh() */
neigh_refcnt:
		ref_counter_decr(&(nb->refs));
	}

	return rc;
}

static void _reset_conn(struct conn *conn)
{
	if (conn->isreset == 1)
		goto free;

	if (conn->isreset == 2 || conn->isreset == 3)
		return;

	if (conn->targettype == TARGET_OUT && conn->target.out.conn_id != 0) {
		struct control_msg_out *cm = alloc_control_msg();
		if (likely(cm != 0))
			send_reset_conn(cm, conn->target.out.nb,
					conn->target.out.conn_id);
	}

free:
	if (conn->sourcetype == SOURCE_IN) {
		mutex_lock(&(conn->source.in.nb->conn_list_lock));
		list_del(&(conn->source.in.nb_list));
		mutex_unlock(&(conn->source.in.nb->conn_list_lock));

		ref_counter_decr(&(conn->refs));

		if (conn->source.in.conn_id != 0)
			htable_delete(&connid_table, conn->source.in.conn_id,
					&(conn->source.in.conn_id));
	}

	if (conn->targettype == TARGET_OUT) {
		mutex_lock(&(conn->target.out.nb->conn_list_lock));
		list_del(&(conn->target.out.nb_list));
		mutex_unlock(&(conn->target.out.nb->conn_list_lock));
		ref_counter_decr(&(conn->refs));
	}

	conn->isreset = 2;
}

/* warning: do not hold the rcv_lock while calling this! */
void reset_conn(struct conn *conn)
{
	_reset_conn(conn);
	_reset_conn(conn->reversedir);
}

static int matches_connid_in(void *htentry, void *searcheditem)
{
	struct conn *conn = (struct conn *) htentry;
	__u32 conn_id = *((__u32 *) searcheditem);
	BUG_ON(conn->sourcetype != SOURCE_IN);
	return (conn->source.in.conn_id == conn_id);
}

static int __init cor_common_init(void)
{
	int rc;

	struct conn c;

	printk(KERN_ERR "sizeof conn: %zu\n", sizeof(c));
	printk(KERN_ERR " conn.source: %zu\n", sizeof(c.source));
	printk(KERN_ERR " conn.target: %zu\n", sizeof(c.target));
	printk(KERN_ERR " conn.target.out: %zu\n", sizeof(c.target.out));
	printk(KERN_ERR " conn.buf: %zu\n", sizeof(c.buf));

	printk(KERN_ERR " mutex: %zu\n", sizeof(struct mutex));
	printk(KERN_ERR " spinlock: %zu\n", sizeof(spinlock_t));
	printk(KERN_ERR " kref: %zu\n", sizeof(struct kref));

	conn_slab = kmem_cache_create("cor_conn", sizeof(struct conn),
			8, 0, 0);
	htable_init(&connid_table, matches_connid_in,
			offsetof(struct conn, source.in.htab_entry),
			offsetof(struct conn, refs));

	bindnode_slab = kmem_cache_create("cor_bindnode",
			sizeof(struct bindnode), 8, 0, 0);
	connlistener_slab = kmem_cache_create("cor_connlistener",
			sizeof(struct connlistener), 8, 0, 0);

	forward_init();

	rc = cor_snd_init();
	if (rc != 0)
		return rc;

	rc = cor_neighbor_init();
	if (rc != 0)
		return rc;

	rc = cor_rcv_init();
	if (rc != 0)
		return rc;

	return 0;
}

module_init(cor_common_init);
MODULE_LICENSE("GPL");