/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>

/* cor-internal declarations (struct conn, struct skb_procstate, cor_pull_skb,
 * ...); the header name "cor.h" is an assumption */
#include "cor.h"

atomic_t packets_in_workqueue = ATOMIC_INIT(0);

atomic_t ooo_packets = ATOMIC_INIT(0);

static struct workqueue_struct *packet_wq;

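/*
 * Deliver out-of-order packets that have become contiguous: walk the reorder
 * queue from the head and feed every skb that matches next_seqno to
 * receive_skb(), dropping the per-connection, per-neighbor and global
 * out-of-order counters as packets leave the queue.
 */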
void drain_ooo_queue(struct conn *rconn)
{
	struct sk_buff *skb;

	BUG_ON(SOURCE_IN != rconn->sourcetype);

	mutex_lock(&(rconn->rcv_lock));

	skb = rconn->source.in.reorder_queue.next;

	while ((void *) skb != (void *) &(rconn->source.in.reorder_queue)) {
		struct skb_procstate *ps = skb_pstate(skb);
		int drop;

		BUG_ON(rconn != ps->rconn);

		/* stop as soon as the queue head is not the next expected
		 * sequence number */
		if (rconn->source.in.next_seqno != ps->funcstate.rcv2.seqno)
			break;

		drop = receive_skb(rconn, skb);
		if (drop)
			break;

		skb_unlink(skb, &(rconn->source.in.reorder_queue));
		rconn->source.in.ooo_packets--;
		atomic_dec(&(rconn->source.in.nb->ooo_packets));
		atomic_dec(&ooo_packets);

		rconn->source.in.next_seqno += skb->len;

		skb = rconn->source.in.reorder_queue.next;
	}

	mutex_unlock(&(rconn->rcv_lock));
}

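/*
 * Queue an out-of-order packet for later delivery by drain_ooo_queue().
 * Returns 1 if the packet has to be dropped because a per-connection,
 * per-neighbor or global out-of-order limit was exceeded, 0 if it was queued.
 */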
static int _conn_rcv_ooo(struct sk_buff *skb)
{
	struct skb_procstate *ps = skb_pstate(skb);
	struct conn *rconn = ps->rconn;
	struct sk_buff_head *reorder_queue = &(rconn->source.in.reorder_queue);
	struct sk_buff *curr = reorder_queue->next;

	int ooo;

	rconn->source.in.ooo_packets++;
	if (rconn->source.in.ooo_packets > MAX_TOTAL_OOO_PER_CONN)
		goto drop_conn;

	ooo = atomic_inc_return(&(rconn->source.in.nb->ooo_packets));
	if (ooo > MAX_TOTAL_OOO_PER_NEIGH)
		goto drop_neigh;

	ooo = atomic_inc_return(&ooo_packets);
	if (ooo > MAX_TOTAL_OOO_PACKETS)
		goto drop_global;

	/* insert the packet into the reorder queue, keeping it sorted by
	 * ascending sequence number */
	while (1) {
		struct skb_procstate *ps2 = skb_pstate(curr);

		if ((void *) curr == (void *) reorder_queue) {
			/* end of the queue reached, append */
			skb_queue_tail(reorder_queue, skb);
			break;
		}

		BUG_ON(rconn != ps2->rconn);

		if (ps->funcstate.rcv2.seqno < ps2->funcstate.rcv2.seqno) {
			skb_insert(curr, skb, reorder_queue);
			break;
		}

		curr = curr->next;
	}

	return 0;

	/* over one of the out-of-order limits: undo the counters that have
	 * already been incremented and tell the caller to drop the packet */
drop_global:
	atomic_dec(&ooo_packets);
drop_neigh:
	atomic_dec(&(rconn->source.in.nb->ooo_packets));
drop_conn:
	rconn->source.in.ooo_packets--;

	return 1;
}

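/*
 * Process one data packet of a connection: deliver it immediately if it
 * carries the next expected sequence number, otherwise put it into the
 * reorder queue, and acknowledge it unless it had to be dropped.
 */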
static void _conn_rcv(struct sk_buff *skb)
{
	struct skb_procstate *ps = skb_pstate(skb);
	struct conn *rconn = ps->rconn;
	struct control_msg_out *cm = alloc_control_msg();

	int in_order;
	int drop;

	BUG_ON(rconn->sourcetype != SOURCE_IN);

	if (unlikely(cm == 0)) {
		/* no memory for the ack message, drop the packet */
		kfree_skb(skb);
		goto out;
	}

	mutex_lock(&(rconn->rcv_lock));

	in_order = rconn->source.in.next_seqno == ps->funcstate.rcv2.seqno;

	if (in_order == 0) {
		drop = _conn_rcv_ooo(skb);
	} else {
		rconn->source.in.next_seqno += skb->len;
		drop = receive_skb(rconn, skb);
	}

	if (drop) {
		kfree_skb(skb);
		free_control_msg(cm);
	} else {
		send_ack(cm, rconn->source.in.nb, rconn->source.in.conn_id,
				ps->funcstate.rcv2.seqno);
	}

	mutex_unlock(&(rconn->rcv_lock));

	drain_ooo_queue(rconn);

out:
	ref_counter_decr(&(rconn->refs));
}

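/*
 * Resolve the conn_id of a packet to its connection and process it.
 */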
static void conn_rcv(struct sk_buff *skb, __u32 conn_id, __u32 seqno)
{
	struct skb_procstate *ps = skb_pstate(skb);

	ps->funcstate.rcv2.conn_id = conn_id;
	ps->funcstate.rcv2.seqno = seqno;

	ps->rconn = get_conn(ps->funcstate.rcv2.conn_id);
	if (unlikely(ps->rconn == 0)) {
		/* no connection with this id, drop the packet */
		kfree_skb(skb);
		return;
	}

	_conn_rcv(skb);
}

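/*
 * Copy a raw data buffer into a newly allocated skb and feed it into the
 * connection receive path.
 */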
void conn_rcv_buildskb(char *data, __u32 datalen, __u32 conn_id, __u32 seqno)
{
	struct sk_buff *skb = alloc_skb(datalen, GFP_KERNEL);
	char *dst = skb_put(skb, datalen);

	memcpy(dst, data, datalen);
	conn_rcv(skb, conn_id, seqno);
}

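/*
 * Decryption is not implemented yet (see the #warning below); currently this
 * only parses conn_id and seqno from the packet and dispatches it to the
 * control or connection receive path.
 */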
static void rcv_decrypt(struct sk_buff *skb, __u32 keyid, __u32 keyseq)
{
	__u32 conn_id;
	__u32 seqno;

	char *connid_p = cor_pull_skb(skb, 4);
	char *seqno_p = cor_pull_skb(skb, 4);

	struct neighbor *nb = 0;

#warning todo neighbor/crypto

	((char *)&conn_id)[0] = connid_p[0];
	((char *)&conn_id)[1] = connid_p[1];
	((char *)&conn_id)[2] = connid_p[2];
	((char *)&conn_id)[3] = connid_p[3];

	((char *)&seqno)[0] = seqno_p[0];
	((char *)&seqno)[1] = seqno_p[1];
	((char *)&seqno)[2] = seqno_p[2];
	((char *)&seqno)[3] = seqno_p[3];

	conn_id = be32_to_cpu(conn_id);
	seqno = be32_to_cpu(seqno);

	if (conn_id == 0) {
		/* conn_id 0 is assumed to address the neighbor's control
		 * ("kernel") stream rather than a connection */
		kernel_packet(nb, skb, seqno);
	} else {
		conn_rcv(skb, conn_id, seqno);
	}
}

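/*
 * Work queue handler, executed once per received packet: parses the packet
 * type and the key id/sequence fields and passes the packet on.
 */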
static void rcv(struct work_struct *work)
{
	struct sk_buff *skb = skb_from_pstate(container_of(work,
			struct skb_procstate, funcstate.rcv.work));

	__u8 packet_type;
	char *packet_type_p;

	char *keyid_p;
	char *keyseq_p;

	__u32 keyid;
	__u32 keyseq;

	atomic_dec(&packets_in_workqueue);

	packet_type_p = cor_pull_skb(skb, 1);

	if (packet_type_p == 0)
		goto drop;

	packet_type = *packet_type_p;

	if (packet_type == PACKET_TYPE_ANNOUNCE) {
		/* hand off to the neighbor discovery code; the handler name
		 * rcv_announce() is an assumption */
		rcv_announce(skb);
		return;
	}

	if (packet_type != PACKET_TYPE_DATA)
		goto drop;

	keyid_p = cor_pull_skb(skb, 4);
	if (keyid_p == 0)
		goto drop;

	keyseq_p = cor_pull_skb(skb, 4);
	if (keyseq_p == 0)
		goto drop;

	((char *)&keyid)[0] = keyid_p[0];
	((char *)&keyid)[1] = keyid_p[1];
	((char *)&keyid)[2] = keyid_p[2];
	((char *)&keyid)[3] = keyid_p[3];

	((char *)&keyseq)[0] = keyseq_p[0];
	((char *)&keyseq)[1] = keyseq_p[1];
	((char *)&keyseq)[2] = keyseq_p[2];
	((char *)&keyseq)[3] = keyseq_p[3];

	keyid = be32_to_cpu(keyid);
	keyseq = be32_to_cpu(keyseq);

	rcv_decrypt(skb, keyid, keyseq);

	return;

drop:
	kfree_skb(skb);
}

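/*
 * Receive hook for ETH_P_COR frames: defers the actual processing to the
 * packet workqueue and drops packets when too many are already queued.
 */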
static int queue_rcv_processing(struct sk_buff *skb, struct net_device *dev,
		struct packet_type *pt, struct net_device *orig_dev)
{
	struct skb_procstate *ps = skb_pstate(skb);
	int queuelen;

	BUG_ON(skb->next != 0);

	queuelen = atomic_inc_return(&packets_in_workqueue);

	BUG_ON(queuelen <= 0);

	if (queuelen > MAX_PACKETS_IN_RCVQUEUE) {
		/* receive queue overloaded, drop the packet */
		atomic_dec(&packets_in_workqueue);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	INIT_WORK(&(ps->funcstate.rcv.work), rcv);
	queue_work(packet_wq, &(ps->funcstate.rcv.work));
	return NET_RX_SUCCESS;
}

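/*
 * Packet type registration and workqueue creation at module init time.
 */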
static struct packet_type ptype_cor = {
	.type = htons(ETH_P_COR),
	.func = queue_rcv_processing
};

int __init cor_rcv_init(void)
{
	BUG_ON(sizeof(struct skb_procstate) > 48);
	packet_wq = create_workqueue("cor_packet");
	dev_add_pack(&ptype_cor);
	return 0;
}

MODULE_LICENSE("GPL");