/* net/cor/kpacket_parse.c -- kernel packet parsing (cor_2_6_31) */
1 /*
2 * Connection oriented routing
3 * Copyright (C) 2007-2008 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
21 #include <asm/byteorder.h>
23 #include "cor.h"
25 /* not used, avoid compiler warning
26 * static __u64 pull_u64(struct sk_buff *skb, int convbo)
28 char *ptr = cor_pull_skb(skb, 8);
30 __u64 ret = 0;
32 BUG_ON(0 == ptr);
34 ((char *)&ret)[0] = ptr[0];
35 ((char *)&ret)[1] = ptr[1];
36 ((char *)&ret)[2] = ptr[2];
37 ((char *)&ret)[3] = ptr[3];
38 ((char *)&ret)[4] = ptr[4];
39 ((char *)&ret)[5] = ptr[5];
40 ((char *)&ret)[6] = ptr[6];
41 ((char *)&ret)[7] = ptr[7];
43 if (convbo)
44 return be64_to_cpu(ret);
45 return ret;
46 } */
48 static __u32 pull_u32(struct sk_buff *skb, int convbo)
50 char *ptr = cor_pull_skb(skb, 4);
52 __u32 ret = 0;
54 BUG_ON(0 == ptr);
56 ((char *)&ret)[0] = ptr[0];
57 ((char *)&ret)[1] = ptr[1];
58 ((char *)&ret)[2] = ptr[2];
59 ((char *)&ret)[3] = ptr[3];
61 if (convbo)
62 return be32_to_cpu(ret);
63 return ret;
66 static __u16 pull_u16(struct sk_buff *skb, int convbo)
68 char *ptr = cor_pull_skb(skb, 2);
70 __u16 ret = 0;
72 BUG_ON(0 == ptr);
74 ((char *)&ret)[0] = ptr[0];
75 ((char *)&ret)[1] = ptr[1];
77 if (convbo)
78 return be16_to_cpu(ret);
79 return ret;
82 static __u8 pull_u8(struct sk_buff *skb)
84 char *ptr = cor_pull_skb(skb, 1);
85 BUG_ON(0 == ptr);
86 return *ptr;
/*
 * Note that traffic confirming this connection has been received:
 * clear the pong_awaiting flag and decrement the neighbor's count of
 * connections still expected to answer.
 *
 * The unlocked atomic_read first skips taking conn_list_lock in the
 * common case; the flag is re-checked under the lock because another
 * context may have cleared it in between (classic double-checked
 * pattern -- pong_conns_expected must only be decremented once).
 */
static void pong_rcvd(struct conn *rconn)
{
	struct neighbor *nb = rconn->source.in.nb;

	if (atomic_read(&(rconn->source.in.pong_awaiting)) != 0) {
		mutex_lock(&(nb->conn_list_lock));
		if (atomic_read(&(rconn->source.in.pong_awaiting)) == 0)
			goto unlock;

		atomic_set(&(rconn->source.in.pong_awaiting), 0);
		nb->pong_conns_expected--;
unlock:
		mutex_unlock(&(nb->conn_list_lock));
	}
}
/*
 * Begin pinging all connections to this neighbor: point next_ping_conn
 * at the first entry of snd_conn_list and set ping_conns_remaining so
 * the send path can walk the whole list. No-op when the list is empty.
 *
 * NOTE(review): a reference is taken on the first connection here;
 * presumably the send path drops it as the walk advances -- confirm
 * against the code that consumes next_ping_conn.
 */
static void ping_all_conns(struct neighbor *nb)
{
	struct conn *rconn;

	mutex_lock(&(nb->conn_list_lock));

	if (list_empty(&(nb->snd_conn_list))) {
		BUG_ON(nb->num_send_conns != 0);
		goto out;
	}

	rconn = container_of(nb->snd_conn_list.next, struct conn,
			target.out.nb_list);

	BUG_ON(rconn->targettype != TARGET_OUT);

	nb->next_ping_conn = rconn;
	nb->ping_conns_remaining = nb->num_send_conns;
	kref_get(&(rconn->ref));

out:
	mutex_unlock(&(nb->conn_list_lock));
}
/*
 * Skip the payload of an ACK_CONN command (4 byte seqno, 1 byte
 * window) without acting on it; used when the conn_id is unknown.
 */
static void discard_ack_conn(struct neighbor *nb, struct sk_buff *skb)
{
	(void) pull_u32(skb, 1); /* seqno */
	(void) pull_u8(skb); /* window */
}
/*
 * Handle KP_ACK_CONN: a per-connection ack carrying the cumulative
 * seqno and the flow-control window.
 */
static void parse_ack_conn(struct neighbor *nb, struct sk_buff *skb,
		__u32 kpacket_seqno, struct conn *rconn)
{
	__u32 seqno = pull_u32(skb, 1);
	__u8 window = pull_u8(skb);

	/* an ack on the connection also counts as a pong for it */
	pong_rcvd(rconn);

	conn_ack_rcvd(kpacket_seqno, rconn->reversedir, seqno, window, 0, 0);
}
/*
 * Skip the payload of an ACK_CONN_OOO command (seqno, window,
 * seqno_ooo, length) without acting on it; used when the conn_id is
 * unknown.
 */
static void discard_ack_conn_ooo(struct neighbor *nb, struct sk_buff *skb)
{
	(void) pull_u32(skb, 1); /* seqno */
	(void) pull_u8(skb); /* window */
	(void) pull_u32(skb, 1); /* seqno_ooo */
	(void) pull_u32(skb, 1); /* length */
}
/*
 * Handle KP_ACK_CONN_OOO: like KP_ACK_CONN but additionally carrying
 * an out-of-order range (seqno_ooo, length) received ahead of the
 * cumulative ack point.
 *
 * NOTE(review): unlike parse_ack_conn() this does not call
 * pong_rcvd() -- confirm whether that asymmetry is intentional.
 */
static void parse_ack_conn_ooo(struct neighbor *nb, struct sk_buff *skb,
		__u32 kpacket_seqno, struct conn *rconn)
{
	__u32 seqno = pull_u32(skb, 1);
	__u8 window = pull_u8(skb);
	__u32 seqno_ooo = pull_u32(skb, 1);
	__u32 length = pull_u32(skb, 1);

	conn_ack_rcvd(kpacket_seqno, rconn->reversedir, seqno, window,
			seqno_ooo, length);
}
165 static void discard_conn_success(struct neighbor *nb, struct sk_buff *skb)
167 __u32 conn_id = pull_u32(skb, 1);
168 struct control_msg_out *cm = alloc_control_msg(nb, ACM_PRIORITY_HIGH);
170 if (unlikely(cm == 0))
171 send_ping_all_conns(nb);
172 else
173 send_reset_conn(cm, conn_id);
/*
 * Handle KP_CONNECT_SUCCESS: the peer accepted our connect and tells
 * us the conn_id to use on the outgoing direction. Stores the conn_id
 * (first success only), registers the reverse mapping and flushes any
 * queued outgoing data.
 *
 * The connection is reset instead if it is not in the expected state:
 * wrong target type/neighbor, a different conn_id than previously
 * announced, or locally already reset.
 *
 * The "if (0) { ... reset: ... }" block is the file's idiom for an
 * error path that is only reachable via goto.
 */
static void parse_conn_success(struct neighbor *nb, struct sk_buff *skb,
		__u32 seqno, struct conn *rconn)
{
	struct conn *sconn = rconn->reversedir;

	__u32 conn_id = pull_u32(skb, 1);

	BUG_ON(sconn == 0);

	mutex_lock(&(sconn->rcv_lock));

	if (unlikely(unlikely(sconn->targettype != TARGET_OUT) ||
			unlikely(sconn->target.out.nb != nb) ||
			unlikely(sconn->target.out.conn_id != 0 &&
			sconn->target.out.conn_id != conn_id))) {
		goto reset;
	}

	/* conn_id == 0 means this is the first success we see */
	if (likely(sconn->target.out.conn_id == 0)) {
		sconn->target.out.conn_id = conn_id;

		if (unlikely(atomic_read(&(sconn->isreset)) != 0))
			goto reset;

		insert_reverse_connid(sconn);
	}

	flush_out(sconn);

	if (0) {
		struct control_msg_out *cm;
reset:
		cm = alloc_control_msg(nb, ACM_PRIORITY_HIGH);
		if (unlikely(cm == 0))
			send_ping_all_conns(nb);
		else
			send_reset_conn(cm, conn_id);
	}
	mutex_unlock(&(sconn->rcv_lock));
}
/*
 * Handle KP_RESET_CONN: mark the sending side as reset (cmpxchg so a
 * concurrent reset does it only once) and tear down the receiving
 * connection.
 */
static void parse_reset(struct neighbor *nb, struct sk_buff *skb, __u32 seqno,
		struct conn *rconn)
{
	atomic_cmpxchg(&(rconn->reversedir->isreset), 0, 1);
	reset_conn(rconn);
}
/*
 * Handle KP_PING_CONN: answer with a per-connection ack carrying the
 * current receive seqno. Silently dropping the reply when no control
 * message can be allocated is acceptable -- the peer will ping again.
 */
static void parse_ping_conn(struct neighbor *nb, struct sk_buff *skb,
		__u32 seqno, struct conn *rconn)
{
	struct control_msg_out *cm = alloc_control_msg(nb, ACM_PRIORITY_MED);

	if (unlikely(cm == 0))
		return;

	mutex_lock(&(rconn->rcv_lock));
#warning todo set window
	/* window is hardcoded to 65536 until the todo above is resolved */
	send_ack_conn(cm, rconn->reversedir->target.out.conn_id,
			rconn->source.in.next_seqno, enc_window(65536));
	mutex_unlock(&(rconn->rcv_lock));
}
/*
 * Common dispatcher for per-connection commands: pull the conn_id,
 * look the connection up and run parsefunc on it.
 *
 * On an unknown conn_id (or one belonging to a different neighbor) a
 * CONNID_UNKNOWN is sent back and readdiscardfunc, if given, consumes
 * the command's remaining payload so the packet parse stays in sync.
 * Commands with no payload after the conn_id pass readdiscardfunc == 0.
 *
 * get_conn() returns a referenced conn; the reference is dropped at
 * the end. The "if (0) { err: }" construct is the file's idiom for a
 * goto-only error path.
 */
static void conn_cmd(struct neighbor *nb, struct sk_buff *skb, __u32 seqno,
		__u8 code, void (*parsefunc)(struct neighbor *nb,
		struct sk_buff *skb, __u32 seqno, struct conn *rconn),
		void (*readdiscardfunc)(struct neighbor *nb,
		struct sk_buff *skb))
{
	__u32 conn_id = pull_u32(skb, 1);
	struct conn *rconn = get_conn(conn_id);

	if (unlikely(rconn == 0))
		goto err;

	BUG_ON(rconn->sourcetype != SOURCE_IN);
	BUG_ON(rconn->reversedir == 0);

	if (unlikely(rconn->source.in.nb != nb))
		goto err;

	parsefunc(nb, skb, seqno, rconn);
	if (0) {
		struct control_msg_out *cm;
err:
		cm = alloc_control_msg(nb, ACM_PRIORITY_MED);
		if (likely(cm != 0))
			send_connid_unknown(cm, conn_id);
		if (readdiscardfunc != 0)
			readdiscardfunc(nb, skb);
	}
	if (likely(rconn != 0))
		kref_put(&(rconn->ref), free_conn);
}
/*
 * Handle KP_CONN_DATA: hand the payload to the connection receive
 * path. The BUG_ON cannot fire for packets that went through
 * kernel_packet(), which validates the declared length against the
 * skb before this second pass runs.
 */
static void parse_conndata(struct neighbor *nb, struct sk_buff *skb)
{
	__u32 conn_id = pull_u32(skb, 1);
	__u32 seqno = pull_u32(skb, 1);
	__u16 datalength = pull_u16(skb, 1);
	char *data = cor_pull_skb(skb, datalength);

	BUG_ON(data == 0);

	conn_rcv_buildskb(data, datalength, conn_id, seqno);
}
/*
 * Handle KP_CONNECT: allocate a new connection pair, initialize the
 * outgoing direction towards the requesting neighbor, record the
 * peer's conn_id and answer with CONNECT_SUCCESS. If no control
 * message can be allocated nothing is sent; the peer will retry. If
 * the connection cannot be set up, the peer's conn_id is reset.
 *
 * NOTE(review): on the err path the conn allocated by alloc_conn()
 * does not appear to be released here -- confirm whether
 * alloc_conn()/conn_init_out() ownership rules cover this or whether
 * this leaks.
 */
static void parse_connect(struct neighbor *nb, struct sk_buff *skb)
{
	struct conn *rconn;
	__u32 conn_id = pull_u32(skb, 1);
	struct control_msg_out *cm = alloc_control_msg(nb, ACM_PRIORITY_HIGH);

	if (unlikely(cm == 0))
		return;

	rconn = alloc_conn(GFP_KERNEL);

	if (unlikely(rconn == 0))
		goto err;

	if (unlikely(conn_init_out(rconn->reversedir, nb)))
		goto err;

	rconn->reversedir->target.out.conn_id = conn_id;

	send_connect_success(cm, rconn->reversedir->target.out.conn_id,
			rconn->source.in.conn_id);

	if (0) {
err:
		send_reset_conn(cm, conn_id);
	}
}
312 #warning todo set window on connect/connect_success
313 static void kernel_packet2(struct neighbor *nb, struct sk_buff *skb,
314 __u32 seqno1)
316 int ack = 0;
318 while (1) {
319 struct conn *conn;
320 __u32 seqno2;
322 __u32 conn_id;
324 __u32 cookie;
325 __u32 respdelay;
327 __u32 max_cmsg_dly;
329 __u8 *codeptr = cor_pull_skb(skb, 1);
330 __u8 code;
332 if (codeptr == 0)
333 break;
335 code = *codeptr;
337 switch (code) {
338 case KP_PADDING:
339 break;
340 case KP_PING:
341 cookie = pull_u32(skb, 0);
342 send_pong(nb, cookie);
343 break;
344 case KP_PONG:
345 cookie = pull_u32(skb, 0);
346 respdelay = pull_u32(skb, 1);
347 ping_resp(nb, cookie, respdelay);
348 ack = 1;
349 break;
350 case KP_ACK:
351 seqno2 = pull_u32(skb, 1);
352 kern_ack_rcvd(nb, seqno2);
353 break;
354 case KP_ACK_CONN:
355 conn_cmd(nb, skb, seqno1, code, parse_ack_conn,
356 discard_ack_conn);
357 ack = 1;
358 break;
359 case KP_ACK_CONN_OOO:
360 conn_cmd(nb, skb, seqno1, code, parse_ack_conn_ooo,
361 discard_ack_conn_ooo);
362 ack = 1;
363 break;
364 case KP_CONNECT:
365 parse_connect(nb, skb);
366 ack = 1;
367 break;
368 case KP_CONNECT_SUCCESS:
369 conn_cmd(nb, skb, seqno1, code, parse_conn_success,
370 discard_conn_success);
371 ack = 1;
372 break;
373 case KP_CONN_DATA:
374 parse_conndata(nb, skb);
375 break;
376 case KP_PING_CONN:
377 conn_id = pull_u32(skb, 1);
378 conn_cmd(nb, skb, seqno1, code, parse_ping_conn, 0);
379 ack = 1;
380 break;
381 case KP_RESET_CONN:
382 conn_cmd(nb, skb, seqno1, code, parse_reset, 0);
383 ack = 1;
384 break;
385 case KP_CONNID_UNKNOWN:
386 conn_id = pull_u32(skb, 1);
387 conn = get_conn_reverse(nb, conn_id);
388 if (conn != 0) {
389 BUG_ON(conn->reversedir->sourcetype !=
390 SOURCE_IN);
391 pong_rcvd(conn->reversedir);
392 atomic_cmpxchg(&(conn->isreset), 0, 1);
393 reset_conn(conn);
394 conn = 0;
396 ack = 1;
397 break;
398 case KP_PING_ALL_CONNS:
399 ping_all_conns(nb);
400 ack = 1;
401 break;
402 case KP_SET_MAX_CMSG_DELAY:
403 max_cmsg_dly = pull_u32(skb, 1);
404 if (((__u64) max_cmsg_dly) * 1000 > ((__u64)
405 (max_cmsg_dly * 1000)))
406 max_cmsg_dly = 400000000;
407 else
408 max_cmsg_dly *= 100;
409 atomic_set(&(nb->max_remote_cmsg_delay), max_cmsg_dly);
410 ack = 1;
411 break;
412 default:
413 BUG();
417 if (ack)
418 send_ack(nb, seqno1);
421 void kernel_packet(struct neighbor *nb, struct sk_buff *skb, __u32 seqno)
423 struct sk_buff *skb2 = skb_clone(skb, __GFP_DMA | GFP_KERNEL);
425 while (1) {
426 __u8 *codeptr = cor_pull_skb(skb2, 1);
427 __u8 code;
429 char *lengthptr;
430 __u32 length;
432 if (codeptr == 0)
433 break;
434 code = *codeptr;
436 switch (code) {
437 case KP_PADDING:
438 break;
439 case KP_PING:
440 if (cor_pull_skb(skb2, 4) == 0)
441 goto discard;
442 break;
443 case KP_PONG:
444 if (cor_pull_skb(skb2, 8) == 0)
445 goto discard;
446 break;
447 case KP_ACK:
448 if (cor_pull_skb(skb2, 4) == 0)
449 goto discard;
450 break;
451 case KP_ACK_CONN:
452 if (cor_pull_skb(skb2, 9) == 0)
453 goto discard;
454 break;
455 case KP_ACK_CONN_OOO:
456 if (cor_pull_skb(skb2, 17) == 0)
457 goto discard;
458 break;
459 case KP_CONNECT:
460 if (cor_pull_skb(skb2, 4) == 0)
461 goto discard;
462 break;
463 case KP_CONNECT_SUCCESS:
464 if (cor_pull_skb(skb2, 8) == 0)
465 goto discard;
466 break;
467 case KP_CONN_DATA:
468 if (cor_pull_skb(skb2, 8) == 0)
469 goto discard;
470 lengthptr = cor_pull_skb(skb2, 2);
471 if (lengthptr == 0)
472 goto discard;
473 length = ntohs(*((__u16 *)lengthptr));
474 if (cor_pull_skb(skb2, length) == 0)
475 goto discard;
476 break;
477 case KP_PING_CONN:
478 case KP_RESET_CONN:
479 case KP_CONNID_UNKNOWN:
480 if (cor_pull_skb(skb2, 4) == 0)
481 goto discard;
482 break;
483 case KP_PING_ALL_CONNS:
484 break;
485 case KP_SET_MAX_CMSG_DELAY:
486 if (cor_pull_skb(skb2, 4) == 0)
487 goto discard;
488 break;
489 default:
490 goto discard;
493 kfree_skb(skb2);
494 kernel_packet2(nb, skb, seqno);
495 kfree_skb(skb);
496 return;
497 discard:
498 kfree_skb(skb2);
499 kfree_skb(skb);