/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
21 #include <asm/byteorder.h>
/* not used, avoid compiler warning
 * static __u64 pull_u64(struct sk_buff *skb, int convbo)
 * {
 *	char *ptr = cor_pull_skb(skb, 8);
 *	__u64 ret = 0;
 *
 *	((char *)&ret)[0] = ptr[0];
 *	((char *)&ret)[1] = ptr[1];
 *	((char *)&ret)[2] = ptr[2];
 *	((char *)&ret)[3] = ptr[3];
 *	((char *)&ret)[4] = ptr[4];
 *	((char *)&ret)[5] = ptr[5];
 *	((char *)&ret)[6] = ptr[6];
 *	((char *)&ret)[7] = ptr[7];
 *
 *	if (convbo)
 *		return be64_to_cpu(ret);
 *	return ret;
 * }
 */
48 static __u32
pull_u32(struct sk_buff
*skb
, int convbo
)
50 char *ptr
= cor_pull_skb(skb
, 4);
56 ((char *)&ret
)[0] = ptr
[0];
57 ((char *)&ret
)[1] = ptr
[1];
58 ((char *)&ret
)[2] = ptr
[2];
59 ((char *)&ret
)[3] = ptr
[3];
62 return be32_to_cpu(ret
);
66 static __u16
pull_u16(struct sk_buff
*skb
, int convbo
)
68 char *ptr
= cor_pull_skb(skb
, 2);
74 ((char *)&ret
)[0] = ptr
[0];
75 ((char *)&ret
)[1] = ptr
[1];
78 return be16_to_cpu(ret
);
82 static __u8
pull_u8(struct sk_buff
*skb
)
84 char *ptr
= cor_pull_skb(skb
, 1);
89 static void pong_rcvd(struct conn
*rconn
)
91 struct neighbor
*nb
= rconn
->source
.in
.nb
;
92 if (atomic_read(&(rconn
->source
.in
.pong_awaiting
)) != 0) {
93 mutex_lock(&(nb
->conn_list_lock
));
94 if (atomic_read(&(rconn
->source
.in
.pong_awaiting
)) == 0)
97 atomic_set(&(rconn
->source
.in
.pong_awaiting
), 0);
98 nb
->pong_conns_expected
--;
100 mutex_unlock(&(nb
->conn_list_lock
));
/*
 * ping_all_conns (fragment) - apparently starts a ping round over this
 * neighbor's send-conn list: records the first conn as next_ping_conn,
 * sets ping_conns_remaining from num_send_conns, and takes a reference
 * on that conn, all under conn_list_lock.
 *
 * NOTE(review): the extraction is missing several original lines here
 * (function brace, the declaration of rconn, the empty-list early-exit
 * path, and the member-name tail of the container_of() call).  The
 * surviving tokens are kept byte-identical below with the gaps marked;
 * do not trust control flow until checked against the original tree.
 */
104 static void ping_all_conns(struct neighbor
*nb
)
/* walk/record send-conn state only while holding the list lock */
108 mutex_lock(&(nb
->conn_list_lock
));
/* empty list: counter must agree; presumably unlock+return follows */
110 if (list_empty(&(nb
->snd_conn_list
))) {
111 BUG_ON(nb
->num_send_conns
!= 0);
/* gap: original lines 112-114 missing (likely unlock + return) */
115 rconn
= container_of(nb
->snd_conn_list
.next
, struct conn
,
/* gap: original lines 116-117 missing (the list-member argument) */
118 BUG_ON(rconn
->targettype
!= TARGET_OUT
);
/* remember where this ping round starts and how many conns remain */
120 nb
->next_ping_conn
= rconn
;
121 nb
->ping_conns_remaining
= nb
->num_send_conns
;
/* presumably pins rconn while next_ping_conn points at it — verify */
122 kref_get(&(rconn
->ref
));
125 mutex_unlock(&(nb
->conn_list_lock
));
/*
 * discard_ack_conn - consume the payload of an ACK_CONN message
 * without acting on it (used when the target conn cannot be used).
 * @nb:  sending neighbor (unused here, kept for the common signature)
 * @skb: packet positioned at the message payload
 */
static void discard_ack_conn(struct neighbor *nb, struct sk_buff *skb)
{
	pull_u32(skb, 1); /* seqno */
	pull_u8(skb); /* window */
}
134 static void parse_ack_conn(struct neighbor
*nb
, struct sk_buff
*skb
,
135 __u32 kpacket_seqno
, struct conn
*rconn
)
137 __u32 seqno
= pull_u32(skb
, 1);
138 __u8 window
= pull_u8(skb
);
142 conn_ack_rcvd(kpacket_seqno
, rconn
->reversedir
, seqno
, window
, 0, 0);
/*
 * discard_ack_conn_ooo - consume the payload of an ACK_CONN_OOO
 * message without acting on it (used when the target conn cannot
 * be used).
 * @nb:  sending neighbor (unused here, kept for the common signature)
 * @skb: packet positioned at the message payload
 */
static void discard_ack_conn_ooo(struct neighbor *nb, struct sk_buff *skb)
{
	pull_u32(skb, 1); /* seqno */
	pull_u8(skb); /* window */
	pull_u32(skb, 1); /* seqno_ooo */
	pull_u32(skb, 1); /* length */
}
153 static void parse_ack_conn_ooo(struct neighbor
*nb
, struct sk_buff
*skb
,
154 __u32 kpacket_seqno
, struct conn
*rconn
)
156 __u32 seqno
= pull_u32(skb
, 1);
157 __u8 window
= pull_u8(skb
);
158 __u32 seqno_ooo
= pull_u32(skb
, 1);
159 __u32 length
= pull_u32(skb
, 1);
161 conn_ack_rcvd(kpacket_seqno
, rconn
->reversedir
, seqno
, window
,
165 static void discard_conn_success(struct neighbor
*nb
, struct sk_buff
*skb
)
167 __u32 conn_id
= pull_u32(skb
, 1);
168 struct control_msg_out
*cm
= alloc_control_msg(nb
, ACM_PRIORITY_HIGH
);
170 if (unlikely(cm
== 0))
171 send_ping_all_conns(nb
);
173 send_reset_conn(cm
, conn_id
);
/*
 * parse_conn_success (fragment) - handle CONNECT_SUCCESS for a known
 * conn: under sconn->rcv_lock, validate that the reverse conn really
 * targets this neighbor, record the peer-assigned conn_id (if not yet
 * set), and register the reverse connid mapping; on mismatch,
 * allocate a control message and reset the offending conn_id.
 *
 * NOTE(review): the extraction is missing many original lines here
 * (braces, goto/labels of the error path, the isreset handling tail).
 * The surviving tokens are kept byte-identical with gaps marked; do
 * not trust control flow until checked against the original tree.
 */
176 static void parse_conn_success(struct neighbor
*nb
, struct sk_buff
*skb
,
177 __u32 seqno
, struct conn
*rconn
)
/* sconn is the outgoing direction of the conn the ack refers to */
179 struct conn
*sconn
= rconn
->reversedir
;
181 __u32 conn_id
= pull_u32(skb
, 1);
185 mutex_lock(&(sconn
->rcv_lock
));
/* sanity: reverse conn must target this neighbor, and any previously
 * recorded conn_id must match the one just received */
187 if (unlikely(unlikely(sconn
->targettype
!= TARGET_OUT
) ||
188 unlikely(sconn
->target
.out
.nb
!= nb
) ||
189 unlikely(sconn
->target
.out
.conn_id
!= 0 &&
190 sconn
->target
.out
.conn_id
!= conn_id
))) {
/* gap: original lines 191-193 missing (mismatch branch body) */
/* first CONNECT_SUCCESS: adopt the peer-assigned conn_id */
194 if (likely(sconn
->target
.out
.conn_id
== 0)) {
195 sconn
->target
.out
.conn_id
= conn_id
;
/* gap: original line 196 missing */
/* conn already reset while the handshake was in flight — verify */
197 if (unlikely(atomic_read(&(sconn
->isreset
)) != 0))
/* gap: original lines 198-199 missing (body of the isreset branch) */
200 insert_reverse_connid(sconn
);
/* gap: original lines 201-205 missing (likely error-path label) */
206 struct control_msg_out
*cm
;
208 cm
= alloc_control_msg(nb
, ACM_PRIORITY_HIGH
);
/* no control msg available: fall back to pinging all conns */
209 if (unlikely(cm
== 0))
210 send_ping_all_conns(nb
);
/* gap: original line 211 missing (likely else) */
212 send_reset_conn(cm
, conn_id
);
214 mutex_unlock(&(sconn
->rcv_lock
));
217 static void parse_reset(struct neighbor
*nb
, struct sk_buff
*skb
, __u32 seqno
,
220 atomic_cmpxchg(&(rconn
->reversedir
->isreset
), 0, 1);
224 static void parse_ping_conn(struct neighbor
*nb
, struct sk_buff
*skb
,
225 __u32 seqno
, struct conn
*rconn
)
227 struct control_msg_out
*cm
= alloc_control_msg(nb
, ACM_PRIORITY_MED
);
229 if (unlikely(cm
== 0))
232 mutex_lock(&(rconn
->rcv_lock
));
233 #warning todo set window
234 send_ack_conn(cm
, rconn
->reversedir
->target
.out
.conn_id
,
235 rconn
->source
.in
.next_seqno
, enc_window(65536));
236 mutex_unlock(&(rconn
->rcv_lock
));
/*
 * conn_cmd (fragment) - common dispatcher for per-conn kernel-packet
 * commands: pulls the conn_id, looks the conn up, validates it, and
 * either calls @parsefunc on it or (for an unknown/foreign conn)
 * sends CONNID_UNKNOWN and consumes the payload via @readdiscardfunc.
 *
 * @parsefunc:       handler for a valid conn
 * @readdiscardfunc: payload-consuming fallback; may be 0 (e.g. for
 *                   messages with no further payload)
 *
 * NOTE(review): the extraction is missing the goto/label scaffolding
 * of the error path and several braces.  The surviving tokens are
 * kept byte-identical with gaps marked; do not trust control flow
 * until checked against the original tree.
 */
239 static void conn_cmd(struct neighbor
*nb
, struct sk_buff
*skb
, __u32 seqno
,
240 __u8 code
, void (*parsefunc
)(struct neighbor
*nb
,
241 struct sk_buff
*skb
, __u32 seqno
, struct conn
*rconn
),
242 void (*readdiscardfunc
)(struct neighbor
*nb
,
243 struct sk_buff
*skb
))
245 __u32 conn_id
= pull_u32(skb
, 1);
/* get_conn presumably takes a reference — matches kref_put below */
246 struct conn
*rconn
= get_conn(conn_id
);
/* unknown conn_id: gap at original lines 249-250 (likely goto err) */
248 if (unlikely(rconn
== 0))
251 BUG_ON(rconn
->sourcetype
!= SOURCE_IN
);
252 BUG_ON(rconn
->reversedir
== 0);
/* conn exists but belongs to a different neighbor: also error path */
254 if (unlikely(rconn
->source
.in
.nb
!= nb
))
/* valid conn: hand off to the per-message parser */
257 parsefunc(nb
, skb
, seqno
, rconn
);
/* gap: original line 258 missing (likely start of the error path) */
259 struct control_msg_out
*cm
;
261 cm
= alloc_control_msg(nb
, ACM_PRIORITY_MED
);
/* gap: original line 262 missing (likely a cm != 0 check) */
263 send_connid_unknown(cm
, conn_id
);
/* consume the payload so the rest of the packet stays parseable */
265 if (readdiscardfunc
!= 0)
266 readdiscardfunc(nb
, skb
);
/* drop the lookup reference on all paths */
268 if (likely(rconn
!= 0))
269 kref_put(&(rconn
->ref
), free_conn
);
272 static void parse_conndata(struct neighbor
*nb
, struct sk_buff
*skb
)
274 __u32 conn_id
= pull_u32(skb
, 1);
275 __u32 seqno
= pull_u32(skb
, 1);
276 __u16 datalength
= pull_u16(skb
, 1);
277 char *data
= cor_pull_skb(skb
, datalength
);
281 conn_rcv_buildskb(data
, datalength
, conn_id
, seqno
);
/*
 * parse_connect (fragment) - handle a CONNECT message: allocate a new
 * conn pair, initialise the outgoing (reverse) direction toward @nb,
 * record the peer's conn_id on it, and answer with CONNECT_SUCCESS
 * carrying our own conn_id; on failure, reset the peer's conn_id.
 *
 * NOTE(review): the extraction is missing the rconn declaration, the
 * early-return and error-path lines.  The surviving tokens are kept
 * byte-identical with gaps marked; do not trust control flow until
 * checked against the original tree.
 */
284 static void parse_connect(struct neighbor
*nb
, struct sk_buff
*skb
)
/* gap: original lines 285-286 missing (likely declaration of rconn) */
287 __u32 conn_id
= pull_u32(skb
, 1);
288 struct control_msg_out
*cm
= alloc_control_msg(nb
, ACM_PRIORITY_HIGH
);
/* no control msg: gap at 291-292 (likely a bare return) */
290 if (unlikely(cm
== 0))
293 rconn
= alloc_conn(GFP_KERNEL
);
/* allocation failure: gap at 296-297 (likely goto to reset path) */
295 if (unlikely(rconn
== 0))
/* init of the outgoing direction can fail too: gap at 299-300 */
298 if (unlikely(conn_init_out(rconn
->reversedir
, nb
)))
/* adopt the conn_id the peer chose for its outgoing direction */
301 rconn
->reversedir
->target
.out
.conn_id
= conn_id
;
/* tell the peer both connids: theirs (echoed) and ours */
303 send_connect_success(cm
, rconn
->reversedir
->target
.out
.conn_id
,
304 rconn
->source
.in
.conn_id
);
/* gap: original lines 305-307 missing (likely error label) */
308 send_reset_conn(cm
, conn_id
);
312 #warning todo set window on connect/connect_success
/*
 * kernel_packet2 (fragment) - second-pass parser: walks the (already
 * length-validated) kernel packet message by message, dispatching on
 * each one-byte message code, then acks the packet seqno at the end.
 *
 * NOTE(review): the extraction is missing the bulk of the switch
 * scaffolding (the switch statement itself, most case labels, breaks
 * and local declarations).  The surviving tokens are kept
 * byte-identical with gaps marked; do not trust control flow until
 * checked against the original tree.
 */
313 static void kernel_packet2(struct neighbor
*nb
, struct sk_buff
*skb
,
/* gap: signature tail + locals missing (seqno1, code, cookie, ...) */
/* one message code per iteration; 0 pull means end of packet */
329 __u8
*codeptr
= cor_pull_skb(skb
, 1);
/* KP_PING: echo the cookie back; 0 = no byte-order conversion,
 * the cookie is opaque and only echoed */
341 cookie
= pull_u32(skb
, 0);
342 send_pong(nb
, cookie
);
/* KP_PONG: cookie plus the peer's reported response delay */
345 cookie
= pull_u32(skb
, 0);
346 respdelay
= pull_u32(skb
, 1);
347 ping_resp(nb
, cookie
, respdelay
);
/* KP_ACK: ack for a kernel packet seqno */
351 seqno2
= pull_u32(skb
, 1);
352 kern_ack_rcvd(nb
, seqno2
);
/* KP_ACK_CONN: per-conn command, discard variant consumes payload */
355 conn_cmd(nb
, skb
, seqno1
, code
, parse_ack_conn
,
359 case KP_ACK_CONN_OOO
:
360 conn_cmd(nb
, skb
, seqno1
, code
, parse_ack_conn_ooo
,
361 discard_ack_conn_ooo
);
/* KP_CONNECT */
365 parse_connect(nb
, skb
);
368 case KP_CONNECT_SUCCESS
:
369 conn_cmd(nb
, skb
, seqno1
, code
, parse_conn_success
,
370 discard_conn_success
);
/* KP_CONN_DATA */
374 parse_conndata(nb
, skb
);
/* KP_PING_CONN: no discard variant (no further payload) */
377 conn_id
= pull_u32(skb
, 1);
378 conn_cmd(nb
, skb
, seqno1
, code
, parse_ping_conn
, 0);
/* KP_RESET_CONN: likewise no discard variant */
382 conn_cmd(nb
, skb
, seqno1
, code
, parse_reset
, 0);
385 case KP_CONNID_UNKNOWN
:
386 conn_id
= pull_u32(skb
, 1);
/* look the conn up by OUR outgoing conn_id as seen by the peer */
387 conn
= get_conn_reverse(nb
, conn_id
);
389 BUG_ON(conn
->reversedir
->sourcetype
!=
/* peer does not know the conn: treat as pong + mark it reset */
391 pong_rcvd(conn
->reversedir
);
392 atomic_cmpxchg(&(conn
->isreset
), 0, 1);
398 case KP_PING_ALL_CONNS
:
402 case KP_SET_MAX_CMSG_DELAY
:
403 max_cmsg_dly
= pull_u32(skb
, 1);
/* clamp: if max_cmsg_dly * 1000 would overflow 32 bits, cap it */
404 if (((__u64
) max_cmsg_dly
) * 1000 > ((__u64
)
405 (max_cmsg_dly
* 1000)))
406 max_cmsg_dly
= 400000000;
409 atomic_set(&(nb
->max_remote_cmsg_delay
), max_cmsg_dly
);
/* whole packet consumed: acknowledge its seqno to the neighbor */
418 send_ack(nb
, seqno1
);
/*
 * kernel_packet (fragment) - first-pass validator: clones the skb and
 * walks the clone, checking with cor_pull_skb() that every message's
 * fixed-size payload (and the variable CONN_DATA payload, via its
 * 16-bit length field) is fully present, before handing the ORIGINAL
 * skb to kernel_packet2() for actual parsing.  The per-code pull
 * sizes here must stay in sync with what kernel_packet2()'s handlers
 * consume.
 *
 * NOTE(review): the extraction is missing the switch scaffolding,
 * the failure/cleanup paths, and the function's tail (it runs past
 * the end of this chunk).  The surviving tokens are kept
 * byte-identical with gaps marked.
 */
421 void kernel_packet(struct neighbor
*nb
, struct sk_buff
*skb
, __u32 seqno
)
/* clone so validation pulls do not consume the real packet;
 * NOTE(review): __GFP_DMA here looks suspicious for a clone — verify */
423 struct sk_buff
*skb2
= skb_clone(skb
, __GFP_DMA
| GFP_KERNEL
);
/* one message code per iteration; presumably loops until exhausted */
426 __u8
*codeptr
= cor_pull_skb(skb2
, 1);
/* KP_PING: 4-byte cookie */
440 if (cor_pull_skb(skb2
, 4) == 0)
/* KP_PONG: cookie + respdelay = 8 bytes */
444 if (cor_pull_skb(skb2
, 8) == 0)
/* KP_ACK: 4-byte seqno */
448 if (cor_pull_skb(skb2
, 4) == 0)
/* KP_ACK_CONN: conn_id + seqno + window = 9 bytes */
452 if (cor_pull_skb(skb2
, 9) == 0)
455 case KP_ACK_CONN_OOO
:
/* conn_id + seqno + window + seqno_ooo + length = 17 bytes */
456 if (cor_pull_skb(skb2
, 17) == 0)
/* KP_CONNECT: 4-byte conn_id */
460 if (cor_pull_skb(skb2
, 4) == 0)
463 case KP_CONNECT_SUCCESS
:
/* two 4-byte conn_ids */
464 if (cor_pull_skb(skb2
, 8) == 0)
/* KP_CONN_DATA: conn_id + seqno = 8 bytes, then a 2-byte length */
468 if (cor_pull_skb(skb2
, 8) == 0)
470 lengthptr
= cor_pull_skb(skb2
, 2);
/* length is big-endian on the wire */
473 length
= ntohs(*((__u16
*)lengthptr
));
/* the variable payload itself must be fully present too */
474 if (cor_pull_skb(skb2
, length
) == 0)
479 case KP_CONNID_UNKNOWN
:
480 if (cor_pull_skb(skb2
, 4) == 0)
483 case KP_PING_ALL_CONNS
:
485 case KP_SET_MAX_CMSG_DELAY
:
486 if (cor_pull_skb(skb2
, 4) == 0)
/* validation passed: parse the original (unconsumed) skb for real */
494 kernel_packet2(nb
, skb
, seqno
);