2 * Connection oriented routing
3 * Copyright (C) 2007-2008 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 #include <linux/mutex.h>
/* Slab cache for struct data_buf_item allocations; created in forward_init(). */
25 struct kmem_cache
*data_buf_item_slab
;
/* Page size in bytes, derived from the kernel's PAGE_SHIFT. */
27 #define PAGESIZE (1 << PAGE_SHIFT)
/*
 * databuf_init - zero a data_buf and initialize its (empty) item list
 * @data: buffer to initialize
 */
29 void databuf_init(struct data_buf
*data
)
31 memset(data
, 0, sizeof(struct data_buf
))
32 INIT_LIST_HEAD(&(data
->items
));
/*
 * databuf_item_free - release one buffer item
 * @item: item to free
 *
 * Frees the payload (kfree of the kmalloc'd buffer for TYPE_BUF,
 * kfree_skb for TYPE_SKB), unlinks the item from its list and
 * returns the item itself to the slab cache.
 */
35 static void databuf_item_free(struct data_buf_item
*item
)
37 if (item
->type
== TYPE_BUF
) {
38 kfree(item
->data
.buf
.buf
);
39 } else if (item
->type
== TYPE_SKB
) {
40 kfree_skb(item
->data
.skb
);
45 list_del(&(item
->buf_list
));
47 kmem_cache_free(data_buf_item_slab
, item
);
/*
 * databuf_free - free every item queued on a data_buf
 * @data: buffer whose items are released
 *
 * Repeatedly frees the head of the list until it is empty.
 */
50 void databuf_free(struct data_buf
*data
)
52 while (!list_empty(&(data
->items
))) {
53 struct data_buf_item
*item
= container_of(data
->items
.next
,
54 struct data_buf_item
, buf_list
);
55 databuf_item_free(item
);
/*
 * databuf_nextreadchunk - advance the read cursor to the next chunk
 * @data: buffer whose cursor is advanced
 *
 * If nothing has been read yet (lastread == 0) the cursor is set to the
 * first item; otherwise it moves to the next item unless lastread is
 * already the tail of the list. In all cases the intra-chunk read
 * offset is reset to 0.
 */
59 static void databuf_nextreadchunk(struct data_buf
*data
)
61 if (data
->lastread
== 0) {
/* a fresh cursor implies nothing was consumed from any chunk yet */
62 BUG_ON(data
->last_read_offset
!= 0);
63 BUG_ON(list_empty(&(data
->items
)));
64 data
->lastread
= container_of(data
->items
.next
,
65 struct data_buf_item
, buf_list
);
/* only advance if lastread is not already the last item */
66 } else if (&(data
->lastread
->buf_list
) != data
->items
.prev
) {
67 data
->lastread
= container_of(data
->lastread
->buf_list
.next
,
68 struct data_buf_item
, buf_list
);
70 data
->last_read_offset
= 0;
/*
 * _databuf_pull - copy up to len bytes from the buffer's read cursor to dst
 * @data:    buffer to read from
 * @dst:     destination (kernel pointer, or __user pointer if userbuf != 0)
 * @len:     maximum number of bytes to copy
 * @userbuf: nonzero selects copy_to_user(), zero selects memcpy()
 *
 * Reads from data->lastread at data->last_read_offset, accounting the
 * copied bytes against read_remaining and advancing to the next chunk
 * when the current one is exhausted.
 * NOTE(review): several local declarations and the surrounding loop /
 * return paths are not visible in this chunk of the file.
 */
74 int _databuf_pull(struct data_buf
*data
, char *dst
, int len
, int userbuf
)
78 BUG_ON(data
->read_remaining
< len
);
/* lazily position the read cursor on first use */
80 if (data
->lastread
== 0)
81 databuf_nextreadchunk(data
);
90 char *srcbufcpystart
= 0;
93 BUG_ON(data
->lastread
== 0);
/* locate the payload of the current chunk depending on its type */
95 if (data
->lastread
->type
== TYPE_BUF
) {
96 srcbuf
= data
->lastread
->data
.buf
.buf
;
97 srcbuflen
= data
->lastread
->data
.buf
.datalen
;
98 } else if (data
->lastread
->type
== TYPE_SKB
) {
99 srcbuf
= data
->lastread
->data
.skb
->data
;
100 srcbuflen
= data
->lastread
->data
.skb
->len
;
/* copy window: from the current offset to the end of the chunk */
105 srcbufcpystart
= srcbuf
+ data
->last_read_offset
;
106 srcbufcpylen
= srcbuflen
- data
->last_read_offset
;
108 if (cpy
> srcbufcpylen
)
112 int notcopied
= copy_to_user(dst
, srcbufcpystart
, cpy
);
117 memcpy(dst
, srcbufcpystart
, cpy
);
124 data
->read_remaining
-= cpy
;
125 data
->last_read_offset
+= cpy
;
/* chunk fully consumed -> move the cursor forward */
127 if (cpy
== srcbufcpylen
)
128 databuf_nextreadchunk(data
);
/*
 * databuf_pull - kernel-space convenience wrapper around _databuf_pull()
 * @data: buffer to read from
 * @dst:  kernel destination buffer
 * @len:  number of bytes to copy
 */
140 void databuf_pull(struct data_buf
*data
, char *dst
, int len
)
142 _databuf_pull(data
, dst
, len
, 0);
/*
 * databuf_pulluser - copy buffered data into a userspace msghdr iovec array
 * @sconn: connection whose buffer is drained
 * @msg:   userspace message header describing the destination iovecs
 *
 * Iterates over msg->msg_iov, clamping each copy to the bytes remaining
 * in the buffer and delegating to _databuf_pull() with userbuf=1.
 * NOTE(review): loop-control variables (iovidx, iovread, rc) and the
 * exit paths are not visible in this chunk.
 */
145 size_t databuf_pulluser(struct conn
*sconn
, struct msghdr
*msg
)
151 while (iovidx
< msg
->msg_iovlen
) {
154 struct iovec
*iov
= msg
->msg_iov
+ iovidx
;
/* NOTE(review): this local "msg" shadows the msghdr parameter of the
 * same name for the rest of the loop body — legal C, but error-prone;
 * consider renaming it (e.g. "userbuf"). */
155 __user
char *msg
= iov
->iov_base
+ iovread
;
156 unsigned int len
= iov
->iov_len
- iovread
;
/* nothing buffered: only a real source may deliver more data later */
164 if (sconn
->buf
.read_remaining
== 0) {
165 if (sconn
->sourcetype
== SOURCE_NONE
)
/* never pull more than is actually buffered */
170 if (len
> sconn
->buf
.read_remaining
)
171 len
= sconn
->buf
.read_remaining
;
173 rc
= _databuf_pull(&(sconn
->buf
), msg
, len
, 1);
/*
 * databuf_pullold - re-copy already-read (but not yet acked) data
 * @data:     buffer to read from
 * @startpos: absolute stream offset at which copying starts
 * @dst:      kernel destination buffer
 * @len:      number of bytes to copy
 *
 * First walks the item list from the oldest retained chunk
 * (data->first_offset) to locate the chunk containing startpos, then
 * copies from that chunk onward.
 * NOTE(review): several locals (srcbuf, srcbuflen, cpy) and the loop /
 * termination code are not visible in this chunk of the file.
 */
191 void databuf_pullold(struct data_buf
*data
, __u32 startpos
, char *dst
, int len
)
193 __u32 pos
= data
->first_offset
;
194 struct data_buf_item
*dbi
= container_of(data
->items
.next
,
195 struct data_buf_item
, buf_list
);
199 BUG_ON(&(dbi
->buf_list
) == &(data
->items
));
/* BUGFIX: size up the chunk being walked (dbi), not data->lastread —
 * the original checked lastread's type, a copy-paste from _databuf_pull */
201 if (dbi->type == TYPE_BUF
) {
202 srcbuflen
= dbi
->data
.buf
.datalen
;
203 } else if (dbi->type == TYPE_SKB
) {
204 srcbuflen
= dbi
->data
.skb
->len
;
/* NOTE(review): comparing against len looks suspicious — should this
 * not compare against startpos (stop at the chunk containing it)?
 * Left unchanged pending confirmation. */
209 if (pos
+ srcbuflen
> len
)
212 dbi
= container_of(dbi
->buf_list
.next
, struct data_buf_item
,
222 char *srcbufcpystart
= 0;
223 int srcbufcpylen
= 0;
225 BUG_ON(&(dbi
->buf_list
) == &(data
->items
));
/* BUGFIX: source the copy from the current chunk (dbi), not lastread */
227 if (dbi->type == TYPE_BUF
) {
228 srcbuf
= dbi->data
.buf
.buf
;
229 srcbuflen
= dbi->data
.buf
.datalen
;
230 } else if (dbi->type == TYPE_SKB
) {
231 srcbuf
= dbi->data
.skb
->data
;
232 srcbuflen
= dbi->data
.skb
->len
;
/* copy window within this chunk, relative to the absolute position */
237 srcbufcpystart
= srcbuf
+ (startpos
- pos
);
238 srcbufcpylen
= srcbuflen
- (startpos
- pos
);
239 if (cpy
> srcbufcpylen
)
242 memcpy(dst
, srcbufcpystart
, cpy
);
249 dbi
= container_of(dbi
->buf_list
.next
, struct data_buf_item
,
254 /* ack up to *not* including pos */
/*
 * databuf_ack - drop fully acknowledged items from the head of the buffer
 * @buf: buffer to trim
 * @pos: absolute stream offset; items entirely below pos are freed
 *
 * Stops at the current read cursor (lastread) so unread data is never
 * discarded. first_offset advances past each freed item.
 */
255 void databuf_ack(struct data_buf
*buf
, __u32 pos
)
257 while (!list_empty(&(buf
->items
))) {
258 struct data_buf_item
*firstitem
= container_of(buf
->items
.next
,
259 struct data_buf_item
, buf_list
);
/* never free the chunk the read cursor still points at */
262 if (firstitem
== buf
->lastread
)
265 if (firstitem
->type
== TYPE_BUF
) {
266 firstlen
= firstitem
->data
.buf
.datalen
;
267 } else if (firstitem
->type
== TYPE_SKB
) {
268 firstlen
= firstitem
->data
.skb
->len
;
/* signed-difference compare so __u32 offset wraparound is handled */
273 if (((__s32
)(buf
->first_offset
+ firstlen
- pos
)) > 0)
276 buf
->first_offset
+= firstlen
;
278 databuf_item_free(firstitem
);
/*
 * databuf_ackread - free every item that has been fully read
 * @buf: buffer to trim
 *
 * Walks from the head of the list up to (not including) the current
 * read cursor (lastread), advancing first_offset past each freed item.
 * Does nothing while no read cursor exists (lastread == 0).
 */
282 void databuf_ackread(struct data_buf
*buf
)
284 while (!list_empty(&(buf
->items
)) && buf
->lastread
!= 0) {
285 struct data_buf_item
*firstitem
= container_of(buf
->items
.next
,
286 struct data_buf_item
, buf_list
);
/* the chunk under the cursor may still be partially unread — stop */
288 if (firstitem
== buf
->lastread
)
291 if (firstitem
->type
== TYPE_BUF
) {
292 buf
->first_offset
+= firstitem
->data
.buf
.datalen
;
293 } else if (firstitem
->type
== TYPE_SKB
) {
294 buf
->first_offset
+= firstitem
->data
.skb
->len
;
299 databuf_item_free(firstitem
);
/*
 * databuf_maypush - how many more bytes may be pushed into the buffer
 * @buf: buffer to query
 *
 * Returns the remaining capacity; zero or negative means the buffer is
 * full. 16384 is a hard-coded per-buffer limit — consider naming it.
 */
303 int databuf_maypush(struct data_buf
*buf
)
305 return 16384 - buf
->read_remaining
;
/*
 * local_delivery - wake readers blocked on the connection's socket
 * @rconn: connection whose target-socket wait queue is woken
 */
308 static void local_delivery(struct conn
*rconn
)
310 wake_up_interruptible(&(rconn
->target
.sock
.wait
));
/*
 * flush_buf - push buffered data toward the connection's target
 * @rconn: connection to flush
 *
 * Dispatches on targettype; for local socket targets it wakes readers
 * via local_delivery().
 * NOTE(review): the re-check of targettype inside the UNCONNECTED case
 * suggests code not visible here may change the target in between —
 * confirm against the full file.
 */
313 void flush_buf(struct conn
*rconn
)
315 switch (rconn
->targettype
) {
316 case TARGET_UNCONNECTED
:
318 if (rconn
->targettype
!= TARGET_UNCONNECTED
)
322 local_delivery(rconn
);
/*
 * _receive_buf - append len bytes from buf to rconn's data_buf
 * @rconn:   connection receiving the data
 * @buf:     source (kernel pointer, or __user pointer if userbuf != 0)
 * @len:     number of bytes to append
 * @userbuf: nonzero selects copy_from_user(), zero selects memcpy()
 *
 * Appends to the last TYPE_BUF item while it has room; otherwise
 * allocates a fresh PAGESIZE-sized chunk. read_remaining is increased
 * by the total number of bytes actually copied.
 * NOTE(review): several locals (cpy, totalcpy) and loop / error paths
 * are not visible in this chunk of the file.
 */
332 static int _receive_buf(struct conn
*rconn
, char *buf
, int len
, int userbuf
)
334 struct data_buf_item
*item
= 0;
338 BUG_ON(databuf_maypush(&(rconn
->buf
)) < len
);
/* if the list is non-empty, try to reuse the last chunk */
340 if (list_empty(&(rconn
->buf
.items
)) == 0) {
341 struct list_head
*last
= rconn
->buf
.items
.prev
;
342 item
= container_of(last
, struct data_buf_item
, buf_list
);
/* not a byte buffer, or already full -> need a new chunk */
344 if (item
->type
!= TYPE_BUF
|| rconn
->buf
.last_buflen
<=
345 item
->data
.buf
.datalen
)
353 __u32 buflen
= PAGESIZE
;
356 item
= kmem_cache_alloc(data_buf_item_slab
, GFP_KERNEL
);
/* BUGFIX: was sizeof(item), which only zeroes pointer-size bytes;
 * zero the whole struct data_buf_item */
361 memset(item, 0, sizeof(*item));
362 item
->type
= TYPE_BUF
;
363 item
->data
.buf
.buf
= kmalloc(buflen
, GFP_KERNEL
);
366 if (item
->data
.buf
.buf
== 0) {
367 kmem_cache_free(data_buf_item_slab
, item
);
371 item
->data
.buf
.datalen
= 0;
372 list_add_tail(&(item
->buf_list
), &(rconn
->buf
.items
));
373 rconn
->buf
.last_buflen
= buflen
;
376 BUG_ON(item
->type
!= TYPE_BUF
);
377 BUG_ON(rconn
->buf
.last_buflen
<= item
->data
.buf
.datalen
);
/* clamp the copy to the space left in the current chunk */
379 if (rconn
->buf
.last_buflen
- item
->data
.buf
.datalen
< cpy
)
380 cpy
= (rconn
->buf
.last_buflen
- item
->data
.buf
.datalen
);
383 int notcopied
= copy_from_user(item
->data
.buf
.buf
+
384 item
->data
.buf
.datalen
, buf
, cpy
);
389 memcpy(item
->data
.buf
.buf
+ item
->data
.buf
.datalen
,
397 item
->data
.buf
.datalen
+= cpy
;
/* account the newly buffered bytes as readable */
407 rconn
->buf
.read_remaining
+= totalcpy
;
/*
 * receive_userbuf - feed userspace data from a msghdr into the buffer
 * @rconn: connection receiving the data
 * @msg:   userspace message header describing the source iovecs
 *
 * Bails out early when the buffer has no push capacity; otherwise walks
 * msg->msg_iov and delegates each piece to _receive_buf() with userbuf=1,
 * re-checking the push limit every iteration.
 * NOTE(review): loop-control variables (iovidx, iovread, pushlimit, rc)
 * and the exit paths are not visible in this chunk.
 */
412 int receive_userbuf(struct conn
*rconn
, struct msghdr
*msg
)
418 if (databuf_maypush(&(rconn
->buf
)) <= 0)
421 while (iovidx
< msg
->msg_iovlen
) {
422 struct iovec
*iov
= msg
->msg_iov
+ iovidx
;
423 __user
char *userbuf
= iov
->iov_base
+ iovread
;
424 int len
= iov
->iov_len
- iovread
;
434 pushlimit
= databuf_maypush(&(rconn
->buf
));
/* buffer full: behavior depends on whether the target is connected */
436 if (pushlimit
<= 0) {
437 if (rconn
->targettype
== TARGET_UNCONNECTED
)
445 rc
= _receive_buf(rconn
, userbuf
, len
, 1);
/*
 * receive_buf - kernel-space convenience wrapper around _receive_buf()
 * @rconn: connection receiving the data
 * @buf:   kernel source buffer
 * @len:   number of bytes to append
 */
464 void receive_buf(struct conn
*rconn
, char *buf
, int len
)
466 BUG_ON(databuf_maypush(&(rconn
->buf
)) < len
);
467 _receive_buf(rconn
, buf
, len
, 0);
/*
 * receive_skb - queue an sk_buff on the connection's buffer (zero-copy)
 * @rconn: connection receiving the data
 * @skb:   socket buffer whose ownership passes to the data_buf
 *
 * Wraps the skb in a TYPE_SKB item, appends it to the item list and
 * accounts skb->len as readable. last_buflen is reset to 0 so the next
 * byte push allocates a fresh TYPE_BUF chunk.
 * NOTE(review): databuf_maypush() returns int while skb->len is
 * unsigned — if maypush went negative this comparison would promote to
 * unsigned and pass; verify the intended overflow behavior.
 */
471 int receive_skb(struct conn
*rconn
, struct sk_buff
*skb
)
473 struct data_buf_item
*item
;
475 if (databuf_maypush(&(rconn
->buf
)) < skb
->len
)
478 item
= kmem_cache_alloc(data_buf_item_slab
, GFP_KERNEL
);
483 item
->data
.skb
= skb
;
484 item
->type
= TYPE_SKB
;
485 list_add_tail(&(item
->buf_list
), &(rconn
->buf
.items
));
486 rconn
->buf
.read_remaining
+= skb
->len
;
487 rconn
->buf
.last_buflen
= 0;
/*
 * wake_sender - wake whatever feeds data into this connection
 * @rconn: connection whose source is woken
 *
 * Dispatches on sourcetype: socket sources get their wait queue woken;
 * the out-of-order queue is drained for the case shown here.
 * NOTE(review): the other switch cases are not visible in this chunk.
 */
494 void wake_sender(struct conn
*rconn
)
496 switch (rconn
->sourcetype
) {
501 wake_up_interruptible(&(rconn
->source
.sock
.wait
));
504 drain_ooo_queue(rconn
);
/*
 * forward_init - module init hook: create the data_buf_item slab cache
 *
 * Uses the legacy 5-argument kmem_cache_create() (align=8, flags=0,
 * no constructor).
 * NOTE(review): no NULL check on the returned cache is visible here —
 * confirm the caller handles allocation failure.
 */
511 void forward_init(void)
513 data_buf_item_slab
= kmem_cache_create("cor_data_buf_item",
514 sizeof(struct data_buf_item
), 8, 0, 0);