/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/mutex.h>

#include "cor.h"

struct kmem_cache *data_buf_item_slab;

#define PAGESIZE (1 << PAGE_SHIFT)
void databuf_init(struct data_buf *data)
{
	memset(data, 0, sizeof(struct data_buf));
	INIT_LIST_HEAD(&(data->items));
}
static void databuf_item_free(struct data_buf_item *item)
{
	if (item->type == TYPE_BUF) {
		kfree(item->data.buf.buf);
	} else if (item->type == TYPE_SKB) {
		kfree_skb(item->data.skb);
	} else {
		BUG();
	}

	list_del(&(item->buf_list));

	kmem_cache_free(data_buf_item_slab, item);
}
void databuf_free(struct data_buf *data)
{
	while (!list_empty(&(data->items))) {
		struct data_buf_item *item = container_of(data->items.next,
				struct data_buf_item, buf_list);
		databuf_item_free(item);
	}
}
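
/*
 * Advance lastread to the next item in the buffer (or to the first item if
 * nothing has been read yet) and reset the read offset within that item.
 */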
static void databuf_nextreadchunk(struct data_buf *data)
{
	if (data->lastread == 0) {
		BUG_ON(data->last_read_offset != 0);
		BUG_ON(list_empty(&(data->items)));
		data->lastread = container_of(data->items.next,
				struct data_buf_item, buf_list);
	} else if (&(data->lastread->buf_list) != data->items.prev) {
		data->lastread = container_of(data->lastread->buf_list.next,
				struct data_buf_item, buf_list);
	}

	data->last_read_offset = 0;
}
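
/*
 * Copy up to len bytes from the current read position into dst, advancing
 * lastread/last_read_offset as chunks are consumed. If userbuf is nonzero,
 * dst is a userspace pointer and copy_to_user() is used. Returns the number
 * of bytes copied, or a negative error code if nothing could be copied.
 */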
int _databuf_pull(struct data_buf *data, char *dst, int len, int userbuf)
{
	int totalcpy = 0;

	BUG_ON(data->read_remaining < len);

	if (data->lastread == 0)
		databuf_nextreadchunk(data);

	while (len > 0) {
		int rc = 0;
		int cpy = len;

		char *srcbuf = 0;
		int srcbuflen = 0;

		char *srcbufcpystart = 0;
		int srcbufcpylen = 0;

		BUG_ON(data->lastread == 0);

		if (data->lastread->type == TYPE_BUF) {
			srcbuf = data->lastread->data.buf.buf;
			srcbuflen = data->lastread->data.buf.datalen;
		} else if (data->lastread->type == TYPE_SKB) {
			srcbuf = data->lastread->data.skb->data;
			srcbuflen = data->lastread->data.skb->len;
		} else {
			BUG();
		}

		srcbufcpystart = srcbuf + data->last_read_offset;
		srcbufcpylen = srcbuflen - data->last_read_offset;

		if (cpy > srcbufcpylen)
			cpy = srcbufcpylen;

		if (userbuf) {
			int notcopied = copy_to_user(dst, srcbufcpystart, cpy);
			cpy -= notcopied;
			if (notcopied > 0)
				rc = -EFAULT;
		} else {
			memcpy(dst, srcbufcpystart, cpy);
		}

		dst += cpy;
		len -= cpy;
		totalcpy += cpy;

		data->read_remaining -= cpy;
		data->last_read_offset += cpy;

		if (cpy == srcbufcpylen)
			databuf_nextreadchunk(data);

		if (rc < 0) {
			if (totalcpy == 0)
				totalcpy = rc;
			break;
		}
	}

	return totalcpy;
}
void databuf_pull(struct data_buf *data, char *dst, int len)
{
	_databuf_pull(data, dst, len, 0);
}
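
/*
 * Copy buffered data into the iovec of msg, presumably on the socket
 * recvmsg path: returns -EPIPE if the source is gone and -EAGAIN if no
 * data is currently buffered.
 */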
size_t databuf_pulluser(struct conn *sconn, struct msghdr *msg)
{
	size_t copied = 0;
	int iovidx = 0;
	int iovread = 0;

	while (iovidx < msg->msg_iovlen) {
		int rc;

		struct iovec *iov = msg->msg_iov + iovidx;
		__user char *userbuf = iov->iov_base + iovread;
		unsigned int len = iov->iov_len - iovread;

		if (len == 0) {
			iovidx++;
			iovread = 0;
			continue;
		}

		if (sconn->buf.read_remaining == 0) {
			if (sconn->sourcetype == SOURCE_NONE)
				rc = -EPIPE;
			else
				rc = -EAGAIN;
		} else {
			if (len > sconn->buf.read_remaining)
				len = sconn->buf.read_remaining;

			rc = _databuf_pull(&(sconn->buf), userbuf, len, 1);
		}

		BUG_ON(rc == 0);

		if (rc < 0) {
			if (copied == 0)
				copied = rc;
			break;
		}

		copied += rc;
		iovread += rc;
	}

	return copied;
}
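
/*
 * Copy len bytes starting at absolute stream position startpos into dst,
 * without touching the normal read position. Apparently used to rebuild
 * already-sent data for retransmission.
 */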
void databuf_pullold(struct data_buf *data, __u32 startpos, char *dst, int len)
{
	__u32 pos = data->first_offset;
	struct data_buf_item *dbi = container_of(data->items.next,
			struct data_buf_item, buf_list);

	/* skip forward to the item which contains startpos */
	while (1) {
		int srcbuflen;

		BUG_ON(&(dbi->buf_list) == &(data->items));

		if (dbi->type == TYPE_BUF) {
			srcbuflen = dbi->data.buf.datalen;
		} else if (dbi->type == TYPE_SKB) {
			srcbuflen = dbi->data.skb->len;
		} else {
			BUG();
		}

		if (pos + srcbuflen > startpos)
			break;
		pos += srcbuflen;
		dbi = container_of(dbi->buf_list.next, struct data_buf_item,
				buf_list);
	}

	while (len > 0) {
		int cpy = len;

		char *srcbuf = 0;
		int srcbuflen = 0;

		char *srcbufcpystart = 0;
		int srcbufcpylen = 0;

		BUG_ON(&(dbi->buf_list) == &(data->items));

		if (dbi->type == TYPE_BUF) {
			srcbuf = dbi->data.buf.buf;
			srcbuflen = dbi->data.buf.datalen;
		} else if (dbi->type == TYPE_SKB) {
			srcbuf = dbi->data.skb->data;
			srcbuflen = dbi->data.skb->len;
		} else {
			BUG();
		}

		srcbufcpystart = srcbuf + (startpos - pos);
		srcbufcpylen = srcbuflen - (startpos - pos);
		if (cpy > srcbufcpylen)
			cpy = srcbufcpylen;

		memcpy(dst, srcbufcpystart, cpy);

		dst += cpy;
		len -= cpy;
		startpos += cpy;

		pos += srcbuflen;
		dbi = container_of(dbi->buf_list.next, struct data_buf_item,
				buf_list);
	}
}
/* ack up to *not* including pos */
void databuf_ack(struct data_buf *buf, __u32 pos)
{
	while (!list_empty(&(buf->items))) {
		struct data_buf_item *firstitem = container_of(buf->items.next,
				struct data_buf_item, buf_list);
		int firstlen = 0;

		if (firstitem == buf->lastread)
			break;

		if (firstitem->type == TYPE_BUF) {
			firstlen = firstitem->data.buf.datalen;
		} else if (firstitem->type == TYPE_SKB) {
			firstlen = firstitem->data.skb->len;
		} else {
			BUG();
		}

		if (((__s32)(buf->first_offset + firstlen - pos)) > 0)
			break;

		buf->first_offset += firstlen;

		databuf_item_free(firstitem);
	}
}
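
/*
 * Drop every item that has been completely read (everything before
 * lastread) and advance first_offset past it.
 */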
void databuf_ackread(struct data_buf *buf)
{
	while (!list_empty(&(buf->items)) && buf->lastread != 0) {
		struct data_buf_item *firstitem = container_of(buf->items.next,
				struct data_buf_item, buf_list);

		if (firstitem == buf->lastread)
			break;

		if (firstitem->type == TYPE_BUF) {
			buf->first_offset += firstitem->data.buf.datalen;
		} else if (firstitem->type == TYPE_SKB) {
			buf->first_offset += firstitem->data.skb->len;
		} else {
			BUG();
		}

		databuf_item_free(firstitem);
	}
}
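
/*
 * Space the buffer will still accept; unread data is limited to 16384
 * bytes per connection.
 */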
int databuf_maypush(struct data_buf *buf)
{
	return 16384 - buf->read_remaining;
}
static void local_delivery(struct conn *rconn)
{
	wake_up_interruptible(&(rconn->target.sock.wait));
}
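
/*
 * Push buffered data towards the target of the connection: parse it while
 * the target is still unconnected, wake up a local socket reader for
 * TARGET_SOCK, or hand it to the send path for TARGET_OUT.
 */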
void flush_buf(struct conn *rconn)
{
	switch (rconn->targettype) {
	case TARGET_UNCONNECTED:
		parse(rconn);
		if (rconn->targettype != TARGET_UNCONNECTED)
			flush_buf(rconn);
		break;
	case TARGET_SOCK:
		local_delivery(rconn);
		break;
	case TARGET_OUT:
		flush_out(rconn);
		break;
	default:
		BUG();
	}
}
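
/*
 * Append len bytes to the buffer, reusing the last TYPE_BUF item if it
 * still has room and allocating new items (one page, capped at 32768
 * bytes) as needed. With userbuf set, buf is a userspace pointer and
 * copy_from_user() is used. Returns the number of bytes stored, or a
 * negative error code if nothing could be stored.
 */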
static int _receive_buf(struct conn *rconn, char *buf, int len, int userbuf)
{
	struct data_buf_item *item = 0;

	int totalcpy = 0;

	BUG_ON(databuf_maypush(&(rconn->buf)) < len);

	if (list_empty(&(rconn->buf.items)) == 0) {
		struct list_head *last = rconn->buf.items.prev;
		item = container_of(last, struct data_buf_item, buf_list);

		if (item->type != TYPE_BUF || rconn->buf.last_buflen <=
				item->data.buf.datalen)
			item = 0;
	}

	while (len > 0) {
		int rc = 0;
		int cpy = len;
		if (item == 0) {
			__u32 buflen = PAGESIZE;
			if (buflen > 32768)
				buflen = 32768;
			item = kmem_cache_alloc(data_buf_item_slab, GFP_KERNEL);
			if (item == 0) {
				rc = -ENOMEM;
				goto error;
			}

			memset(item, 0, sizeof(struct data_buf_item));
			item->type = TYPE_BUF;
			item->data.buf.buf = kmalloc(buflen, GFP_KERNEL);

			if (item->data.buf.buf == 0) {
				kmem_cache_free(data_buf_item_slab, item);
				rc = -ENOMEM;
				goto error;
			}
			item->data.buf.datalen = 0;
			list_add_tail(&(item->buf_list), &(rconn->buf.items));
			rconn->buf.last_buflen = buflen;
		}

		BUG_ON(item->type != TYPE_BUF);
		BUG_ON(rconn->buf.last_buflen <= item->data.buf.datalen);

		if (rconn->buf.last_buflen - item->data.buf.datalen < cpy)
			cpy = (rconn->buf.last_buflen - item->data.buf.datalen);

		if (userbuf) {
			int notcopied = copy_from_user(item->data.buf.buf +
					item->data.buf.datalen, buf, cpy);
			cpy -= notcopied;
			if (notcopied > 0)
				rc = -EFAULT;
		} else {
			memcpy(item->data.buf.buf + item->data.buf.datalen,
					buf, cpy);
		}

		buf += cpy;
		len -= cpy;
		totalcpy += cpy;

		item->data.buf.datalen += cpy;

error:
		if (rc < 0) {
			if (totalcpy == 0)
				return rc;
			break;
		}
	}

	rconn->buf.read_remaining += totalcpy;

	return totalcpy;
}
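
/*
 * Fill the buffer from the iovec of msg, presumably on the sendmsg path,
 * respecting the databuf_maypush() limit, and flush towards the target if
 * anything was accepted.
 */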
int receive_userbuf(struct conn *rconn, struct msghdr *msg)
{
	int copied = 0;
	int iovidx = 0;
	int iovread = 0;

	if (databuf_maypush(&(rconn->buf)) <= 0)
		return -EAGAIN;

	while (iovidx < msg->msg_iovlen) {
		struct iovec *iov = msg->msg_iov + iovidx;
		__user char *userbuf = iov->iov_base + iovread;
		int len = iov->iov_len - iovread;
		int rc;
		int pushlimit;

		if (len == 0) {
			iovidx++;
			iovread = 0;
			continue;
		}

		pushlimit = databuf_maypush(&(rconn->buf));

		if (pushlimit <= 0) {
			if (rconn->targettype == TARGET_UNCONNECTED)
				rc = -EPIPE;
			else
				rc = -EAGAIN;
		} else {
			if (pushlimit < len)
				len = pushlimit;

			rc = _receive_buf(rconn, userbuf, len, 1);
		}

		if (rc < 0) {
			if (copied == 0)
				copied = rc;
			break;
		}

		copied += rc;
		iovread += rc;
	}

	if (copied > 0)
		flush_buf(rconn);

	return copied;
}
void receive_buf(struct conn *rconn, char *buf, int len)
{
	BUG_ON(databuf_maypush(&(rconn->buf)) < len);
	_receive_buf(rconn, buf, len, 0);
	flush_buf(rconn);
}
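
/*
 * Queue a complete skb as a TYPE_SKB buffer item. Returns 0 on success and
 * 1 if the buffer is full or the item could not be allocated; in the
 * failure case the skb is not consumed.
 */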
int receive_skb(struct conn *rconn, struct sk_buff *skb)
{
	struct data_buf_item *item;

	if (databuf_maypush(&(rconn->buf)) < skb->len)
		return 1;

	item = kmem_cache_alloc(data_buf_item_slab, GFP_KERNEL);

	if (item == 0)
		return 1;

	item->data.skb = skb;
	item->type = TYPE_SKB;
	list_add_tail(&(item->buf_list), &(rconn->buf.items));
	rconn->buf.read_remaining += skb->len;
	rconn->buf.last_buflen = 0;

	flush_buf(rconn);

	return 0;
}
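
/*
 * Notify the source side that buffer space became available: wake a local
 * socket writer for SOURCE_SOCK, or drain the out-of-order queue for
 * SOURCE_IN.
 */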
void wake_sender(struct conn *rconn)
{
	switch (rconn->sourcetype) {
	case SOURCE_NONE:
		/* nothing */
		break;
	case SOURCE_SOCK:
		wake_up_interruptible(&(rconn->source.sock.wait));
		break;
	case SOURCE_IN:
		drain_ooo_queue(rconn);
		break;
	default:
		BUG();
	}
}
void forward_init(void)
{
	data_buf_item_slab = kmem_cache_create("cor_data_buf_item",
			sizeof(struct data_buf_item), 8, 0, 0);
}