fix for corrupted graphics when manipulating config files
[open-ps2-loader.git] / modules / network / SMSTCPIP / pbuf.c
blob07f460938e686146077fe2994140a010de4ae3f0
1 /**
2 * @file
3 * Packet buffer management
5 * Packets are built from the pbuf data structure. It supports dynamic
6 * memory allocation for packet contents or can reference externally
7 * managed packet contents both in RAM and ROM. Quick allocation for
8 * incoming packets is provided through pools with fixed sized pbufs.
10 * A packet may span over multiple pbufs, chained as a singly linked
11 * list. This is called a "pbuf chain".
13 * Multiple packets may be queued, also using this singly linked list.
14 * This is called a "packet queue". So, a packet queue consists of one
15 * or more pbuf chains, each of which consist of one or more pbufs.
16 * The differences between a pbuf chain and a packet queue are very
17 * subtle. Currently, queues are only supported in a limited section
18 * of lwIP, this is the etharp queueing code. Outside of this section
19 * no packet queues are supported as of yet.
21 * The last pbuf of a packet has a ->tot_len field that equals the
22 * ->len field. It can be found by traversing the list. If the last
23 * pbuf of a packet has a ->next field other than NULL, more packets
24 * are on the queue.
26 * Therefore, looping through a pbuf of a single packet, has an
27 * loop end condition (tot_len == p->len), NOT (next == NULL).
31 * Copyright (c) 2001-2003 Swedish Institute of Computer Science.
32 * All rights reserved.
34 * Redistribution and use in source and binary forms, with or without modification,
35 * are permitted provided that the following conditions are met:
37 * 1. Redistributions of source code must retain the above copyright notice,
38 * this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright notice,
40 * this list of conditions and the following disclaimer in the documentation
41 * and/or other materials provided with the distribution.
42 * 3. The name of the author may not be used to endorse or promote products
43 * derived from this software without specific prior written permission.
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
46 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
47 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
48 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
49 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
50 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
53 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
54 * OF SUCH DAMAGE.
56 * This file is part of the lwIP TCP/IP stack.
58 * Author: Adam Dunkels <adam@sics.se>
62 #include "lwip/opt.h"
64 #include "lwip/stats.h"
66 #include "lwip/def.h"
67 #include "lwip/mem.h"
68 #include "lwip/memp.h"
69 #include "lwip/pbuf.h"
71 #include "lwip/sys.h"
73 #include <sysclib.h>
74 #include <intrman.h>
76 #include "smsutils.h"
78 static u8_t pbuf_pool_memory[(PBUF_POOL_SIZE * MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE + sizeof(struct pbuf)))];
80 #if !SYS_LIGHTWEIGHT_PROT
81 static volatile u8_t pbuf_pool_free_lock, pbuf_pool_alloc_lock;
82 static sys_sem_t pbuf_pool_free_sem;
83 #endif
85 static struct pbuf *pbuf_pool = NULL;
87 /**
88 * Initializes the pbuf module.
90 * A large part of memory is allocated for holding the pool of pbufs.
91 * The size of the individual pbufs in the pool is given by the size
92 * parameter, and the number of pbufs in the pool by the num parameter.
94 * After the memory has been allocated, the pbufs are set up. The
95 * ->next pointer in each pbuf is set up to point to the next pbuf in
96 * the pool.
99 void
100 pbuf_init(void)
102 struct pbuf *p, *q = NULL;
103 u16_t i;
105 pbuf_pool = (struct pbuf *)&pbuf_pool_memory[0];
106 LWIP_ASSERT("pbuf_init: pool aligned", (long)pbuf_pool % MEM_ALIGNMENT == 0);
108 #if PBUF_STATS
109 lwip_stats.pbuf.avail = PBUF_POOL_SIZE;
110 #endif /* PBUF_STATS */
112 /* Set up ->next pointers to link the pbufs of the pool together */
113 p = pbuf_pool;
115 for(i = 0; i < PBUF_POOL_SIZE; ++i) {
116 p->next = (struct pbuf *)((u8_t *)p + PBUF_POOL_BUFSIZE + sizeof(struct pbuf));
117 p->len = p->tot_len = PBUF_POOL_BUFSIZE;
118 p->payload = MEM_ALIGN((void *)((u8_t *)p + sizeof(struct pbuf)));
119 p->flags = PBUF_FLAG_POOL;
120 q = p;
121 p = p->next;
124 /* The ->next pointer of last pbuf is NULL to indicate that there
125 are no more pbufs in the pool */
126 q->next = NULL;
128 #if !SYS_LIGHTWEIGHT_PROT
129 pbuf_pool_alloc_lock = 0;
130 pbuf_pool_free_lock = 0;
131 pbuf_pool_free_sem = sys_sem_new(1);
132 #endif
136 * @internal only called from pbuf_alloc()
138 static struct pbuf *
139 pbuf_pool_alloc(void)
141 struct pbuf *p = NULL;
143 SYS_ARCH_DECL_PROTECT(old_level);
144 SYS_ARCH_PROTECT(old_level);
146 #if !SYS_LIGHTWEIGHT_PROT
147 /* Next, check the actual pbuf pool, but if the pool is locked, we
148 pretend to be out of buffers and return NULL. */
149 if (pbuf_pool_free_lock) {
150 #if PBUF_STATS
151 ++lwip_stats.pbuf.alloc_locked;
152 #endif /* PBUF_STATS */
153 return NULL;
155 pbuf_pool_alloc_lock = 1;
156 if (!pbuf_pool_free_lock) {
157 #endif /* SYS_LIGHTWEIGHT_PROT */
158 p = pbuf_pool;
159 if (p) {
160 pbuf_pool = p->next;
162 #if !SYS_LIGHTWEIGHT_PROT
163 #if PBUF_STATS
164 } else {
165 ++lwip_stats.pbuf.alloc_locked;
166 #endif /* PBUF_STATS */
168 pbuf_pool_alloc_lock = 0;
169 #endif /* SYS_LIGHTWEIGHT_PROT */
171 #if PBUF_STATS
172 if (p != NULL) {
173 ++lwip_stats.pbuf.used;
174 if (lwip_stats.pbuf.used > lwip_stats.pbuf.max) {
175 lwip_stats.pbuf.max = lwip_stats.pbuf.used;
178 #endif /* PBUF_STATS */
180 SYS_ARCH_UNPROTECT(old_level);
181 return p;
186 * Allocates a pbuf.
188 * The actual memory allocated for the pbuf is determined by the
189 * layer at which the pbuf is allocated and the requested size
190 * (from the size parameter).
192 * @param flag this parameter decides how and where the pbuf
193 * should be allocated as follows:
195 * - PBUF_RAM: buffer memory for pbuf is allocated as one large
196 * chunk. This includes protocol headers as well.
197 * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
198 * protocol headers. Additional headers must be prepended
199 * by allocating another pbuf and chain in to the front of
200 * the ROM pbuf. It is assumed that the memory used is really
201 * similar to ROM in that it is immutable and will not be
202 * changed. Memory which is dynamic should generally not
203 * be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
204 * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
205 * protocol headers. It is assumed that the pbuf is only
206 * being used in a single thread. If the pbuf gets queued,
207 * then pbuf_take should be called to copy the buffer.
208 * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
209 * the pbuf pool that is allocated during pbuf_init().
211 * @return the allocated pbuf. If multiple pbufs where allocated, this
212 * is the first pbuf of a pbuf chain.
215 struct pbuf *
216 pbuf_alloc(pbuf_layer l, u16_t length, pbuf_flag flag)
218 struct pbuf *p, *q, *r;
219 u16_t offset;
220 s32_t rem_len; /* remaining length */
221 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 3, ("pbuf_alloc(length=%u)\n", length));
223 /* determine header offset */
224 offset = 0;
225 switch (l) {
226 case PBUF_TRANSPORT:
227 /* add room for transport (often TCP) layer header */
228 offset += PBUF_TRANSPORT_HLEN;
229 /* FALLTHROUGH */
230 case PBUF_IP:
231 /* add room for IP layer header */
232 offset += PBUF_IP_HLEN;
233 /* FALLTHROUGH */
234 case PBUF_LINK:
235 /* add room for link layer header */
236 offset += PBUF_LINK_HLEN;
237 break;
238 case PBUF_RAW:
239 break;
240 default:
241 LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0);
242 return NULL;
245 switch (flag) {
246 case PBUF_POOL:
247 /* allocate head of pbuf chain into p */
248 p = pbuf_pool_alloc();
249 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 3, ("pbuf_alloc: allocated pbuf %p\n", (void *)p));
250 if (p == NULL) {
251 #if PBUF_STATS
252 ++lwip_stats.pbuf.err;
253 #endif /* PBUF_STATS */
254 return NULL;
256 p->next = NULL;
258 /* make the payload pointer point 'offset' bytes into pbuf data memory */
259 p->payload = MEM_ALIGN((void *)((u8_t *)p + (sizeof(struct pbuf) + offset)));
260 LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned",
261 ((u32_t)p->payload % MEM_ALIGNMENT) == 0);
262 /* the total length of the pbuf chain is the requested size */
263 p->tot_len = length;
264 /* set the length of the first pbuf in the chain */
265 p->len = length > PBUF_POOL_BUFSIZE - offset? PBUF_POOL_BUFSIZE - offset: length;
266 /* set reference count (needed here in case we fail) */
267 p->ref = 1;
269 /* now allocate the tail of the pbuf chain */
271 /* remember first pbuf for linkage in next iteration */
272 r = p;
273 /* remaining length to be allocated */
274 rem_len = length - p->len;
275 /* any remaining pbufs to be allocated? */
276 while (rem_len > 0) {
277 q = pbuf_pool_alloc();
278 if (q == NULL) {
279 LWIP_DEBUGF(PBUF_DEBUG | 2, ("pbuf_alloc: Out of pbufs in pool.\n"));
280 #if PBUF_STATS
281 ++lwip_stats.pbuf.err;
282 #endif /* PBUF_STATS */
283 /* free chain so far allocated */
284 pbuf_free(p);
285 /* bail out unsuccesfully */
286 return NULL;
288 q->next = NULL;
289 /* make previous pbuf point to this pbuf */
290 r->next = q;
291 /* set total length of this pbuf and next in chain */
292 q->tot_len = rem_len;
293 /* this pbuf length is pool size, unless smaller sized tail */
294 q->len = rem_len > PBUF_POOL_BUFSIZE? PBUF_POOL_BUFSIZE: rem_len;
295 q->payload = (void *)((u8_t *)q + sizeof(struct pbuf));
296 LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
297 ((u32_t)q->payload % MEM_ALIGNMENT) == 0);
298 q->ref = 1;
299 /* calculate remaining length to be allocated */
300 rem_len -= q->len;
301 /* remember this pbuf for linkage in next iteration */
302 r = q;
304 /* end of chain */
305 /*r->next = NULL;*/
307 break;
308 case PBUF_RAM:
309 /* If pbuf is to be allocated in RAM, allocate memory for it. */
310 p = mem_malloc(MEM_ALIGN_SIZE(sizeof(struct pbuf) + offset + length));
311 if (p == NULL) {
312 return NULL;
314 /* Set up internal structure of the pbuf. */
316 //Boman666: The memory isn't allocated to allow payload to be aligned. If payload is aligned according to the commented out
317 //line the last two bytes in payload will be located outside the allocated memoryblock. Which will have the effect of
318 //screwing up the memory-allocation structures, causing a crash.
319 // p->payload = MEM_ALIGN((void *)((u8_t *)p + sizeof(struct pbuf) + offset));
320 p->payload = (void*)((u8_t *)p + sizeof(struct pbuf) + offset);
322 p->len = p->tot_len = length;
323 p->next = NULL;
324 p->flags = PBUF_FLAG_RAM;
326 LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
327 ((u32_t)p->payload % MEM_ALIGNMENT) == 0);
328 break;
329 /* pbuf references existing (static constant) ROM payload? */
330 case PBUF_ROM:
331 /* pbuf references existing (externally allocated) RAM payload? */
332 case PBUF_REF:
333 /* only allocate memory for the pbuf structure */
334 p = memp_malloc(MEMP_PBUF);
335 if (p == NULL) {
336 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 2, ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n", flag == PBUF_ROM?"ROM":"REF"));
337 return NULL;
339 /* caller must set this field properly, afterwards */
340 p->payload = NULL;
341 p->len = p->tot_len = length;
342 p->next = NULL;
343 p->flags = (flag == PBUF_ROM? PBUF_FLAG_ROM: PBUF_FLAG_REF);
344 break;
345 default:
346 LWIP_ASSERT("pbuf_alloc: erroneous flag", 0);
347 return NULL;
349 /* set reference count */
350 p->ref = 1;
351 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 3, ("pbuf_alloc(length=%u) == %p\n", length, (void *)p));
352 return p;
#if PBUF_STATS
#define DEC_PBUF_STATS do { --lwip_stats.pbuf.used; } while (0)
#else /* PBUF_STATS */
#define DEC_PBUF_STATS
#endif /* PBUF_STATS */

/* Push a pool pbuf back onto the free list.
 * Caller must already hold the appropriate protection. */
#define PBUF_POOL_FAST_FREE(p)  do {                                          \
                                  p->next = pbuf_pool;                        \
                                  pbuf_pool = p;                              \
                                  DEC_PBUF_STATS;                             \
                                } while (0)

#if SYS_LIGHTWEIGHT_PROT
/* Return a pool pbuf to the free list under SYS_ARCH protection. */
#define PBUF_POOL_FREE(p)  do {                                               \
                             SYS_ARCH_DECL_PROTECT(old_level);                \
                             SYS_ARCH_PROTECT(old_level);                     \
                             PBUF_POOL_FAST_FREE(p);                          \
                             SYS_ARCH_UNPROTECT(old_level);                   \
                           } while (0)
#else /* SYS_LIGHTWEIGHT_PROT */
/* Return a pool pbuf to the free list under semaphore protection. */
#define PBUF_POOL_FREE(p)  do {                                               \
                             sys_sem_wait(pbuf_pool_free_sem);                \
                             PBUF_POOL_FAST_FREE(p);                          \
                             sys_sem_signal(pbuf_pool_free_sem);              \
                           } while (0)
#endif /* SYS_LIGHTWEIGHT_PROT */
384 * Shrink a pbuf chain to a desired length.
386 * @param p pbuf to shrink.
387 * @param new_len desired new length of pbuf chain
389 * Depending on the desired length, the first few pbufs in a chain might
390 * be skipped and left unchanged. The new last pbuf in the chain will be
391 * resized, and any remaining pbufs will be freed.
393 * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
394 * @note May not be called on a packet queue.
396 * @bug Cannot grow the size of a pbuf (chain) (yet).
398 void
399 pbuf_realloc(struct pbuf *p, u16_t new_len)
401 struct pbuf *q;
402 u16_t rem_len; /* remaining length */
403 s16_t grow;
405 /* desired length larger than current length? */
406 if (new_len >= p->tot_len) {
407 /* enlarging not yet supported */
408 return;
411 /* the pbuf chain grows by (new_len - p->tot_len) bytes
412 * (which may be negative in case of shrinking) */
413 grow = new_len - p->tot_len;
415 /* first, step over any pbufs that should remain in the chain */
416 rem_len = new_len;
417 q = p;
418 /* should this pbuf be kept? */
419 while (rem_len > q->len) {
420 /* decrease remaining length by pbuf length */
421 rem_len -= q->len;
422 /* decrease total length indicator */
423 q->tot_len += grow;
424 /* proceed to next pbuf in chain */
425 q = q->next;
427 /* we have now reached the new last pbuf (in q) */
428 /* rem_len == desired length for pbuf q */
430 /* shrink allocated memory for PBUF_RAM */
431 /* (other types merely adjust their length fields */
432 if ((q->flags == PBUF_FLAG_RAM) && (rem_len != q->len)) {
433 /* reallocate and adjust the length of the pbuf that will be split */
434 mem_realloc(q, (u8_t *)q->payload - (u8_t *)q + rem_len);
436 /* adjust length fields for new last pbuf */
437 q->len = rem_len;
438 q->tot_len = q->len;
440 /* any remaining pbufs in chain? */
441 if (q->next != NULL) {
442 /* free remaining pbufs in chain */
443 pbuf_free(q->next);
445 /* q is last packet in chain */
446 q->next = NULL;
451 * Adjusts the payload pointer to hide or reveal headers in the payload.
453 * Adjusts the ->payload pointer so that space for a header
454 * (dis)appears in the pbuf payload.
456 * The ->payload, ->tot_len and ->len fields are adjusted.
458 * @param hdr_size Number of bytes to increment header size which
459 * increases the size of the pbuf. New space is on the front.
460 * (Using a negative value decreases the header size.)
462 * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
463 * the call will fail. A check is made that the increase in header size does
464 * not move the payload pointer in front of the start of the buffer.
465 * @return 1 on failure, 0 on success.
467 * @note May not be called on a packet queue.
469 u8_t
470 pbuf_header(struct pbuf *p, s16_t header_size)
472 void *payload;
474 /* remember current payload pointer */
475 payload = p->payload;
477 /* pbuf types containing payloads? */
478 if (p->flags == PBUF_FLAG_RAM || p->flags == PBUF_FLAG_POOL) {
479 /* set new payload pointer */
480 p->payload = (u8_t *)p->payload - header_size;
481 /* boundary check fails? */
482 if ((u8_t *)p->payload < (u8_t *)p + sizeof(struct pbuf)) {
483 LWIP_DEBUGF( PBUF_DEBUG | 2, ("pbuf_header: failed as %p < %p\n",
484 (u8_t *)p->payload,
485 (u8_t *)p + sizeof(struct pbuf)) );\
486 /* restore old payload pointer */
487 p->payload = payload;
488 /* bail out unsuccesfully */
489 return 1;
491 /* pbuf types refering to payloads? */
492 } else if (p->flags == PBUF_FLAG_REF || p->flags == PBUF_FLAG_ROM) {
493 /* hide a header in the payload? */
494 if ((header_size < 0) && (header_size - p->len <= 0)) {
495 /* increase payload pointer */
496 p->payload = (u8_t *)p->payload - header_size;
497 } else {
498 /* cannot expand payload to front (yet!)
499 * bail out unsuccesfully */
500 return 1;
503 LWIP_DEBUGF( PBUF_DEBUG, ("pbuf_header: old %p new %p (%d)\n", (void *)payload, (void *)p->payload, header_size) );
504 /* modify pbuf length fields */
505 p->len += header_size;
506 p->tot_len += header_size;
508 return 0;
512 * Dereference a pbuf (chain) and deallocate any no-longer-used
513 * pbufs at the head of this chain.
515 * Decrements the pbuf reference count. If it reaches
516 * zero, the pbuf is deallocated.
518 * For a pbuf chain, this is repeated for each pbuf in the chain,
519 * up to a pbuf which has a non-zero reference count after
520 * decrementing. (This might de-allocate the whole chain.)
522 * @param pbuf The pbuf (chain) to be dereferenced.
524 * @return the number of pbufs that were de-allocated
525 * from the head of the chain.
527 * @note MUST NOT be called on a packet queue.
528 * @note the reference counter of a pbuf equals the number of pointers
529 * that refer to the pbuf (or into the pbuf).
531 * @internal examples:
533 * Assuming existing chains a->b->c with the following reference
534 * counts, calling pbuf_free(a) results in:
536 * 1->2->3 becomes ...1->3
537 * 3->3->3 becomes 2->3->3
538 * 1->1->2 becomes ......1
539 * 2->1->1 becomes 1->1->1
540 * 1->1->1 becomes .......
543 u8_t
544 pbuf_free(struct pbuf *p)
546 struct pbuf *q;
547 u8_t count;
548 SYS_ARCH_DECL_PROTECT(old_level);
550 if (p == NULL) {
551 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 2, ("pbuf_free(p == NULL) was called.\n"));
552 return 0;
554 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 3, ("pbuf_free(%p)\n", (void *)p));
556 LWIP_ASSERT("pbuf_free: sane flags",
557 p->flags == PBUF_FLAG_RAM || p->flags == PBUF_FLAG_ROM ||
558 p->flags == PBUF_FLAG_REF || p->flags == PBUF_FLAG_POOL);
560 count = 0;
561 /* Since decrementing ref cannot be guaranteed to be a single machine operation
562 * we must protect it. Also, the later test of ref must be protected.
564 SYS_ARCH_PROTECT(old_level);
565 /* de-allocate all consecutive pbufs from the head of the chain that
566 * obtain a zero reference count after decrementing*/
567 while (p != NULL) {
568 /* all pbufs in a chain are referenced at least once */
569 LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
570 /* decrease reference count (number of pointers to pbuf) */
571 p->ref--;
572 /* this pbuf is no longer referenced to? */
573 if (p->ref == 0) {
574 /* remember next pbuf in chain for next iteration */
575 q = p->next;
576 LWIP_DEBUGF( PBUF_DEBUG | 2, ("pbuf_free: deallocating %p\n", (void *)p));
577 /* is this a pbuf from the pool? */
578 if (p->flags == PBUF_FLAG_POOL) {
579 p->len = p->tot_len = PBUF_POOL_BUFSIZE;
580 p->payload = (void *)((u8_t *)p + sizeof(struct pbuf));
581 PBUF_POOL_FREE(p);
582 /* a ROM or RAM referencing pbuf */
583 } else if (p->flags == PBUF_FLAG_ROM || p->flags == PBUF_FLAG_REF) {
584 memp_free(MEMP_PBUF, p);
585 /* p->flags == PBUF_FLAG_RAM */
586 } else {
587 mem_free(p);
589 count++;
590 /* proceed to next pbuf */
591 p = q;
592 /* p->ref > 0, this pbuf is still referenced to */
593 /* (and so the remaining pbufs in chain as well) */
594 } else {
595 LWIP_DEBUGF( PBUF_DEBUG | 2, ("pbuf_free: %p has ref %u, ending here.\n", (void *)p, (unsigned int)p->ref));
596 /* stop walking through chain */
597 p = NULL;
600 SYS_ARCH_UNPROTECT(old_level);
601 /* return number of de-allocated pbufs */
602 return count;
606 * Count number of pbufs in a chain
608 * @param p first pbuf of chain
609 * @return the number of pbufs in a chain
612 u8_t
613 pbuf_clen(struct pbuf *p)
615 u8_t len;
617 len = 0;
618 while (p != NULL) {
619 ++len;
620 p = p->next;
622 return len;
626 * Increment the reference count of the pbuf.
628 * @param p pbuf to increase reference counter of
631 void
632 pbuf_ref(struct pbuf *p)
634 SYS_ARCH_DECL_PROTECT(old_level);
635 /* pbuf given? */
636 if (p != NULL) {
637 SYS_ARCH_PROTECT(old_level);
638 ++(p->ref);
639 SYS_ARCH_UNPROTECT(old_level);
644 * Concatenate two pbufs (each may be a pbuf chain) and take over
645 * the caller's reference of the tail pbuf.
647 * @note The caller MAY NOT reference the tail pbuf afterwards.
648 * Use pbuf_chain() for that purpose.
650 * @see pbuf_chain()
653 void
654 pbuf_cat(struct pbuf *h, struct pbuf *t)
656 struct pbuf *p;
658 LWIP_ASSERT("h != NULL", h != NULL);
659 LWIP_ASSERT("t != NULL", t != NULL);
660 if ((h == NULL) || (t == NULL)) return;
662 /* proceed to last pbuf of chain */
663 for (p = h; p->next != NULL; p = p->next) {
664 /* add total length of second chain to all totals of first chain */
665 p->tot_len += t->tot_len;
667 /* { p is last pbuf of first h chain, p->next == NULL } */
668 LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len);
669 /* add total length of second chain to last pbuf total of first chain */
670 p->tot_len += t->tot_len;
671 /* chain last pbuf of head (p) with first of tail (t) */
672 p->next = t;
676 * Chain two pbufs (or pbuf chains) together.
678 * The caller MUST call pbuf_free(t) once it has stopped
679 * using it. Use pbuf_cat() instead if you no longer use t.
681 * @param h head pbuf (chain)
682 * @param t tail pbuf (chain)
683 * @note The pbufs MUST belong to the same packet.
684 * @note MAY NOT be called on a packet queue.
686 * The ->tot_len fields of all pbufs of the head chain are adjusted.
687 * The ->next field of the last pbuf of the head chain is adjusted.
688 * The ->ref field of the first pbuf of the tail chain is adjusted.
691 void
692 pbuf_chain(struct pbuf *h, struct pbuf *t)
694 pbuf_cat(h, t);
695 /* t is now referenced by h */
696 pbuf_ref(t);
697 LWIP_DEBUGF(PBUF_DEBUG | DBG_FRESH | 2, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t));
702 * Create PBUF_POOL (or PBUF_RAM) copies of PBUF_REF pbufs.
704 * Used to queue packets on behalf of the lwIP stack, such as
705 * ARP based queueing.
707 * Go through a pbuf chain and replace any PBUF_REF buffers
708 * with PBUF_POOL (or PBUF_RAM) pbufs, each taking a copy of
709 * the referenced data.
711 * @note You MUST explicitly use p = pbuf_take(p);
712 * The pbuf you give as argument, may have been replaced
713 * by pbuf_take()!
715 * @note Any replaced pbufs will be freed through pbuf_free().
716 * This may deallocate them if they become no longer referenced.
718 * @param p Head of pbuf chain to process
720 * @return Pointer to head of pbuf chain
722 struct pbuf *
723 pbuf_take(struct pbuf *p)
725 struct pbuf *q , *prev, *head;
726 LWIP_ASSERT("pbuf_take: p != NULL\n", p != NULL);
727 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 3, ("pbuf_take(%p)\n", (void*)p));
729 prev = NULL;
730 head = p;
731 /* iterate through pbuf chain */
734 /* pbuf is of type PBUF_REF? */
735 if (p->flags == PBUF_FLAG_REF) {
736 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE, ("pbuf_take: encountered PBUF_REF %p\n", (void *)p));
737 /* allocate a pbuf (w/ payload) fully in RAM */
738 /* PBUF_POOL buffers are faster if we can use them */
739 if (p->len <= PBUF_POOL_BUFSIZE) {
740 q = pbuf_alloc(PBUF_RAW, p->len, PBUF_POOL);
741 if (q == NULL) LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 2, ("pbuf_take: Could not allocate PBUF_POOL\n"));
742 } else {
743 /* no replacement pbuf yet */
744 q = NULL;
745 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 2, ("pbuf_take: PBUF_POOL too small to replace PBUF_REF\n"));
747 /* no (large enough) PBUF_POOL was available? retry with PBUF_RAM */
748 if (q == NULL) {
749 q = pbuf_alloc(PBUF_RAW, p->len, PBUF_RAM);
750 if (q == NULL) LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 2, ("pbuf_take: Could not allocate PBUF_RAM\n"));
752 /* replacement pbuf could be allocated? */
753 if (q != NULL)
755 /* copy p to q */
756 /* copy successor */
757 q->next = p->next;
758 /* remove linkage from original pbuf */
759 p->next = NULL;
760 /* remove linkage to original pbuf */
761 if (prev != NULL) {
762 /* prev->next == p at this point */
763 LWIP_ASSERT("prev->next == p", prev->next == p);
764 /* break chain and insert new pbuf instead */
765 prev->next = q;
766 /* prev == NULL, so we replaced the head pbuf of the chain */
767 } else {
768 head = q;
770 /* copy pbuf payload */
771 mips_memcpy(q->payload, p->payload, p->len);
772 q->tot_len = p->tot_len;
773 q->len = p->len;
774 /* in case p was the first pbuf, it is no longer refered to by
775 * our caller, as the caller MUST do p = pbuf_take(p);
776 * in case p was not the first pbuf, it is no longer refered to
777 * by prev. we can safely free the pbuf here.
778 * (note that we have set p->next to NULL already so that
779 * we will not free the rest of the chain by accident.)
781 pbuf_free(p);
782 /* do not copy ref, since someone else might be using the old buffer */
783 LWIP_DEBUGF(PBUF_DEBUG, ("pbuf_take: replaced PBUF_REF %p with %p\n", (void *)p, (void *)q));
784 p = q;
785 } else {
786 /* deallocate chain */
787 pbuf_free(head);
788 LWIP_DEBUGF(PBUF_DEBUG | 2, ("pbuf_take: failed to allocate replacement pbuf for %p\n", (void *)p));
789 return NULL;
791 /* p->flags != PBUF_FLAG_REF */
792 } else {
793 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 1, ("pbuf_take: skipping pbuf not of type PBUF_REF\n"));
795 /* remember this pbuf */
796 prev = p;
797 /* proceed to next pbuf in original chain */
798 p = p->next;
799 } while (p);
800 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 1, ("pbuf_take: end of chain reached.\n"));
802 return head;