1 /* LWIP service - pchain.c - pbuf chain utility functions */
/*
 * Allocate a chain of pbuf buffers as though it were a PBUF_POOL allocation,
 * except that each buffer is of type PBUF_RAM.  Return the pbuf chain on
 * success, or NULL on memory allocation failure.
 */
11 pchain_alloc(int layer
, size_t size
)
13 struct pbuf
*pbuf
, *phead
, **pnext
;
18 * Check for length overflow. Note that we do this before prepending
19 * the header, because otherwise we could never send a full-sized
20 * (65535-byte) IP packet. This does mean that we are generating a
21 * pbuf chain that has over 64KB worth of allocated space, but our
22 * header hiding ensures that tot_len stays under 64KB. A check in
23 * pbuf_header() prevents that later header adjustments end up lifting
24 * tot_len over this limit.
26 if (size
> UINT16_MAX
)
30 * Unfortunately, we have no choice but to replicate this block from
31 * lwIP's pbuf_alloc() code. It is however unlikely that the offsets
32 * change for the currently supported layer types, and we do not need
33 * to support any layer types that we do not use ourselves.
37 offset
= PBUF_LINK_ENCAPSULATION_HLEN
+ PBUF_LINK_HLEN
+
38 PBUF_IP_HLEN
+ PBUF_TRANSPORT_HLEN
;
41 offset
= PBUF_LINK_ENCAPSULATION_HLEN
+ PBUF_LINK_HLEN
+
45 offset
= PBUF_LINK_ENCAPSULATION_HLEN
+ PBUF_LINK_HLEN
;
48 offset
= PBUF_LINK_ENCAPSULATION_HLEN
;
54 panic("invalid pbuf layer: %d", layer
);
57 chunk
= size
+ offset
;
58 if (chunk
> MEMPOOL_BUFSIZE
)
59 chunk
= MEMPOOL_BUFSIZE
;
61 if ((phead
= pbuf_alloc(PBUF_RAW
, chunk
, PBUF_RAM
)) == NULL
)
65 util_pbuf_header(phead
, -offset
);
67 phead
->tot_len
= size
;
71 for (left
= size
- (chunk
- offset
); left
> 0; left
-= chunk
) {
72 chunk
= (left
< MEMPOOL_BUFSIZE
) ? left
: MEMPOOL_BUFSIZE
;
74 if ((pbuf
= pbuf_alloc(PBUF_RAW
, chunk
, PBUF_RAM
)) == NULL
) {
76 * Adjust tot_len to match the actual length of the
77 * chain so far, just in case pbuf_free() starts caring
78 * about this in the future.
80 for (pbuf
= phead
; pbuf
!= NULL
; pbuf
= pbuf
->next
)
81 pbuf
->tot_len
-= left
;
/*
 * Given the (non-empty) chain of buffers 'pbuf', return a pointer to the
 * 'next' field of the last buffer in the chain.  This function is packet queue
 * friendly.  A packet queue is a queue of packet chains, where each chain is
 * delimited using the 'tot_len' field.  As a result, while the pointer
 * returned is never NULL, the value pointed to by the returned pointer may or
 * may not be NULL (and will point to the next chain if not NULL).  As notable
 * exception, in cases where the buffer type is a single PBUF_REF, 'tot_len'
 * may be zero and 'len' may be non-zero.  In such cases, the chain consists of
 * that single buffer only.  This function must handle that case as well.
 */
109 pchain_end(struct pbuf
* pbuf
)
112 assert(pbuf
!= NULL
);
114 while (pbuf
->tot_len
> pbuf
->len
) {
117 assert(pbuf
!= NULL
);
/*
 * Given the (non-empty) chain of buffers 'pbuf', return a byte size estimation
 * of the memory used by the chain, rounded up to pool buffer sizes.  This
 * function is packet queue friendly.
 */
129 pchain_size(struct pbuf
* pbuf
)
133 assert(pbuf
!= NULL
);
136 * Count the first buffer separately, as its length may be seriously
137 * off due to header hiding. While the caller should always provide
138 * exactly the same pbuf chain twice if it intends to get back the same
139 * size twice, this also protects against accidental size differences
140 * due to header hiding in that case.
142 size
= MEMPOOL_BUFSIZE
;
145 * Round up the size of the rest of the chain to whole buffers.
147 if (pbuf
->tot_len
> pbuf
->len
) {
148 size
+= pbuf
->tot_len
- pbuf
->len
+ MEMPOOL_BUFSIZE
- 1;
150 size
-= size
% MEMPOOL_BUFSIZE
;