/**
 * @file
 * Dynamic memory manager
 *
 */

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *
 */
#include <string.h>

#include "lwip/arch.h"
#include "lwip/opt.h"
#include "lwip/mem.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
/* The heap is managed as a chain of struct mem block headers embedded in the
 * ram[] array; next and prev are byte offsets into ram[], not pointers. */
struct mem {
  mem_size_t next, prev;
#if MEM_ALIGNMENT == 1
  u8_t used;
#elif MEM_ALIGNMENT == 2
  u16_t used;
#elif MEM_ALIGNMENT == 4
  u32_t used;
#elif MEM_ALIGNMENT == 8
  u64_t used;
#else
#error "unhandled MEM_ALIGNMENT size"
#endif /* MEM_ALIGNMENT */
};
/* pointer to the end-of-heap sentinel block */
static struct mem *ram_end;
/* the heap: MEM_SIZE bytes plus room for the sentinel block and alignment */
static u8_t ram[MEM_SIZE + sizeof(struct mem) + MEM_ALIGNMENT];
#define MIN_SIZE 12

#if 0 /* this one does not align correctly for some, resulting in crashes */
#define SIZEOF_STRUCT_MEM (unsigned int)MEM_ALIGN_SIZE(sizeof(struct mem))
#else
#define SIZEOF_STRUCT_MEM (sizeof(struct mem) + \
                          (((sizeof(struct mem) % MEM_ALIGNMENT) == 0)? 0 : \
                          (4 - (sizeof(struct mem) % MEM_ALIGNMENT))))
#endif
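
/* Illustrative only (not compiled): a minimal sketch of the header padding
 * arithmetic above. SIZEOF_STRUCT_MEM pads sizeof(struct mem) up to the next
 * multiple of the alignment step (the fallback branch hard-codes a 4-byte
 * step), e.g. a 6-byte header with MEM_ALIGNMENT == 4 is padded to 8 bytes.
 * main() here is a hypothetical test driver, not part of this port. */
#if 0
#include <stdio.h>
int main(void)
{
  printf("sizeof(struct mem) = %u, padded header = %u, alignment = %u\n",
         (unsigned)sizeof(struct mem), (unsigned)SIZEOF_STRUCT_MEM,
         (unsigned)MEM_ALIGNMENT);
  return 0;
}
#endif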
static struct mem *lfree;   /* pointer to the lowest free block */
static sys_sem mem_sem;
/**
 * "Plug holes": combine a freed block with its unused neighbours so that two
 * adjacent unused blocks never exist. Called with the heap already locked.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE", mem->next <= MEM_SIZE);

  nmem = (struct mem *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* the next block is unused: absorb it into this one */
    if (lfree == nmem) {
      lfree = mem;
    }
    mem->next = nmem->next;
    ((struct mem *)&ram[nmem->next])->prev = (u8_t *)mem - ram;
  }

  /* plug hole backward */
  pmem = (struct mem *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* the previous block is unused: absorb this one into it */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    ((struct mem *)&ram[mem->next])->prev = (u8_t *)pmem - ram;
  }
}
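
/* Illustrative only: a minimal sketch of how the block headers chain through
 * ram[] by byte offset, which is the invariant plug_holes() maintains.
 * mem_dump_blocks() is a hypothetical debug helper, not part of the lwIP API;
 * it assumes the heap lock is already held by the caller. */
#if 0
#include <stdio.h>
static void
mem_dump_blocks(void)
{
  mem_size_t ptr;
  /* walk from the first header to the ram_end sentinel via the next offsets */
  for (ptr = 0; ptr < MEM_SIZE; ptr = ((struct mem *)&ram[ptr])->next) {
    struct mem *m = (struct mem *)&ram[ptr];
    printf("block at offset %u: next=%u prev=%u used=%u\n",
           (unsigned)ptr, (unsigned)m->next, (unsigned)m->prev,
           (unsigned)m->used);
  }
}
#endif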
/**
 * Initialize the heap: zero it, set up the first (completely free) block,
 * the ram_end sentinel and the lowest-free pointer, and create the lock.
 */
void
mem_init(void)
{
  struct mem *mem;

  memset(ram, 0, MEM_SIZE);
  mem = (struct mem *)ram;
  mem->next = MEM_SIZE;
  mem->prev = 0;
  mem->used = 0;
  ram_end = (struct mem *)&ram[MEM_SIZE];
  ram_end->used = 1;
  ram_end->next = MEM_SIZE;
  ram_end->prev = MEM_SIZE;

  LWP_SemInit(&mem_sem, 1, 1);

  lfree = (struct mem *)ram;

#if MEM_STATS
  lwip_stats.mem.avail = MEM_SIZE;
#endif /* MEM_STATS */
}
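
/* Illustrative only: the expected start-up order, assuming a single
 * initialization call before any allocation. example_startup() is a
 * hypothetical snippet, not part of this file. */
#if 0
static void
example_startup(void)
{
  mem_init();   /* zero the heap, set up ram_end, lfree and the lock */
  /* ... mem_malloc()/mem_free() may be used from here on ... */
}
#endif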
/**
 * Return a block previously obtained from mem_malloc() to the heap.
 */
void
mem_free(void *rmem)
{
  struct mem *mem;

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | DBG_TRACE | 2, ("mem_free(p == NULL) was called.\n"));
    return;
  }

  LWP_SemWait(mem_sem);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_free: illegal memory\n"));
#if MEM_STATS
    ++lwip_stats.mem.err;
#endif /* MEM_STATS */
    LWP_SemPost(mem_sem);
    return;
  }
  /* get the block header that sits right before the user pointer */
  mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);

  LWIP_ASSERT("mem_free: mem->used", mem->used);

  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed block is now the lowest */
    lfree = mem;
  }

#if MEM_STATS
  lwip_stats.mem.used -= mem->next - ((u8_t *)mem - ram);
#endif /* MEM_STATS */

  /* finally, merge with any unused neighbours */
  plug_holes(mem);
  LWP_SemPost(mem_sem);
}
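
/* Illustrative only: mem_free() tolerates a NULL pointer (it just logs a
 * debug message and returns), but each pointer returned by mem_malloc()
 * must be freed exactly once. Hypothetical snippet. */
#if 0
static void
example_free(void)
{
  void *p = mem_malloc(128);
  mem_free(p);     /* returns the block and merges with free neighbours */
  mem_free(NULL);  /* harmless: logged and ignored */
}
#endif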
/**
 * Move a block to a freshly allocated block of newsize bytes; if that
 * allocation fails, fall back to shrinking it in place with mem_realloc().
 */
void *
mem_reallocm(void *rmem, mem_size_t newsize)
{
  void *nmem;

  nmem = mem_malloc(newsize);
  if (nmem == NULL) {
    return mem_realloc(rmem, newsize);
  }
  memcpy(nmem, rmem, newsize);
  mem_free(rmem);
  return nmem;
}
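
/* Illustrative only: mem_reallocm() copies newsize bytes into a freshly
 * allocated block and falls back to the in-place mem_realloc() when the heap
 * is too full, so it is only meant for shrinking. Hypothetical usage sketch. */
#if 0
static void
example_shrink_move(void)
{
  char *buf = (char *)mem_malloc(256);
  if (buf != NULL) {
    /* keep only the first 64 bytes; buf may move to a new address */
    buf = (char *)mem_reallocm(buf, 64);
  }
}
#endif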
/**
 * Shrink a block in place to newsize bytes; the returned pointer is the
 * original rmem. Any sufficiently large tail is split off and returned to
 * the free list.
 */
void *
mem_realloc(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  if ((newsize % MEM_ALIGNMENT) != 0) {
    newsize += MEM_ALIGNMENT - ((newsize + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT);
  }

  if (newsize > MEM_SIZE) {
    return NULL;
  }

  LWP_SemWait(mem_sem);

  LWIP_ASSERT("mem_realloc: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_realloc: illegal memory\n"));
    LWP_SemPost(mem_sem);
    return rmem;
  }
  mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);

  ptr = (u8_t *)mem - ram;

  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
#if MEM_STATS
  lwip_stats.mem.used -= (size - newsize);
#endif /* MEM_STATS */

  if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE < size) {
    /* split off the unused tail as a new block and merge it with any
       following free block */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)&ram[ptr2];
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE) {
      ((struct mem *)&ram[mem2->next])->prev = ptr2;
    }

    plug_holes(mem2);
  }
  LWP_SemPost(mem_sem);
  return rmem;
}
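
/* Illustrative only: despite its name, mem_realloc() can only shrink a block
 * in place (for a legal pointer it always returns rmem); the now-unused tail
 * is split off and merged back into the free list. Hypothetical usage sketch. */
#if 0
static void
example_shrink_in_place(void)
{
  char *buf = (char *)mem_malloc(512);
  if (buf != NULL) {
    /* give the unused tail back to the heap; buf stays valid */
    buf = (char *)mem_realloc(buf, 128);
  }
}
#endif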
/**
 * Allocate size bytes from the heap using a first-fit search that starts at
 * the lowest known free block. Returns NULL if no large enough block exists.
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  if ((size % MEM_ALIGNMENT) != 0) {
    size += MEM_ALIGNMENT - ((size + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT);
  }

  if (size > MEM_SIZE) {
    return NULL;
  }

  LWP_SemWait(mem_sem);

  for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE; ptr = ((struct mem *)&ram[ptr])->next) {
    mem = (struct mem *)&ram[ptr];
    if (!mem->used &&
        mem->next - (ptr + SIZEOF_STRUCT_MEM) >= size + SIZEOF_STRUCT_MEM) {
      /* first fit: split this free block into a used part and a free rest */
      ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
      mem2 = (struct mem *)&ram[ptr2];

      mem2->prev = ptr;
      mem2->next = mem->next;
      mem->next = ptr2;
      if (mem2->next != MEM_SIZE) {
        ((struct mem *)&ram[mem2->next])->prev = ptr2;
      }

      mem2->used = 0;
      mem->used = 1;
#if MEM_STATS
      lwip_stats.mem.used += (size + SIZEOF_STRUCT_MEM);
      /*      if (lwip_stats.mem.max < lwip_stats.mem.used) {
        lwip_stats.mem.max = lwip_stats.mem.used;
        } */
      if (lwip_stats.mem.max < ptr2) {
        lwip_stats.mem.max = ptr2;
      }
#endif /* MEM_STATS */

      if (mem == lfree) {
        /* Find next free block after mem */
        while (lfree->used && lfree != ram_end) {
          lfree = (struct mem *)&ram[lfree->next];
        }
        LWIP_ASSERT("mem_malloc: !lfree->used", !lfree->used);
      }
      LWP_SemPost(mem_sem);
      LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
        (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
      LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
        (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
      return (u8_t *)mem + SIZEOF_STRUCT_MEM;
    }
  }
  LWIP_DEBUGF(MEM_DEBUG | 2, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
#if MEM_STATS
  ++lwip_stats.mem.err;
#endif /* MEM_STATS */
  LWP_SemPost(mem_sem);
  return NULL;
}
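
/* Illustrative only: mem_malloc() scans the free list first-fit starting at
 * lfree and returns NULL when no block is large enough, so every allocation
 * needs a NULL check. Hypothetical usage sketch; struct pkt is made up. */
#if 0
static void
example_alloc(void)
{
  struct pkt { u16_t len; u8_t payload[64]; } *p;

  p = (struct pkt *)mem_malloc(sizeof(struct pkt));
  if (p == NULL) {
    /* heap exhausted or too fragmented */
    return;
  }
  p->len = 0;
  /* ... use p ... */
  mem_free(p);
}
#endif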