2 * Wireshark memory management and garbage collection functions
7 * Wireshark - Network traffic analyzer
8 * By Gerald Combs <gerald@wireshark.org>
9 * Copyright 1998 Gerald Combs
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
34 #ifdef HAVE_SYS_TIME_H
44 #include "app_mem_usage.h"
46 #include "exceptions.h"
48 #include "wmem/wmem.h"
51 #include <windows.h> /* VirtualAlloc, VirtualProtect */
52 #include <process.h> /* getpid */
55 /* Print out statistics about our memory allocations? */
56 /*#define SHOW_EMEM_STATS*/
58 /* Do we want to use guardpages? if available */
59 #define WANT_GUARD_PAGES 1
61 #ifdef WANT_GUARD_PAGES
62 /* Add guard pages at each end of our allocated memory */
64 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
67 #ifdef HAVE_SYS_TYPES_H
68 #include <sys/types.h>
69 #endif /* HAVE_SYS_TYPES_H */
73 #if defined(MAP_ANONYMOUS)
74 #define ANON_PAGE_MODE (MAP_ANONYMOUS|MAP_PRIVATE)
75 #elif defined(MAP_ANON)
76 #define ANON_PAGE_MODE (MAP_ANON|MAP_PRIVATE)
78 #define ANON_PAGE_MODE (MAP_PRIVATE) /* have to map /dev/zero */
80 #endif /* defined(MAP_ANONYMOUS) */
84 static int dev_zero_fd
;
85 #define ANON_FD dev_zero_fd
88 #endif /* NEED_DEV_ZERO */
90 #define USE_GUARD_PAGES 1
91 #endif /* defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H) */
92 #endif /* WANT_GUARD_PAGES */
94 /* When required, allocate more memory from the OS in this size chunks */
95 #define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
97 /* The canary between allocations is at least 8 bytes and up to 16 bytes to
98 * allow future allocations to be 4- or 8-byte aligned.
99 * All but the last byte of the canary are randomly generated; the last byte is
100 * NULL to separate the canary and the pointer to the next canary.
102 * For example, if the allocation is a multiple of 8 bytes, the canary and
103 * pointer would look like:
104 * |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
105 * |c|c|c|c|c|c|c|0||p|p|p|p|p|p|p|p| (64-bit), or:
106 * |c|c|c|c|c|c|c|0||p|p|p|p| (32-bit)
108 * If the allocation was, for example, 12 bytes, the canary would look like:
109 * |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
110 * [...]|a|a|a|a|c|c|c|c||c|c|c|c|c|c|c|0| (followed by the pointer)
112 #define EMEM_CANARY_SIZE 8
113 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
115 typedef struct _emem_chunk_t
{
116 struct _emem_chunk_t
*next
;
119 unsigned int amount_free_init
;
120 unsigned int amount_free
;
121 unsigned int free_offset_init
;
122 unsigned int free_offset
;
126 typedef struct _emem_pool_t
{
127 emem_chunk_t
*free_list
;
128 emem_chunk_t
*used_list
;
130 emem_tree_t
*trees
; /* only used by se_mem allocator */
132 guint8 canary
[EMEM_CANARY_DATA_SIZE
];
133 void *(*memory_alloc
)(size_t size
, struct _emem_pool_t
*);
136 * Tools like Valgrind and ElectricFence don't work well with memchunks.
137 * Export the following environment variables to make {ep|se}_alloc() allocate each
138 * object individually.
140 * WIRESHARK_DEBUG_EP_NO_CHUNKS
141 * WIRESHARK_DEBUG_SE_NO_CHUNKS
143 gboolean debug_use_chunks
;
145 /* Do we want to use canaries?
146 * Export the following environment variables to disable/enable canaries
148 * WIRESHARK_DEBUG_EP_NO_CANARY
149 * For SE memory use of canary is default off as the memory overhead
151 * WIRESHARK_DEBUG_SE_USE_CANARY
153 gboolean debug_use_canary
;
155 /* Do we want to verify no one is using a pointer to an ep_ or se_
156 * allocated thing where they shouldn't be?
158 * Export WIRESHARK_EP_VERIFY_POINTERS or WIRESHARK_SE_VERIFY_POINTERS
161 gboolean debug_verify_pointers
;
165 static emem_pool_t ep_packet_mem
;
166 static emem_pool_t se_packet_mem
;
169 * Memory scrubbing is expensive but can be useful to ensure we don't:
170 * - use memory before initializing it
171 * - use memory after freeing it
172 * Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
174 static gboolean debug_use_memory_scrubber
= FALSE
;
177 static SYSTEM_INFO sysinfo
;
178 static gboolean iswindowsplatform
;
180 #elif defined(USE_GUARD_PAGES)
181 static intptr_t pagesize
;
182 #endif /* _WIN32 / USE_GUARD_PAGES */
184 static void *emem_alloc_chunk(size_t size
, emem_pool_t
*mem
);
185 static void *emem_alloc_glib(size_t size
, emem_pool_t
*mem
);
188 * Set a canary value to be placed between memchunks.
191 emem_canary_init(guint8
*canary
)
194 static GRand
*rand_state
= NULL
;
196 if (rand_state
== NULL
) {
197 rand_state
= g_rand_new();
199 for (i
= 0; i
< EMEM_CANARY_DATA_SIZE
; i
++) {
200 canary
[i
] = (guint8
) g_rand_int_range(rand_state
, 1, 0x100);
206 emem_canary_next(guint8
*mem_canary
, guint8
*canary
, int *len
)
211 for (i
= 0; i
< EMEM_CANARY_SIZE
-1; i
++)
212 if (mem_canary
[i
] != canary
[i
])
215 for (; i
< EMEM_CANARY_DATA_SIZE
; i
++) {
216 if (canary
[i
] == '\0') {
217 memcpy(&ptr
, &canary
[i
+1], sizeof(void *));
220 *len
= i
+ 1 + (int)sizeof(void *);
224 if (mem_canary
[i
] != canary
[i
])
232 * Given an allocation size, return the amount of room needed for the canary
233 * (with a minimum of 8 bytes) while using the canary to pad to an 8-byte
237 emem_canary_pad (size_t allocation
)
241 pad
= EMEM_CANARY_SIZE
- (allocation
% EMEM_CANARY_SIZE
);
242 if (pad
< EMEM_CANARY_SIZE
)
243 pad
+= EMEM_CANARY_SIZE
;
248 /* used for debugging canaries, will block */
249 #ifdef DEBUG_INTENSE_CANARY_CHECKS
250 gboolean intense_canary_checking
= FALSE
;
252 /* used to intensivelly check ep canaries
255 ep_check_canary_integrity(const char* fmt
, ...)
258 static gchar there
[128] = {
259 'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
260 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
261 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
262 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
264 emem_chunk_t
* npc
= NULL
;
266 if (! intense_canary_checking
) return;
269 g_vsnprintf(here
, sizeof(here
), fmt
, ap
);
272 for (npc
= ep_packet_mem
.free_list
; npc
!= NULL
; npc
= npc
->next
) {
273 void *canary_next
= npc
->canary_last
;
275 while (canary_next
!= NULL
) {
276 canary_next
= emem_canary_next(ep_packet_mem
.canary
, canary_next
, NULL
);
277 /* XXX, check if canary_next is inside allocated memory? */
279 if (canary_next
== (void *) -1)
280 g_error("Per-packet memory corrupted\nbetween: %s\nand: %s", there
, here
);
284 g_strlcpy(there
, here
, sizeof(there
));
289 emem_init_chunk(emem_pool_t
*mem
)
291 if (mem
->debug_use_canary
)
292 emem_canary_init(mem
->canary
);
294 if (mem
->debug_use_chunks
)
295 mem
->memory_alloc
= emem_alloc_chunk
;
297 mem
->memory_alloc
= emem_alloc_glib
;
301 emem_memory_usage(const emem_pool_t
*pool
)
303 gsize total_used
= 0;
306 for (chunk
= pool
->used_list
; chunk
; chunk
= chunk
->next
)
307 total_used
+= (chunk
->amount_free_init
- chunk
->amount_free
);
309 for (chunk
= pool
->free_list
; chunk
; chunk
= chunk
->next
)
310 total_used
+= (chunk
->amount_free_init
- chunk
->amount_free
);
316 ep_memory_usage(void)
318 return emem_memory_usage(&ep_packet_mem
);
321 /* Initialize the packet-lifetime memory allocation pool.
322 * This function should be called only once when Wireshark or TShark starts
328 static const ws_mem_usage_t ep_stats
= { "EP", ep_memory_usage
, NULL
};
330 ep_packet_mem
.free_list
=NULL
;
331 ep_packet_mem
.used_list
=NULL
;
332 ep_packet_mem
.trees
=NULL
; /* not used by this allocator */
334 ep_packet_mem
.debug_use_chunks
= (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL
);
335 ep_packet_mem
.debug_use_canary
= ep_packet_mem
.debug_use_chunks
&& (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL
);
336 ep_packet_mem
.debug_verify_pointers
= (getenv("WIRESHARK_EP_VERIFY_POINTERS") != NULL
);
338 #ifdef DEBUG_INTENSE_CANARY_CHECKS
339 intense_canary_checking
= (getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY") != NULL
);
342 emem_init_chunk(&ep_packet_mem
);
344 memory_usage_component_register(&ep_stats
);
348 se_memory_usage(void)
350 return emem_memory_usage(&se_packet_mem
);
353 /* Initialize the capture-lifetime memory allocation pool.
354 * This function should be called only once when Wireshark or TShark starts
360 static const ws_mem_usage_t se_stats
= { "SE", se_memory_usage
, NULL
};
362 se_packet_mem
.free_list
= NULL
;
363 se_packet_mem
.used_list
= NULL
;
364 se_packet_mem
.trees
= NULL
;
366 se_packet_mem
.debug_use_chunks
= (getenv("WIRESHARK_DEBUG_SE_NO_CHUNKS") == NULL
);
367 se_packet_mem
.debug_use_canary
= se_packet_mem
.debug_use_chunks
&& (getenv("WIRESHARK_DEBUG_SE_USE_CANARY") != NULL
);
368 se_packet_mem
.debug_verify_pointers
= (getenv("WIRESHARK_SE_VERIFY_POINTERS") != NULL
);
370 emem_init_chunk(&se_packet_mem
);
372 memory_usage_component_register(&se_stats
);
375 /* Initialize all the allocators here.
376 * This function should be called only once when Wireshark or TShark starts
385 if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
386 debug_use_memory_scrubber
= TRUE
;
389 /* Set up our guard page info for Win32 */
390 GetSystemInfo(&sysinfo
);
391 pagesize
= sysinfo
.dwPageSize
;
393 #if (_MSC_VER >= 1800)
395 * On VS2103, GetVersionEx is deprecated. Microsoft recommend to
396 * use VerifyVersionInfo instead
399 OSVERSIONINFOEX osvi
;
400 DWORDLONG dwlConditionMask
= 0;
403 SecureZeroMemory(&osvi
, sizeof(OSVERSIONINFOEX
));
404 osvi
.dwOSVersionInfoSize
= sizeof(OSVERSIONINFOEX
);
405 osvi
.dwPlatformId
= VER_PLATFORM_WIN32_WINDOWS
;
406 VER_SET_CONDITION(dwlConditionMask
, VER_PLATFORMID
, op
);
407 iswindowsplatform
= VerifyVersionInfo(&osvi
, VER_PLATFORMID
, dwlConditionMask
);
410 /* calling GetVersionEx using the OSVERSIONINFO structure.
411 * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
412 * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
414 * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
415 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
416 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
419 OSVERSIONINFO versinfo
;
421 SecureZeroMemory(&versinfo
, sizeof(OSVERSIONINFO
));
422 versinfo
.dwOSVersionInfoSize
= sizeof(OSVERSIONINFO
);
423 GetVersionEx(&versinfo
);
424 iswindowsplatform
= (versinfo
.dwPlatformId
== VER_PLATFORM_WIN32_WINDOWS
);
428 #elif defined(USE_GUARD_PAGES)
429 pagesize
= sysconf(_SC_PAGESIZE
);
431 fprintf(stderr
, "Warning: call to sysconf() for _SC_PAGESIZE has failed...\n");
433 dev_zero_fd
= ws_open("/dev/zero", O_RDWR
);
434 g_assert(dev_zero_fd
!= -1);
436 #endif /* _WIN32 / USE_GUARD_PAGES */
439 #ifdef SHOW_EMEM_STATS
440 #define NUM_ALLOC_DIST 10
441 static guint allocations
[NUM_ALLOC_DIST
] = { 0 };
442 static guint total_no_chunks
= 0;
445 print_alloc_stats(void)
447 guint num_chunks
= 0;
448 guint num_allocs
= 0;
449 guint total_used
= 0;
450 guint total_allocation
= 0;
451 guint used_for_canaries
= 0;
455 guint total_space_allocated_from_os
, total_space_wasted
;
456 gboolean ep_stat
=TRUE
;
458 fprintf(stderr
, "\n-------- EP allocator statistics --------\n");
459 fprintf(stderr
, "%s chunks, %s canaries, %s memory scrubber\n",
460 ep_packet_mem
.debug_use_chunks
? "Using" : "Not using",
461 ep_packet_mem
.debug_use_canary
? "using" : "not using",
462 debug_use_memory_scrubber
? "using" : "not using");
464 if (! (ep_packet_mem
.free_list
|| !ep_packet_mem
.used_list
)) {
465 fprintf(stderr
, "No memory allocated\n");
468 if (ep_packet_mem
.debug_use_chunks
&& ep_stat
) {
469 /* Nothing interesting without chunks */
470 /* Only look at the used_list since those chunks are fully
471 * used. Looking at the free list would skew our view of what
474 for (chunk
= ep_packet_mem
.used_list
; chunk
; chunk
= chunk
->next
) {
476 total_used
+= (chunk
->amount_free_init
- chunk
->amount_free
);
477 total_allocation
+= chunk
->amount_free_init
;
479 if (num_chunks
> 0) {
480 fprintf (stderr
, "\n");
481 fprintf (stderr
, "\n---- Buffer space ----\n");
482 fprintf (stderr
, "\tChunk allocation size: %10u\n", EMEM_PACKET_CHUNK_SIZE
);
483 fprintf (stderr
, "\t* Number of chunks: %10u\n", num_chunks
);
484 fprintf (stderr
, "\t-------------------------------------------\n");
485 fprintf (stderr
, "\t= %u (%u including guard pages) total space used for buffers\n",
486 total_allocation
, EMEM_PACKET_CHUNK_SIZE
* num_chunks
);
487 fprintf (stderr
, "\t-------------------------------------------\n");
488 total_space_allocated_from_os
= total_allocation
489 + sizeof(emem_chunk_t
) * num_chunks
;
490 fprintf (stderr
, "Total allocated from OS: %u\n\n",
491 total_space_allocated_from_os
);
493 fprintf (stderr
, "No fully used chunks, nothing to do\n");
499 total_allocation
= 0;
500 used_for_canaries
= 0;
504 fprintf(stderr
, "\n-------- SE allocator statistics --------\n");
505 fprintf(stderr
, "Total number of chunk allocations %u\n",
507 fprintf(stderr
, "%s chunks, %s canaries\n",
508 se_packet_mem
.debug_use_chunks
? "Using" : "Not using",
509 se_packet_mem
.debug_use_canary
? "using" : "not using");
511 if (! (se_packet_mem
.free_list
|| !se_packet_mem
.used_list
)) {
512 fprintf(stderr
, "No memory allocated\n");
516 if (!se_packet_mem
.debug_use_chunks
)
517 return; /* Nothing interesting without chunks?? */
519 /* Only look at the used_list since those chunks are fully used.
520 * Looking at the free list would skew our view of what we have wasted.
522 for (chunk
= se_packet_mem
.used_list
; chunk
; chunk
= chunk
->next
) {
524 total_used
+= (chunk
->amount_free_init
- chunk
->amount_free
);
525 total_allocation
+= chunk
->amount_free_init
;
527 if (se_packet_mem
.debug_use_canary
){
528 void *ptr
= chunk
->canary_last
;
531 while (ptr
!= NULL
) {
532 ptr
= emem_canary_next(se_packet_mem
.canary
, (guint8
*)ptr
, &len
);
534 if (ptr
== (void *) -1)
535 g_error("Memory corrupted");
536 used_for_canaries
+= len
;
541 if (num_chunks
== 0) {
543 fprintf (stderr
, "No fully used chunks, nothing to do\n");
547 fprintf (stderr
, "\n");
548 fprintf (stderr
, "---------- Allocations from the OS ----------\n");
549 fprintf (stderr
, "---- Headers ----\n");
550 fprintf (stderr
, "\t( Chunk header size: %10lu\n",
551 sizeof(emem_chunk_t
));
552 fprintf (stderr
, "\t* Number of chunks: %10u\n", num_chunks
);
553 fprintf (stderr
, "\t-------------------------------------------\n");
555 total_headers
= sizeof(emem_chunk_t
) * num_chunks
;
556 fprintf (stderr
, "\t= %u bytes used for headers\n", total_headers
);
557 fprintf (stderr
, "\n---- Buffer space ----\n");
558 fprintf (stderr
, "\tChunk allocation size: %10u\n",
559 EMEM_PACKET_CHUNK_SIZE
);
560 fprintf (stderr
, "\t* Number of chunks: %10u\n", num_chunks
);
561 fprintf (stderr
, "\t-------------------------------------------\n");
562 fprintf (stderr
, "\t= %u (%u including guard pages) bytes used for buffers\n",
563 total_allocation
, EMEM_PACKET_CHUNK_SIZE
* num_chunks
);
564 fprintf (stderr
, "\t-------------------------------------------\n");
565 total_space_allocated_from_os
= (EMEM_PACKET_CHUNK_SIZE
* num_chunks
)
567 fprintf (stderr
, "Total bytes allocated from the OS: %u\n\n",
568 total_space_allocated_from_os
);
570 for (i
= 0; i
< NUM_ALLOC_DIST
; i
++)
571 num_allocs
+= allocations
[i
];
573 fprintf (stderr
, "---------- Allocations from the SE pool ----------\n");
574 fprintf (stderr
, " Number of SE allocations: %10u\n",
576 fprintf (stderr
, " Bytes used (incl. canaries): %10u\n",
578 fprintf (stderr
, " Bytes used for canaries: %10u\n",
580 fprintf (stderr
, "Bytes unused (wasted, excl. guard pages): %10u\n",
581 total_allocation
- total_used
);
582 fprintf (stderr
, "Bytes unused (wasted, incl. guard pages): %10u\n\n",
583 total_space_allocated_from_os
- total_used
);
585 fprintf (stderr
, "---------- Statistics ----------\n");
586 fprintf (stderr
, "Average SE allocation size (incl. canaries): %6.2f\n",
587 (float)total_used
/(float)num_allocs
);
588 fprintf (stderr
, "Average SE allocation size (excl. canaries): %6.2f\n",
589 (float)(total_used
- used_for_canaries
)/(float)num_allocs
);
590 fprintf (stderr
, " Average wasted bytes per allocation: %6.2f\n",
591 (total_allocation
- total_used
)/(float)num_allocs
);
592 total_space_wasted
= (total_allocation
- total_used
)
593 + (sizeof(emem_chunk_t
));
594 fprintf (stderr
, " Space used for headers + unused allocation: %8u\n",
596 fprintf (stderr
, "--> %% overhead/waste: %4.2f\n",
597 100 * (float)total_space_wasted
/(float)total_space_allocated_from_os
);
599 fprintf (stderr
, "\nAllocation distribution (sizes include canaries):\n");
600 for (i
= 0; i
< (NUM_ALLOC_DIST
-1); i
++)
601 fprintf (stderr
, "size < %5d: %8u\n", 32<<i
, allocations
[i
]);
602 fprintf (stderr
, "size > %5d: %8u\n", 32<<i
, allocations
[i
]);
607 emem_verify_pointer_list(const emem_chunk_t
*chunk_list
, const void *ptr
)
609 const gchar
*cptr
= (gchar
*)ptr
;
610 const emem_chunk_t
*chunk
;
612 for (chunk
= chunk_list
; chunk
; chunk
= chunk
->next
) {
613 if (cptr
>= (chunk
->buf
+ chunk
->free_offset_init
) && cptr
< (chunk
->buf
+ chunk
->free_offset
))
620 emem_verify_pointer(const emem_pool_t
*hdr
, const void *ptr
)
622 return emem_verify_pointer_list(hdr
->free_list
, ptr
) || emem_verify_pointer_list(hdr
->used_list
, ptr
);
626 ep_verify_pointer(const void *ptr
)
628 if (ep_packet_mem
.debug_verify_pointers
)
629 return emem_verify_pointer(&ep_packet_mem
, ptr
);
635 se_verify_pointer(const void *ptr
)
637 if (se_packet_mem
.debug_verify_pointers
)
638 return emem_verify_pointer(&se_packet_mem
, ptr
);
644 emem_scrub_memory(char *buf
, size_t size
, gboolean alloc
)
646 guint scrubbed_value
;
649 if (!debug_use_memory_scrubber
)
652 if (alloc
) /* this memory is being allocated */
653 scrubbed_value
= 0xBADDCAFE;
654 else /* this memory is being freed */
655 scrubbed_value
= 0xDEADBEEF;
657 /* We shouldn't need to check the alignment of the starting address
658 * since this is malloc'd memory (or 'pagesize' bytes into malloc'd
662 /* XXX - if the above is *NOT* true, we should use memcpy here,
663 * in order to avoid problems on alignment-sensitive platforms, e.g.
664 * http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
667 for (offset
= 0; offset
+ sizeof(guint
) <= size
; offset
+= sizeof(guint
))
668 *(guint
*)(void*)(buf
+offset
) = scrubbed_value
;
670 /* Initialize the last bytes, if any */
672 *(guint8
*)(buf
+offset
) = scrubbed_value
>> 24;
675 *(guint8
*)(buf
+offset
) = (scrubbed_value
>> 16) & 0xFF;
678 *(guint8
*)(buf
+offset
) = (scrubbed_value
>> 8) & 0xFF;
686 static emem_chunk_t
*
687 emem_create_chunk(size_t size
)
691 npc
= g_new(emem_chunk_t
, 1);
693 npc
->canary_last
= NULL
;
697 * MSDN documents VirtualAlloc/VirtualProtect at
698 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
701 /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
702 npc
->buf
= (char *)VirtualAlloc(NULL
, size
,
703 MEM_COMMIT
|MEM_RESERVE
, PAGE_READWRITE
);
705 if (npc
->buf
== NULL
) {
707 if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
710 THROW(OutOfMemoryError
);
713 #elif defined(USE_GUARD_PAGES)
714 npc
->buf
= (char *)mmap(NULL
, size
,
715 PROT_READ
|PROT_WRITE
, ANON_PAGE_MODE
, ANON_FD
, 0);
717 if (npc
->buf
== MAP_FAILED
) {
719 if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
722 THROW(OutOfMemoryError
);
725 #else /* Is there a draft in here? */
726 npc
->buf
= g_malloc(size
);
727 /* g_malloc() can't fail */
730 #ifdef SHOW_EMEM_STATS
734 npc
->amount_free
= npc
->amount_free_init
= (unsigned int) size
;
735 npc
->free_offset
= npc
->free_offset_init
= 0;
739 static emem_chunk_t
*
740 emem_create_chunk_gp(size_t size
)
744 char *buf_end
, *prot1
, *prot2
;
746 #elif defined(USE_GUARD_PAGES)
748 char *buf_end
, *prot1
, *prot2
;
749 #endif /* _WIN32 / USE_GUARD_PAGES */
752 npc
= emem_create_chunk(size
);
755 buf_end
= npc
->buf
+ size
;
757 /* Align our guard pages on page-sized boundaries */
758 prot1
= (char *) ((((intptr_t) npc
->buf
+ pagesize
- 1) / pagesize
) * pagesize
);
759 prot2
= (char *) ((((intptr_t) buf_end
- (1 * pagesize
)) / pagesize
) * pagesize
);
761 ret
= VirtualProtect(prot1
, pagesize
, PAGE_NOACCESS
, &oldprot
);
762 g_assert(ret
!= 0 || iswindowsplatform
);
763 ret
= VirtualProtect(prot2
, pagesize
, PAGE_NOACCESS
, &oldprot
);
764 g_assert(ret
!= 0 || iswindowsplatform
);
766 npc
->amount_free_init
= (unsigned int) (prot2
- prot1
- pagesize
);
767 npc
->free_offset_init
= (unsigned int) (prot1
- npc
->buf
) + pagesize
;
768 #elif defined(USE_GUARD_PAGES)
769 buf_end
= npc
->buf
+ size
;
771 /* Align our guard pages on page-sized boundaries */
772 prot1
= (char *) ((((intptr_t) npc
->buf
+ pagesize
- 1) / pagesize
) * pagesize
);
773 prot2
= (char *) ((((intptr_t) buf_end
- (1 * pagesize
)) / pagesize
) * pagesize
);
775 ret
= mprotect(prot1
, pagesize
, PROT_NONE
);
777 ret
= mprotect(prot2
, pagesize
, PROT_NONE
);
780 npc
->amount_free_init
= (unsigned int)(prot2
- prot1
- pagesize
);
781 npc
->free_offset_init
= (unsigned int)((prot1
- npc
->buf
) + pagesize
);
783 npc
->amount_free_init
= size
;
784 npc
->free_offset_init
= 0;
785 #endif /* USE_GUARD_PAGES */
787 npc
->amount_free
= npc
->amount_free_init
;
788 npc
->free_offset
= npc
->free_offset_init
;
793 emem_alloc_chunk(size_t size
, emem_pool_t
*mem
)
798 gboolean use_canary
= mem
->debug_use_canary
;
800 emem_chunk_t
*free_list
;
802 /* Allocate room for at least 8 bytes of canary plus some padding
803 * so the canary ends on an 8-byte boundary.
804 * But first add the room needed for the pointer to the next canary
805 * (so the entire allocation will end on an 8-byte boundary).
808 asize
+= sizeof(void *);
809 pad
= emem_canary_pad(asize
);
811 pad
= (WS_MEM_ALIGN
- (asize
& (WS_MEM_ALIGN
-1))) & (WS_MEM_ALIGN
-1);
815 #ifdef SHOW_EMEM_STATS
816 /* Do this check here so we can include the canary size */
817 if (mem
== &se_packet_mem
) {
822 else if (asize
< 128)
824 else if (asize
< 256)
826 else if (asize
< 512)
828 else if (asize
< 1024)
830 else if (asize
< 2048)
832 else if (asize
< 4096)
834 else if (asize
< 8192)
836 else if (asize
< 16384)
839 allocations
[(NUM_ALLOC_DIST
-1)]++;
843 /* make sure we dont try to allocate too much (arbitrary limit) */
844 DISSECTOR_ASSERT(size
<(EMEM_PACKET_CHUNK_SIZE
>>2));
847 mem
->free_list
= emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE
);
849 /* oops, we need to allocate more memory to serve this request
850 * than we have free. move this node to the used list and try again
852 if(asize
> mem
->free_list
->amount_free
) {
855 mem
->free_list
=mem
->free_list
->next
;
856 npc
->next
=mem
->used_list
;
860 mem
->free_list
= emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE
);
863 free_list
= mem
->free_list
;
865 buf
= free_list
->buf
+ free_list
->free_offset
;
867 free_list
->amount_free
-= (unsigned int) asize
;
868 free_list
->free_offset
+= (unsigned int) asize
;
871 char *cptr
= (char *)buf
+ size
;
873 memcpy(cptr
, mem
->canary
, pad
-1);
875 memcpy(cptr
+ pad
, &free_list
->canary_last
, sizeof(void *));
877 free_list
->canary_last
= cptr
;
884 emem_alloc_glib(size_t size
, emem_pool_t
*mem
)
888 npc
=g_new(emem_chunk_t
, 1);
889 npc
->next
=mem
->used_list
;
890 npc
->buf
=(char *)g_malloc(size
);
891 npc
->canary_last
= NULL
;
893 /* There's no padding/alignment involved (from our point of view) when
894 * we fetch the memory directly from the system pool, so WYSIWYG */
895 npc
->amount_free
= npc
->free_offset_init
= 0;
896 npc
->free_offset
= npc
->amount_free_init
= (unsigned int) size
;
901 /* allocate 'size' amount of memory. */
903 emem_alloc(size_t size
, emem_pool_t
*mem
)
908 /* For testing wmem, effectively redirects most emem memory to wmem.
909 * You will also have to comment out several assertions in wmem_core.c,
910 * specifically anything g_assert(allocator->in_scope), since it is much
911 * stricter about when it is permitted to be called. */
912 if (mem
== &ep_packet_mem
) {
913 return wmem_alloc(wmem_packet_scope(), size
);
915 else if (mem
== &se_packet_mem
) {
916 return wmem_alloc(wmem_file_scope(), size
);
920 buf
= mem
->memory_alloc(size
, mem
);
922 /* XXX - this is a waste of time if the allocator function is going to
923 * memset this straight back to 0.
925 emem_scrub_memory((char *)buf
, size
, TRUE
);
930 /* allocate 'size' amount of memory with an allocation lifetime until the
934 ep_alloc(size_t size
)
936 return emem_alloc(size
, &ep_packet_mem
);
939 /* allocate 'size' amount of memory with an allocation lifetime until the
943 se_alloc(size_t size
)
945 return emem_alloc(size
, &se_packet_mem
);
949 ep_alloc0(size_t size
)
951 return memset(ep_alloc(size
),'\0',size
);
955 se_alloc0(size_t size
)
957 return memset(se_alloc(size
),'\0',size
);
961 emem_strdup(const gchar
*src
, void *allocator(size_t))
966 /* If str is NULL, just return the string "<NULL>" so that the callers don't
967 * have to bother checking it.
972 len
= (guint
) strlen(src
);
973 dst
= (gchar
*)memcpy(allocator(len
+1), src
, len
+1);
979 ep_strdup(const gchar
*src
)
981 return emem_strdup(src
, ep_alloc
);
985 se_strdup(const gchar
*src
)
987 return emem_strdup(src
, se_alloc
);
991 emem_strndup(const gchar
*src
, size_t len
, void *allocator(size_t))
993 gchar
*dst
= (gchar
*)allocator(len
+1);
996 for (i
= 0; (i
< len
) && src
[i
]; i
++)
1005 ep_strndup(const gchar
*src
, size_t len
)
1007 return emem_strndup(src
, len
, ep_alloc
);
1011 se_strndup(const gchar
*src
, size_t len
)
1013 return emem_strndup(src
, len
, se_alloc
);
1019 ep_memdup(const void* src
, size_t len
)
1021 return memcpy(ep_alloc(len
), src
, len
);
1025 se_memdup(const void* src
, size_t len
)
1027 return memcpy(se_alloc(len
), src
, len
);
1031 emem_strdup_vprintf(const gchar
*fmt
, va_list ap
, void *allocator(size_t))
1039 len
= g_printf_string_upper_bound(fmt
, ap
);
1041 dst
= (gchar
*)allocator(len
+1);
1042 g_vsnprintf (dst
, (gulong
) len
, fmt
, ap2
);
1049 ep_strdup_vprintf(const gchar
*fmt
, va_list ap
)
1051 return emem_strdup_vprintf(fmt
, ap
, ep_alloc
);
1055 se_strdup_vprintf(const gchar
* fmt
, va_list ap
)
1057 return emem_strdup_vprintf(fmt
, ap
, se_alloc
);
1061 ep_strdup_printf(const gchar
*fmt
, ...)
1067 dst
= ep_strdup_vprintf(fmt
, ap
);
1073 se_strdup_printf(const gchar
*fmt
, ...)
1079 dst
= se_strdup_vprintf(fmt
, ap
);
1085 ep_strsplit(const gchar
* string
, const gchar
* sep
, int max_tokens
)
1094 enum { AT_START
, IN_PAD
, IN_TOKEN
} state
;
1102 s
= splitted
= ep_strdup(string
);
1103 str_len
= (guint
) strlen(splitted
);
1104 sep_len
= (guint
) strlen(sep
);
1106 if (max_tokens
< 1) max_tokens
= INT_MAX
;
1111 while (tokens
<= (guint
)max_tokens
&& ( s
= strstr(s
,sep
) )) {
1114 for(i
=0; i
< sep_len
; i
++ )
1121 vec
= ep_alloc_array(gchar
*,tokens
+1);
1124 for (i
=0; i
< str_len
; i
++) {
1127 switch(splitted
[i
]) {
1132 vec
[curr_tok
] = &(splitted
[i
]);
1138 switch(splitted
[i
]) {
1145 switch(splitted
[i
]) {
1147 vec
[curr_tok
] = &(splitted
[i
]);
1156 vec
[curr_tok
] = NULL
;
1162 ep_strconcat(const gchar
*string1
, ...)
1173 l
= 1 + strlen(string1
);
1174 va_start(args
, string1
);
1175 s
= va_arg(args
, gchar
*);
1178 s
= va_arg(args
, gchar
*);
1182 concat
= (gchar
*)ep_alloc(l
);
1185 ptr
= g_stpcpy(ptr
, string1
);
1186 va_start(args
, string1
);
1187 s
= va_arg(args
, gchar
*);
1189 ptr
= g_stpcpy(ptr
, s
);
1190 s
= va_arg(args
, gchar
*);
1199 /* release all allocated memory back to the pool. */
1201 emem_free_all(emem_pool_t
*mem
)
1203 gboolean use_chunks
= mem
->debug_use_chunks
;
1206 emem_tree_t
*tree_list
;
1208 /* move all used chunks over to the free list */
1209 while(mem
->used_list
){
1211 mem
->used_list
=mem
->used_list
->next
;
1212 npc
->next
=mem
->free_list
;
1216 /* clear them all out */
1217 npc
= mem
->free_list
;
1218 while (npc
!= NULL
) {
1220 while (npc
->canary_last
!= NULL
) {
1221 npc
->canary_last
= emem_canary_next(mem
->canary
, (guint8
*)npc
->canary_last
, NULL
);
1222 /* XXX, check if canary_last is inside allocated memory? */
1224 if (npc
->canary_last
== (void *) -1)
1225 g_error("Memory corrupted");
1228 emem_scrub_memory((npc
->buf
+ npc
->free_offset_init
),
1229 (npc
->free_offset
- npc
->free_offset_init
),
1232 npc
->amount_free
= npc
->amount_free_init
;
1233 npc
->free_offset
= npc
->free_offset_init
;
1236 emem_chunk_t
*next
= npc
->next
;
1238 emem_scrub_memory(npc
->buf
, npc
->amount_free_init
, FALSE
);
1247 /* We've freed all this memory already */
1248 mem
->free_list
= NULL
;
1251 /* release/reset all allocated trees */
1252 for(tree_list
=mem
->trees
;tree_list
;tree_list
=tree_list
->next
){
1253 tree_list
->tree
=NULL
;
1257 /* release all allocated memory back to the pool. */
1261 emem_free_all(&ep_packet_mem
);
1264 /* release all allocated memory back to the pool. */
1268 #ifdef SHOW_EMEM_STATS
1269 print_alloc_stats();
1272 emem_free_all(&se_packet_mem
);
1276 ep_stack_new(void) {
1277 ep_stack_t s
= ep_new(struct _ep_stack_frame_t
*);
1278 *s
= ep_new0(struct _ep_stack_frame_t
);
1282 /* for ep_stack_t we'll keep the popped frames so we reuse them instead
1283 of allocating new ones.
1287 ep_stack_push(ep_stack_t stack
, void* data
)
1289 struct _ep_stack_frame_t
* frame
;
1290 struct _ep_stack_frame_t
* head
= (*stack
);
1293 frame
= head
->above
;
1295 frame
= ep_new(struct _ep_stack_frame_t
);
1296 head
->above
= frame
;
1297 frame
->below
= head
;
1298 frame
->above
= NULL
;
1301 frame
->payload
= data
;
1308 ep_stack_pop(ep_stack_t stack
)
1311 if ((*stack
)->below
) {
1312 (*stack
) = (*stack
)->below
;
1313 return (*stack
)->above
->payload
;
1320 se_tree_create(int type
, const char *name
)
1322 emem_tree_t
*tree_list
;
1324 tree_list
=(emem_tree_t
*)g_malloc(sizeof(emem_tree_t
));
1325 tree_list
->next
=se_packet_mem
.trees
;
1326 tree_list
->type
=type
;
1327 tree_list
->tree
=NULL
;
1328 tree_list
->name
=name
;
1329 tree_list
->malloc
=se_alloc
;
1330 se_packet_mem
.trees
=tree_list
;
1336 emem_tree_lookup32(emem_tree_t
*se_tree
, guint32 key
)
1338 emem_tree_node_t
*node
;
1343 if(key
==node
->key32
){
1346 if(key
<node
->key32
){
1350 if(key
>node
->key32
){
1359 emem_tree_lookup32_le(emem_tree_t
*se_tree
, guint32 key
)
1361 emem_tree_node_t
*node
;
1371 if(key
==node
->key32
){
1374 if(key
<node
->key32
){
1382 if(key
>node
->key32
){
1397 /* If we are still at the root of the tree this means that this node
1398 * is either smaller than the search key and then we return this
1399 * node or else there is no smaller key available and then
1403 if(key
>node
->key32
){
1410 if(node
->parent
->left
==node
){
1413 if(key
>node
->key32
){
1414 /* if this is a left child and its key is smaller than
1415 * the search key, then this is the node we want.
1419 /* if this is a left child and its key is bigger than
1420 * the search key, we have to check if any
1421 * of our ancestors are smaller than the search key.
1424 if(key
>node
->key32
){
1434 if(node
->key32
<key
){
1435 /* if this is the right child and its key is smaller
1436 * than the search key then this is the one we want.
1440 /* if this is the right child and its key is larger
1441 * than the search key then our parent is the one we
1444 return node
->parent
->data
;
1451 static inline emem_tree_node_t
*
1452 emem_tree_parent(emem_tree_node_t
*node
)
1454 return node
->parent
;
1457 static inline emem_tree_node_t
*
1458 emem_tree_grandparent(emem_tree_node_t
*node
)
1460 emem_tree_node_t
*parent
;
1462 parent
=emem_tree_parent(node
);
1464 return parent
->parent
;
1469 static inline emem_tree_node_t
*
1470 emem_tree_uncle(emem_tree_node_t
*node
)
1472 emem_tree_node_t
*parent
, *grandparent
;
1474 parent
=emem_tree_parent(node
);
1478 grandparent
=emem_tree_parent(parent
);
1482 if(parent
==grandparent
->left
){
1483 return grandparent
->right
;
1485 return grandparent
->left
;
1488 static inline void rb_insert_case1(emem_tree_t
*se_tree
, emem_tree_node_t
*node
);
1489 static inline void rb_insert_case2(emem_tree_t
*se_tree
, emem_tree_node_t
*node
);
1492 rotate_left(emem_tree_t
*se_tree
, emem_tree_node_t
*node
)
1495 if(node
->parent
->left
==node
){
1496 node
->parent
->left
=node
->right
;
1498 node
->parent
->right
=node
->right
;
1501 se_tree
->tree
=node
->right
;
1503 node
->right
->parent
=node
->parent
;
1504 node
->parent
=node
->right
;
1505 node
->right
=node
->right
->left
;
1507 node
->right
->parent
=node
;
1509 node
->parent
->left
=node
;
1513 rotate_right(emem_tree_t
*se_tree
, emem_tree_node_t
*node
)
1516 if(node
->parent
->left
==node
){
1517 node
->parent
->left
=node
->left
;
1519 node
->parent
->right
=node
->left
;
1522 se_tree
->tree
=node
->left
;
1524 node
->left
->parent
=node
->parent
;
1525 node
->parent
=node
->left
;
1526 node
->left
=node
->left
->right
;
1528 node
->left
->parent
=node
;
1530 node
->parent
->right
=node
;
1534 rb_insert_case5(emem_tree_t
*se_tree
, emem_tree_node_t
*node
)
1536 emem_tree_node_t
*grandparent
;
1537 emem_tree_node_t
*parent
;
1539 parent
=emem_tree_parent(node
);
1540 grandparent
=emem_tree_parent(parent
);
1541 parent
->u
.rb_color
=EMEM_TREE_RB_COLOR_BLACK
;
1542 grandparent
->u
.rb_color
=EMEM_TREE_RB_COLOR_RED
;
1543 if( (node
==parent
->left
) && (parent
==grandparent
->left
) ){
1544 rotate_right(se_tree
, grandparent
);
1546 rotate_left(se_tree
, grandparent
);
1551 rb_insert_case4(emem_tree_t
*se_tree
, emem_tree_node_t
*node
)
1553 emem_tree_node_t
*grandparent
;
1554 emem_tree_node_t
*parent
;
1556 parent
=emem_tree_parent(node
);
1557 grandparent
=emem_tree_parent(parent
);
1561 if( (node
==parent
->right
) && (parent
==grandparent
->left
) ){
1562 rotate_left(se_tree
, parent
);
1564 } else if( (node
==parent
->left
) && (parent
==grandparent
->right
) ){
1565 rotate_right(se_tree
, parent
);
1568 rb_insert_case5(se_tree
, node
);
1572 rb_insert_case3(emem_tree_t
*se_tree
, emem_tree_node_t
*node
)
1574 emem_tree_node_t
*grandparent
;
1575 emem_tree_node_t
*parent
;
1576 emem_tree_node_t
*uncle
;
1578 uncle
=emem_tree_uncle(node
);
1579 if(uncle
&& (uncle
->u
.rb_color
==EMEM_TREE_RB_COLOR_RED
)){
1580 parent
=emem_tree_parent(node
);
1581 parent
->u
.rb_color
=EMEM_TREE_RB_COLOR_BLACK
;
1582 uncle
->u
.rb_color
=EMEM_TREE_RB_COLOR_BLACK
;
1583 grandparent
=emem_tree_grandparent(node
);
1584 grandparent
->u
.rb_color
=EMEM_TREE_RB_COLOR_RED
;
1585 rb_insert_case1(se_tree
, grandparent
);
1587 rb_insert_case4(se_tree
, node
);
1592 rb_insert_case2(emem_tree_t
*se_tree
, emem_tree_node_t
*node
)
1594 emem_tree_node_t
*parent
;
1596 parent
=emem_tree_parent(node
);
1597 /* parent is always non-NULL here */
1598 if(parent
->u
.rb_color
==EMEM_TREE_RB_COLOR_BLACK
){
1601 rb_insert_case3(se_tree
, node
);
1605 rb_insert_case1(emem_tree_t
*se_tree
, emem_tree_node_t
*node
)
1607 emem_tree_node_t
*parent
;
1609 parent
=emem_tree_parent(node
);
1611 node
->u
.rb_color
=EMEM_TREE_RB_COLOR_BLACK
;
1614 rb_insert_case2(se_tree
, node
);
1617 /* insert a new node in the tree. if this node matches an already existing node
1618 * then just replace the data for that node */
1620 emem_tree_insert32(emem_tree_t
*se_tree
, guint32 key
, void *data
)
1622 emem_tree_node_t
*node
;
1626 /* is this the first node ?*/
1628 node
=(emem_tree_node_t
*)se_tree
->malloc(sizeof(emem_tree_node_t
));
1629 switch(se_tree
->type
){
1630 case EMEM_TREE_TYPE_RED_BLACK
:
1631 node
->u
.rb_color
=EMEM_TREE_RB_COLOR_BLACK
;
1639 node
->u
.is_subtree
= EMEM_TREE_NODE_IS_DATA
;
1644 /* it was not the new root so walk the tree until we find where to
1645 * insert this new leaf.
1648 /* this node already exists, so just replace the data pointer*/
1649 if(key
==node
->key32
){
1653 if(key
<node
->key32
) {
1655 /* new node to the left */
1656 emem_tree_node_t
*new_node
;
1657 new_node
=(emem_tree_node_t
*)se_tree
->malloc(sizeof(emem_tree_node_t
));
1658 node
->left
=new_node
;
1659 new_node
->parent
=node
;
1660 new_node
->left
=NULL
;
1661 new_node
->right
=NULL
;
1662 new_node
->key32
=key
;
1663 new_node
->data
=data
;
1664 new_node
->u
.is_subtree
=EMEM_TREE_NODE_IS_DATA
;
1671 if(key
>node
->key32
) {
1673 /* new node to the right */
1674 emem_tree_node_t
*new_node
;
1675 new_node
=(emem_tree_node_t
*)se_tree
->malloc(sizeof(emem_tree_node_t
));
1676 node
->right
=new_node
;
1677 new_node
->parent
=node
;
1678 new_node
->left
=NULL
;
1679 new_node
->right
=NULL
;
1680 new_node
->key32
=key
;
1681 new_node
->data
=data
;
1682 new_node
->u
.is_subtree
=EMEM_TREE_NODE_IS_DATA
;
1691 /* node will now point to the newly created node */
1692 switch(se_tree
->type
){
1693 case EMEM_TREE_TYPE_RED_BLACK
:
1694 node
->u
.rb_color
=EMEM_TREE_RB_COLOR_RED
;
1695 rb_insert_case1(se_tree
, node
);
1701 lookup_or_insert32(emem_tree_t
*se_tree
, guint32 key
, void*(*func
)(void*),void* ud
, int is_subtree
)
1703 emem_tree_node_t
*node
;
1707 /* is this the first node ?*/
1709 node
=(emem_tree_node_t
*)se_tree
->malloc(sizeof(emem_tree_node_t
));
1710 switch(se_tree
->type
){
1711 case EMEM_TREE_TYPE_RED_BLACK
:
1712 node
->u
.rb_color
=EMEM_TREE_RB_COLOR_BLACK
;
1719 node
->data
= func(ud
);
1720 node
->u
.is_subtree
= is_subtree
;
1725 /* it was not the new root so walk the tree until we find where to
1726 * insert this new leaf.
1729 /* this node already exists, so just return the data pointer*/
1730 if(key
==node
->key32
){
1733 if(key
<node
->key32
) {
1735 /* new node to the left */
1736 emem_tree_node_t
*new_node
;
1737 new_node
=(emem_tree_node_t
*)se_tree
->malloc(sizeof(emem_tree_node_t
));
1738 node
->left
=new_node
;
1739 new_node
->parent
=node
;
1740 new_node
->left
=NULL
;
1741 new_node
->right
=NULL
;
1742 new_node
->key32
=key
;
1743 new_node
->data
= func(ud
);
1744 new_node
->u
.is_subtree
= is_subtree
;
1751 if(key
>node
->key32
) {
1753 /* new node to the right */
1754 emem_tree_node_t
*new_node
;
1755 new_node
=(emem_tree_node_t
*)se_tree
->malloc(sizeof(emem_tree_node_t
));
1756 node
->right
=new_node
;
1757 new_node
->parent
=node
;
1758 new_node
->left
=NULL
;
1759 new_node
->right
=NULL
;
1760 new_node
->key32
=key
;
1761 new_node
->data
= func(ud
);
1762 new_node
->u
.is_subtree
= is_subtree
;
1771 /* node will now point to the newly created node */
1772 switch(se_tree
->type
){
1773 case EMEM_TREE_TYPE_RED_BLACK
:
1774 node
->u
.rb_color
=EMEM_TREE_RB_COLOR_RED
;
1775 rb_insert_case1(se_tree
, node
);
1782 /* create another (sub)tree using the same memory allocation scope
1783 * as the parent tree.
1785 static emem_tree_t
*
1786 emem_tree_create_subtree(emem_tree_t
*parent_tree
, const char *name
)
1788 emem_tree_t
*tree_list
;
1790 tree_list
=(emem_tree_t
*)parent_tree
->malloc(sizeof(emem_tree_t
));
1791 tree_list
->next
=NULL
;
1792 tree_list
->type
=parent_tree
->type
;
1793 tree_list
->tree
=NULL
;
1794 tree_list
->name
=name
;
1795 tree_list
->malloc
=parent_tree
->malloc
;
1801 create_sub_tree(void* d
)
1803 emem_tree_t
*se_tree
= (emem_tree_t
*)d
;
1804 return emem_tree_create_subtree(se_tree
, "subtree");
1807 /* insert a new node in the tree. if this node matches an already existing node
1808 * then just replace the data for that node */
1811 emem_tree_insert32_array(emem_tree_t
*se_tree
, emem_tree_key_t
*key
, void *data
)
1813 emem_tree_t
*insert_tree
= NULL
;
1814 emem_tree_key_t
*cur_key
;
1815 guint32 i
, insert_key32
= 0;
1817 if(!se_tree
|| !key
) return;
1819 for (cur_key
= key
; cur_key
->length
> 0; cur_key
++) {
1820 if(cur_key
->length
> 100) {
1821 DISSECTOR_ASSERT_NOT_REACHED();
1824 for (i
= 0; i
< cur_key
->length
; i
++) {
1825 /* Insert using the previous key32 */
1827 insert_tree
= se_tree
;
1829 insert_tree
= (emem_tree_t
*)lookup_or_insert32(insert_tree
, insert_key32
, create_sub_tree
, se_tree
, EMEM_TREE_NODE_IS_SUBTREE
);
1831 insert_key32
= cur_key
->key
[i
];
1836 /* We didn't get a valid key. Should we return NULL instead? */
1837 DISSECTOR_ASSERT_NOT_REACHED();
1840 emem_tree_insert32(insert_tree
, insert_key32
, data
);
1845 emem_tree_lookup32_array(emem_tree_t
*se_tree
, emem_tree_key_t
*key
)
1847 emem_tree_t
*lookup_tree
= NULL
;
1848 emem_tree_key_t
*cur_key
;
1849 guint32 i
, lookup_key32
= 0;
1851 if(!se_tree
|| !key
) return NULL
; /* prevent searching on NULL pointer */
1853 for (cur_key
= key
; cur_key
->length
> 0; cur_key
++) {
1854 if(cur_key
->length
> 100) {
1855 DISSECTOR_ASSERT_NOT_REACHED();
1858 for (i
= 0; i
< cur_key
->length
; i
++) {
1859 /* Lookup using the previous key32 */
1861 lookup_tree
= se_tree
;
1863 lookup_tree
= (emem_tree_t
*)emem_tree_lookup32(lookup_tree
, lookup_key32
);
1868 lookup_key32
= cur_key
->key
[i
];
1873 /* We didn't get a valid key. Should we return NULL instead? */
1874 DISSECTOR_ASSERT_NOT_REACHED();
1877 return emem_tree_lookup32(lookup_tree
, lookup_key32
);
1881 emem_tree_lookup32_array_le(emem_tree_t
*se_tree
, emem_tree_key_t
*key
)
1883 emem_tree_t
*lookup_tree
= NULL
;
1884 emem_tree_key_t
*cur_key
;
1885 guint32 i
, lookup_key32
= 0;
1887 if(!se_tree
|| !key
) return NULL
; /* prevent searching on NULL pointer */
1889 for (cur_key
= key
; cur_key
->length
> 0; cur_key
++) {
1890 if(cur_key
->length
> 100) {
1891 DISSECTOR_ASSERT_NOT_REACHED();
1894 for (i
= 0; i
< cur_key
->length
; i
++) {
1895 /* Lookup using the previous key32 */
1897 lookup_tree
= se_tree
;
1899 lookup_tree
= (emem_tree_t
*)emem_tree_lookup32_le(lookup_tree
, lookup_key32
);
1904 lookup_key32
= cur_key
->key
[i
];
1909 /* We didn't get a valid key. Should we return NULL instead? */
1910 DISSECTOR_ASSERT_NOT_REACHED();
1913 return emem_tree_lookup32_le(lookup_tree
, lookup_key32
);
1917 /* Strings are stored as an array of uint32 containing the string characters
1918 with 4 characters in each uint32.
1919 The first byte of the string is stored as the most significant byte.
1920 If the string is not a multiple of 4 characters in length the last
1921 uint32 containing the string bytes are padded with 0 bytes.
1922 After the uint32's containing the string, there is one final terminator
1923 uint32 with the value 0x00000001
1926 emem_tree_insert_string(emem_tree_t
* se_tree
, const gchar
* k
, void* v
, guint32 flags
)
1928 emem_tree_key_t key
[2];
1929 guint32
*aligned
=NULL
;
1930 guint32 len
= (guint32
) strlen(k
);
1931 guint32 divx
= (len
+3)/4+1;
1935 aligned
= (guint32
*)g_malloc(divx
* sizeof (guint32
));
1937 /* pack the bytes one one by one into guint32s */
1939 for (i
= 0;i
< len
;i
++) {
1942 ch
= (unsigned char)k
[i
];
1943 if (flags
& EMEM_TREE_STRING_NOCASE
) {
1955 /* add required padding to the last uint32 */
1961 aligned
[i
/4-1] = tmp
;
1964 /* add the terminator */
1965 aligned
[divx
-1] = 0x00000001;
1967 key
[0].length
= divx
;
1968 key
[0].key
= aligned
;
1973 emem_tree_insert32_array(se_tree
, key
, v
);
1978 emem_tree_lookup_string(emem_tree_t
* se_tree
, const gchar
* k
, guint32 flags
)
1980 emem_tree_key_t key
[2];
1981 guint32
*aligned
=NULL
;
1982 guint32 len
= (guint
) strlen(k
);
1983 guint32 divx
= (len
+3)/4+1;
1988 aligned
= (guint32
*)g_malloc(divx
* sizeof (guint32
));
1990 /* pack the bytes one one by one into guint32s */
1992 for (i
= 0;i
< len
;i
++) {
1995 ch
= (unsigned char)k
[i
];
1996 if (flags
& EMEM_TREE_STRING_NOCASE
) {
2008 /* add required padding to the last uint32 */
2014 aligned
[i
/4-1] = tmp
;
2017 /* add the terminator */
2018 aligned
[divx
-1] = 0x00000001;
2020 key
[0].length
= divx
;
2021 key
[0].key
= aligned
;
2026 ret
= emem_tree_lookup32_array(se_tree
, key
);
2032 emem_tree_foreach_nodes(emem_tree_node_t
* node
, tree_foreach_func callback
, void *user_data
)
2034 gboolean stop_traverse
= FALSE
;
2040 stop_traverse
= emem_tree_foreach_nodes(node
->left
, callback
, user_data
);
2041 if (stop_traverse
) {
2046 if (node
->u
.is_subtree
== EMEM_TREE_NODE_IS_SUBTREE
) {
2047 stop_traverse
= emem_tree_foreach((emem_tree_t
*)node
->data
, callback
, user_data
);
2049 stop_traverse
= callback(node
->data
, user_data
);
2052 if (stop_traverse
) {
2057 stop_traverse
= emem_tree_foreach_nodes(node
->right
, callback
, user_data
);
2058 if (stop_traverse
) {
2067 emem_tree_foreach(emem_tree_t
* emem_tree
, tree_foreach_func callback
, void *user_data
)
2072 if(!emem_tree
->tree
)
2075 return emem_tree_foreach_nodes(emem_tree
->tree
, callback
, user_data
);
2078 static void emem_print_subtree(emem_tree_t
* emem_tree
, guint32 level
);
2081 emem_tree_print_nodes(const char *prefix
, emem_tree_node_t
* node
, guint32 level
)
2088 for(i
=0;i
<level
;i
++){
2092 printf("%sNODE:%p parent:%p left:%p right:%p colour:%s key:%u %s:%p\n", prefix
,
2093 (void *)node
,(void *)(node
->parent
),(void *)(node
->left
),(void *)(node
->right
),
2094 (node
->u
.rb_color
)?"Black":"Red",(node
->key32
),(node
->u
.is_subtree
)?"tree":"data",node
->data
);
2096 emem_tree_print_nodes("L-", node
->left
, level
+1);
2098 emem_tree_print_nodes("R-", node
->right
, level
+1);
2100 if (node
->u
.is_subtree
)
2101 emem_print_subtree((emem_tree_t
*)node
->data
, level
+1);
2105 emem_print_subtree(emem_tree_t
* emem_tree
, guint32 level
)
2112 for(i
=0;i
<level
;i
++){
2116 printf("EMEM tree:%p type:%s name:%s root:%p\n",(void *)emem_tree
,(emem_tree
->type
==1)?"RedBlack":"unknown",emem_tree
->name
,(void *)(emem_tree
->tree
));
2118 emem_tree_print_nodes("Root-", emem_tree
->tree
, level
);
2122 emem_print_tree(emem_tree_t
* emem_tree
)
2124 emem_print_subtree(emem_tree
, 0);
2132 * Presumably we're using these routines for building strings for the tree.
2133 * Use ITEM_LABEL_LENGTH as the basis for our default lengths.
/* Default and maximum allocation sizes for ep_strbuf strings. */
#define DEFAULT_STRBUF_LEN (ITEM_LABEL_LENGTH / 10)
#define MAX_STRBUF_LEN 65536
2140 next_size(gsize cur_alloc_len
, gsize wanted_alloc_len
, gsize max_alloc_len
)
2142 if (max_alloc_len
< 1 || max_alloc_len
> MAX_STRBUF_LEN
) {
2143 max_alloc_len
= MAX_STRBUF_LEN
;
2146 if (cur_alloc_len
< 1) {
2147 cur_alloc_len
= DEFAULT_STRBUF_LEN
;
2150 while (cur_alloc_len
< wanted_alloc_len
) {
2154 return cur_alloc_len
< max_alloc_len
? cur_alloc_len
: max_alloc_len
;
2158 ep_strbuf_grow(emem_strbuf_t
*strbuf
, gsize wanted_alloc_len
)
2160 gsize new_alloc_len
;
2163 if (!strbuf
|| (wanted_alloc_len
<= strbuf
->alloc_len
) || (strbuf
->alloc_len
>= strbuf
->max_alloc_len
)) {
2167 new_alloc_len
= next_size(strbuf
->alloc_len
, wanted_alloc_len
, strbuf
->max_alloc_len
);
2168 new_str
= (gchar
*)ep_alloc(new_alloc_len
);
2169 g_strlcpy(new_str
, strbuf
->str
, new_alloc_len
);
2171 strbuf
->alloc_len
= new_alloc_len
;
2172 strbuf
->str
= new_str
;
2176 ep_strbuf_sized_new(gsize alloc_len
, gsize max_alloc_len
)
2178 emem_strbuf_t
*strbuf
;
2180 strbuf
= ep_new(emem_strbuf_t
);
2182 if ((max_alloc_len
== 0) || (max_alloc_len
> MAX_STRBUF_LEN
))
2183 max_alloc_len
= MAX_STRBUF_LEN
;
2186 else if (alloc_len
> max_alloc_len
)
2187 alloc_len
= max_alloc_len
;
2189 strbuf
->str
= (char *)ep_alloc(alloc_len
);
2190 strbuf
->str
[0] = '\0';
2193 strbuf
->alloc_len
= alloc_len
;
2194 strbuf
->max_alloc_len
= max_alloc_len
;
2200 ep_strbuf_new(const gchar
*init
)
2202 emem_strbuf_t
*strbuf
;
2204 strbuf
= ep_strbuf_sized_new(next_size(0, init
?strlen(init
)+1:0, 0), 0); /* +1 for NULL terminator */
2207 full_len
= g_strlcpy(strbuf
->str
, init
, strbuf
->alloc_len
);
2208 strbuf
->len
= MIN(full_len
, strbuf
->alloc_len
-1);
2215 ep_strbuf_new_label(const gchar
*init
)
2217 emem_strbuf_t
*strbuf
;
2220 /* Be optimistic: Allocate default size strbuf string and only */
2221 /* request an increase if needed. */
2222 /* XXX: Is it reasonable to assume that much of the usage of */
2223 /* ep_strbuf_new_label will have init==NULL or */
2224 /* strlen(init) < DEFAULT_STRBUF_LEN) ??? */
2225 strbuf
= ep_strbuf_sized_new(DEFAULT_STRBUF_LEN
, ITEM_LABEL_LENGTH
);
2230 /* full_len does not count the trailing '\0'. */
2231 full_len
= g_strlcpy(strbuf
->str
, init
, strbuf
->alloc_len
);
2232 if (full_len
< strbuf
->alloc_len
) {
2233 strbuf
->len
+= full_len
;
2235 strbuf
= ep_strbuf_sized_new(full_len
+1, ITEM_LABEL_LENGTH
);
2236 full_len
= g_strlcpy(strbuf
->str
, init
, strbuf
->alloc_len
);
2237 strbuf
->len
= MIN(full_len
, strbuf
->alloc_len
-1);
2244 ep_strbuf_append(emem_strbuf_t
*strbuf
, const gchar
*str
)
2246 gsize add_len
, full_len
;
2248 if (!strbuf
|| !str
|| str
[0] == '\0') {
2252 /* Be optimistic; try the g_strlcpy first & see if enough room. */
2253 /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same */
2254 add_len
= strbuf
->alloc_len
- strbuf
->len
;
2255 full_len
= g_strlcpy(&strbuf
->str
[strbuf
->len
], str
, add_len
);
2256 if (full_len
< add_len
) {
2257 strbuf
->len
+= full_len
;
2259 strbuf
->str
[strbuf
->len
] = '\0'; /* end string at original length again */
2260 ep_strbuf_grow(strbuf
, strbuf
->len
+ full_len
+ 1);
2261 add_len
= strbuf
->alloc_len
- strbuf
->len
;
2262 full_len
= g_strlcpy(&strbuf
->str
[strbuf
->len
], str
, add_len
);
2263 strbuf
->len
+= MIN(add_len
-1, full_len
);
2270 ep_strbuf_append_vprintf(emem_strbuf_t
*strbuf
, const gchar
*format
, va_list ap
)
2273 gsize add_len
, full_len
;
2277 /* Be optimistic; try the g_vsnprintf first & see if enough room. */
2278 /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same. */
2279 add_len
= strbuf
->alloc_len
- strbuf
->len
;
2280 full_len
= g_vsnprintf(&strbuf
->str
[strbuf
->len
], (gulong
) add_len
, format
, ap
);
2281 if (full_len
< add_len
) {
2282 strbuf
->len
+= full_len
;
2284 strbuf
->str
[strbuf
->len
] = '\0'; /* end string at original length again */
2285 ep_strbuf_grow(strbuf
, strbuf
->len
+ full_len
+ 1);
2286 add_len
= strbuf
->alloc_len
- strbuf
->len
;
2287 full_len
= g_vsnprintf(&strbuf
->str
[strbuf
->len
], (gulong
) add_len
, format
, ap2
);
2288 strbuf
->len
+= MIN(add_len
-1, full_len
);
2295 ep_strbuf_append_printf(emem_strbuf_t
*strbuf
, const gchar
*format
, ...)
2299 va_start(ap
, format
);
2300 ep_strbuf_append_vprintf(strbuf
, format
, ap
);
2305 ep_strbuf_printf(emem_strbuf_t
*strbuf
, const gchar
*format
, ...)
2314 va_start(ap
, format
);
2315 ep_strbuf_append_vprintf(strbuf
, format
, ap
);
2320 ep_strbuf_append_c(emem_strbuf_t
*strbuf
, const gchar c
)
2326 /* +1 for the new character & +1 for the trailing '\0'. */
2327 if (strbuf
->alloc_len
< strbuf
->len
+ 1 + 1) {
2328 ep_strbuf_grow(strbuf
, strbuf
->len
+ 1 + 1);
2330 if (strbuf
->alloc_len
>= strbuf
->len
+ 1 + 1) {
2331 strbuf
->str
[strbuf
->len
] = c
;
2333 strbuf
->str
[strbuf
->len
] = '\0';
2340 ep_strbuf_append_unichar(emem_strbuf_t
*strbuf
, const gunichar c
)
2349 charlen
= g_unichar_to_utf8(c
, buf
);
2351 /* +charlen for the new character & +1 for the trailing '\0'. */
2352 if (strbuf
->alloc_len
< strbuf
->len
+ charlen
+ 1) {
2353 ep_strbuf_grow(strbuf
, strbuf
->len
+ charlen
+ 1);
2355 if (strbuf
->alloc_len
>= strbuf
->len
+ charlen
+ 1) {
2356 memcpy(&strbuf
->str
[strbuf
->len
], buf
, charlen
);
2357 strbuf
->len
+= charlen
;
2358 strbuf
->str
[strbuf
->len
] = '\0';
2365 ep_strbuf_truncate(emem_strbuf_t
*strbuf
, gsize len
)
2367 if (!strbuf
|| len
>= strbuf
->len
) {
2371 strbuf
->str
[len
] = '\0';
2383 * indent-tabs-mode: t
2386 * ex: set shiftwidth=8 tabstop=8 noexpandtab:
2387 * :indentSize=8:tabSize=8:noTabs=false: