/* Extraction provenance (gitweb navigation header, kept as a comment):
 * commit "Witness: add pidl output" — wireshark-wip.git / epan / emem.c
 * blob 7238862aa2654823d8ab3c1fb4520ee640cf5427
 */
1 /* emem.c
2 * Wireshark memory management and garbage collection functions
3 * Ronnie Sahlberg 2005
5 * $Id$
7 * Wireshark - Network traffic analyzer
8 * By Gerald Combs <gerald@wireshark.org>
9 * Copyright 1998 Gerald Combs
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 #include "config.h"
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <stdarg.h>
31 #include <ctype.h>
33 #include <time.h>
34 #ifdef HAVE_SYS_TIME_H
35 #include <sys/time.h>
36 #endif
38 #ifdef HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
42 #include <glib.h>
44 #include "app_mem_usage.h"
45 #include "proto.h"
46 #include "exceptions.h"
47 #include "emem.h"
48 #include "wmem/wmem.h"
50 #ifdef _WIN32
51 #include <windows.h> /* VirtualAlloc, VirtualProtect */
52 #include <process.h> /* getpid */
53 #endif
55 /* Print out statistics about our memory allocations? */
56 /*#define SHOW_EMEM_STATS*/
58 /* Do we want to use guardpages? if available */
59 #define WANT_GUARD_PAGES 1
61 #ifdef WANT_GUARD_PAGES
62 /* Add guard pages at each end of our allocated memory */
64 #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
65 #include <stdint.h>
67 #ifdef HAVE_SYS_TYPES_H
68 #include <sys/types.h>
69 #endif /* HAVE_SYS_TYPES_H */
71 #include <sys/mman.h>
73 #if defined(MAP_ANONYMOUS)
74 #define ANON_PAGE_MODE (MAP_ANONYMOUS|MAP_PRIVATE)
75 #elif defined(MAP_ANON)
76 #define ANON_PAGE_MODE (MAP_ANON|MAP_PRIVATE)
77 #else
78 #define ANON_PAGE_MODE (MAP_PRIVATE) /* have to map /dev/zero */
79 #define NEED_DEV_ZERO
80 #endif /* defined(MAP_ANONYMOUS) */
82 #ifdef NEED_DEV_ZERO
83 #include <fcntl.h>
84 static int dev_zero_fd;
85 #define ANON_FD dev_zero_fd
86 #else
87 #define ANON_FD -1
88 #endif /* NEED_DEV_ZERO */
90 #define USE_GUARD_PAGES 1
91 #endif /* defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H) */
92 #endif /* WANT_GUARD_PAGES */
94 /* When required, allocate more memory from the OS in this size chunks */
95 #define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
97 /* The canary between allocations is at least 8 bytes and up to 16 bytes to
98 * allow future allocations to be 4- or 8-byte aligned.
99 * All but the last byte of the canary are randomly generated; the last byte is
100 * NULL to separate the canary and the pointer to the next canary.
102 * For example, if the allocation is a multiple of 8 bytes, the canary and
103 * pointer would look like:
104 * |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
105 * |c|c|c|c|c|c|c|0||p|p|p|p|p|p|p|p| (64-bit), or:
106 * |c|c|c|c|c|c|c|0||p|p|p|p| (32-bit)
108 * If the allocation was, for example, 12 bytes, the canary would look like:
109 * |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
110 * [...]|a|a|a|a|c|c|c|c||c|c|c|c|c|c|c|0| (followed by the pointer)
112 #define EMEM_CANARY_SIZE 8
113 #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
/* One contiguous slab of allocator memory.  Chunks live on a pool's
 * free_list while they still have room and are moved to the used_list once
 * a request no longer fits (see emem_alloc_chunk()). */
typedef struct _emem_chunk_t {
	struct _emem_chunk_t *next;	/* singly-linked list of chunks */
	char *buf;			/* the backing buffer itself */
	size_t size;			/* size of buf in bytes */
	unsigned int	amount_free_init;	/* usable bytes when created (excludes guard pages) */
	unsigned int	amount_free;		/* usable bytes still unallocated */
	unsigned int	free_offset_init;	/* offset of first usable byte in buf */
	unsigned int	free_offset;		/* offset of next byte to hand out */
	void		*canary_last;		/* head of this chunk's canary list, NULL if none */
} emem_chunk_t;
/* The state of one allocator: one instance per lifetime scope
 * (per-packet "ep" and per-capture "se"). */
typedef struct _emem_pool_t {
	emem_chunk_t *free_list;	/* chunks that still have room */
	emem_chunk_t *used_list;	/* chunks that are full (or retired) */

	emem_tree_t *trees;		/* only used by se_mem allocator */

	guint8 canary[EMEM_CANARY_DATA_SIZE];	/* this pool's random canary pattern */

	void *(*memory_alloc)(size_t size, struct _emem_pool_t *);	/* chunked or glib backend */

	/*
	 * Tools like Valgrind and ElectricFence don't work well with memchunks.
	 * Export the following environment variables to make {ep|se}_alloc() allocate each
	 * object individually.
	 *
	 * WIRESHARK_DEBUG_EP_NO_CHUNKS
	 * WIRESHARK_DEBUG_SE_NO_CHUNKS
	 */
	gboolean debug_use_chunks;

	/* Do we want to use canaries?
	 * Export the following environment variables to disable/enable canaries
	 *
	 * WIRESHARK_DEBUG_EP_NO_CANARY
	 * For SE memory use of canary is default off as the memory overhead
	 * is considerable.
	 * WIRESHARK_DEBUG_SE_USE_CANARY
	 */
	gboolean debug_use_canary;

	/* Do we want to verify no one is using a pointer to an ep_ or se_
	 * allocated thing where they shouldn't be?
	 *
	 * Export WIRESHARK_EP_VERIFY_POINTERS or WIRESHARK_SE_VERIFY_POINTERS
	 * to turn this on.
	 */
	gboolean debug_verify_pointers;

} emem_pool_t;
static emem_pool_t ep_packet_mem;	/* packet-lifetime ("ep") pool */
static emem_pool_t se_packet_mem;	/* capture-lifetime ("se") pool */

/*
 * Memory scrubbing is expensive but can be useful to ensure we don't:
 * - use memory before initializing it
 * - use memory after freeing it
 * Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
 */
static gboolean debug_use_memory_scrubber = FALSE;

#if defined (_WIN32)
static SYSTEM_INFO sysinfo;		/* filled in by GetSystemInfo() in emem_init() */
static gboolean iswindowsplatform;	/* TRUE for the VER_PLATFORM_WIN32_WINDOWS (9x) family */
static int pagesize;			/* VM page size, used for guard-page placement */
#elif defined(USE_GUARD_PAGES)
static intptr_t pagesize;		/* VM page size, from sysconf(_SC_PAGESIZE) */
#endif /* _WIN32 / USE_GUARD_PAGES */

/* The two allocation backends; emem_init_chunk() picks one per pool. */
static void *emem_alloc_chunk(size_t size, emem_pool_t *mem);
static void *emem_alloc_glib(size_t size, emem_pool_t *mem);
188 * Set a canary value to be placed between memchunks.
190 static void
191 emem_canary_init(guint8 *canary)
193 int i;
194 static GRand *rand_state = NULL;
196 if (rand_state == NULL) {
197 rand_state = g_rand_new();
199 for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
200 canary[i] = (guint8) g_rand_int_range(rand_state, 1, 0x100);
202 return;
205 static void *
206 emem_canary_next(guint8 *mem_canary, guint8 *canary, int *len)
208 void *ptr;
209 int i;
211 for (i = 0; i < EMEM_CANARY_SIZE-1; i++)
212 if (mem_canary[i] != canary[i])
213 return (void *) -1;
215 for (; i < EMEM_CANARY_DATA_SIZE; i++) {
216 if (canary[i] == '\0') {
217 memcpy(&ptr, &canary[i+1], sizeof(void *));
219 if (len)
220 *len = i + 1 + (int)sizeof(void *);
221 return ptr;
224 if (mem_canary[i] != canary[i])
225 return (void *) -1;
228 return (void *) -1;
232 * Given an allocation size, return the amount of room needed for the canary
233 * (with a minimum of 8 bytes) while using the canary to pad to an 8-byte
234 * boundary.
236 static guint8
237 emem_canary_pad (size_t allocation)
239 guint8 pad;
241 pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
242 if (pad < EMEM_CANARY_SIZE)
243 pad += EMEM_CANARY_SIZE;
245 return pad;
/* used for debugging canaries, will block */
#ifdef DEBUG_INTENSE_CANARY_CHECKS
gboolean intense_canary_checking = FALSE;

/* used to intensivelly check ep canaries: walks every chunk of the ep pool
 * and validates its whole canary chain, aborting via g_error() on
 * corruption and reporting the call sites between which it happened.
 */
void
ep_check_canary_integrity(const char* fmt, ...)
{
	va_list ap;
	/* Where we were on the previous call; pre-seeded so the very first
	 * report reads "between: Launch". */
	static gchar there[128] = {
		'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
	gchar here[128];
	emem_chunk_t* npc = NULL;

	if (! intense_canary_checking ) return;

	va_start(ap,fmt);
	g_vsnprintf(here, sizeof(here), fmt, ap);
	va_end(ap);

	for (npc = ep_packet_mem.free_list; npc != NULL; npc = npc->next) {
		void *canary_next = npc->canary_last;

		while (canary_next != NULL) {
			canary_next = emem_canary_next(ep_packet_mem.canary, canary_next, NULL);
			/* XXX, check if canary_next is inside allocated memory? */

			if (canary_next == (void *) -1)
				g_error("Per-packet memory corrupted\nbetween: %s\nand: %s", there, here);
		}
	}

	g_strlcpy(there, here, sizeof(there));
}
#endif
288 static void
289 emem_init_chunk(emem_pool_t *mem)
291 if (mem->debug_use_canary)
292 emem_canary_init(mem->canary);
294 if (mem->debug_use_chunks)
295 mem->memory_alloc = emem_alloc_chunk;
296 else
297 mem->memory_alloc = emem_alloc_glib;
300 static gsize
301 emem_memory_usage(const emem_pool_t *pool)
303 gsize total_used = 0;
304 emem_chunk_t *chunk;
306 for (chunk = pool->used_list; chunk; chunk = chunk->next)
307 total_used += (chunk->amount_free_init - chunk->amount_free);
309 for (chunk = pool->free_list; chunk; chunk = chunk->next)
310 total_used += (chunk->amount_free_init - chunk->amount_free);
312 return total_used;
/* Memory-usage callback for the EP pool (registered via ws_mem_usage_t). */
static gsize
ep_memory_usage(void)
{
	return emem_memory_usage(&ep_packet_mem);
}
/* Initialize the packet-lifetime memory allocation pool.
 * This function should be called only once when Wireshark or TShark starts
 * up.
 */
static void
ep_init_chunk(void)
{
	static const ws_mem_usage_t ep_stats = { "EP", ep_memory_usage, NULL };

	ep_packet_mem.free_list=NULL;
	ep_packet_mem.used_list=NULL;
	ep_packet_mem.trees=NULL;	/* not used by this allocator */

	/* Chunked allocation is on unless explicitly disabled via the
	 * environment; canaries require chunks and default to on too. */
	ep_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL);
	ep_packet_mem.debug_use_canary = ep_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL);
	ep_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_EP_VERIFY_POINTERS") != NULL);

#ifdef DEBUG_INTENSE_CANARY_CHECKS
	intense_canary_checking = (getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY") != NULL);
#endif

	emem_init_chunk(&ep_packet_mem);

	memory_usage_component_register(&ep_stats);
}
/* Memory-usage callback for the SE pool (registered via ws_mem_usage_t). */
static gsize
se_memory_usage(void)
{
	return emem_memory_usage(&se_packet_mem);
}
/* Initialize the capture-lifetime memory allocation pool.
 * This function should be called only once when Wireshark or TShark starts
 * up.
 */
static void
se_init_chunk(void)
{
	static const ws_mem_usage_t se_stats = { "SE", se_memory_usage, NULL };

	se_packet_mem.free_list = NULL;
	se_packet_mem.used_list = NULL;
	se_packet_mem.trees = NULL;

	/* Note the asymmetry with ep_init_chunk(): SE canaries are opt-in
	 * (WIRESHARK_DEBUG_SE_USE_CANARY) because of their memory overhead. */
	se_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_SE_NO_CHUNKS") == NULL);
	se_packet_mem.debug_use_canary = se_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_SE_USE_CANARY") != NULL);
	se_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_SE_VERIFY_POINTERS") != NULL);

	emem_init_chunk(&se_packet_mem);

	memory_usage_component_register(&se_stats);
}
/* Initialize all the allocators here.
 * This function should be called only once when Wireshark or TShark starts
 * up.
 */
void
emem_init(void)
{
	ep_init_chunk();
	se_init_chunk();

	if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
		debug_use_memory_scrubber = TRUE;

#if defined (_WIN32)
	/* Set up our guard page info for Win32 */
	GetSystemInfo(&sysinfo);
	pagesize = sysinfo.dwPageSize;

#if (_MSC_VER >= 1800)
	/*
	 * On VS2013, GetVersionEx is deprecated. Microsoft recommend to
	 * use VerifyVersionInfo instead
	 */
	{
		OSVERSIONINFOEX osvi;
		DWORDLONG dwlConditionMask = 0;
		int op = VER_EQUAL;

		SecureZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
		osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
		osvi.dwPlatformId = VER_PLATFORM_WIN32_WINDOWS;
		VER_SET_CONDITION(dwlConditionMask, VER_PLATFORMID, op);
		/* TRUE iff we are on a Win9x-family platform. */
		iswindowsplatform = VerifyVersionInfo(&osvi, VER_PLATFORMID, dwlConditionMask);
	}
#else
	/* calling GetVersionEx using the OSVERSIONINFO structure.
	 * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
	 * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
	 * See also:
	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
	 */
	{
		OSVERSIONINFO versinfo;

		SecureZeroMemory(&versinfo, sizeof(OSVERSIONINFO));
		versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
		GetVersionEx(&versinfo);
		iswindowsplatform = (versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
	}
#endif

#elif defined(USE_GUARD_PAGES)
	pagesize = sysconf(_SC_PAGESIZE);
	if (pagesize == -1)
		fprintf(stderr, "Warning: call to sysconf() for _SC_PAGESIZE has failed...\n");
#ifdef NEED_DEV_ZERO
	/* No MAP_ANONYMOUS/MAP_ANON on this platform: mmap /dev/zero instead. */
	dev_zero_fd = ws_open("/dev/zero", O_RDWR);
	g_assert(dev_zero_fd != -1);
#endif
#endif /* _WIN32 / USE_GUARD_PAGES */
}
#ifdef SHOW_EMEM_STATS
#define NUM_ALLOC_DIST 10
/* Histogram of SE allocation sizes (buckets <32, <64, ... bytes), filled in
 * by emem_alloc_chunk(). */
static guint allocations[NUM_ALLOC_DIST] = { 0 };
/* Number of chunks ever requested from the OS (counted in emem_create_chunk()). */
static guint total_no_chunks = 0;

/* Dump allocation statistics for both pools to stderr. */
static void
print_alloc_stats(void)
{
	guint num_chunks = 0;
	guint num_allocs = 0;
	guint total_used = 0;
	guint total_allocation = 0;
	guint used_for_canaries = 0;
	guint total_headers;
	guint i;
	emem_chunk_t *chunk;
	guint total_space_allocated_from_os, total_space_wasted;
	gboolean ep_stat=TRUE;

	fprintf(stderr, "\n-------- EP allocator statistics --------\n");
	fprintf(stderr, "%s chunks, %s canaries, %s memory scrubber\n",
	       ep_packet_mem.debug_use_chunks ? "Using" : "Not using",
	       ep_packet_mem.debug_use_canary ? "using" : "not using",
	       debug_use_memory_scrubber ? "using" : "not using");

	/* NOTE(review): this condition reads as "no free list AND a used list
	 * exists"; "neither free nor used list" may have been intended —
	 * confirm before changing. */
	if (! (ep_packet_mem.free_list || !ep_packet_mem.used_list)) {
		fprintf(stderr, "No memory allocated\n");
		ep_stat = FALSE;
	}
	if (ep_packet_mem.debug_use_chunks && ep_stat) {
		/* Nothing interesting without chunks */
		/* Only look at the used_list since those chunks are fully
		 * used. Looking at the free list would skew our view of what
		 * we have wasted.
		 */
		for (chunk = ep_packet_mem.used_list; chunk; chunk = chunk->next) {
			num_chunks++;
			total_used += (chunk->amount_free_init - chunk->amount_free);
			total_allocation += chunk->amount_free_init;
		}
		if (num_chunks > 0) {
			fprintf (stderr, "\n");
			fprintf (stderr, "\n---- Buffer space ----\n");
			fprintf (stderr, "\tChunk allocation size: %10u\n", EMEM_PACKET_CHUNK_SIZE);
			fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
			fprintf (stderr, "\t-------------------------------------------\n");
			fprintf (stderr, "\t= %u (%u including guard pages) total space used for buffers\n",
				total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
			fprintf (stderr, "\t-------------------------------------------\n");
			total_space_allocated_from_os = total_allocation
				+ sizeof(emem_chunk_t) * num_chunks;
			fprintf (stderr, "Total allocated from OS: %u\n\n",
				total_space_allocated_from_os);
		}else{
			fprintf (stderr, "No fully used chunks, nothing to do\n");
		}
		/* Reset stats before doing the SE pool below. */
		num_chunks = 0;
		num_allocs = 0;
		total_used = 0;
		total_allocation = 0;
		used_for_canaries = 0;
	}

	fprintf(stderr, "\n-------- SE allocator statistics --------\n");
	fprintf(stderr, "Total number of chunk allocations %u\n",
		total_no_chunks);
	fprintf(stderr, "%s chunks, %s canaries\n",
	       se_packet_mem.debug_use_chunks ? "Using" : "Not using",
	       se_packet_mem.debug_use_canary ? "using" : "not using");

	/* NOTE(review): same questionable condition as the EP case above. */
	if (! (se_packet_mem.free_list || !se_packet_mem.used_list)) {
		fprintf(stderr, "No memory allocated\n");
		return;
	}

	if (!se_packet_mem.debug_use_chunks )
		return; /* Nothing interesting without chunks?? */

	/* Only look at the used_list since those chunks are fully used.
	 * Looking at the free list would skew our view of what we have wasted.
	 */
	for (chunk = se_packet_mem.used_list; chunk; chunk = chunk->next) {
		num_chunks++;
		total_used += (chunk->amount_free_init - chunk->amount_free);
		total_allocation += chunk->amount_free_init;

		if (se_packet_mem.debug_use_canary){
			void *ptr = chunk->canary_last;
			int len;

			/* Walk the chunk's canary chain; aborts on corruption. */
			while (ptr != NULL) {
				ptr = emem_canary_next(se_packet_mem.canary, (guint8*)ptr, &len);

				if (ptr == (void *) -1)
					g_error("Memory corrupted");
				used_for_canaries += len;
			}
		}
	}

	if (num_chunks == 0) {

		fprintf (stderr, "No fully used chunks, nothing to do\n");
		return;
	}

	fprintf (stderr, "\n");
	fprintf (stderr, "---------- Allocations from the OS ----------\n");
	fprintf (stderr, "---- Headers ----\n");
	fprintf (stderr, "\t( Chunk header size: %10lu\n",
		 sizeof(emem_chunk_t));
	fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
	fprintf (stderr, "\t-------------------------------------------\n");

	total_headers = sizeof(emem_chunk_t) * num_chunks;
	fprintf (stderr, "\t= %u bytes used for headers\n", total_headers);
	fprintf (stderr, "\n---- Buffer space ----\n");
	fprintf (stderr, "\tChunk allocation size: %10u\n",
		 EMEM_PACKET_CHUNK_SIZE);
	fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
	fprintf (stderr, "\t-------------------------------------------\n");
	fprintf (stderr, "\t= %u (%u including guard pages) bytes used for buffers\n",
		total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
	fprintf (stderr, "\t-------------------------------------------\n");
	total_space_allocated_from_os = (EMEM_PACKET_CHUNK_SIZE * num_chunks)
					+ total_headers;
	fprintf (stderr, "Total bytes allocated from the OS: %u\n\n",
		 total_space_allocated_from_os);

	for (i = 0; i < NUM_ALLOC_DIST; i++)
		num_allocs += allocations[i];

	fprintf (stderr, "---------- Allocations from the SE pool ----------\n");
	fprintf (stderr, " Number of SE allocations: %10u\n",
		 num_allocs);
	fprintf (stderr, " Bytes used (incl. canaries): %10u\n",
		 total_used);
	fprintf (stderr, " Bytes used for canaries: %10u\n",
		 used_for_canaries);
	fprintf (stderr, "Bytes unused (wasted, excl. guard pages): %10u\n",
		 total_allocation - total_used);
	fprintf (stderr, "Bytes unused (wasted, incl. guard pages): %10u\n\n",
		 total_space_allocated_from_os - total_used);

	fprintf (stderr, "---------- Statistics ----------\n");
	fprintf (stderr, "Average SE allocation size (incl. canaries): %6.2f\n",
		 (float)total_used/(float)num_allocs);
	fprintf (stderr, "Average SE allocation size (excl. canaries): %6.2f\n",
		 (float)(total_used - used_for_canaries)/(float)num_allocs);
	fprintf (stderr, " Average wasted bytes per allocation: %6.2f\n",
		 (total_allocation - total_used)/(float)num_allocs);
	total_space_wasted = (total_allocation - total_used)
		+ (sizeof(emem_chunk_t));
	fprintf (stderr, " Space used for headers + unused allocation: %8u\n",
		 total_space_wasted);
	fprintf (stderr, "--> %% overhead/waste: %4.2f\n",
		 100 * (float)total_space_wasted/(float)total_space_allocated_from_os);

	fprintf (stderr, "\nAllocation distribution (sizes include canaries):\n");
	for (i = 0; i < (NUM_ALLOC_DIST-1); i++)
		fprintf (stderr, "size < %5d: %8u\n", 32<<i, allocations[i]);
	fprintf (stderr, "size > %5d: %8u\n", 32<<i, allocations[i]);
}
#endif
606 static gboolean
607 emem_verify_pointer_list(const emem_chunk_t *chunk_list, const void *ptr)
609 const gchar *cptr = (gchar *)ptr;
610 const emem_chunk_t *chunk;
612 for (chunk = chunk_list; chunk; chunk = chunk->next) {
613 if (cptr >= (chunk->buf + chunk->free_offset_init) && cptr < (chunk->buf + chunk->free_offset))
614 return TRUE;
616 return FALSE;
619 static gboolean
620 emem_verify_pointer(const emem_pool_t *hdr, const void *ptr)
622 return emem_verify_pointer_list(hdr->free_list, ptr) || emem_verify_pointer_list(hdr->used_list, ptr);
625 gboolean
626 ep_verify_pointer(const void *ptr)
628 if (ep_packet_mem.debug_verify_pointers)
629 return emem_verify_pointer(&ep_packet_mem, ptr);
630 else
631 return FALSE;
634 gboolean
635 se_verify_pointer(const void *ptr)
637 if (se_packet_mem.debug_verify_pointers)
638 return emem_verify_pointer(&se_packet_mem, ptr);
639 else
640 return FALSE;
643 static void
644 emem_scrub_memory(char *buf, size_t size, gboolean alloc)
646 guint scrubbed_value;
647 size_t offset;
649 if (!debug_use_memory_scrubber)
650 return;
652 if (alloc) /* this memory is being allocated */
653 scrubbed_value = 0xBADDCAFE;
654 else /* this memory is being freed */
655 scrubbed_value = 0xDEADBEEF;
657 /* We shouldn't need to check the alignment of the starting address
658 * since this is malloc'd memory (or 'pagesize' bytes into malloc'd
659 * memory).
662 /* XXX - if the above is *NOT* true, we should use memcpy here,
663 * in order to avoid problems on alignment-sensitive platforms, e.g.
664 * http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
667 for (offset = 0; offset + sizeof(guint) <= size; offset += sizeof(guint))
668 *(guint*)(void*)(buf+offset) = scrubbed_value;
670 /* Initialize the last bytes, if any */
671 if (offset < size) {
672 *(guint8*)(buf+offset) = scrubbed_value >> 24;
673 offset++;
674 if (offset < size) {
675 *(guint8*)(buf+offset) = (scrubbed_value >> 16) & 0xFF;
676 offset++;
677 if (offset < size) {
678 *(guint8*)(buf+offset) = (scrubbed_value >> 8) & 0xFF;
/*
 * Allocate one new chunk of 'size' bytes.  The buffer comes from
 * VirtualAlloc on Windows, mmap where guard pages are available, and
 * g_malloc otherwise.  On VirtualAlloc/mmap failure we abort if
 * WIRESHARK_ABORT_ON_OUT_OF_MEMORY is set, else throw OutOfMemoryError.
 */
static emem_chunk_t *
emem_create_chunk(size_t size)
{
	emem_chunk_t *npc;

	npc = g_new(emem_chunk_t, 1);
	npc->next = NULL;
	npc->canary_last = NULL;

#if defined (_WIN32)
	/*
	 * MSDN documents VirtualAlloc/VirtualProtect at
	 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
	 */

	/* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
	npc->buf = (char *)VirtualAlloc(NULL, size,
		MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);

	if (npc->buf == NULL) {
		g_free(npc);
		if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
			abort();
		else
			THROW(OutOfMemoryError);
	}

#elif defined(USE_GUARD_PAGES)
	npc->buf = (char *)mmap(NULL, size,
		PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);

	if (npc->buf == MAP_FAILED) {
		g_free(npc);
		if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
			abort();
		else
			THROW(OutOfMemoryError);
	}

#else /* Is there a draft in here? */
	npc->buf = g_malloc(size);
	/* g_malloc() can't fail */
#endif

#ifdef SHOW_EMEM_STATS
	total_no_chunks++;
#endif

	npc->amount_free = npc->amount_free_init = (unsigned int) size;
	npc->free_offset = npc->free_offset_init = 0;
	return npc;
}
/*
 * Allocate a chunk and turn one page at each end of it into a no-access
 * guard page, so stray accesses just beyond an allocation fault
 * immediately.  amount_free_init/free_offset_init are adjusted so the
 * allocator only hands out bytes strictly between the two guard pages.
 */
static emem_chunk_t *
emem_create_chunk_gp(size_t size)
{
#if defined (_WIN32)
	BOOL ret;
	char *buf_end, *prot1, *prot2;
	DWORD oldprot;
#elif defined(USE_GUARD_PAGES)
	int ret;
	char *buf_end, *prot1, *prot2;
#endif /* _WIN32 / USE_GUARD_PAGES */
	emem_chunk_t *npc;

	npc = emem_create_chunk(size);

#if defined (_WIN32)
	buf_end = npc->buf + size;

	/* Align our guard pages on page-sized boundaries */
	prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
	prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);

	/* VirtualProtect may legitimately fail on the Win9x family; hence
	 * the "|| iswindowsplatform" escape hatch in the assertions. */
	ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
	g_assert(ret != 0 || iswindowsplatform);
	ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
	g_assert(ret != 0 || iswindowsplatform);

	npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
	npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
#elif defined(USE_GUARD_PAGES)
	buf_end = npc->buf + size;

	/* Align our guard pages on page-sized boundaries */
	prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
	prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);

	ret = mprotect(prot1, pagesize, PROT_NONE);
	g_assert(ret != -1);
	ret = mprotect(prot2, pagesize, PROT_NONE);
	g_assert(ret != -1);

	npc->amount_free_init = (unsigned int)(prot2 - prot1 - pagesize);
	npc->free_offset_init = (unsigned int)((prot1 - npc->buf) + pagesize);
#else
	/* No guard pages available: the whole buffer is usable. */
	npc->amount_free_init = size;
	npc->free_offset_init = 0;
#endif /* USE_GUARD_PAGES */

	npc->amount_free = npc->amount_free_init;
	npc->free_offset = npc->free_offset_init;
	return npc;
}
792 static void *
793 emem_alloc_chunk(size_t size, emem_pool_t *mem)
795 void *buf;
797 size_t asize = size;
798 gboolean use_canary = mem->debug_use_canary;
799 guint8 pad;
800 emem_chunk_t *free_list;
802 /* Allocate room for at least 8 bytes of canary plus some padding
803 * so the canary ends on an 8-byte boundary.
804 * But first add the room needed for the pointer to the next canary
805 * (so the entire allocation will end on an 8-byte boundary).
807 if (use_canary) {
808 asize += sizeof(void *);
809 pad = emem_canary_pad(asize);
810 } else
811 pad = (WS_MEM_ALIGN - (asize & (WS_MEM_ALIGN-1))) & (WS_MEM_ALIGN-1);
813 asize += pad;
815 #ifdef SHOW_EMEM_STATS
816 /* Do this check here so we can include the canary size */
817 if (mem == &se_packet_mem) {
818 if (asize < 32)
819 allocations[0]++;
820 else if (asize < 64)
821 allocations[1]++;
822 else if (asize < 128)
823 allocations[2]++;
824 else if (asize < 256)
825 allocations[3]++;
826 else if (asize < 512)
827 allocations[4]++;
828 else if (asize < 1024)
829 allocations[5]++;
830 else if (asize < 2048)
831 allocations[6]++;
832 else if (asize < 4096)
833 allocations[7]++;
834 else if (asize < 8192)
835 allocations[8]++;
836 else if (asize < 16384)
837 allocations[8]++;
838 else
839 allocations[(NUM_ALLOC_DIST-1)]++;
841 #endif
843 /* make sure we dont try to allocate too much (arbitrary limit) */
844 DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
846 if (!mem->free_list)
847 mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
849 /* oops, we need to allocate more memory to serve this request
850 * than we have free. move this node to the used list and try again
852 if(asize > mem->free_list->amount_free) {
853 emem_chunk_t *npc;
854 npc=mem->free_list;
855 mem->free_list=mem->free_list->next;
856 npc->next=mem->used_list;
857 mem->used_list=npc;
859 if (!mem->free_list)
860 mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
863 free_list = mem->free_list;
865 buf = free_list->buf + free_list->free_offset;
867 free_list->amount_free -= (unsigned int) asize;
868 free_list->free_offset += (unsigned int) asize;
870 if (use_canary) {
871 char *cptr = (char *)buf + size;
873 memcpy(cptr, mem->canary, pad-1);
874 cptr[pad-1] = '\0';
875 memcpy(cptr + pad, &free_list->canary_last, sizeof(void *));
877 free_list->canary_last = cptr;
880 return buf;
883 static void *
884 emem_alloc_glib(size_t size, emem_pool_t *mem)
886 emem_chunk_t *npc;
888 npc=g_new(emem_chunk_t, 1);
889 npc->next=mem->used_list;
890 npc->buf=(char *)g_malloc(size);
891 npc->canary_last = NULL;
892 mem->used_list=npc;
893 /* There's no padding/alignment involved (from our point of view) when
894 * we fetch the memory directly from the system pool, so WYSIWYG */
895 npc->amount_free = npc->free_offset_init = 0;
896 npc->free_offset = npc->amount_free_init = (unsigned int) size;
898 return npc->buf;
/* allocate 'size' amount of memory via the pool's selected backend. */
static void *
emem_alloc(size_t size, emem_pool_t *mem)
{
	void *buf;

#if 0
	/* For testing wmem, effectively redirects most emem memory to wmem.
	 * You will also have to comment out several assertions in wmem_core.c,
	 * specifically anything g_assert(allocator->in_scope), since it is much
	 * stricter about when it is permitted to be called. */
	if (mem == &ep_packet_mem) {
		return wmem_alloc(wmem_packet_scope(), size);
	}
	else if (mem == &se_packet_mem) {
		return wmem_alloc(wmem_file_scope(), size);
	}
#endif

	buf = mem->memory_alloc(size, mem);

	/* XXX - this is a waste of time if the allocator function is going to
	 * memset this straight back to 0.
	 */
	emem_scrub_memory((char *)buf, size, TRUE);

	return buf;
}
/* allocate 'size' amount of memory with an allocation lifetime until the
 * next packet.
 */
void *
ep_alloc(size_t size)
{
	return emem_alloc(size, &ep_packet_mem);
}

/* allocate 'size' amount of memory with an allocation lifetime until the
 * next capture.
 */
void *
se_alloc(size_t size)
{
	return emem_alloc(size, &se_packet_mem);
}

/* Zero-initialized variant of ep_alloc(). */
void *
ep_alloc0(size_t size)
{
	return memset(ep_alloc(size),'\0',size);
}

/* Zero-initialized variant of se_alloc(). */
void *
se_alloc0(size_t size)
{
	return memset(se_alloc(size),'\0',size);
}
960 static gchar *
961 emem_strdup(const gchar *src, void *allocator(size_t))
963 guint len;
964 gchar *dst;
966 /* If str is NULL, just return the string "<NULL>" so that the callers don't
967 * have to bother checking it.
969 if(!src)
970 src = "<NULL>";
972 len = (guint) strlen(src);
973 dst = (gchar *)memcpy(allocator(len+1), src, len+1);
975 return dst;
/* Duplicate a string into packet-lifetime memory. */
gchar *
ep_strdup(const gchar *src)
{
	return emem_strdup(src, ep_alloc);
}

/* Duplicate a string into capture-lifetime memory. */
gchar *
se_strdup(const gchar *src)
{
	return emem_strdup(src, se_alloc);
}
990 static gchar *
991 emem_strndup(const gchar *src, size_t len, void *allocator(size_t))
993 gchar *dst = (gchar *)allocator(len+1);
994 guint i;
996 for (i = 0; (i < len) && src[i]; i++)
997 dst[i] = src[i];
999 dst[i] = '\0';
1001 return dst;
/* Length-bounded duplicate into packet-lifetime memory. */
gchar *
ep_strndup(const gchar *src, size_t len)
{
	return emem_strndup(src, len, ep_alloc);
}

/* Length-bounded duplicate into capture-lifetime memory. */
gchar *
se_strndup(const gchar *src, size_t len)
{
	return emem_strndup(src, len, se_alloc);
}

/* Duplicate 'len' raw bytes into packet-lifetime memory. */
void *
ep_memdup(const void* src, size_t len)
{
	return memcpy(ep_alloc(len), src, len);
}

/* Duplicate 'len' raw bytes into capture-lifetime memory. */
void *
se_memdup(const void* src, size_t len)
{
	return memcpy(se_alloc(len), src, len);
}
1030 static gchar *
1031 emem_strdup_vprintf(const gchar *fmt, va_list ap, void *allocator(size_t))
1033 va_list ap2;
1034 gsize len;
1035 gchar* dst;
1037 G_VA_COPY(ap2, ap);
1039 len = g_printf_string_upper_bound(fmt, ap);
1041 dst = (gchar *)allocator(len+1);
1042 g_vsnprintf (dst, (gulong) len, fmt, ap2);
1043 va_end(ap2);
1045 return dst;
/* vprintf into a packet-lifetime string. */
gchar *
ep_strdup_vprintf(const gchar *fmt, va_list ap)
{
	return emem_strdup_vprintf(fmt, ap, ep_alloc);
}

/* vprintf into a capture-lifetime string. */
gchar *
se_strdup_vprintf(const gchar* fmt, va_list ap)
{
	return emem_strdup_vprintf(fmt, ap, se_alloc);
}

/* printf into a packet-lifetime string. */
gchar *
ep_strdup_printf(const gchar *fmt, ...)
{
	va_list ap;
	gchar *dst;

	va_start(ap, fmt);
	dst = ep_strdup_vprintf(fmt, ap);
	va_end(ap);
	return dst;
}

/* printf into a capture-lifetime string. */
gchar *
se_strdup_printf(const gchar *fmt, ...)
{
	va_list ap;
	gchar *dst;

	va_start(ap, fmt);
	dst = se_strdup_vprintf(fmt, ap);
	va_end(ap);
	return dst;
}
/*
 * Pool-backed string splitter (cf. g_strsplit): returns a NULL-terminated,
 * ep-allocated vector of pointers into an ep-allocated copy of 'string',
 * split at every occurrence of 'sep'.  Returns NULL if 'string' or 'sep'
 * is NULL or 'sep' is empty.
 *
 * NOTE(review): the split loop below runs while tokens <= max_tokens, so a
 * caller passing max_tokens == N can receive up to N+1 pieces — g_strsplit
 * caps at N.  Confirm whether this off-by-one is intended before relying
 * on max_tokens.
 */
gchar **
ep_strsplit(const gchar* string, const gchar* sep, int max_tokens)
{
	gchar* splitted;	/* writable ep-copy of 'string'; NULs punched in at separators */
	gchar* s;
	guint tokens;
	guint str_len;
	guint sep_len;
	guint i;
	gchar** vec;
	enum { AT_START, IN_PAD, IN_TOKEN } state;
	guint curr_tok = 0;

	if ( ! string
	     || ! sep
	     || ! sep[0])
		return NULL;

	s = splitted = ep_strdup(string);
	str_len = (guint) strlen(splitted);
	sep_len = (guint) strlen(sep);

	if (max_tokens < 1) max_tokens = INT_MAX;

	tokens = 1;

	/* Pass 1: overwrite every separator with NULs and count the pieces. */
	while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
		tokens++;

		for(i=0; i < sep_len; i++ )
			s[i] = '\0';

		s += sep_len;
	}

	vec = ep_alloc_array(gchar*,tokens+1);
	state = AT_START;

	/* Pass 2: walk the NUL-riddled copy and record the start of each
	 * piece.  IN_PAD skips the NUL padding left by multi-byte
	 * separators; the switch fall-throughs are intentional. */
	for (i=0; i< str_len; i++) {
		switch(state) {
			case AT_START:
				switch(splitted[i]) {
					case '\0':
						state = IN_PAD;
						continue;
					default:
						vec[curr_tok] = &(splitted[i]);
						curr_tok++;
						state = IN_TOKEN;
						continue;
				}
			case IN_TOKEN:
				switch(splitted[i]) {
					case '\0':
						state = IN_PAD;
						/* fallthrough */
					default:
						continue;
				}
			case IN_PAD:
				switch(splitted[i]) {
					default:
						vec[curr_tok] = &(splitted[i]);
						curr_tok++;
						state = IN_TOKEN;
						/* fallthrough */
					case '\0':
						continue;
				}
		}
	}

	vec[curr_tok] = NULL;

	return vec;
}
1161 gchar *
1162 ep_strconcat(const gchar *string1, ...)
1164 gsize l;
1165 va_list args;
1166 gchar *s;
1167 gchar *concat;
1168 gchar *ptr;
1170 if (!string1)
1171 return NULL;
1173 l = 1 + strlen(string1);
1174 va_start(args, string1);
1175 s = va_arg(args, gchar*);
1176 while (s) {
1177 l += strlen(s);
1178 s = va_arg(args, gchar*);
1180 va_end(args);
1182 concat = (gchar *)ep_alloc(l);
1183 ptr = concat;
1185 ptr = g_stpcpy(ptr, string1);
1186 va_start(args, string1);
1187 s = va_arg(args, gchar*);
1188 while (s) {
1189 ptr = g_stpcpy(ptr, s);
1190 s = va_arg(args, gchar*);
1192 va_end(args);
1194 return concat;
/* release all allocated memory back to the pool. */
static void
emem_free_all(emem_pool_t *mem)
{
	gboolean use_chunks = mem->debug_use_chunks;

	emem_chunk_t *npc;
	emem_tree_t *tree_list;

	/* move all used chunks over to the free list */
	while(mem->used_list){
		npc=mem->used_list;
		mem->used_list=mem->used_list->next;
		npc->next=mem->free_list;
		mem->free_list=npc;
	}

	/* clear them all out */
	npc = mem->free_list;
	while (npc != NULL) {
		if (use_chunks) {
			/* Walk the canary chain to verify no allocation
			 * overwrote its canary bytes. */
			while (npc->canary_last != NULL) {
				npc->canary_last = emem_canary_next(mem->canary, (guint8 *)npc->canary_last, NULL);
				/* XXX, check if canary_last is inside allocated memory? */

				if (npc->canary_last == (void *) -1)
					g_error("Memory corrupted");
			}

			/* Wipe the used part of the chunk and reset it for reuse. */
			emem_scrub_memory((npc->buf + npc->free_offset_init),
					  (npc->free_offset - npc->free_offset_init),
					  FALSE);

			npc->amount_free = npc->amount_free_init;
			npc->free_offset = npc->free_offset_init;
			npc = npc->next;
		} else {
			/* Not using chunks: each "chunk" is an individual
			 * g_malloc()ed allocation; scrub and free it. */
			emem_chunk_t *next = npc->next;

			emem_scrub_memory(npc->buf, npc->amount_free_init, FALSE);

			g_free(npc->buf);
			g_free(npc);
			npc = next;
		}
	}

	if (!use_chunks) {
		/* We've freed all this memory already */
		mem->free_list = NULL;
	}

	/* release/reset all allocated trees */
	for(tree_list=mem->trees;tree_list;tree_list=tree_list->next){
		tree_list->tree=NULL;
	}
}
/* release all allocated memory back to the pool. */
/* Resets the packet-scoped (ep_) pool. */
void
ep_free_all(void)
{
	emem_free_all(&ep_packet_mem);
}
/* release all allocated memory back to the pool. */
/* Resets the capture-session-scoped (se_) pool. */
void
se_free_all(void)
{
#ifdef SHOW_EMEM_STATS
	print_alloc_stats();
#endif

	emem_free_all(&se_packet_mem);
}
1275 ep_stack_t
1276 ep_stack_new(void) {
1277 ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
1278 *s = ep_new0(struct _ep_stack_frame_t);
1279 return s;
1282 /* for ep_stack_t we'll keep the popped frames so we reuse them instead
1283 of allocating new ones.
1286 void *
1287 ep_stack_push(ep_stack_t stack, void* data)
1289 struct _ep_stack_frame_t* frame;
1290 struct _ep_stack_frame_t* head = (*stack);
1292 if (head->above) {
1293 frame = head->above;
1294 } else {
1295 frame = ep_new(struct _ep_stack_frame_t);
1296 head->above = frame;
1297 frame->below = head;
1298 frame->above = NULL;
1301 frame->payload = data;
1302 (*stack) = frame;
1304 return data;
1307 void *
1308 ep_stack_pop(ep_stack_t stack)
1311 if ((*stack)->below) {
1312 (*stack) = (*stack)->below;
1313 return (*stack)->above->payload;
1314 } else {
1315 return NULL;
1319 emem_tree_t *
1320 se_tree_create(int type, const char *name)
1322 emem_tree_t *tree_list;
1324 tree_list=(emem_tree_t *)g_malloc(sizeof(emem_tree_t));
1325 tree_list->next=se_packet_mem.trees;
1326 tree_list->type=type;
1327 tree_list->tree=NULL;
1328 tree_list->name=name;
1329 tree_list->malloc=se_alloc;
1330 se_packet_mem.trees=tree_list;
1332 return tree_list;
1335 void *
1336 emem_tree_lookup32(emem_tree_t *se_tree, guint32 key)
1338 emem_tree_node_t *node;
1340 node=se_tree->tree;
1342 while(node){
1343 if(key==node->key32){
1344 return node->data;
1346 if(key<node->key32){
1347 node=node->left;
1348 continue;
1350 if(key>node->key32){
1351 node=node->right;
1352 continue;
1355 return NULL;
/*
 * Lookup the node with the largest key that is less than or equal to
 * the search key; returns its data pointer, or NULL if all keys in the
 * tree are larger than the search key.
 */
void *
emem_tree_lookup32_le(emem_tree_t *se_tree, guint32 key)
{
	emem_tree_node_t *node;

	node=se_tree->tree;

	if(!node){
		return NULL;
	}

	/* Descend as for an exact lookup; stop at the leaf where the
	 * search "fell off" the tree. */
	while(node){
		if(key==node->key32){
			return node->data;
		}
		if(key<node->key32){
			if(node->left){
				node=node->left;
				continue;
			} else {
				break;
			}
		}
		if(key>node->key32){
			if(node->right){
				node=node->right;
				continue;
			} else {
				break;
			}
		}
	}

	if(!node){
		return NULL;
	}

	/* If we are still at the root of the tree this means that this node
	 * is either smaller than the search key and then we return this
	 * node or else there is no smaller key available and then
	 * we return NULL.
	 */
	if(!node->parent){
		if(key>node->key32){
			return node->data;
		} else {
			return NULL;
		}
	}

	if(node->parent->left==node){
		/* left child */
		if(key>node->key32){
			/* if this is a left child and its key is smaller than
			 * the search key, then this is the node we want.
			 */
			return node->data;
		} else {
			/* if this is a left child and its key is bigger than
			 * the search key, we have to check if any
			 * of our ancestors are smaller than the search key.
			 */
			while(node){
				if(key>node->key32){
					return node->data;
				}
				node=node->parent;
			}
			return NULL;
		}
	} else {
		/* right child */
		if(node->key32<key){
			/* if this is the right child and its key is smaller
			 * than the search key then this is the one we want.
			 */
			return node->data;
		} else {
			/* if this is the right child and its key is larger
			 * than the search key then our parent is the one we
			 * want.  (We descended right from the parent, so the
			 * parent's key is known to be < the search key.)
			 */
			return node->parent->data;
		}
	}
}
/* Return a node's parent (NULL for the root). */
static inline emem_tree_node_t *
emem_tree_parent(emem_tree_node_t *node)
{
	return node->parent;
}
1457 static inline emem_tree_node_t *
1458 emem_tree_grandparent(emem_tree_node_t *node)
1460 emem_tree_node_t *parent;
1462 parent=emem_tree_parent(node);
1463 if(parent){
1464 return parent->parent;
1466 return NULL;
1469 static inline emem_tree_node_t *
1470 emem_tree_uncle(emem_tree_node_t *node)
1472 emem_tree_node_t *parent, *grandparent;
1474 parent=emem_tree_parent(node);
1475 if(!parent){
1476 return NULL;
1478 grandparent=emem_tree_parent(parent);
1479 if(!grandparent){
1480 return NULL;
1482 if(parent==grandparent->left){
1483 return grandparent->right;
1485 return grandparent->left;
1488 static inline void rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node);
1489 static inline void rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node);
/* Left-rotate around 'node': node's right child takes node's place and
 * node becomes its left child.  The pointer updates below are
 * order-sensitive. */
static inline void
rotate_left(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	/* Hook node's right child into node's old slot (parent link or root). */
	if(node->parent){
		if(node->parent->left==node){
			node->parent->left=node->right;
		} else {
			node->parent->right=node->right;
		}
	} else {
		se_tree->tree=node->right;
	}
	node->right->parent=node->parent;
	node->parent=node->right;
	/* Node adopts the rotated child's former left subtree. */
	node->right=node->right->left;
	if(node->right){
		node->right->parent=node;
	}
	node->parent->left=node;
}
/* Right-rotate around 'node': mirror image of rotate_left(). */
static inline void
rotate_right(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	/* Hook node's left child into node's old slot (parent link or root). */
	if(node->parent){
		if(node->parent->left==node){
			node->parent->left=node->left;
		} else {
			node->parent->right=node->left;
		}
	} else {
		se_tree->tree=node->left;
	}
	node->left->parent=node->parent;
	node->parent=node->left;
	/* Node adopts the rotated child's former right subtree. */
	node->left=node->left->right;
	if(node->left){
		node->left->parent=node;
	}
	node->parent->right=node;
}
/* Red-black insert fixup, final case: the parent is red, the uncle is
 * black, and 'node' is an outer child.  Recolor and rotate at the
 * grandparent to restore the red-black invariants. */
static inline void
rb_insert_case5(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	emem_tree_node_t *grandparent;
	emem_tree_node_t *parent;

	parent=emem_tree_parent(node);
	grandparent=emem_tree_parent(parent);
	parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
	grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
	if( (node==parent->left) && (parent==grandparent->left) ){
		rotate_right(se_tree, grandparent);
	} else {
		rotate_left(se_tree, grandparent);
	}
}
/* Red-black insert fixup: if 'node' is an inner child (right child of a
 * left parent, or left child of a right parent), rotate at the parent so
 * it becomes an outer child, then fall through to case 5. */
static inline void
rb_insert_case4(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	emem_tree_node_t *grandparent;
	emem_tree_node_t *parent;

	parent=emem_tree_parent(node);
	grandparent=emem_tree_parent(parent);
	if(!grandparent){
		return;
	}
	if( (node==parent->right) && (parent==grandparent->left) ){
		rotate_left(se_tree, parent);
		/* after the rotation, the old parent (now our child) is the
		 * node the fixup must continue from */
		node=node->left;
	} else if( (node==parent->left) && (parent==grandparent->right) ){
		rotate_right(se_tree, parent);
		node=node->right;
	}
	rb_insert_case5(se_tree, node);
}
/* Red-black insert fixup: if both the parent and the uncle are red,
 * recolor them black, make the grandparent red, and restart the fixup
 * at the grandparent.  Otherwise continue with case 4. */
static inline void
rb_insert_case3(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	emem_tree_node_t *grandparent;
	emem_tree_node_t *parent;
	emem_tree_node_t *uncle;

	uncle=emem_tree_uncle(node);
	if(uncle && (uncle->u.rb_color==EMEM_TREE_RB_COLOR_RED)){
		parent=emem_tree_parent(node);
		parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
		uncle->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
		grandparent=emem_tree_grandparent(node);
		grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
		rb_insert_case1(se_tree, grandparent);
	} else {
		rb_insert_case4(se_tree, node);
	}
}
/* Red-black insert fixup: a red node under a black parent violates
 * nothing, so stop.  Otherwise continue with case 3. */
static inline void
rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	emem_tree_node_t *parent;

	parent=emem_tree_parent(node);
	/* parent is always non-NULL here */
	if(parent->u.rb_color==EMEM_TREE_RB_COLOR_BLACK){
		return;
	}
	rb_insert_case3(se_tree, node);
}
/* Red-black insert fixup entry point: if the node is the root, just
 * color it black; otherwise continue with case 2. */
static inline void
rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node)
{
	emem_tree_node_t *parent;

	parent=emem_tree_parent(node);
	if(!parent){
		node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
		return;
	}
	rb_insert_case2(se_tree, node);
}
/* insert a new node in the tree. if this node matches an already existing node
 * then just replace the data for that node */
void
emem_tree_insert32(emem_tree_t *se_tree, guint32 key, void *data)
{
	emem_tree_node_t *node;

	node=se_tree->tree;

	/* is this the first node ?*/
	if(!node){
		node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
		switch(se_tree->type){
		case EMEM_TREE_TYPE_RED_BLACK:
			/* the root of a red-black tree is black */
			node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
			break;
		}
		node->parent=NULL;
		node->left=NULL;
		node->right=NULL;
		node->key32=key;
		node->data=data;
		node->u.is_subtree = EMEM_TREE_NODE_IS_DATA;
		se_tree->tree=node;
		return;
	}

	/* it was not the new root so walk the tree until we find where to
	 * insert this new leaf.
	 */
	while(1){
		/* this node already exists, so just replace the data pointer*/
		if(key==node->key32){
			node->data=data;
			return;
		}
		if(key<node->key32) {
			if(!node->left){
				/* new node to the left */
				emem_tree_node_t *new_node;
				new_node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
				node->left=new_node;
				new_node->parent=node;
				new_node->left=NULL;
				new_node->right=NULL;
				new_node->key32=key;
				new_node->data=data;
				new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
				node=new_node;
				break;
			}
			node=node->left;
			continue;
		}
		if(key>node->key32) {
			if(!node->right){
				/* new node to the right */
				emem_tree_node_t *new_node;
				new_node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
				node->right=new_node;
				new_node->parent=node;
				new_node->left=NULL;
				new_node->right=NULL;
				new_node->key32=key;
				new_node->data=data;
				new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
				node=new_node;
				break;
			}
			node=node->right;
			continue;
		}
	}

	/* node will now point to the newly created node */
	switch(se_tree->type){
	case EMEM_TREE_TYPE_RED_BLACK:
		/* new nodes start out red; rebalance from here */
		node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
		rb_insert_case1(se_tree, node);
		break;
	}
}
/*
 * Look up 'key'; if it is missing, insert a new node whose data is
 * produced by calling func(ud).  Returns the (existing or new) data
 * pointer.  'is_subtree' marks whether the data is a nested tree
 * (EMEM_TREE_NODE_IS_SUBTREE) or plain data.
 */
static void *
lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree)
{
	emem_tree_node_t *node;

	node=se_tree->tree;

	/* is this the first node ?*/
	if(!node){
		node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
		switch(se_tree->type){
		case EMEM_TREE_TYPE_RED_BLACK:
			/* the root of a red-black tree is black */
			node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
			break;
		}
		node->parent=NULL;
		node->left=NULL;
		node->right=NULL;
		node->key32=key;
		node->data= func(ud);
		node->u.is_subtree = is_subtree;
		se_tree->tree=node;
		return node->data;
	}

	/* it was not the new root so walk the tree until we find where to
	 * insert this new leaf.
	 */
	while(1){
		/* this node already exists, so just return the data pointer*/
		if(key==node->key32){
			return node->data;
		}
		if(key<node->key32) {
			if(!node->left){
				/* new node to the left */
				emem_tree_node_t *new_node;
				new_node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
				node->left=new_node;
				new_node->parent=node;
				new_node->left=NULL;
				new_node->right=NULL;
				new_node->key32=key;
				new_node->data= func(ud);
				new_node->u.is_subtree = is_subtree;
				node=new_node;
				break;
			}
			node=node->left;
			continue;
		}
		if(key>node->key32) {
			if(!node->right){
				/* new node to the right */
				emem_tree_node_t *new_node;
				new_node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
				node->right=new_node;
				new_node->parent=node;
				new_node->left=NULL;
				new_node->right=NULL;
				new_node->key32=key;
				new_node->data= func(ud);
				new_node->u.is_subtree = is_subtree;
				node=new_node;
				break;
			}
			node=node->right;
			continue;
		}
	}

	/* node will now point to the newly created node */
	switch(se_tree->type){
	case EMEM_TREE_TYPE_RED_BLACK:
		/* new nodes start out red; rebalance from here */
		node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
		rb_insert_case1(se_tree, node);
		break;
	}

	return node->data;
}
1782 /* create another (sub)tree using the same memory allocation scope
1783 * as the parent tree.
1785 static emem_tree_t *
1786 emem_tree_create_subtree(emem_tree_t *parent_tree, const char *name)
1788 emem_tree_t *tree_list;
1790 tree_list=(emem_tree_t *)parent_tree->malloc(sizeof(emem_tree_t));
1791 tree_list->next=NULL;
1792 tree_list->type=parent_tree->type;
1793 tree_list->tree=NULL;
1794 tree_list->name=name;
1795 tree_list->malloc=parent_tree->malloc;
1797 return tree_list;
1800 static void *
1801 create_sub_tree(void* d)
1803 emem_tree_t *se_tree = (emem_tree_t *)d;
1804 return emem_tree_create_subtree(se_tree, "subtree");
/* insert a new node in the tree. if this node matches an already existing node
 * then just replace the data for that node */
/* The key is a variable-length sequence of guint32 words, given as an
 * array of emem_tree_key_t segments terminated by a segment with
 * length 0.  All words but the last select (creating on demand) a
 * chain of subtrees; the last word indexes the final insert. */
void
emem_tree_insert32_array(emem_tree_t *se_tree, emem_tree_key_t *key, void *data)
{
	emem_tree_t *insert_tree = NULL;
	emem_tree_key_t *cur_key;
	guint32 i, insert_key32 = 0;

	if(!se_tree || !key) return;

	for (cur_key = key; cur_key->length > 0; cur_key++) {
		/* sanity check against a corrupt/unterminated key array */
		if(cur_key->length > 100) {
			DISSECTOR_ASSERT_NOT_REACHED();
		}
		for (i = 0; i < cur_key->length; i++) {
			/* Insert using the previous key32 */
			if (!insert_tree) {
				insert_tree = se_tree;
			} else {
				insert_tree = (emem_tree_t *)lookup_or_insert32(insert_tree, insert_key32, create_sub_tree, se_tree, EMEM_TREE_NODE_IS_SUBTREE);
			}
			insert_key32 = cur_key->key[i];
		}
	}

	if(!insert_tree) {
		/* We didn't get a valid key. Should we return NULL instead? */
		DISSECTOR_ASSERT_NOT_REACHED();
	}

	emem_tree_insert32(insert_tree, insert_key32, data);
}
/* Exact-match lookup of a multi-word key built by
 * emem_tree_insert32_array(); returns the stored data pointer or NULL. */
void *
emem_tree_lookup32_array(emem_tree_t *se_tree, emem_tree_key_t *key)
{
	emem_tree_t *lookup_tree = NULL;
	emem_tree_key_t *cur_key;
	guint32 i, lookup_key32 = 0;

	if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */

	for (cur_key = key; cur_key->length > 0; cur_key++) {
		/* sanity check against a corrupt/unterminated key array */
		if(cur_key->length > 100) {
			DISSECTOR_ASSERT_NOT_REACHED();
		}
		for (i = 0; i < cur_key->length; i++) {
			/* Lookup using the previous key32 */
			if (!lookup_tree) {
				lookup_tree = se_tree;
			} else {
				lookup_tree = (emem_tree_t *)emem_tree_lookup32(lookup_tree, lookup_key32);
				if (!lookup_tree) {
					return NULL;
				}
			}
			lookup_key32 = cur_key->key[i];
		}
	}

	if(!lookup_tree) {
		/* We didn't get a valid key. Should we return NULL instead? */
		DISSECTOR_ASSERT_NOT_REACHED();
	}

	return emem_tree_lookup32(lookup_tree, lookup_key32);
}
/* Like emem_tree_lookup32_array() but each word is matched with
 * "largest key less than or equal" semantics (emem_tree_lookup32_le). */
void *
emem_tree_lookup32_array_le(emem_tree_t *se_tree, emem_tree_key_t *key)
{
	emem_tree_t *lookup_tree = NULL;
	emem_tree_key_t *cur_key;
	guint32 i, lookup_key32 = 0;

	if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */

	for (cur_key = key; cur_key->length > 0; cur_key++) {
		/* sanity check against a corrupt/unterminated key array */
		if(cur_key->length > 100) {
			DISSECTOR_ASSERT_NOT_REACHED();
		}
		for (i = 0; i < cur_key->length; i++) {
			/* Lookup using the previous key32 */
			if (!lookup_tree) {
				lookup_tree = se_tree;
			} else {
				lookup_tree = (emem_tree_t *)emem_tree_lookup32_le(lookup_tree, lookup_key32);
				if (!lookup_tree) {
					return NULL;
				}
			}
			lookup_key32 = cur_key->key[i];
		}
	}

	if(!lookup_tree) {
		/* We didn't get a valid key. Should we return NULL instead? */
		DISSECTOR_ASSERT_NOT_REACHED();
	}

	return emem_tree_lookup32_le(lookup_tree, lookup_key32);
}
/* Strings are stored as an array of uint32 containing the string characters
   with 4 characters in each uint32.
   The first byte of the string is stored as the most significant byte.
   If the string is not a multiple of 4 characters in length the last
   uint32 containing the string bytes are padded with 0 bytes.
   After the uint32's containing the string, there is one final terminator
   uint32 with the value 0x00000001
*/
void
emem_tree_insert_string(emem_tree_t* se_tree, const gchar* k, void* v, guint32 flags)
{
	emem_tree_key_t key[2];
	guint32 *aligned=NULL;
	guint32 len = (guint32) strlen(k);
	guint32 divx = (len+3)/4+1;	/* word count, incl. the terminator word */
	guint32 i;
	guint32 tmp;

	aligned = (guint32 *)g_malloc(divx * sizeof (guint32));

	/* pack the bytes one one by one into guint32s */
	tmp = 0;
	for (i = 0;i < len;i++) {
		unsigned char ch;

		ch = (unsigned char)k[i];
		if (flags & EMEM_TREE_STRING_NOCASE) {
			/* fold to lower case for case-insensitive keys */
			if(isupper(ch)) {
				ch = tolower(ch);
			}
		}
		tmp <<= 8;
		tmp |= ch;
		if (i%4 == 3) {
			aligned[i/4] = tmp;
			tmp = 0;
		}
	}

	/* add required padding to the last uint32 */
	if (i%4 != 0) {
		while (i%4 != 0) {
			i++;
			tmp <<= 8;
		}

		aligned[i/4-1] = tmp;
	}

	/* add the terminator */
	aligned[divx-1] = 0x00000001;

	key[0].length = divx;
	key[0].key = aligned;
	key[1].length = 0;
	key[1].key = NULL;

	/* the key words are copied into the tree nodes by value, so the
	 * scratch buffer can be freed afterwards */
	emem_tree_insert32_array(se_tree, key, v);
	g_free(aligned);
}
/* Look up a string key; the string is packed into guint32 words exactly
 * as in emem_tree_insert_string() (see the comment there for the
 * encoding), then looked up with emem_tree_lookup32_array(). */
void *
emem_tree_lookup_string(emem_tree_t* se_tree, const gchar* k, guint32 flags)
{
	emem_tree_key_t key[2];
	guint32 *aligned=NULL;
	guint32 len = (guint) strlen(k);
	guint32 divx = (len+3)/4+1;	/* word count, incl. the terminator word */
	guint32 i;
	guint32 tmp;
	void *ret;

	aligned = (guint32 *)g_malloc(divx * sizeof (guint32));

	/* pack the bytes one one by one into guint32s */
	tmp = 0;
	for (i = 0;i < len;i++) {
		unsigned char ch;

		ch = (unsigned char)k[i];
		if (flags & EMEM_TREE_STRING_NOCASE) {
			/* fold to lower case for case-insensitive keys */
			if(isupper(ch)) {
				ch = tolower(ch);
			}
		}
		tmp <<= 8;
		tmp |= ch;
		if (i%4 == 3) {
			aligned[i/4] = tmp;
			tmp = 0;
		}
	}

	/* add required padding to the last uint32 */
	if (i%4 != 0) {
		while (i%4 != 0) {
			i++;
			tmp <<= 8;
		}

		aligned[i/4-1] = tmp;
	}

	/* add the terminator */
	aligned[divx-1] = 0x00000001;

	key[0].length = divx;
	key[0].key = aligned;
	key[1].length = 0;
	key[1].key = NULL;

	ret = emem_tree_lookup32_array(se_tree, key);
	g_free(aligned);
	return ret;
}
/* In-order traversal helper for emem_tree_foreach().  Subtree nodes are
 * recursed into; data nodes are passed to the callback.  Returns TRUE
 * (and stops the whole traversal) as soon as a callback returns TRUE. */
static gboolean
emem_tree_foreach_nodes(emem_tree_node_t* node, tree_foreach_func callback, void *user_data)
{
	gboolean stop_traverse = FALSE;

	if (!node)
		return FALSE;

	if(node->left) {
		stop_traverse = emem_tree_foreach_nodes(node->left, callback, user_data);
		if (stop_traverse) {
			return TRUE;
		}
	}

	if (node->u.is_subtree == EMEM_TREE_NODE_IS_SUBTREE) {
		stop_traverse = emem_tree_foreach((emem_tree_t *)node->data, callback, user_data);
	} else {
		stop_traverse = callback(node->data, user_data);
	}

	if (stop_traverse) {
		return TRUE;
	}

	if(node->right) {
		stop_traverse = emem_tree_foreach_nodes(node->right, callback, user_data);
		if (stop_traverse) {
			return TRUE;
		}
	}

	return FALSE;
}
2066 gboolean
2067 emem_tree_foreach(emem_tree_t* emem_tree, tree_foreach_func callback, void *user_data)
2069 if (!emem_tree)
2070 return FALSE;
2072 if(!emem_tree->tree)
2073 return FALSE;
2075 return emem_tree_foreach_nodes(emem_tree->tree, callback, user_data);
2078 static void emem_print_subtree(emem_tree_t* emem_tree, guint32 level);
/* Debug dump of one node (recursing into children and subtrees),
 * indented two spaces per tree level. */
static void
emem_tree_print_nodes(const char *prefix, emem_tree_node_t* node, guint32 level)
{
	guint32 i;

	if (!node)
		return;

	for(i=0;i<level;i++){
		printf("    ");
	}

	printf("%sNODE:%p parent:%p left:%p right:%p colour:%s key:%u %s:%p\n", prefix,
	       (void *)node,(void *)(node->parent),(void *)(node->left),(void *)(node->right),
	       (node->u.rb_color)?"Black":"Red",(node->key32),(node->u.is_subtree)?"tree":"data",node->data);
	if(node->left)
		emem_tree_print_nodes("L-", node->left, level+1);
	if(node->right)
		emem_tree_print_nodes("R-", node->right, level+1);

	if (node->u.is_subtree)
		emem_print_subtree((emem_tree_t *)node->data, level+1);
}
/* Debug dump of a (sub)tree header followed by all of its nodes. */
static void
emem_print_subtree(emem_tree_t* emem_tree, guint32 level)
{
	guint32 i;

	if (!emem_tree)
		return;

	for(i=0;i<level;i++){
		printf("    ");
	}

	printf("EMEM tree:%p type:%s name:%s root:%p\n",(void *)emem_tree,(emem_tree->type==1)?"RedBlack":"unknown",emem_tree->name,(void *)(emem_tree->tree));
	if(emem_tree->tree)
		emem_tree_print_nodes("Root-", emem_tree->tree, level);
}
/* Debug: dump an entire tree (and its subtrees) to stdout. */
void
emem_print_tree(emem_tree_t* emem_tree)
{
	emem_print_subtree(emem_tree, 0);
}
2128 * String buffers
2132 * Presumably we're using these routines for building strings for the tree.
2133 * Use ITEM_LABEL_LENGTH as the basis for our default lengths.
2136 #define DEFAULT_STRBUF_LEN (ITEM_LABEL_LENGTH / 10)
2137 #define MAX_STRBUF_LEN 65536
2139 static gsize
2140 next_size(gsize cur_alloc_len, gsize wanted_alloc_len, gsize max_alloc_len)
2142 if (max_alloc_len < 1 || max_alloc_len > MAX_STRBUF_LEN) {
2143 max_alloc_len = MAX_STRBUF_LEN;
2146 if (cur_alloc_len < 1) {
2147 cur_alloc_len = DEFAULT_STRBUF_LEN;
2150 while (cur_alloc_len < wanted_alloc_len) {
2151 cur_alloc_len *= 2;
2154 return cur_alloc_len < max_alloc_len ? cur_alloc_len : max_alloc_len;
/* Grow a strbuf's storage to at least wanted_alloc_len (subject to its
 * max_alloc_len cap).  No-op if the buffer is NULL, already big enough,
 * or already at its maximum.  The old buffer is simply abandoned to the
 * ep_ pool (not freed). */
static void
ep_strbuf_grow(emem_strbuf_t *strbuf, gsize wanted_alloc_len)
{
	gsize new_alloc_len;
	gchar *new_str;

	if (!strbuf || (wanted_alloc_len <= strbuf->alloc_len) || (strbuf->alloc_len >= strbuf->max_alloc_len)) {
		return;
	}

	new_alloc_len = next_size(strbuf->alloc_len, wanted_alloc_len, strbuf->max_alloc_len);
	new_str = (gchar *)ep_alloc(new_alloc_len);
	g_strlcpy(new_str, strbuf->str, new_alloc_len);

	strbuf->alloc_len = new_alloc_len;
	strbuf->str = new_str;
}
2175 emem_strbuf_t *
2176 ep_strbuf_sized_new(gsize alloc_len, gsize max_alloc_len)
2178 emem_strbuf_t *strbuf;
2180 strbuf = ep_new(emem_strbuf_t);
2182 if ((max_alloc_len == 0) || (max_alloc_len > MAX_STRBUF_LEN))
2183 max_alloc_len = MAX_STRBUF_LEN;
2184 if (alloc_len == 0)
2185 alloc_len = 1;
2186 else if (alloc_len > max_alloc_len)
2187 alloc_len = max_alloc_len;
2189 strbuf->str = (char *)ep_alloc(alloc_len);
2190 strbuf->str[0] = '\0';
2192 strbuf->len = 0;
2193 strbuf->alloc_len = alloc_len;
2194 strbuf->max_alloc_len = max_alloc_len;
2196 return strbuf;
2199 emem_strbuf_t *
2200 ep_strbuf_new(const gchar *init)
2202 emem_strbuf_t *strbuf;
2204 strbuf = ep_strbuf_sized_new(next_size(0, init?strlen(init)+1:0, 0), 0); /* +1 for NULL terminator */
2205 if (init) {
2206 gsize full_len;
2207 full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
2208 strbuf->len = MIN(full_len, strbuf->alloc_len-1);
2211 return strbuf;
/* Create a strbuf suitable for building an item label (max size
 * ITEM_LABEL_LENGTH), optionally initialized with a copy of 'init'. */
emem_strbuf_t *
ep_strbuf_new_label(const gchar *init)
{
	emem_strbuf_t *strbuf;
	gsize full_len;

	/* Be optimistic: Allocate default size strbuf string and only */
	/* request an increase if needed. */
	/* XXX: Is it reasonable to assume that much of the usage of */
	/* ep_strbuf_new_label will have init==NULL or */
	/* strlen(init) < DEFAULT_STRBUF_LEN) ??? */
	strbuf = ep_strbuf_sized_new(DEFAULT_STRBUF_LEN, ITEM_LABEL_LENGTH);

	if (!init)
		return strbuf;

	/* full_len does not count the trailing '\0'. */
	full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
	if (full_len < strbuf->alloc_len) {
		strbuf->len += full_len;
	} else {
		/* init didn't fit: allocate a correctly-sized buffer and
		 * copy again (possibly truncated at ITEM_LABEL_LENGTH). */
		strbuf = ep_strbuf_sized_new(full_len+1, ITEM_LABEL_LENGTH);
		full_len = g_strlcpy(strbuf->str, init, strbuf->alloc_len);
		strbuf->len = MIN(full_len, strbuf->alloc_len-1);
	}

	return strbuf;
}
/* Append 'str' to the strbuf, growing it if needed (up to its cap, so
 * the result may be truncated).  NULL/empty inputs are no-ops.  Returns
 * the strbuf for chaining. */
emem_strbuf_t *
ep_strbuf_append(emem_strbuf_t *strbuf, const gchar *str)
{
	gsize add_len, full_len;

	if (!strbuf || !str || str[0] == '\0') {
		return strbuf;
	}

	/* Be optimistic; try the g_strlcpy first & see if enough room. */
	/* Note: full_len doesn't count the trailing '\0'; add_len does allow for same */
	add_len = strbuf->alloc_len - strbuf->len;
	full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
	if (full_len < add_len) {
		strbuf->len += full_len;
	} else {
		/* It didn't fit: undo the partial copy, grow, and retry. */
		strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
		ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
		add_len = strbuf->alloc_len - strbuf->len;
		full_len = g_strlcpy(&strbuf->str[strbuf->len], str, add_len);
		strbuf->len += MIN(add_len-1, full_len);
	}

	return strbuf;
}
2269 void
2270 ep_strbuf_append_vprintf(emem_strbuf_t *strbuf, const gchar *format, va_list ap)
2272 va_list ap2;
2273 gsize add_len, full_len;
2275 G_VA_COPY(ap2, ap);
2277 /* Be optimistic; try the g_vsnprintf first & see if enough room. */
2278 /* Note: full_len doesn't count the trailing '\0'; add_len does allow for same. */
2279 add_len = strbuf->alloc_len - strbuf->len;
2280 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap);
2281 if (full_len < add_len) {
2282 strbuf->len += full_len;
2283 } else {
2284 strbuf->str[strbuf->len] = '\0'; /* end string at original length again */
2285 ep_strbuf_grow(strbuf, strbuf->len + full_len + 1);
2286 add_len = strbuf->alloc_len - strbuf->len;
2287 full_len = g_vsnprintf(&strbuf->str[strbuf->len], (gulong) add_len, format, ap2);
2288 strbuf->len += MIN(add_len-1, full_len);
2291 va_end(ap2);
2294 void
2295 ep_strbuf_append_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2297 va_list ap;
2299 va_start(ap, format);
2300 ep_strbuf_append_vprintf(strbuf, format, ap);
2301 va_end(ap);
2304 void
2305 ep_strbuf_printf(emem_strbuf_t *strbuf, const gchar *format, ...)
2307 va_list ap;
2308 if (!strbuf) {
2309 return;
2312 strbuf->len = 0;
2314 va_start(ap, format);
2315 ep_strbuf_append_vprintf(strbuf, format, ap);
2316 va_end(ap);
2319 emem_strbuf_t *
2320 ep_strbuf_append_c(emem_strbuf_t *strbuf, const gchar c)
2322 if (!strbuf) {
2323 return strbuf;
2326 /* +1 for the new character & +1 for the trailing '\0'. */
2327 if (strbuf->alloc_len < strbuf->len + 1 + 1) {
2328 ep_strbuf_grow(strbuf, strbuf->len + 1 + 1);
2330 if (strbuf->alloc_len >= strbuf->len + 1 + 1) {
2331 strbuf->str[strbuf->len] = c;
2332 strbuf->len++;
2333 strbuf->str[strbuf->len] = '\0';
2336 return strbuf;
2339 emem_strbuf_t *
2340 ep_strbuf_append_unichar(emem_strbuf_t *strbuf, const gunichar c)
2342 gchar buf[6];
2343 gint charlen;
2345 if (!strbuf) {
2346 return strbuf;
2349 charlen = g_unichar_to_utf8(c, buf);
2351 /* +charlen for the new character & +1 for the trailing '\0'. */
2352 if (strbuf->alloc_len < strbuf->len + charlen + 1) {
2353 ep_strbuf_grow(strbuf, strbuf->len + charlen + 1);
2355 if (strbuf->alloc_len >= strbuf->len + charlen + 1) {
2356 memcpy(&strbuf->str[strbuf->len], buf, charlen);
2357 strbuf->len += charlen;
2358 strbuf->str[strbuf->len] = '\0';
2361 return strbuf;
2364 emem_strbuf_t *
2365 ep_strbuf_truncate(emem_strbuf_t *strbuf, gsize len)
2367 if (!strbuf || len >= strbuf->len) {
2368 return strbuf;
2371 strbuf->str[len] = '\0';
2372 strbuf->len = len;
2374 return strbuf;
2378 * Editor modelines
2380 * Local Variables:
2381 * c-basic-offset: 8
2382 * tab-width: 8
2383 * indent-tabs-mode: t
2384 * End:
2386 * ex: set shiftwidth=8 tabstop=8 noexpandtab:
2387 * :indentSize=8:tabSize=8:noTabs=false: