[heimdal.git] / lib / base / heimbase.c
blob 05add641e019e9dba4efbc7e18231b5eaa429108

/*
 * Copyright (c) 2010 Kungliga Tekniska Högskolan
 * (Royal Institute of Technology, Stockholm, Sweden).
 * All rights reserved.
 *
 * Portions Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "baselocl.h"
#include "heimbase-atomics.h"
#include <syslog.h>

static heim_base_atomic(uint32_t) tidglobal = HEIM_TID_USER;

struct heim_base {
    heim_const_type_t isa;
    heim_base_atomic(uint32_t) ref_cnt;
    HEIM_TAILQ_ENTRY(heim_base) autorel;
    heim_auto_release_t autorelpool;
    uintptr_t isaextra[3];
};

/* specialized version of base */
struct heim_base_mem {
    heim_const_type_t isa;
    heim_base_atomic(uint32_t) ref_cnt;
    HEIM_TAILQ_ENTRY(heim_base) autorel;
    heim_auto_release_t autorelpool;
    const char *name;
    void (HEIM_CALLCONV *dealloc)(void *);
    uintptr_t isaextra[1];
};
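
/*
 * An object is laid out as a struct heim_base (or struct heim_base_mem)
 * header immediately followed by the caller-visible payload.  PTR2BASE()
 * steps back from a payload pointer to its header; BASE2PTR() returns
 * the payload that follows a header.
 */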

#define PTR2BASE(ptr) (((struct heim_base *)ptr) - 1)
#define BASE2PTR(ptr) ((void *)(((struct heim_base *)ptr) + 1))

HEIMDAL_MUTEX * HEIM_CALLCONV
heim_base_mutex(void)
{
    static HEIMDAL_MUTEX _heim_base_mutex = HEIMDAL_MUTEX_INITIALIZER;
    return &_heim_base_mutex;
}

/*
 * Auto release structure
 */

struct heim_auto_release {
    HEIM_TAILQ_HEAD(, heim_base) pool;
    HEIMDAL_MUTEX pool_mutex;
    struct heim_auto_release *parent;
};

/**
 * Retain object (i.e., take a reference)
 *
 * @param object to be retained, NULL is ok
 *
 * @return the same object as passed in
 */

heim_object_t
heim_retain(heim_object_t ptr)
{
    struct heim_base *p;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return ptr;

    p = PTR2BASE(ptr);

    if (heim_base_atomic_load(&p->ref_cnt) == UINT32_MAX)
        return ptr;

    if ((heim_base_atomic_inc_32(&p->ref_cnt) - 1) == 0)
        heim_abort("resurrection");
    return ptr;
}

/**
 * Release object, free if reference count reaches zero
 *
 * @param object to be released
 */

void
heim_release(void *ptr)
{
    heim_base_atomic(uint32_t) old;
    struct heim_base *p;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return;

    p = PTR2BASE(ptr);

    if (heim_base_atomic_load(&p->ref_cnt) == UINT32_MAX)
        return;

    old = heim_base_atomic_dec_32(&p->ref_cnt) + 1;

    if (old > 1)
        return;

    if (old == 1) {
        heim_auto_release_t ar = p->autorelpool;
        /* remove from autorel pool list */
        if (ar) {
            p->autorelpool = NULL;
            HEIMDAL_MUTEX_lock(&ar->pool_mutex);
            HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
            HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
        }
        if (p->isa->dealloc)
            p->isa->dealloc(ptr);
        free(p);
    } else
        heim_abort("over release");
}
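
/*
 * Lifecycle sketch (illustrative; "struct my_state" is a hypothetical
 * caller type, allocated with heim_alloc() defined later in this file):
 *
 *     heim_object_t o = heim_alloc(sizeof(struct my_state), "my-state", NULL);
 *     heim_retain(o);   // reference count 1 -> 2
 *     heim_release(o);  // 2 -> 1
 *     heim_release(o);  // 1 -> 0: the dealloc hook (if any) runs, then free()
 *
 * Tagged objects and permanent objects (ref_cnt == UINT32_MAX) pass
 * through heim_retain() and heim_release() unchanged.
 */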

/*
 * If used, the caller must have an autorelease pool in place: the
 * returned description is placed on it.
 */

heim_string_t
heim_description(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);
    if (p->isa->desc == NULL)
        return heim_auto_release(heim_string_ref_create(p->isa->name, NULL));
    return heim_auto_release(p->isa->desc(ptr));
}


void
_heim_make_permanent(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);
    heim_base_atomic_store(&p->ref_cnt, UINT32_MAX);
}

static heim_type_t tagged_isa[9] = {
    &_heim_number_object,
    &_heim_null_object,
    &_heim_bool_object,

    NULL,
    NULL,
    NULL,

    NULL,
    NULL,
    NULL
};

heim_const_type_t
_heim_get_isa(heim_object_t ptr)
{
    struct heim_base *p;
    if (heim_base_is_tagged(ptr)) {
        if (heim_base_is_tagged_object(ptr))
            return tagged_isa[heim_base_tagged_object_tid(ptr)];
        heim_abort("not a supported tagged type");
    }
    p = PTR2BASE(ptr);
    return p->isa;
}

/**
 * Get type ID of object
 *
 * @param object object to get type id of
 *
 * @return type id of object
 */

heim_tid_t
heim_get_tid(heim_object_t ptr)
{
    heim_const_type_t isa = _heim_get_isa(ptr);
    return isa->tid;
}

/**
 * Get hash value of object
 *
 * @param object object to get hash value for
 *
 * @return a hash value
 */

uintptr_t
heim_get_hash(heim_object_t ptr)
{
    heim_const_type_t isa = _heim_get_isa(ptr);
    if (isa->hash)
        return isa->hash(ptr);
    return (uintptr_t)ptr;
}

/**
 * Compare two objects, returns 0 if equal, can be used for qsort()
 * and friends.
 *
 * @param a first object to compare
 * @param b second object to compare
 *
 * @return 0 if objects are equal
 */

int
heim_cmp(heim_object_t a, heim_object_t b)
{
    heim_tid_t ta, tb;
    heim_const_type_t isa;

    ta = heim_get_tid(a);
    tb = heim_get_tid(b);

    if (ta != tb)
        return ta - tb;

    isa = _heim_get_isa(a);

    if (isa->cmp)
        return isa->cmp(a, b);

    return (uintptr_t)a - (uintptr_t)b;
}
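
/*
 * Sorting sketch (qsort_heim_cmp() is a hypothetical wrapper; qsort()
 * hands the comparator pointers to array elements, so heim_cmp() cannot
 * be passed to it directly):
 *
 *     static int
 *     qsort_heim_cmp(const void *a, const void *b)
 *     {
 *         heim_object_t const *oa = a, *ob = b;
 *         return heim_cmp(*oa, *ob);
 *     }
 *
 *     heim_object_t objs[3] = { ... };
 *     qsort(objs, 3, sizeof(objs[0]), qsort_heim_cmp);
 */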

/*
 * Private - allocates a memory object
 */

static void HEIM_CALLCONV
memory_dealloc(void *ptr)
{
    if (ptr) {
        struct heim_base_mem *p = (struct heim_base_mem *)PTR2BASE(ptr);

        if (p->dealloc)
            p->dealloc(ptr);
    }
}

static const struct heim_type_data memory_object = {
    HEIM_TID_MEMORY,
    "memory-object",
    NULL,
    memory_dealloc,
    NULL,
    NULL,
    NULL,
    NULL
};

/**
 * Allocate memory for an object of anonymous type
 *
 * @param size size of object to be allocated
 * @param name name of ad-hoc type
 * @param dealloc destructor function
 *
 * Objects allocated with this interface do not serialize.
 *
 * @return allocated object
 */

void *
heim_alloc(size_t size, const char *name, heim_type_dealloc dealloc)
{
    /* XXX use posix_memalign */

    struct heim_base_mem *p = calloc(1, size + sizeof(*p));
    if (p == NULL)
        return NULL;
    p->isa = &memory_object;
    p->ref_cnt = 1;
    p->name = name;
    p->dealloc = dealloc;
    return BASE2PTR(p);
}
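
/*
 * Allocation sketch (illustrative; "struct my_ctx" and my_ctx_dealloc()
 * are hypothetical caller code):
 *
 *     struct my_ctx { char *path; };
 *
 *     static void HEIM_CALLCONV
 *     my_ctx_dealloc(void *ptr)
 *     {
 *         struct my_ctx *c = ptr;
 *         free(c->path);   // release only the payload's own resources;
 *                          // the object itself is freed by heim_release()
 *     }
 *
 *     struct my_ctx *c = heim_alloc(sizeof(*c), "my-ctx", my_ctx_dealloc);
 *     ...
 *     heim_release(c);     // runs my_ctx_dealloc(c), then frees the object
 */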

heim_type_t
_heim_create_type(const char *name,
                  heim_type_init init,
                  heim_type_dealloc dealloc,
                  heim_type_copy copy,
                  heim_type_cmp cmp,
                  heim_type_hash hash,
                  heim_type_description desc)
{
    heim_type_t type;

    type = calloc(1, sizeof(*type));
    if (type == NULL)
        return NULL;

    type->tid = heim_base_atomic_inc_32(&tidglobal);
    type->name = name;
    type->init = init;
    type->dealloc = dealloc;
    type->copy = copy;
    type->cmp = cmp;
    type->hash = hash;
    type->desc = desc;

    return type;
}

heim_object_t
_heim_alloc_object(heim_const_type_t type, size_t size)
{
    /* XXX should use posix_memalign */
    struct heim_base *p = calloc(1, size + sizeof(*p));
    if (p == NULL)
        return NULL;
    p->isa = type;
    p->ref_cnt = 1;

    return BASE2PTR(p);
}

void *
_heim_get_isaextra(heim_object_t ptr, size_t idx)
{
    struct heim_base *p;

    heim_assert(ptr != NULL, "internal error");
    p = (struct heim_base *)PTR2BASE(ptr);
    if (p->isa == &memory_object)
        return NULL;
    heim_assert(idx < 3, "invalid private heim_base extra data index");
    return &p->isaextra[idx];
}

heim_tid_t
_heim_type_get_tid(heim_type_t type)
{
    return type->tid;
}

#if !defined(WIN32) && !defined(HAVE_DISPATCH_DISPATCH_H) && defined(ENABLE_PTHREAD_SUPPORT)
static pthread_once_t once_arg_key_once = PTHREAD_ONCE_INIT;
static pthread_key_t once_arg_key;

static void
once_arg_key_once_init(void)
{
    errno = pthread_key_create(&once_arg_key, NULL);
    if (errno != 0) {
        fprintf(stderr,
                "Error: pthread_key_create() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
}

struct once_callback {
    void (*fn)(void *);
    void *data;
};

static void
once_callback_caller(void)
{
    struct once_callback *once_callback = pthread_getspecific(once_arg_key);

    if (once_callback == NULL) {
        fprintf(stderr, "Error: pthread_once() calls callback on "
                "different thread?! Cannot continue.\n");
        abort();
    }
    once_callback->fn(once_callback->data);
}
#endif

/**
 * Call func once and only once
 *
 * @param once pointer to a heim_base_once_t
 * @param ctx context passed to func
 * @param func function to be called
 */

void
heim_base_once_f(heim_base_once_t *once, void *ctx, void (*func)(void *))
{
#if defined(WIN32)
    /*
     * With a libroken wrapper for some CAS function and a libroken yield()
     * wrapper we could make this the default implementation when we have
     * neither Grand Central Dispatch nor POSIX threads.
     *
     * We could also adapt the double-checked lock pattern with CAS
     * providing the necessary memory barriers in the absence of
     * portable explicit memory barrier APIs.
     */
    /*
     * We use CAS operations in large part to provide implied memory
     * barriers.
     *
     * State 0 means that func() has never executed.
     * State 1 means that func() is executing.
     * State 2 means that func() has completed execution.
     */
    if (InterlockedCompareExchange(once, 1L, 0L) == 0L) {
        /* State is now 1 */
        (*func)(ctx);
        (void)InterlockedExchange(once, 2L);
        /* State is now 2 */
    } else {
        /*
         * The InterlockedCompareExchange is being used to fetch
         * the current state under a full memory barrier.  As long
         * as the current state is 1 continue to spin.
         */
        while (InterlockedCompareExchange(once, 2L, 0L) == 1L)
            SwitchToThread();
    }
#elif defined(HAVE_DISPATCH_DISPATCH_H)
    dispatch_once_f(once, ctx, func);
#elif defined(ENABLE_PTHREAD_SUPPORT)
    struct once_callback once_callback;

    once_callback.fn = func;
    once_callback.data = ctx;

    errno = pthread_once(&once_arg_key_once, once_arg_key_once_init);
    if (errno != 0) {
        fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
    errno = pthread_setspecific(once_arg_key, &once_callback);
    if (errno != 0) {
        fprintf(stderr,
                "Error: pthread_setspecific() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
    errno = pthread_once(once, once_callback_caller);
    if (errno != 0) {
        fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
#else
    static HEIMDAL_MUTEX mutex = HEIMDAL_MUTEX_INITIALIZER;
    HEIMDAL_MUTEX_lock(&mutex);
    if (*once == 0) {
        *once = 1;
        HEIMDAL_MUTEX_unlock(&mutex);
        func(ctx);
        HEIMDAL_MUTEX_lock(&mutex);
        *once = 2;
        HEIMDAL_MUTEX_unlock(&mutex);
    } else if (*once == 2) {
        HEIMDAL_MUTEX_unlock(&mutex);
    } else {
        HEIMDAL_MUTEX_unlock(&mutex);
        while (1) {
            struct timeval tv = { 0, 1000 };
            select(0, NULL, NULL, NULL, &tv);
            HEIMDAL_MUTEX_lock(&mutex);
            if (*once == 2)
                break;
            HEIMDAL_MUTEX_unlock(&mutex);
        }
        HEIMDAL_MUTEX_unlock(&mutex);
    }
#endif
}
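
/*
 * Usage sketch (the names my_once, my_registry and init_registry() are
 * hypothetical caller code, not part of this file):
 *
 *     static heim_base_once_t my_once = HEIM_BASE_ONCE_INIT;
 *     static heim_dict_t my_registry;
 *
 *     static void
 *     init_registry(void *ctx)
 *     {
 *         my_registry = heim_dict_create(11);
 *     }
 *
 *     ...
 *     heim_base_once_f(&my_once, NULL, init_registry);
 *     // on return init_registry() has run exactly once, on whichever
 *     // thread got here first; later callers wait for it to complete
 */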

/**
 * Abort and log the failure (using syslog)
 */

void
heim_abort(const char *fmt, ...)
    HEIMDAL_NORETURN_ATTRIBUTE
    HEIMDAL_PRINTF_ATTRIBUTE((__printf__, 1, 2))
{
    va_list ap;
    va_start(ap, fmt);
    heim_abortv(fmt, ap);
    va_end(ap);
}

/**
 * Abort and log the failure (using syslog)
 */

void
heim_abortv(const char *fmt, va_list ap)
    HEIMDAL_NORETURN_ATTRIBUTE
    HEIMDAL_PRINTF_ATTRIBUTE((__printf__, 1, 0))
{
    static char str[1024];

    vsnprintf(str, sizeof(str), fmt, ap);
    syslog(LOG_ERR, "heim_abort: %s", str);
    abort();
}

static int ar_created = 0;
static HEIMDAL_thread_key ar_key;

struct ar_tls {
    struct heim_auto_release *head;
    struct heim_auto_release *current;
    HEIMDAL_MUTEX tls_mutex;
};

static void
ar_tls_delete(void *ptr)
{
    struct ar_tls *tls = ptr;
    heim_auto_release_t next = NULL;

    if (tls == NULL)
        return;
    for (; tls->current != NULL; tls->current = next) {
        next = tls->current->parent;
        heim_release(tls->current);
    }
    free(tls);
}

static void
init_ar_tls(void *ptr)
{
    int ret;
    HEIMDAL_key_create(&ar_key, ar_tls_delete, ret);
    if (ret == 0)
        ar_created = 1;
}

static struct ar_tls *
autorel_tls(void)
{
    static heim_base_once_t once = HEIM_BASE_ONCE_INIT;
    struct ar_tls *arp;
    int ret;

    heim_base_once_f(&once, NULL, init_ar_tls);
    if (!ar_created)
        return NULL;

    arp = HEIMDAL_getspecific(ar_key);
    if (arp == NULL) {

        arp = calloc(1, sizeof(*arp));
        if (arp == NULL)
            return NULL;
        HEIMDAL_setspecific(ar_key, arp, ret);
        if (ret) {
            free(arp);
            return NULL;
        }
    }
    return arp;
}

static void HEIM_CALLCONV
autorel_dealloc(void *ptr)
{
    heim_auto_release_t ar = ptr;
    struct ar_tls *tls;

    tls = autorel_tls();
    if (tls == NULL)
        heim_abort("autorelease pool released on thread w/o autorelease inited");

    heim_auto_release_drain(ar);

    if (!HEIM_TAILQ_EMPTY(&ar->pool))
        heim_abort("pool not empty after draining");

    HEIMDAL_MUTEX_lock(&tls->tls_mutex);
    if (tls->current != ptr)
        heim_abort("autorelease not releasing top pool");

    tls->current = ar->parent;
    HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
}

static int
autorel_cmp(void *a, void *b)
{
    return (a == b);
}

static uintptr_t
autorel_hash(void *ptr)
{
    return (uintptr_t)ptr;
}


static struct heim_type_data _heim_autorel_object = {
    HEIM_TID_AUTORELEASE,
    "autorelease-pool",
    NULL,
    autorel_dealloc,
    NULL,
    autorel_cmp,
    autorel_hash,
    NULL
};

/**
 * Create thread-specific object auto-release pool
 *
 * Objects placed on the per-thread auto-release pool (with
 * heim_auto_release()) can be released in one fell swoop by calling
 * heim_auto_release_drain().
 */

heim_auto_release_t
heim_auto_release_create(void)
{
    struct ar_tls *tls = autorel_tls();
    heim_auto_release_t ar;

    if (tls == NULL)
        heim_abort("Failed to create/get autorelease head");

    ar = _heim_alloc_object(&_heim_autorel_object, sizeof(struct heim_auto_release));
    if (ar) {
        HEIMDAL_MUTEX_lock(&tls->tls_mutex);
        if (tls->head == NULL)
            tls->head = ar;
        ar->parent = tls->current;
        tls->current = ar;
        HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
    }

    return ar;
}

/**
 * Place the current object on the thread's auto-release pool
 *
 * @param ptr object
 */

heim_object_t
heim_auto_release(heim_object_t ptr)
{
    struct heim_base *p;
    struct ar_tls *tls;
    heim_auto_release_t ar;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return ptr;

    p = PTR2BASE(ptr);
    tls = autorel_tls();

    /* drop from old pool */
    if ((ar = p->autorelpool) != NULL) {
        HEIMDAL_MUTEX_lock(&ar->pool_mutex);
        HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
        p->autorelpool = NULL;
        HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
    }

    if (tls == NULL || (ar = tls->current) == NULL)
        heim_abort("no auto release pool in place, would leak");

    HEIMDAL_MUTEX_lock(&ar->pool_mutex);
    HEIM_TAILQ_INSERT_HEAD(&ar->pool, p, autorel);
    p->autorelpool = ar;
    HEIMDAL_MUTEX_unlock(&ar->pool_mutex);

    return ptr;
}

/**
 * Release all objects on the given auto-release pool
 */

void
heim_auto_release_drain(heim_auto_release_t autorel)
{
    heim_object_t obj;

    /* release all elements on the tail queue */

    HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    while (!HEIM_TAILQ_EMPTY(&autorel->pool)) {
        obj = HEIM_TAILQ_FIRST(&autorel->pool);
        HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
        heim_release(BASE2PTR(obj));
        HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    }
    HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
}
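
/*
 * Pool usage sketch (a minimal pattern; heim_string_create() is assumed
 * from the public heimbase API, and error handling is omitted):
 *
 *     heim_auto_release_t pool = heim_auto_release_create();
 *
 *     heim_string_t s = heim_auto_release(heim_string_create("temporary"));
 *     // ... use s without an explicit heim_release() ...
 *
 *     heim_release(pool);  // autorel_dealloc() drains the pool (releasing
 *                          // s) and pops it off the thread's pool stack
 */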

/*
 * Helper for heim_path_vget() and heim_path_delete().  On success
 * outputs the node named by the path and the parent node and key
 * (useful for heim_path_delete()).
 */

static heim_object_t
heim_path_vget2(heim_object_t ptr, heim_object_t *parent, heim_object_t *key,
                heim_error_t *error, va_list ap)
{
    heim_object_t path_element;
    heim_object_t node, next_node;
    heim_tid_t node_type;

    *parent = NULL;
    *key = NULL;
    if (ptr == NULL)
        return NULL;

    for (node = ptr; node != NULL; ) {
        path_element = va_arg(ap, heim_object_t);
        if (path_element == NULL) {
            *parent = node;
            *key = path_element;
            return node;
        }

        node_type = heim_get_tid(node);
        switch (node_type) {
        case HEIM_TID_ARRAY:
        case HEIM_TID_DICT:
        case HEIM_TID_DB:
            break;
        default:
            if (node == ptr)
                heim_abort("heim_path_get() only operates on container types");
            return NULL;
        }

        if (node_type == HEIM_TID_DICT) {
            next_node = heim_dict_get_value(node, path_element);
        } else if (node_type == HEIM_TID_DB) {
            next_node = _heim_db_get_value(node, NULL, path_element, NULL);
        } else {
            int idx = -1;

            /* node_type == HEIM_TID_ARRAY */
            if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
                idx = heim_number_get_int(path_element);
            if (idx < 0) {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "heim_path_get() path elements "
                                               "for array nodes must be "
                                               "numeric and positive");
                return NULL;
            }
            next_node = heim_array_get_value(node, idx);
        }
        node = next_node;
    }
    return NULL;
}

/**
 * Get a node in a heim_object tree by path
 *
 * @param ptr tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of heim_object_ts that form a path
 *
 * @return object (not retained) if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_vget(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t p, k;

    return heim_path_vget2(ptr, &p, &k, error, ap);
}

/**
 * Get a node in a tree by path, with retained reference
 *
 * @param ptr tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of heim_object_ts that form a path
 *
 * @return retained object if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_vcopy(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t p, k;

    return heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));
}

/**
 * Get a node in a tree by path
 *
 * @param ptr tree
 * @param error error (output)
 * @param ... NULL-terminated list of heim_object_ts that form a path
 *
 * @return object (not retained) if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_get(heim_object_t ptr, heim_error_t *error, ...)
{
    heim_object_t o;
    heim_object_t p, k;
    va_list ap;

    if (ptr == NULL)
        return NULL;

    va_start(ap, error);
    o = heim_path_vget2(ptr, &p, &k, error, ap);
    va_end(ap);
    return o;
}
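
/*
 * Lookup sketch (illustrative; assumes a dict-shaped tree and
 * heim_string_create() from the public heimbase API):
 *
 *     // tree looks like { "config": { "port": 88 } }
 *     heim_error_t err = NULL;
 *     heim_string_t k1 = heim_string_create("config");
 *     heim_string_t k2 = heim_string_create("port");
 *     heim_object_t port = heim_path_get(tree, &err, k1, k2, NULL);
 *     // port is not retained; use heim_path_copy() (below) to keep it
 *     heim_release(k2);
 *     heim_release(k1);
 */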

/**
 * Get a node in a tree by path, with retained reference
 *
 * @param ptr tree
 * @param error error (output)
 * @param ... NULL-terminated list of heim_object_ts that form a path
 *
 * @return retained object if found
 *
 * @addtogroup heimbase
 */

heim_object_t
heim_path_copy(heim_object_t ptr, heim_error_t *error, ...)
{
    heim_object_t o;
    heim_object_t p, k;
    va_list ap;

    if (ptr == NULL)
        return NULL;

    va_start(ap, error);
    o = heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));
    va_end(ap);
    return o;
}

/**
 * Create a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param size the size of the heim_dict_t nodes to be created
 * @param leaf leaf node to be added, if any
 * @param error error (output)
 * @param ap NULL-terminated list of path component objects
 *
 * Create a path of heim_dict_t interior nodes in a given heim_object_t
 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
 * then the leaf is not deleted).
 *
 * @return 0 on success, else a system error
 *
 * @addtogroup heimbase
 */

int
heim_path_vcreate(heim_object_t ptr, size_t size, heim_object_t leaf,
                  heim_error_t *error, va_list ap)
{
    heim_object_t path_element = va_arg(ap, heim_object_t);
    heim_object_t next_path_element = NULL;
    heim_object_t node = ptr;
    heim_object_t next_node = NULL;
    heim_tid_t node_type;
    int ret = 0;

    if (ptr == NULL)
        heim_abort("heim_path_vcreate() does not create root nodes");

    while (path_element != NULL) {
        int idx = -1;

        next_path_element = va_arg(ap, heim_object_t);
        node_type = heim_get_tid(node);

        if (node_type == HEIM_TID_DICT) {
            next_node = heim_dict_get_value(node, path_element);
        } else if (node_type == HEIM_TID_ARRAY) {
            if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
                idx = heim_number_get_int(path_element);
            if (idx < 0) {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "heim_path() path elements for "
                                               "array nodes must be numeric "
                                               "and positive");
                return EINVAL;
            }
            if (idx < heim_array_get_length(node)) {
                next_node = heim_array_get_value(node, idx);
            } else if (idx == heim_array_get_length(node)) {
                next_node = NULL;
            } else {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "Index for array in path is too large");
                return EINVAL;
            }
        } else if (node_type == HEIM_TID_DB && next_path_element != NULL) {
            if (error)
                *error = heim_error_create(EINVAL, "Interior node is a DB");
            return EINVAL;
        }

        if (next_path_element == NULL)
            break;

        /* Create missing interior node */
        if (next_node == NULL) {
            heim_dict_t new_node;

            new_node = heim_dict_create(size); /* no arrays or DBs, just dicts */
            if (new_node == NULL) {
                ret = ENOMEM;
                goto err;
            }

            if (node_type == HEIM_TID_DICT) {
                ret = heim_dict_set_value(node, path_element, new_node);
                next_node = heim_dict_get_value(node, path_element);
            } else if (node_type == HEIM_TID_ARRAY &&
                       heim_number_get_int(path_element) <= heim_array_get_length(node)) {
                ret = heim_array_insert_value(node,
                                              heim_number_get_int(path_element),
                                              new_node);
                next_node = heim_array_get_value(node, idx);
            } else {
                ret = EINVAL;
                if (error)
                    *error = heim_error_create(ret, "Node in path not a "
                                               "container");
            }
            heim_release(new_node);
            if (ret)
                goto err;
        }

        path_element = next_path_element;
        node = next_node;
        next_node = NULL;
    }

    if (path_element == NULL)
        goto err;

    /* Add the leaf */
    if (leaf != NULL) {
        if (node_type == HEIM_TID_DICT)
            ret = heim_dict_set_value(node, path_element, leaf);
        else
            ret = heim_array_insert_value(node,
                                          heim_number_get_int(path_element),
                                          leaf);
    }
    return ret;

err:
    if (error && !*error) {
        if (ret == ENOMEM)
            *error = heim_error_create_enomem();
        else
            *error = heim_error_create(ret, "Could not set "
                                       "dict value");
    }
    return ret;
}

/**
 * Create a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param size the size of the heim_dict_t nodes to be created
 * @param leaf leaf node to be added, if any
 * @param error error (output)
 * @param ... NULL-terminated list of path component objects
 *
 * Create a path of heim_dict_t interior nodes in a given heim_object_t
 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
 * then the leaf is not deleted).
 *
 * @return 0 on success, else a system error
 *
 * @addtogroup heimbase
 */

int
heim_path_create(heim_object_t ptr, size_t size, heim_object_t leaf,
                 heim_error_t *error, ...)
{
    va_list ap;
    int ret;

    va_start(ap, error);
    ret = heim_path_vcreate(ptr, size, leaf, error, ap);
    va_end(ap);
    return ret;
}
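
/*
 * Creation sketch (illustrative; heim_string_create() and
 * heim_number_create() are assumed from the public heimbase API):
 *
 *     heim_error_t err = NULL;
 *     heim_string_t k1 = heim_string_create("config");
 *     heim_string_t k2 = heim_string_create("port");
 *     heim_number_t port = heim_number_create(88);
 *
 *     int ret = heim_path_create(tree, 11, port, &err, k1, k2, NULL);
 *     // tree now contains { "config": { "port": 88 } }; a missing
 *     // interior "config" dict is created with size 11
 *
 *     heim_release(port);
 *     heim_release(k2);
 *     heim_release(k1);
 */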

/**
 * Delete leaf node named by a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ap NULL-terminated list of path component objects
 *
 * @addtogroup heimbase
 */

void
heim_path_vdelete(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t parent, key, child;

    child = heim_path_vget2(ptr, &parent, &key, error, ap);
    if (child != NULL) {
        if (heim_get_tid(parent) == HEIM_TID_DICT)
            heim_dict_delete_key(parent, key);
        else if (heim_get_tid(parent) == HEIM_TID_DB)
            heim_db_delete_key(parent, NULL, key, error);
        else if (heim_get_tid(parent) == HEIM_TID_ARRAY)
            heim_array_delete_value(parent, heim_number_get_int(key));
        heim_release(child);
    }
}

/**
 * Delete leaf node named by a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ... NULL-terminated list of path component objects
 *
 * @addtogroup heimbase
 */

void
heim_path_delete(heim_object_t ptr, heim_error_t *error, ...)
{
    va_list ap;

    va_start(ap, error);
    heim_path_vdelete(ptr, error, ap);
    va_end(ap);
    return;
}