/*
 * Copyright (c) 2010 Kungliga Tekniska Högskolan
 * (Royal Institute of Technology, Stockholm, Sweden).
 * All rights reserved.
 *
 * Portions Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "baselocl.h"
#include <syslog.h>

#include "heimbase-atomics.h"
static heim_base_atomic(uint32_t) tidglobal = HEIM_TID_USER;

struct heim_base {
    heim_const_type_t isa;
    heim_base_atomic(uint32_t) ref_cnt;
    HEIM_TAILQ_ENTRY(heim_base) autorel;
    heim_auto_release_t autorelpool;
    uintptr_t isaextra[3];
};
/* specialized version of base */
struct heim_base_mem {
    heim_const_type_t isa;
    heim_base_atomic(uint32_t) ref_cnt;
    HEIM_TAILQ_ENTRY(heim_base) autorel;
    heim_auto_release_t autorelpool;
    const char *name;
    void (HEIM_CALLCONV *dealloc)(void *);
    uintptr_t isaextra[1];
};
#define PTR2BASE(ptr) (((struct heim_base *)ptr) - 1)
#define BASE2PTR(ptr) ((void *)(((struct heim_base *)ptr) + 1))
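/*
 * Every non-tagged object is allocated with a hidden struct heim_base
 * header placed immediately before the pointer handed back to callers:
 * PTR2BASE() steps back from the user pointer to the header, and
 * BASE2PTR() steps forward from the header to the user pointer.
 */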
HEIMDAL_MUTEX * HEIM_CALLCONV
heim_base_mutex(void)
{
    static HEIMDAL_MUTEX _heim_base_mutex = HEIMDAL_MUTEX_INITIALIZER;
    return &_heim_base_mutex;
}
/*
 * Auto release structure
 */

struct heim_auto_release {
    HEIM_TAILQ_HEAD(, heim_base) pool;
    HEIMDAL_MUTEX pool_mutex;
    struct heim_auto_release *parent;
};
/**
 * Retain object (i.e., take a reference)
 *
 * @param object to be retained, NULL is ok
 *
 * @return the same object as passed in
 */
void * HEIM_CALLCONV
heim_retain(heim_object_t ptr)
{
    struct heim_base *p;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return ptr;

    p = PTR2BASE(ptr);

    if (heim_base_atomic_load(&p->ref_cnt) == UINT32_MAX)
        return ptr;

    if ((heim_base_atomic_inc_32(&p->ref_cnt) - 1) == 0)
        heim_abort("resurrection");
    return ptr;
}
/**
 * Release object, free if reference count reaches zero
 *
 * @param object to be released
 */
void HEIM_CALLCONV
heim_release(void *ptr)
{
    heim_base_atomic(uint32_t) old;
    struct heim_base *p;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return;

    p = PTR2BASE(ptr);

    if (heim_base_atomic_load(&p->ref_cnt) == UINT32_MAX)
        return;

    old = heim_base_atomic_dec_32(&p->ref_cnt) + 1;

    if (old > 1)
        return;

    if (old == 1) {
        heim_auto_release_t ar = p->autorelpool;
        /* remove from autorel pool list */
        if (ar) {
            p->autorelpool = NULL;
            HEIMDAL_MUTEX_lock(&ar->pool_mutex);
            HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
            HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
        }
        if (p->isa->dealloc)
            p->isa->dealloc(ptr);
        free(p);
    } else
        heim_abort("over release");
}
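/*
 * Illustrative sketch (caller side, not part of this file): a reference
 * obtained from a creation function is dropped with heim_release(); an
 * extra reference taken with heim_retain() needs its own heim_release().
 * heim_string_create() is used here only as a convenient example
 * constructor.
 *
 *   heim_string_t s = heim_string_create("example");
 *   heim_object_t extra = heim_retain(s);  // same pointer, refcount now 2
 *   heim_release(extra);                   // refcount back to 1
 *   heim_release(s);                       // refcount 0, string is freed
 */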
/*
 * If used, the result must be wrapped in an autorelease pool.
 */
heim_string_t
heim_description(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);

    if (p->isa->desc == NULL)
        return heim_auto_release(heim_string_ref_create(p->isa->name, NULL));
    return heim_auto_release(p->isa->desc(ptr));
}
void
_heim_make_permanent(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);

    heim_base_atomic_store(&p->ref_cnt, UINT32_MAX);
}
static heim_type_t tagged_isa[9] = {
    &_heim_number_object,
    &_heim_null_object,
    &_heim_bool_object,
    NULL, NULL, NULL,
    NULL, NULL, NULL
};

heim_const_type_t
_heim_get_isa(heim_object_t ptr)
{
    struct heim_base *p;

    if (heim_base_is_tagged(ptr)) {
        if (heim_base_is_tagged_object(ptr))
            return tagged_isa[heim_base_tagged_object_tid(ptr)];
        heim_abort("not a supported tagged type");
    }
    p = PTR2BASE(ptr);
    return p->isa;
}
/**
 * Get type ID of object
 *
 * @param object object to get type id of
 *
 * @return type id of object
 */
heim_tid_t
heim_get_tid(heim_object_t ptr)
{
    heim_const_type_t isa = _heim_get_isa(ptr);

    return isa->tid;
}
/**
 * Get hash value of object
 *
 * @param object object to get hash value for
 *
 * @return a hash value
 */
uintptr_t
heim_get_hash(heim_object_t ptr)
{
    heim_const_type_t isa = _heim_get_isa(ptr);

    if (isa->hash)
        return isa->hash(ptr);
    return (uintptr_t)ptr;
}
/**
 * Compare two objects, returns 0 if equal; can be used with qsort()
 * and friends.
 *
 * @param a first object to compare
 * @param b second object to compare
 *
 * @return 0 if objects are equal
 */
int
heim_cmp(heim_object_t a, heim_object_t b)
{
    heim_tid_t ta, tb;
    heim_const_type_t isa;

    ta = heim_get_tid(a);
    tb = heim_get_tid(b);

    if (ta != tb)
        return ta - tb;

    isa = _heim_get_isa(a);

    if (isa->cmp)
        return isa->cmp(a, b);

    return (uintptr_t)a - (uintptr_t)b;
}
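/*
 * Illustrative sketch (caller side, not part of this file): heim_cmp()
 * can back a qsort() comparator over an array of heim_object_t; the
 * wrapper name cmp_objects and the objs/nobjs variables are hypothetical.
 *
 *   static int
 *   cmp_objects(const void *ap, const void *bp)
 *   {
 *       heim_object_t a = *(const heim_object_t *)ap;
 *       heim_object_t b = *(const heim_object_t *)bp;
 *       return heim_cmp(a, b);
 *   }
 *
 *   qsort(objs, nobjs, sizeof(objs[0]), cmp_objects);
 */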
/*
 * Private - allocates a memory object
 */

static void HEIM_CALLCONV
memory_dealloc(void *ptr)
{
    struct heim_base_mem *p = (struct heim_base_mem *)PTR2BASE(ptr);

    if (p->dealloc)
        p->dealloc(ptr);
}

static const struct heim_type_data memory_object = {
    HEIM_TID_MEMORY,
    "memory-object",
    NULL,
    memory_dealloc
    /* remaining members default to NULL */
};
/**
 * Allocate memory for an object of anonymous type
 *
 * @param size size of object to be allocated
 * @param name name of ad-hoc type
 * @param dealloc destructor function
 *
 * Objects allocated with this interface do not serialize.
 *
 * @return allocated object
 */
heim_object_t
heim_alloc(size_t size, const char *name, heim_type_dealloc dealloc)
{
    /* XXX use posix_memalign */

    struct heim_base_mem *p = calloc(1, size + sizeof(*p));
    if (p == NULL)
        return NULL;
    p->isa = &memory_object;
    p->ref_cnt = 1;
    p->name = name;
    p->dealloc = dealloc;
    return BASE2PTR(p);
}
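/*
 * Illustrative sketch (caller side, not part of this file): heim_alloc()
 * attaches a destructor to an ad-hoc object, and the destructor runs when
 * the last reference is released. The struct my_ctx type and the
 * my_ctx_dealloc name are hypothetical.
 *
 *   struct my_ctx {
 *       int fd;
 *   };
 *
 *   static void HEIM_CALLCONV
 *   my_ctx_dealloc(void *ptr)
 *   {
 *       struct my_ctx *ctx = ptr;
 *       if (ctx->fd != -1)
 *           close(ctx->fd);
 *   }
 *
 *   struct my_ctx *ctx = heim_alloc(sizeof(*ctx), "my-ctx", my_ctx_dealloc);
 *   // ... use ctx ...
 *   heim_release(ctx);   // my_ctx_dealloc() runs here
 */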
heim_type_t
_heim_create_type(const char *name,
                  heim_type_init init,
                  heim_type_dealloc dealloc,
                  heim_type_copy copy,
                  heim_type_cmp cmp,
                  heim_type_hash hash,
                  heim_type_description desc)
{
    heim_type_t type;

    type = calloc(1, sizeof(*type));
    if (type == NULL)
        return NULL;

    type->tid = heim_base_atomic_inc_32(&tidglobal);
    type->name = name;
    type->init = init;
    type->dealloc = dealloc;
    type->copy = copy;
    type->cmp = cmp;
    type->hash = hash;
    type->desc = desc;

    return type;
}
heim_object_t
_heim_alloc_object(heim_const_type_t type, size_t size)
{
    /* XXX should use posix_memalign */
    struct heim_base *p = calloc(1, size + sizeof(*p));
    if (p == NULL)
        return NULL;
    p->isa = type;
    p->ref_cnt = 1;

    return BASE2PTR(p);
}
void *
_heim_get_isaextra(heim_object_t ptr, size_t idx)
{
    struct heim_base *p;

    heim_assert(ptr != NULL, "internal error");
    p = (struct heim_base *)PTR2BASE(ptr);
    if (p->isa == &memory_object)
        return NULL;
    heim_assert(idx < 3, "invalid private heim_base extra data index");
    return &p->isaextra[idx];
}

heim_tid_t
_heim_type_get_tid(heim_type_t type)
{
    return type->tid;
}
#if !defined(WIN32) && !defined(HAVE_DISPATCH_DISPATCH_H) && defined(ENABLE_PTHREAD_SUPPORT)
static pthread_once_t once_arg_key_once = PTHREAD_ONCE_INIT;
static pthread_key_t once_arg_key;

static void
once_arg_key_once_init(void)
{
    errno = pthread_key_create(&once_arg_key, NULL);
    if (errno != 0) {
        fprintf(stderr,
                "Error: pthread_key_create() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
}

struct once_callback {
    void (*fn)(void *);
    void *data;
};

static void
once_callback_caller(void)
{
    struct once_callback *once_callback = pthread_getspecific(once_arg_key);

    if (once_callback == NULL) {
        fprintf(stderr, "Error: pthread_once() calls callback on "
                "different thread?! Cannot continue.\n");
        abort();
    }
    once_callback->fn(once_callback->data);
}
#endif
/**
 * Call func once and only once
 *
 * @param once pointer to a heim_base_once_t
 * @param ctx context passed to func
 * @param func function to be called
 */
void
heim_base_once_f(heim_base_once_t *once, void *ctx, void (*func)(void *))
{
#if defined(WIN32)
    /*
     * With a libroken wrapper for some CAS function and a libroken yield()
     * wrapper we could make this the default implementation when we have
     * neither Grand Central nor POSIX threads.
     *
     * We could also adapt the double-checked lock pattern with CAS
     * providing the necessary memory barriers in the absence of
     * portable explicit memory barrier APIs.
     */

    /*
     * We use CAS operations in large part to provide implied memory
     * barriers.
     *
     * State 0 means that func() has never executed.
     * State 1 means that func() is executing.
     * State 2 means that func() has completed execution.
     */
    if (InterlockedCompareExchange(once, 1L, 0L) == 0L) {
        /* State is now 1 */
        (*func)(ctx);
        (void)InterlockedExchange(once, 2L);
        /* State is now 2 */
    } else {
        /*
         * The InterlockedCompareExchange is being used to fetch
         * the current state under a full memory barrier. As long
         * as the current state is 1 continue to spin.
         */
        while (InterlockedCompareExchange(once, 2L, 0L) == 1L)
            SwitchToThread();
    }
#elif defined(HAVE_DISPATCH_DISPATCH_H)
    dispatch_once_f(once, ctx, func);
#elif defined(ENABLE_PTHREAD_SUPPORT)
    struct once_callback once_callback;

    once_callback.fn = func;
    once_callback.data = ctx;

    errno = pthread_once(&once_arg_key_once, once_arg_key_once_init);
    if (errno != 0) {
        fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
    errno = pthread_setspecific(once_arg_key, &once_callback);
    if (errno != 0) {
        fprintf(stderr,
                "Error: pthread_setspecific() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
    errno = pthread_once(once, once_callback_caller);
    if (errno != 0) {
        fprintf(stderr, "Error: pthread_once() failed, cannot continue: %s\n",
                strerror(errno));
        abort();
    }
#else
    static HEIMDAL_MUTEX mutex = HEIMDAL_MUTEX_INITIALIZER;

    HEIMDAL_MUTEX_lock(&mutex);
    if (*once == 0) {
        *once = 1;
        HEIMDAL_MUTEX_unlock(&mutex);
        func(ctx);
        HEIMDAL_MUTEX_lock(&mutex);
        *once = 2;
        HEIMDAL_MUTEX_unlock(&mutex);
    } else if (*once == 2) {
        HEIMDAL_MUTEX_unlock(&mutex);
    } else {
        HEIMDAL_MUTEX_unlock(&mutex);
        while (1) {
            struct timeval tv = { 0, 1000 };

            select(0, NULL, NULL, NULL, &tv);
            HEIMDAL_MUTEX_lock(&mutex);
            if (*once == 2)
                break;
            HEIMDAL_MUTEX_unlock(&mutex);
        }
        HEIMDAL_MUTEX_unlock(&mutex);
    }
#endif
}
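/*
 * Illustrative sketch (caller side, not part of this file): a typical
 * one-time initializer guarded by heim_base_once_f(). The module_once,
 * module_state, module_init and module_use names are hypothetical.
 *
 *   static heim_base_once_t module_once = HEIM_BASE_ONCE_INIT;
 *   static heim_dict_t module_state;
 *
 *   static void
 *   module_init(void *ctx)
 *   {
 *       module_state = heim_dict_create(11);
 *   }
 *
 *   void
 *   module_use(void)
 *   {
 *       heim_base_once_f(&module_once, NULL, module_init);
 *       // module_state is initialized exactly once, by whichever thread
 *       // gets here first; later callers simply fall through.
 *   }
 */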
/**
 * Abort and log the failure (using syslog)
 */
void
heim_abort(const char *fmt, ...)
    HEIMDAL_NORETURN_ATTRIBUTE
    HEIMDAL_PRINTF_ATTRIBUTE((__printf__, 1, 2))
{
    va_list ap;

    va_start(ap, fmt);
    heim_abortv(fmt, ap);
    va_end(ap);
}
/**
 * Abort and log the failure (using syslog)
 */
void
heim_abortv(const char *fmt, va_list ap)
    HEIMDAL_NORETURN_ATTRIBUTE
    HEIMDAL_PRINTF_ATTRIBUTE((__printf__, 1, 0))
{
    static char str[1024];

    vsnprintf(str, sizeof(str), fmt, ap);
    syslog(LOG_ERR, "heim_abort: %s", str);
    abort();
}
static int ar_created = 0;
static HEIMDAL_thread_key ar_key;

struct ar_tls {
    struct heim_auto_release *head;
    struct heim_auto_release *current;
    HEIMDAL_MUTEX tls_mutex;
};
static void
ar_tls_delete(void *ptr)
{
    struct ar_tls *tls = ptr;
    heim_auto_release_t next = NULL;

    if (tls == NULL)
        return;
    for (; tls->current != NULL; tls->current = next) {
        next = tls->current->parent;
        heim_release(tls->current);
    }
    free(tls);
}
static void
init_ar_tls(void *ptr)
{
    int ret = 0;

    HEIMDAL_key_create(&ar_key, ar_tls_delete, ret);
    if (ret == 0)
        ar_created = 1;
}
static struct ar_tls *
autorel_tls(void)
{
    static heim_base_once_t once = HEIM_BASE_ONCE_INIT;
    struct ar_tls *arp;
    int ret = 0;

    heim_base_once_f(&once, NULL, init_ar_tls);
    if (!ar_created)
        return NULL;

    arp = HEIMDAL_getspecific(ar_key);
    if (arp == NULL) {

        arp = calloc(1, sizeof(*arp));
        if (arp == NULL)
            return NULL;
        HEIMDAL_setspecific(ar_key, arp, ret);
        if (ret) {
            free(arp);
            return NULL;
        }
    }
    return arp;
}
static void HEIM_CALLCONV
autorel_dealloc(void *ptr)
{
    heim_auto_release_t ar = ptr;
    struct ar_tls *tls;

    tls = autorel_tls();
    if (tls == NULL)
        heim_abort("autorelease pool released on thread w/o autorelease inited");

    heim_auto_release_drain(ar);

    if (!HEIM_TAILQ_EMPTY(&ar->pool))
        heim_abort("pool not empty after draining");

    HEIMDAL_MUTEX_lock(&tls->tls_mutex);
    if (tls->current != ptr)
        heim_abort("autorelease not releasing top pool");
    tls->current = ar->parent;
    HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
}
static int
autorel_cmp(void *a, void *b)
{
    return (a == b);
}

static uintptr_t
autorel_hash(void *ptr)
{
    return (uintptr_t)ptr;
}

static struct heim_type_data _heim_autorel_object = {
    HEIM_TID_AUTORELEASE,
    "autorelease-pool",
    NULL,
    autorel_dealloc,
    NULL,
    autorel_cmp,
    autorel_hash,
    NULL
};
/**
 * Create thread-specific object auto-release pool
 *
 * Objects placed on the per-thread auto-release pool (with
 * heim_auto_release()) can be released in one fell swoop by calling
 * heim_auto_release_drain().
 */
heim_auto_release_t
heim_auto_release_create(void)
{
    struct ar_tls *tls = autorel_tls();
    heim_auto_release_t ar;

    if (tls == NULL)
        heim_abort("Failed to create/get autorelease head");

    ar = _heim_alloc_object(&_heim_autorel_object, sizeof(struct heim_auto_release));
    if (ar) {
        HEIMDAL_MUTEX_lock(&tls->tls_mutex);
        if (tls->head == NULL)
            tls->head = ar;
        ar->parent = tls->current;
        tls->current = ar;
        HEIMDAL_MUTEX_unlock(&tls->tls_mutex);
    }

    return ar;
}
/**
 * Place the current object on the thread's auto-release pool
 *
 * @param ptr object
 */
heim_object_t
heim_auto_release(heim_object_t ptr)
{
    struct heim_base *p = PTR2BASE(ptr);
    struct ar_tls *tls = autorel_tls();
    heim_auto_release_t ar;

    if (ptr == NULL || heim_base_is_tagged(ptr))
        return ptr;

    /* drop from old pool */
    if ((ar = p->autorelpool) != NULL) {
        HEIMDAL_MUTEX_lock(&ar->pool_mutex);
        HEIM_TAILQ_REMOVE(&ar->pool, p, autorel);
        p->autorelpool = NULL;
        HEIMDAL_MUTEX_unlock(&ar->pool_mutex);
    }

    if (tls == NULL || (ar = tls->current) == NULL)
        heim_abort("no auto release pool in place, would leak");

    HEIMDAL_MUTEX_lock(&ar->pool_mutex);
    HEIM_TAILQ_INSERT_HEAD(&ar->pool, p, autorel);
    p->autorelpool = ar;
    HEIMDAL_MUTEX_unlock(&ar->pool_mutex);

    return ptr;
}
/**
 * Release all objects on the given auto-release pool
 */
void
heim_auto_release_drain(heim_auto_release_t autorel)
{
    struct heim_base *obj;

    /* release all elements on the tail queue */

    HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    while (!HEIM_TAILQ_EMPTY(&autorel->pool)) {
        obj = HEIM_TAILQ_FIRST(&autorel->pool);
        HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
        heim_release(BASE2PTR(obj));
        HEIMDAL_MUTEX_lock(&autorel->pool_mutex);
    }
    HEIMDAL_MUTEX_unlock(&autorel->pool_mutex);
}
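/*
 * Illustrative sketch (caller side, not part of this file): the usual
 * pattern is to create a pool, let heim_auto_release() park temporary
 * objects on it, and then release the pool itself, which drains it.
 *
 *   heim_auto_release_t pool = heim_auto_release_create();
 *
 *   heim_string_t tmp = heim_auto_release(heim_string_create("scratch"));
 *   // ... use tmp; no explicit heim_release(tmp) is needed ...
 *
 *   heim_release(pool);   // drains the pool, releasing tmp
 */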
/*
 * Helper for heim_path_vget() and heim_path_delete(). On success
 * outputs the node named by the path and the parent node and key
 * (useful for heim_path_delete()).
 */
static heim_object_t
heim_path_vget2(heim_object_t ptr, heim_object_t *parent, heim_object_t *key,
                heim_error_t *error, va_list ap)
{
    heim_object_t path_element;
    heim_object_t node, next_node;
    heim_tid_t node_type;

    *parent = NULL;
    *key = NULL;
    if (ptr == NULL)
        return NULL;

    for (node = ptr; node != NULL; ) {
        path_element = va_arg(ap, heim_object_t);
        if (path_element == NULL) {
            /* Path exhausted; node is the one named by the path */
            return node;
        }
        node_type = heim_get_tid(node);
        switch (node_type) {
        case HEIM_TID_ARRAY:
        case HEIM_TID_DICT:
        case HEIM_TID_DB:
            break;
        default:
            if (node == ptr)
                heim_abort("heim_path_get() only operates on container types");
            return NULL;
        }

        if (node_type == HEIM_TID_DICT) {
            next_node = heim_dict_get_value(node, path_element);
        } else if (node_type == HEIM_TID_DB) {
            next_node = _heim_db_get_value(node, NULL, path_element, NULL);
        } else {
            /* node_type == HEIM_TID_ARRAY */
            int idx = -1;

            if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
                idx = heim_number_get_int(path_element);
            if (idx < 0) {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "heim_path_get() path elements "
                                               "for array nodes must be "
                                               "numeric and positive");
                return NULL;
            }
            next_node = heim_array_get_value(node, idx);
        }

        *parent = node;
        *key = path_element;
        node = next_node;
    }
    return NULL;
}
/**
 * Get a node in a heim_object tree by path
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of heim_object_ts that form a path
 *
 * @return object (not retained) if found
 *
 * @addtogroup heimbase
 */
heim_object_t
heim_path_vget(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t p, k;

    return heim_path_vget2(ptr, &p, &k, error, ap);
}
/**
 * Get a node in a tree by path, with retained reference
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of heim_object_ts that form a path
 *
 * @return retained object if found
 *
 * @addtogroup heimbase
 */
heim_object_t
heim_path_vcopy(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t p, k;

    return heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));
}
/**
 * Get a node in a tree by path
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ... NULL-terminated list of heim_object_ts that form a path
 *
 * @return object (not retained) if found
 *
 * @addtogroup heimbase
 */
heim_object_t
heim_path_get(heim_object_t ptr, heim_error_t *error, ...)
{
    heim_object_t o;
    heim_object_t p, k;
    va_list ap;

    if (ptr == NULL)
        return NULL;

    va_start(ap, error);
    o = heim_path_vget2(ptr, &p, &k, error, ap);
    va_end(ap);
    return o;
}
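/*
 * Illustrative sketch (caller side, not part of this file): looking up a
 * nested value with a NULL-terminated path of keys. The top dictionary
 * and the "outer"/"inner" keys are hypothetical.
 *
 *   heim_dict_t top;       // assumed to hold { "outer": { "inner": v } }
 *   heim_string_t outer = heim_string_create("outer");
 *   heim_string_t inner = heim_string_create("inner");
 *
 *   heim_object_t v = heim_path_get(top, NULL, outer, inner, NULL);
 *   // v is borrowed (not retained); use heim_path_copy() when the caller
 *   // needs to own a reference.
 *
 *   heim_release(inner);
 *   heim_release(outer);
 */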
/**
 * Get a node in a tree by path, with retained reference
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ... NULL-terminated list of heim_object_ts that form a path
 *
 * @return retained object if found
 *
 * @addtogroup heimbase
 */
heim_object_t
heim_path_copy(heim_object_t ptr, heim_error_t *error, ...)
{
    heim_object_t o;
    heim_object_t p, k;
    va_list ap;

    if (ptr == NULL)
        return NULL;

    va_start(ap, error);
    o = heim_retain(heim_path_vget2(ptr, &p, &k, error, ap));
    va_end(ap);
    return o;
}
/**
 * Create a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param size the size of the heim_dict_t nodes to be created
 * @param leaf leaf node to be added, if any
 * @param error error (output)
 * @param ap NULL-terminated va_list of path component objects
 *
 * Create a path of heim_dict_t interior nodes in a given heim_object_t
 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
 * then the leaf is not deleted).
 *
 * @return 0 on success, else a system error
 *
 * @addtogroup heimbase
 */
int
heim_path_vcreate(heim_object_t ptr, size_t size, heim_object_t leaf,
                  heim_error_t *error, va_list ap)
{
    heim_object_t path_element = va_arg(ap, heim_object_t);
    heim_object_t next_path_element = NULL;
    heim_object_t node = ptr;
    heim_object_t next_node = NULL;
    heim_tid_t node_type;
    int ret = 0;

    if (ptr == NULL)
        heim_abort("heim_path_vcreate() does not create root nodes");

    while (path_element != NULL) {
        int idx = -1;

        next_path_element = va_arg(ap, heim_object_t);
        node_type = heim_get_tid(node);

        if (node_type == HEIM_TID_DICT) {
            next_node = heim_dict_get_value(node, path_element);
        } else if (node_type == HEIM_TID_ARRAY) {
            if (heim_get_tid(path_element) == HEIM_TID_NUMBER)
                idx = heim_number_get_int(path_element);
            if (idx < 0) {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "heim_path() path elements for "
                                               "array nodes must be numeric "
                                               "and positive");
                return EINVAL;
            }
            if (idx < heim_array_get_length(node)) {
                next_node = heim_array_get_value(node, idx);
            } else if (idx == heim_array_get_length(node)) {
                next_node = NULL;
            } else {
                if (error)
                    *error = heim_error_create(EINVAL,
                                               "Index for array in path is too large");
                return EINVAL;
            }
        } else if (node_type == HEIM_TID_DB && next_path_element != NULL) {
            if (error)
                *error = heim_error_create(EINVAL, "Interior node is a DB");
            return EINVAL;
        }

        if (next_path_element == NULL)
            break;

        /* Create missing interior node */
        if (next_node == NULL) {
            heim_dict_t new_node;

            new_node = heim_dict_create(size); /* no arrays or DBs, just dicts */
            if (new_node == NULL) {
                ret = ENOMEM;
                goto err;
            }
            if (node_type == HEIM_TID_DICT) {
                ret = heim_dict_set_value(node, path_element, new_node);
                next_node = heim_dict_get_value(node, path_element);
            } else if (node_type == HEIM_TID_ARRAY &&
                       heim_number_get_int(path_element) <= heim_array_get_length(node)) {
                ret = heim_array_insert_value(node,
                                              heim_number_get_int(path_element),
                                              new_node);
                next_node = heim_array_get_value(node, idx);
            } else {
                ret = EINVAL;
                if (error && !*error)
                    *error = heim_error_create(ret, "Node in path not a "
                                               "container");
            }
            heim_release(new_node);
            if (ret)
                goto err;
        }

        path_element = next_path_element;
        node = next_node;
        next_node = NULL;
    }

    if (path_element == NULL)
        goto err;

    /* Add the leaf */
    if (leaf != NULL) {
        if (node_type == HEIM_TID_DICT)
            ret = heim_dict_set_value(node, path_element, leaf);
        else
            ret = heim_array_insert_value(node,
                                          heim_number_get_int(path_element),
                                          leaf);
    }
    return ret;

err:
    if (error && !*error) {
        if (ret == ENOMEM)
            *error = heim_error_create_enomem();
        else
            *error = heim_error_create(ret, "Could not set "
                                       "value");
    }
    return ret;
}
/**
 * Create a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param size the size of the heim_dict_t nodes to be created
 * @param leaf leaf node to be added, if any
 * @param error error (output)
 * @param ... NULL-terminated list of path component objects
 *
 * Create a path of heim_dict_t interior nodes in a given heim_object_t
 * tree, as necessary, and set/replace a leaf, if given (if leaf is NULL
 * then the leaf is not deleted).
 *
 * @return 0 on success, else a system error
 *
 * @addtogroup heimbase
 */
int
heim_path_create(heim_object_t ptr, size_t size, heim_object_t leaf,
                 heim_error_t *error, ...)
{
    va_list ap;
    int ret;

    va_start(ap, error);
    ret = heim_path_vcreate(ptr, size, leaf, error, ap);
    va_end(ap);
    return ret;
}
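/*
 * Illustrative sketch (caller side, not part of this file): creating the
 * interior dictionary and the leaf in one call. The key and value names
 * are hypothetical.
 *
 *   heim_dict_t top = heim_dict_create(11);
 *   heim_string_t k1 = heim_string_create("servers");
 *   heim_string_t k2 = heim_string_create("kdc");
 *   heim_number_t port = heim_number_create(88);
 *
 *   // Creates top["servers"] as a dict if it is missing, then sets
 *   // top["servers"]["kdc"] = port.
 *   int ret = heim_path_create(top, 11, port, NULL, k1, k2, NULL);
 */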
/**
 * Delete leaf node named by a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ap NULL-terminated va_list of path component objects
 *
 * @addtogroup heimbase
 */
void
heim_path_vdelete(heim_object_t ptr, heim_error_t *error, va_list ap)
{
    heim_object_t parent, key, child;

    child = heim_path_vget2(ptr, &parent, &key, error, ap);
    if (child != NULL) {
        if (heim_get_tid(parent) == HEIM_TID_DICT)
            heim_dict_delete_key(parent, key);
        else if (heim_get_tid(parent) == HEIM_TID_DB)
            heim_db_delete_key(parent, NULL, key, error);
        else if (heim_get_tid(parent) == HEIM_TID_ARRAY)
            heim_array_delete_value(parent, heim_number_get_int(key));
        heim_release(child);
    }
}
/**
 * Delete leaf node named by a path in a heim_object_t tree
 *
 * @param ptr the tree
 * @param error error (output)
 * @param ... NULL-terminated list of path component objects
 *
 * @addtogroup heimbase
 */
void
heim_path_delete(heim_object_t ptr, heim_error_t *error, ...)
{
    va_list ap;

    va_start(ap, error);
    heim_path_vdelete(ptr, error, ap);
    va_end(ap);
}
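/*
 * Illustrative sketch (caller side, not part of this file): removing the
 * leaf set in the heim_path_create() sketch above; interior nodes are
 * left in place.
 *
 *   heim_path_delete(top, NULL, k1, k2, NULL);
 */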