/*
  This file is part of the NoBug debugging library.

  Copyright (C) 2007, 2008, 2009, 2010,    Christian Thaeter <ct@pipapo.org>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, contact Christian Thaeter <ct@pipapo.org>.
*/
#define NOBUG_LIBNOBUG_C
//deadlock There are two kinds of nodes: `resource_record`s, which hold registered resources, and
//deadlock `resource_user`s, which attach to (enter) a resource.
//deadlock
//deadlock Each thread keeps a stack list of the `resource_user`s it created; a new enter pushes onto this
//deadlock stack, leaving a resource removes it again.
//deadlock
//deadlock All `resource_record`s in use are linked in a global precedence list; items of equal precedence are
//deadlock spanned by a skip pointer. Whenever a resource is entered, the deadlock checker asserts that the
//deadlock existing precedences are not violated. The precedence list thus gets continuously refined as the
//deadlock system learns about new lock patterns.
//deadlock
//deadlock As a consequence of this algorithm the deadlock checker finds not only real deadlocks but already
//deadlock potential deadlocks, namely violations of the locking order, which is a lot simpler than detecting
//deadlock actual deadlocks.
//deadlock
//deadlock This also means that the deadlock tracker currently only works with hierarchical locking policies;
//deadlock other approaches to deadlock prevention are not yet supported and will be added on demand.
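/*
  Illustrative sketch (not part of the library source): how the functions defined in this
  file are driven for a single resource.  Error checking, logging and the NoBug frontend
  macros are left out; 'my_mutex' and 'ctx' are made-up names, 'ctx' standing for a
  caller-provided struct nobug_context.

      struct nobug_resource_record* rec =
        nobug_resource_announce ("mutex", "my_mutex", &my_mutex, ctx);
      nobug_resource_announce_complete ();

      struct nobug_resource_user* user =
        nobug_resource_enter (rec, "locker", NOBUG_RESOURCE_WAITING, ctx);
      nobug_resource_state (user, NOBUG_RESOURCE_EXCLUSIVE);   // the lock was obtained

      nobug_resource_leave_pre ();
      nobug_resource_leave (user);

      nobug_resource_forget (rec);
*/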
/*
  How much memory to reserve for a mpool chunk: roughly 8k on 32 bit and 16k on 64 bit
  machines by default.
*/
#ifndef NOBUG_RESOURCE_MPOOL_CHUNKSIZE
#define NOBUG_RESOURCE_MPOOL_CHUNKSIZE (4096<<(sizeof(void*)/4))
#endif
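/*
  Worked out for the usual pointer sizes:
    sizeof(void*) == 4:  4096 << (4/4) == 8192  bytes per chunk
    sizeof(void*) == 8:  4096 << (8/4) == 16384 bytes per chunk
  The mpool initializations below divide this chunk size by the element size to get the
  number of elements per chunk.
*/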
pthread_mutex_t nobug_resource_mutex;


#define nobug_resourcestates            \
  resource_state(invalid),              \
  resource_state(waiting),              \
  resource_state(trying),               \
  resource_state(exclusive),            \
  resource_state(recursive),            \
  resource_state(shared),
#define resource_state(name) #name
const char* nobug_resource_states[] =
  {
    nobug_resourcestates
  };
#undef resource_state


const char* nobug_resource_error = NULL;
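/*
  For illustration: assuming enum nobug_resource_state in nobug.h lists its members in the
  same order as the nobug_resourcestates macro above, nobug_resource_states[NOBUG_RESOURCE_WAITING]
  yields the string "waiting" and nobug_resource_states[NOBUG_RESOURCE_SHARED] yields "shared".
*/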
static llist nobug_resource_registry;
static nobug_mpool nobug_resource_record_pool;
static nobug_mpool nobug_resource_user_pool;
static nobug_mpool nobug_resource_node_pool;


static void nobug_resource_record_dtor (void*);
static void nobug_resource_user_dtor (void*);
static void nobug_resource_node_dtor (void*);
void
nobug_resource_init (void)
{
  static pthread_mutexattr_t attr;
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init (&nobug_resource_mutex, &attr);

  llist_init (&nobug_resource_registry);

  nobug_mpool_init (&nobug_resource_record_pool,
                    sizeof(struct nobug_resource_record),
                    NOBUG_RESOURCE_MPOOL_CHUNKSIZE/sizeof(struct nobug_resource_record),
                    nobug_resource_record_dtor);

  nobug_mpool_init (&nobug_resource_user_pool,
                    sizeof(struct nobug_resource_user),
                    NOBUG_RESOURCE_MPOOL_CHUNKSIZE/sizeof(struct nobug_resource_user),
                    nobug_resource_user_dtor);

#if NOBUG_USE_PTHREAD
  nobug_mpool_init (&nobug_resource_node_pool,
                    sizeof(struct nobug_resource_node),
                    NOBUG_RESOURCE_MPOOL_CHUNKSIZE/sizeof(struct nobug_resource_node),
                    nobug_resource_node_dtor);
#endif
}
void
nobug_resource_destroy (void)
{
#if NOBUG_USE_PTHREAD
  nobug_mpool_destroy (&nobug_resource_node_pool);
#endif
  nobug_mpool_destroy (&nobug_resource_user_pool);
  nobug_mpool_destroy (&nobug_resource_record_pool);
}
unsigned
nobug_resource_record_available (void)
{
  return nobug_mpool_available (&nobug_resource_record_pool);
}


unsigned
nobug_resource_user_available (void)
{
  return nobug_mpool_available (&nobug_resource_user_pool);
}
#if NOBUG_USE_PTHREAD
unsigned
nobug_resource_node_available (void)
{
  return nobug_mpool_available (&nobug_resource_node_pool);
}


void
nobug_resource_node_free (struct nobug_resource_node* self)
{
  LLIST_WHILE_HEAD (&self->childs, c)
    nobug_resource_node_free (LLIST_TO_STRUCTP(c, struct nobug_resource_node, cldnode));

  llist_unlink_fast_ (&self->cldnode);
  llist_unlink_fast_ (&self->node);
  nobug_mpool_free (&nobug_resource_node_pool, self);
}


static void
nobug_resource_node_dtor (void* p)
{
  struct nobug_resource_node* n = p;
  llist_unlink_fast_ (&n->node);
  /* must unlink childs, because we don't destroy the tree bottom up */
  llist_unlink_fast_ (&n->childs);
  llist_unlink_fast_ (&n->cldnode);
}
#endif
static void
nobug_resource_record_dtor (void* p)
{
  struct nobug_resource_record* self = p;
  llist_unlink_fast_ (&self->hdr.node);

#if NOBUG_USE_PTHREAD
  /* destroy all nodes recursively */
  LLIST_WHILE_HEAD (&self->nodes, n)
    nobug_resource_node_free ((struct nobug_resource_node*)n);
#endif
}
static void
nobug_resource_user_dtor (void* p)
{
  struct nobug_resource_user* u = p;
  llist_unlink_fast_ (&u->hdr.node);
#if NOBUG_USE_PTHREAD
  llist_unlink_fast_ (&u->res_stack);
#endif
}
static int
compare_resource_records (const_LList av, const_LList bv, void* unused)
{
  const struct nobug_resource_record* a = (const struct nobug_resource_record*)av;
  const struct nobug_resource_record* b = (const struct nobug_resource_record*)bv;

  return a->object_id > b->object_id ? 1 : a->object_id < b->object_id ? -1 : 0;
}
struct nobug_resource_record*
nobug_resource_announce (const char* type, const char* name, const void* object_id, const struct nobug_context extra)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif

  struct nobug_resource_record* node = nobug_mpool_alloc (&nobug_resource_record_pool);
  if (!node)
    {
      nobug_resource_error = "internal allocation error";
      return NULL;
    }

  node->type = type;
  node->hdr.name = name;
  node->object_id = object_id;

  /* TODO better lookup method than list search (psplay?) */
  if (llist_ufind (&nobug_resource_registry, &node->hdr.node, compare_resource_records, NULL))
    {
      nobug_resource_error = "already registered";
      return NULL;
    }

  llist_init (&node->users);
  node->hdr.extra = extra;
#if NOBUG_USE_PTHREAD
  llist_init (&node->nodes);
#endif

  llist_insert_head (&nobug_resource_registry, llist_init (&node->hdr.node));
  return node;
}
void
nobug_resource_announce_complete (void)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
}
int
nobug_resource_forget (struct nobug_resource_record* self)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif

  if (!llist_find (&nobug_resource_registry, &self->hdr.node, compare_resource_records, NULL))
    {
      nobug_resource_error = "not registered";
      return 0;
    }

  if (!llist_is_empty (&self->users))
    {
      nobug_resource_error = "still in use";
      return 0;
    }

  nobug_resource_record_dtor (self);
  nobug_mpool_free (&nobug_resource_record_pool, self);

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
  return 1;
}
int
nobug_resource_reset (struct nobug_resource_record* self)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);

  if (!llist_find (&nobug_resource_registry, &self->hdr.node, compare_resource_records, NULL))
    {
      nobug_resource_error = "not registered";
      return 0;
    }

  if (!llist_is_empty (&self->users))
    {
      nobug_resource_error = "still in use";
      return 0;
    }

  /* destroy all nodes recursively */
  LLIST_WHILE_HEAD (&self->nodes, n)
    nobug_resource_node_free ((struct nobug_resource_node*)n);

  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
  return 1;
}
void
nobug_resource_resetall (void)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);

  LLIST_FOREACH (&nobug_resource_registry, r)
    {
      struct nobug_resource_record* resource = (struct nobug_resource_record*) r;
      LLIST_WHILE_HEAD (&resource->nodes, n)
        nobug_resource_node_free ((struct nobug_resource_node*)n);
    }

  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
}
#if NOBUG_USE_PTHREAD
int
nobug_resource_node_resource_cmpfn (const_LList a, const_LList b, void* extra)
{
  return ((struct nobug_resource_node*)a)->resource ==
         ((struct nobug_resource_node*)b)->resource ?0:-1;
}


struct nobug_resource_node*
nobug_resource_node_new (struct nobug_resource_record* resource,
                         struct nobug_resource_node* parent)
{
  struct nobug_resource_node* self = nobug_mpool_alloc (&nobug_resource_node_pool);
  if (!self)
    return NULL;

  llist_insert_head (&resource->nodes, llist_init (&self->node));
  self->resource = resource;
  self->parent = parent;

  llist_init (&self->childs);
  llist_init (&self->cldnode);

  if (parent)
    llist_insert_head (&parent->childs, &self->cldnode);

  return self;
}
#endif
//dlalgo HEAD- The Resource Tracking Algorithm; deadlock_detection; how resources are tracked
//dlalgo
//dlalgo Each resource registers a global 'resource_record'.
//dlalgo
//dlalgo Every new locking path discovered is stored as 'resource_node' structures which refer to the associated
//dlalgo 'resource_record'.
//dlalgo
//dlalgo Threads keep a trail of 'resource_user' structures for each resource entered. These 'resource_user'
//dlalgo structs refer to the 'resource_nodes' and thus indirectly to the associated 'resource_record'.
//dlalgo
//dlalgo The deadlock checker uses this information to test if the acquisition of a new resource would yield a
//dlalgo potential deadlock.
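/*
  Illustrative summary (field names taken from the code below): starting from a thread's
  topmost 'resource_user', the chain

      user->current             the 'resource_node' recording this locking path
      user->current->resource   the 'resource_record' of the entered resource
      user->current->parent     the node of the resource that was entered before it

  is what nobug_resource_enter() walks to validate a new acquisition.
*/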
struct nobug_resource_user*
nobug_resource_enter (struct nobug_resource_record* resource,
                      const char* identifier,
                      enum nobug_resource_state state,
                      const struct nobug_context extra)
{
  if (!resource)
    {
      nobug_resource_error = "no resource";
      return NULL;
    }

#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);

  struct nobug_tls_data* tls = nobug_thread_get ();
//dlalgo HEAD~ Entering Resources; nobug_resource_enter; deadlock check on enter
//dlalgo
//dlalgo In multithreaded programs, whenever a thread wants to wait for a 'resource_record',
//dlalgo the deadlock checker jumps in.
//dlalgo
//dlalgo The deadlock checking algorithm is anticipatory, as it will find and abort on conditions which may lead
//dlalgo to a potential deadlock by violating the locking order learned earlier.
//dlalgo
//dlalgo Each thread holds a stack (list) of the 'resource_user' structures it created. Leaving
//dlalgo a resource will remove it from this stack.
//dlalgo
//dlalgo Each 'resource_record' stores a trail of which other 'resource_record's were already entered. These
//dlalgo relations are implemented with the 'resource_node' helper structure.
//dlalgo
//dlalgo TODO: insert diagram here
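/*
  A minimal scenario of what this check catches (illustrative, resource names made up):

      thread 1: enters A exclusively, then waits for B   -> the order 'A before B' is learned
      thread 2: enters B exclusively, then waits for A   -> violates the learned order

  The second wait is refused with "possible deadlock detected", even if the two threads
  never actually block each other in this particular run.
*/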
//dlalgo First we find out whether there is already a node from the resource to be acquired back to
//dlalgo the topmost node of the current thread's user stack.
//dlalgo ---------------------------------------------------------------------
  struct nobug_resource_user* user = NULL;                              //dlalgo VERBATIM @
  struct nobug_resource_node* node = NULL;                              //dlalgo VERBATIM @

  if (!llist_is_empty (&tls->res_stack))                                //dlalgo VERBATIM @
    {                                                                   //dlalgo VERBATIM @
      user = LLIST_TO_STRUCTP (llist_tail (&tls->res_stack),            //dlalgo VERBATIM @
                               struct nobug_resource_user,              //dlalgo VERBATIM @
                               res_stack);                              //dlalgo VERBATIM @

      struct nobug_resource_node templ =                                //dlalgo VERBATIM @
        {                                                               //dlalgo VERBATIM @
          {NULL, NULL},                                                 //dlalgo ...
          user->current->resource,                                      //dlalgo VERBATIM @
        };                                                              //dlalgo VERBATIM @

      node = (struct nobug_resource_node*)                              //dlalgo VERBATIM @
        llist_ufind (&resource->nodes,                                  //dlalgo VERBATIM @
                     &templ.node,                                       //dlalgo VERBATIM @
                     nobug_resource_node_resource_cmpfn,                //dlalgo VERBATIM @
                     NULL);                                             //dlalgo VERBATIM @
    }                                                                   //dlalgo VERBATIM @
#endif
//dlalgo ---------------------------------------------------------------------
//dlalgo Deadlock checking is only done when the node is entered in `WAITING` state and it is only
//dlalgo available in multithreaded programs.
//dlalgo ---------------------------------------------------------------------
  if (state == NOBUG_RESOURCE_WAITING)                                  //dlalgo VERBATIM @
    {                                                                   //dlalgo VERBATIM @
#if NOBUG_USE_PTHREAD                                                   //dlalgo VERBATIM @
//dlalgo ---------------------------------------------------------------------
//dlalgo If the node was found above, then this locking path is already validated and no deadlock can happen;
//dlalgo else, if this stack already holds a resource ('user' is set) we have to go on with checking.
//dlalgo ---------------------------------------------------------------------
      if (!node && user)                                                //dlalgo VERBATIM @
        {                                                               //dlalgo VERBATIM @
//dlalgo ---------------------------------------------------------------------
//dlalgo If not, it is checked that the resource to be entered is not on any parent trail of the current
//dlalgo topmost resource; if it is, this could be a deadlock which needs to be investigated further.
//dlalgo ---------------------------------------------------------------------
          LLIST_FOREACH (&user->current->resource->nodes, n)            //dlalgo VERBATIM @
            {                                                           //dlalgo VERBATIM @
              for (struct nobug_resource_node* itr =                    //dlalgo VERBATIM @
                     ((struct nobug_resource_node*)n)->parent;          //dlalgo VERBATIM @
                   itr;                                                 //dlalgo VERBATIM @
                   itr = itr->parent)                                   //dlalgo VERBATIM @
                {                                                       //dlalgo VERBATIM @
                  if (itr->resource == resource)                        //dlalgo VERBATIM @
                    {                                                   //dlalgo VERBATIM @
//dlalgo ---------------------------------------------------------------------
//dlalgo If the resource was on the trail, we search whether there is a common ancestor before the resource
//dlalgo on the trail and the thread's current chain; if yes, this ancestor protects against deadlocks and
//dlalgo we can continue.
//dlalgo ---------------------------------------------------------------------
                      for (struct nobug_resource_node* itr2 = itr->parent;  //dlalgo VERBATIM @
                           itr2;                                        //dlalgo VERBATIM @
                           itr2 = itr2->parent)                         //dlalgo VERBATIM @
                        {                                               //dlalgo VERBATIM @
                          LLIST_FOREACH_REV (&tls->res_stack, p)        //dlalgo VERBATIM @
                            {                                           //dlalgo VERBATIM @
                              struct nobug_resource_user* user =        //dlalgo VERBATIM @
                                LLIST_TO_STRUCTP (p,                    //dlalgo VERBATIM @
                                                  struct nobug_resource_user,  //dlalgo VERBATIM @
                                                  res_stack);           //dlalgo VERBATIM @
                              if (user->current->resource == itr2->resource)  //dlalgo VERBATIM @
                                goto done;                              //dlalgo VERBATIM @
                            }                                           //dlalgo VERBATIM @
                        }
//dlalgo ---------------------------------------------------------------------
//dlalgo If no ancestor was found, we finally abort with a potential deadlock condition.
//dlalgo ---------------------------------------------------------------------
                      nobug_resource_error = "possible deadlock detected";  //dlalgo VERBATIM @
                      return NULL;                                      //dlalgo VERBATIM @
//dlalgo ---------------------------------------------------------------------
                    }
                }
            }
        done:;
        }
#endif
    }
  else if (state == NOBUG_RESOURCE_TRYING)
    {
      /* nothing to check */
    }
  else if (state == NOBUG_RESOURCE_EXCLUSIVE)
    {
      /* check that everyone is waiting */
      LLIST_FOREACH (&resource->users, n)
        if (((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_WAITING &&
            ((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_TRYING)
          {
            nobug_resource_error = "invalid enter state (resource already claimed)";
            break;
          }
    }
#if NOBUG_USE_PTHREAD
  else if (state == NOBUG_RESOURCE_RECURSIVE)
    {
      /* check that everyone *else* is waiting */
      LLIST_FOREACH (&resource->users, n)
        {
          struct nobug_resource_user* user = (struct nobug_resource_user*)n;
          if (user->state != NOBUG_RESOURCE_WAITING &&
              user->state != NOBUG_RESOURCE_TRYING &&
              user->thread != tls)
            {
              nobug_resource_error = "invalid enter state (resource already claimed non recursive by another thread)";
              break;
            }
          else if (!(user->state == NOBUG_RESOURCE_WAITING ||
                     user->state == NOBUG_RESOURCE_TRYING ||
                     user->state == NOBUG_RESOURCE_RECURSIVE) &&
                   user->thread == tls)
            {
              nobug_resource_error = "invalid enter state (resource already claimed non recursive by this thread)";
              break;
            }
        }
    }
#endif
  else if (state == NOBUG_RESOURCE_SHARED)
    {
      /* check that everyone else is waiting or holds it shared */
      LLIST_FOREACH (&resource->users, n)
        if (((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_WAITING &&
            ((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_TRYING &&
            ((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_SHARED)
          {
            nobug_resource_error = "invalid enter state (resource already claimed non shared)";
            break;
          }
    }
  else
    nobug_resource_error = "invalid enter state";

  if (nobug_resource_error)
    return NULL;
  struct nobug_resource_user* new_user = nobug_mpool_alloc (&nobug_resource_user_pool);
  if (!new_user)
    {
      nobug_resource_error = "internal allocation error";
      return NULL;
    }

  new_user->hdr.name = identifier;
  new_user->hdr.extra = extra;
  new_user->state = state;
  llist_insert_head (&resource->users, llist_init (&new_user->hdr.node));

#if NOBUG_USE_PTHREAD
  if (!node)
    {
      /* no node found, create a new one */
      node = nobug_resource_node_new (resource, user?user->current:NULL);
      if (!node)
        {
          nobug_resource_error = "internal allocation error";
          return NULL;
        }
    }

  new_user->current = node;
  new_user->thread = tls;
  llist_insert_tail (&tls->res_stack, llist_init (&new_user->res_stack));

  pthread_mutex_unlock (&nobug_resource_mutex);
#endif

  return new_user;
}
#if NOBUG_USE_PTHREAD
int
nobug_resource_node_parent_cmpfn (const_LList a, const_LList b, void* extra)
{
  return ((struct nobug_resource_node*)a)->parent ==
         ((struct nobug_resource_node*)b)->parent ?0:-1;
}
#endif
void
nobug_resource_leave_pre (void)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif
}
int
nobug_resource_leave (struct nobug_resource_user* user)
{
  if (!user)
    {
      nobug_resource_error = "no handle";
      return 0;
    }

  if (!(user->current?user->current->resource:NULL))
    {
      nobug_resource_error = "not entered";
      return 0;
    }

//dlalgo HEAD~ Leaving Resources; nobug_resource_leave; fix resource lists
//dlalgo
//dlalgo Store the tail and next elements aside, we need them later.
//dlalgo ---------------------------------------------------------------------
#if NOBUG_USE_PTHREAD                                                   //dlalgo VERBATIM @
  struct nobug_resource_user* tail =                                    //dlalgo VERBATIM @
    LLIST_TO_STRUCTP (llist_tail (&user->thread->res_stack),            //dlalgo VERBATIM @
                      struct nobug_resource_user,                       //dlalgo VERBATIM @
                      res_stack);                                       //dlalgo VERBATIM @

  struct nobug_resource_user* next =                                    //dlalgo VERBATIM @
    LLIST_TO_STRUCTP (llist_next (&user->res_stack),                    //dlalgo VERBATIM @
                      struct nobug_resource_user,                       //dlalgo VERBATIM @
                      res_stack);                                       //dlalgo VERBATIM @
//dlalgo ---------------------------------------------------------------------
//dlalgo
//dlalgo Remove the user struct from the thread stack.
//dlalgo The res_stack now looks like it is supposed to with the 'user' removed.
//dlalgo We now need to fix the node tree up to match this list.
//dlalgo ---------------------------------------------------------------------
  llist_unlink_fast_ (&user->res_stack);                                //dlalgo VERBATIM @
//dlalgo ---------------------------------------------------------------------
//dlalgo
//dlalgo When the user node was not the tail or only node of the thread stack, we have to check
//dlalgo (and possibly construct) a new node chain for it. No validation of this chain needs to be done,
//dlalgo since it was already validated when the resources were entered first.
//dlalgo ---------------------------------------------------------------------
  if (user != tail && !llist_is_empty (&user->thread->res_stack))       //dlalgo VERBATIM @
    {                                                                   //dlalgo VERBATIM @
      struct nobug_resource_user* parent = NULL;                        //dlalgo VERBATIM @
      if (llist_head (&user->thread->res_stack) != &next->res_stack)    //dlalgo VERBATIM @
        {                                                               //dlalgo VERBATIM @
          parent =                                                      //dlalgo VERBATIM @
            LLIST_TO_STRUCTP (llist_prev (&next->res_stack),            //dlalgo VERBATIM @
                              struct nobug_resource_user,               //dlalgo VERBATIM @
                              res_stack);                               //dlalgo VERBATIM @
        }                                                               //dlalgo VERBATIM @
//dlalgo ---------------------------------------------------------------------
//dlalgo
//dlalgo Iterate over all users following the removed node, finding nodes pointing to these users or
//dlalgo creating new nodes.
//dlalgo ---------------------------------------------------------------------
      LLIST_FORRANGE (&next->res_stack, &user->thread->res_stack, n)    //dlalgo VERBATIM @
        {                                                               //dlalgo VERBATIM @
          struct nobug_resource_user* cur =                             //dlalgo VERBATIM @
            LLIST_TO_STRUCTP (n,                                        //dlalgo VERBATIM @
                              struct nobug_resource_user,               //dlalgo VERBATIM @
                              res_stack);                               //dlalgo VERBATIM @

          struct nobug_resource_record* resource =                      //dlalgo VERBATIM @
            cur->current->resource;                                     //dlalgo VERBATIM @
//dlalgo ---------------------------------------------------------------------
          //TODO this search could be optimized out after we create a node once,
          //TODO all following nodes need to be created too
//dlalgo
//dlalgo Find the node pointing back to parent, create a new one if not found, rinse, repeat.
//dlalgo ---------------------------------------------------------------------
          struct nobug_resource_node templ =                            //dlalgo VERBATIM @
            {                                                           //dlalgo VERBATIM @
              {NULL, NULL},
              NULL,
              parent?parent->current:NULL,                              //dlalgo VERBATIM @
              {NULL, NULL},                                             //dlalgo ...
            };                                                          //dlalgo VERBATIM @

          struct nobug_resource_node* node =                            //dlalgo VERBATIM @
            (struct nobug_resource_node*)                               //dlalgo VERBATIM @
            llist_ufind (&resource->nodes,                              //dlalgo VERBATIM @
                         &templ.node,                                   //dlalgo VERBATIM @
                         nobug_resource_node_parent_cmpfn,              //dlalgo VERBATIM @
                         NULL);                                         //dlalgo VERBATIM @

          if (!node)                                                    //dlalgo VERBATIM @
            {                                                           //dlalgo VERBATIM @
              node = nobug_resource_node_new (resource,                 //dlalgo VERBATIM @
                                              parent?parent->current:NULL);  //dlalgo VERBATIM @
              if (!node)                                                //dlalgo VERBATIM @
                {                                                       //dlalgo VERBATIM @
                  nobug_resource_error = "internal allocation error";   //dlalgo VERBATIM @
                  return 0;                                             //dlalgo VERBATIM @
                }                                                       //dlalgo VERBATIM @
            }                                                           //dlalgo VERBATIM @

          cur->current = node;
          parent = cur;                                                 //dlalgo VERBATIM @
//dlalgo ---------------------------------------------------------------------
        }
    }
#endif

  llist_unlink_fast_ (&user->hdr.node);
  nobug_mpool_free (&nobug_resource_user_pool, user);

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
  return 1;
}
int
nobug_resource_state (struct nobug_resource_user* user,
                      enum nobug_resource_state state)
{
  if (!user)
    {
      nobug_resource_error = "no user handle";
      return 0;
    }

#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif

  if (user->state == NOBUG_RESOURCE_WAITING || user->state == NOBUG_RESOURCE_TRYING)
    {
      if (state == NOBUG_RESOURCE_EXCLUSIVE)
        {
          /* check that everyone is waiting */
          LLIST_FOREACH (&user->current->resource->users, n)
            if (((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_WAITING)
              {
                nobug_resource_error = "resource held by another thread";
                break;
              }
        }
#if NOBUG_USE_PTHREAD
      else if (state == NOBUG_RESOURCE_RECURSIVE)
        {
          /* check that everyone else is waiting */
          LLIST_FOREACH (&user->current->resource->users, n)
            if (((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_WAITING &&
                ((struct nobug_resource_user*)n)->thread != nobug_thread_get ())
              {
                nobug_resource_error = "resource held by another thread";
                break;
              }
        }
#endif
      else if (state == NOBUG_RESOURCE_SHARED)
        {
          /* check that everyone else is waiting or shared */
          LLIST_FOREACH (&user->current->resource->users, n)
            if (((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_WAITING
                && ((struct nobug_resource_user*)n)->state != NOBUG_RESOURCE_SHARED)
              {
                nobug_resource_error = "resource held by another thread nonshared";
                break;
              }
        }
      else
        nobug_resource_error = "invalid state transition";

      if (!nobug_resource_error)
        user->state = state;
    }
  else if (state == NOBUG_RESOURCE_WAITING || state == NOBUG_RESOURCE_TRYING)
    {
      nobug_resource_error = "invalid state transition";
    }

  if (nobug_resource_error)
    return 0;

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
  return 1;
}
enum nobug_resource_state
nobug_resource_mystate (struct nobug_resource_record* res)
{
  enum nobug_resource_state ret = NOBUG_RESOURCE_INVALID;
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
  struct nobug_tls_data* iam = nobug_thread_get ();
#endif

  LLIST_FOREACH_REV (&res->users, u)
    {
      struct nobug_resource_user* user = (struct nobug_resource_user*) u;
#if NOBUG_USE_PTHREAD
      if (user->thread == iam)
#endif
        {
          ret = user->state;
          break;
        }
    }

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
  return ret;
}
void
nobug_resource_dump_ (char** start, char* header, struct nobug_resource_record* resource, const struct nobug_resource_dump_context context)
{
#if NOBUG_USE_PTHREAD
  nobug_log_line (start, header, context.flag, context.level,
                  " %s:%d: %s:%s: held by %u entities:",
                  nobug_basename(resource->hdr.extra.file), resource->hdr.extra.line,
                  resource->type, resource->hdr.name,
                  llist_count (&resource->users));
#else
  nobug_log_line (start, header, context.flag, context.level,
                  " %s:%d: %s:%s: held by %u entities:",
                  nobug_basename(resource->hdr.extra.file), resource->hdr.extra.line,
                  resource->type, resource->hdr.name,
                  llist_count (&resource->users));
#endif

  LLIST_FOREACH (&resource->users, n)
    {
      struct nobug_resource_user* node = (struct nobug_resource_user*)n;
#if NOBUG_USE_PTHREAD
      nobug_log_line (start, header, context.flag, context.level,
                      NOBUG_TAB "%s:%d: %s %s: %s",
                      nobug_basename(node->hdr.extra.file), node->hdr.extra.line,
                      node->hdr.name, node->thread->thread_id,
                      nobug_resource_states[node->state]);
#else
      nobug_log_line (start, header, context.flag, context.level,
                      NOBUG_TAB "%s:%d: %s: %s",
                      nobug_basename(node->hdr.extra.file), node->hdr.extra.line,
                      node->hdr.name, nobug_resource_states[node->state]);
#endif
    }
}
void
nobug_resource_dump (struct nobug_resource_record* resource, const struct nobug_resource_dump_context context)
{
  if (!resource) return;

#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif

  char header[NOBUG_MAX_LOG_HEADER_SIZE];
  char* start = nobug_log_begin (header, context.flag, "RESOURCE_DUMP", context.ctx);

  nobug_resource_dump_ (&start, header, resource, context);

  nobug_log_end (context.flag, context.level);

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
}
void
nobug_resource_dump_all (const struct nobug_resource_dump_context context)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif

  char header[NOBUG_MAX_LOG_HEADER_SIZE];
  char* start = nobug_log_begin (header, context.flag, "RESOURCE_DUMP", context.ctx);

  LLIST_FOREACH (&nobug_resource_registry, n)
    {
      struct nobug_resource_record* node = (struct nobug_resource_record*)n;
      nobug_resource_dump_ (&start, header, node, context);
    }

  nobug_log_end (context.flag, context.level);

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
}
void
nobug_resource_list (const struct nobug_resource_dump_context context)
{
#if NOBUG_USE_PTHREAD
  pthread_mutex_lock (&nobug_resource_mutex);
#endif

  char header[NOBUG_MAX_LOG_HEADER_SIZE];
  char* start = nobug_log_begin (header, context.flag, "RESOURCE_LIST", context.ctx);

  if (!llist_is_empty (&nobug_resource_registry))
    {
      LLIST_FOREACH (&nobug_resource_registry, n)
        {
          struct nobug_resource_record* node = (struct nobug_resource_record*)n;
          nobug_log_line (&start, header,
                          context.flag, context.level,
                          " %s:%d: %s: %s: %p",
                          nobug_basename(node->hdr.extra.file), node->hdr.extra.line,
                          node->type, node->hdr.name, node->object_id);
        }
    }
  else
    nobug_log_line (&start, header, context.flag, context.level, " No resources registered");

  nobug_log_end (context.flag, context.level);

#if NOBUG_USE_PTHREAD
  pthread_mutex_unlock (&nobug_resource_mutex);
#endif
}
// Local Variables:
// c-file-style: "gnu"
// indent-tabs-mode: nil
// End: