2 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
4 * Distributed under the terms of the MIT License.
6 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
7 * Distributed under the terms of the NewOS License.
22 #include <boot/kernel_args.h>
28 #include <kscheduler.h>
29 #include <listeners.h>
30 #include <scheduling_analysis.h>
32 #include <syscall_restart.h>
35 #include <util/AutoLock.h>
36 #include <util/DoublyLinkedList.h>
38 #include <vm/vm_page.h>
39 #include <wait_for_objects.h>
41 #include "kernel_debug_config.h"
46 # define TRACE(x) dprintf_no_syslog x
53 # define KTRACE(x...) ktrace_printf(x)
55 # define KTRACE(x...) do {} while (false)
60 // * sSemsSpinlock: Protects the semaphore free list (sFreeSemsHead,
61 // sFreeSemsTail), Team::sem_list, and together with sem_entry::lock
62 // write access to sem_entry::owner/team_link.
63 // * sem_entry::lock: Protects all sem_entry members. owner, team_link
//   additionally need sSemsSpinlock for write access.
65 // lock itself doesn't need protection -- sem_entry objects are never deleted.
67 // The locking order is sSemsSpinlock -> sem_entry::lock -> scheduler lock. All
68 // semaphores are in the sSems array (sem_entry[]). Access by sem_id requires
69 // computing the object index (id % sMaxSems), locking the respective
70 // sem_entry::lock and verifying that sem_entry::id matches afterwards.
// Entry for one thread waiting on a semaphore; linked into the per-sem
// ThreadQueue below. NOTE(review): the constructor body and member list
// are elided in this excerpt -- only the declaration shell is visible.
struct queued_thread : DoublyLinkedListLinkImpl<queued_thread> {
	queued_thread(Thread *thread, int32 count)
	// NOTE(review): initializer list and members not visible here

// Wait queue type used by sem_entry.
typedef DoublyLinkedList<queued_thread> ThreadQueue;
// Members of struct sem_entry (the struct header and the enclosing union
// are elided in this excerpt). An entry is either live ("used") or on the
// free list ("unused"); see the locking notes above.
	struct list_link team_link;	// links the sem into its owning Team's sem_list
		// count + acquisition count of all blocked
	select_info* select_infos;	// select()-style waiters, notified on events
	thread_id last_acquirer;	// thread that most recently acquired the sem
#if DEBUG_SEM_LAST_ACQUIRER
	int32 last_acquire_count;	// debug only: count of the last acquire
	thread_id last_releaser;	// debug only: thread of the last release
	int32 last_release_count;	// debug only: count of the last release
	// "unused" variant: free-list linkage
	struct sem_entry* next;
	spinlock lock;	// protects only the id field when unused
	ThreadQueue queue;	// should be in u.used, but has a constructor
// Hard upper bound on the number of semaphores in the system.
static const int32 kMaxSemaphores = 65536;
static int32 sMaxSems = 4096;
	// Final value is computed based on the amount of available memory
static int32 sUsedSems = 0;	// number of currently allocated semaphores

static struct sem_entry *sSems = NULL;	// the global semaphore table
static bool sSemsActive = false;	// set once the subsystem is initialized
static struct sem_entry *sFreeSemsHead = NULL;	// free list (head)
static struct sem_entry *sFreeSemsTail = NULL;	// free list (tail)

// Protects the free list and the teams' sem lists (see notes above).
static spinlock sSemsSpinlock = B_SPINLOCK_INITIALIZER;

// Convenience wrappers for the two spinlock levels.
#define GRAB_SEM_LIST_LOCK() acquire_spinlock(&sSemsSpinlock)
#define RELEASE_SEM_LIST_LOCK() release_spinlock(&sSemsSpinlock)
#define GRAB_SEM_LOCK(s) acquire_spinlock(&(s).lock)
#define RELEASE_SEM_LOCK(s) release_spinlock(&(s).lock)
/*!	Debugger ("sems") command: lists active semaphores, optionally filtered
	by owning team, by a substring of the name, or by last acquirer.
	NOTE(review): this excerpt is incomplete -- declarations of owner/last/i,
	argument-count checks, branch bodies and the loop's filter header are
	elided; the fragments below preserve only the visible statements.
*/
dump_sem_list(int argc, char** argv)
	const char* name = NULL;

	// parse the optional "<filter> <value>" argument pair
	if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
		owner = strtoul(argv[2], NULL, 0);
	else if (!strcmp(argv[1], "name"))
	else if (!strcmp(argv[1], "last"))
		last = strtoul(argv[2], NULL, 0);
		// single numeric argument: treat it as the owner team
		owner = strtoul(argv[1], NULL, 0);

	kprintf("%-*s id count team last name\n", B_PRINTF_POINTER_WIDTH,

	// walk the whole table, skipping entries that fail any filter
	for (i = 0; i < sMaxSems; i++) {
		struct sem_entry* sem = &sSems[i];
			|| (last != -1 && sem->u.used.last_acquirer != last)
			|| (name != NULL && strstr(sem->u.used.name, name) == NULL)
			|| (owner != -1 && sem->u.used.owner != owner))
		kprintf("%p %6" B_PRId32 " %5" B_PRId32 " %6" B_PRId32 " "
			" %s\n", sem, sem->id, sem->u.used.count,
			sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0,
/*!	Debugger helper: prints the state of a single sem_entry and exports the
	debugger variables _sem/_semID/_owner (plus _acquirer/_releaser where
	known). For an unused entry it dumps the free-list linkage instead.
	NOTE(review): braces and a few lines are elided in this excerpt.
*/
dump_sem(struct sem_entry* sem)
	kprintf("SEM: %p\n", sem);
	kprintf("id: %" B_PRId32 " (%#" B_PRIx32 ")\n", sem->id, sem->id);
	kprintf("name: '%s'\n", sem->u.used.name);
	kprintf("owner: %" B_PRId32 "\n", sem->u.used.owner);
	kprintf("count: %" B_PRId32 "\n", sem->u.used.count);

	// print the ids of all queued (waiting) threads, if any
	if (!sem->queue.IsEmpty()) {
		ThreadQueue::Iterator it = sem->queue.GetIterator();
		while (queued_thread* entry = it.Next())
			kprintf(" %" B_PRId32, entry->thread->id);

	// export convenience variables for follow-up debugger commands
	set_debug_variable("_sem", (addr_t)sem);
	set_debug_variable("_semID", sem->id);
	set_debug_variable("_owner", sem->u.used.owner);

#if DEBUG_SEM_LAST_ACQUIRER
	kprintf("last acquired by: %" B_PRId32 ", count: %" B_PRId32 "\n",
		sem->u.used.last_acquirer, sem->u.used.last_acquire_count);
	kprintf("last released by: %" B_PRId32 ", count: %" B_PRId32 "\n",
		sem->u.used.last_releaser, sem->u.used.last_release_count);

	if (sem->u.used.last_releaser != 0)
		set_debug_variable("_releaser", sem->u.used.last_releaser);
		unset_debug_variable("_releaser");
	// non-debug build: only the last acquirer is tracked
	kprintf("last acquired by: %" B_PRId32 "\n", sem->u.used.last_acquirer);

	if (sem->u.used.last_acquirer != 0)
		set_debug_variable("_acquirer", sem->u.used.last_acquirer);
		unset_debug_variable("_acquirer");

	// unused entry: dump the free-list linkage
	kprintf("next: %p\n", sem->u.unused.next);
	kprintf("next_id: %" B_PRId32 "\n", sem->u.unused.next_id);
/*!	Debugger ("sem") command: accepts a kernel pointer, a semaphore ID, or a
	name and dumps the matching semaphore via dump_sem().
	NOTE(review): usage checks, early returns and loop tails are elided in
	this excerpt.
*/
dump_sem_info(int argc, char **argv)
	print_debugger_command_usage(argv[0]);

	num = strtoul(argv[1], NULL, 0);

	if (IS_KERNEL_ADDRESS(num)) {
		// argument is a sem_entry pointer
		dump_sem((struct sem_entry *)num);
	} else if (num >= 0) {
		// argument is an ID; map it to its table slot and verify it
		uint32 slot = num % sMaxSems;
		if (sSems[slot].id != (int)num) {
			kprintf("sem %ld (%#lx) doesn't exist!\n", num, num);
		dump_sem(&sSems[slot]);

	// walk through the sem list, trying to match name
	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].u.used.name != NULL
			&& strcmp(argv[1], sSems[i].u.used.name) == 0) {

	kprintf("sem \"%s\" doesn't exist!\n", argv[1]);
/*! \brief Appends a semaphore slot to the free list.

	The semaphore list must be locked.
	The slot's id field is not changed. It should already be set to -1.

	\param slot The index of the semaphore slot.
	\param nextID The ID the slot will get when reused. If < 0 the \a slot
*/
free_sem_slot(int slot, sem_id nextID)
	struct sem_entry *sem = sSems + slot;
	// set next_id to the next possible value; for sanity check the current ID
	// NOTE(review): the if/else heads selecting between the two assignments
	// below are elided in this excerpt
	sem->u.unused.next_id = slot;
	sem->u.unused.next_id = nextID;
	// append the entry to the list
	sFreeSemsTail->u.unused.next = sem;
	sem->u.unused.next = NULL;
// Notifies all select()-style waiters registered on \a sem about \a events;
// no-op when no select_infos are attached. Caller holds the sem's lock
// (per the conventions stated elsewhere in this file -- TODO confirm).
notify_sem_select_events(struct sem_entry* sem, uint16 events)
	if (sem->u.used.select_infos)
		notify_select_events_list(sem->u.used.select_infos, events);
/*! Fills the sem_info structure with information from the given semaphore.
	The semaphore's lock must be held when called.
*/
fill_sem_info(struct sem_entry* sem, sem_info* info, size_t size)
	info->team = sem->u.used.owner;
	strlcpy(info->name, sem->u.used.name, sizeof(info->name));
	info->count = sem->u.used.count;
	info->latest_holder = sem->u.used.last_acquirer;
		// NOTE(review): 'size' is accepted but unused in the visible code
/*! You must call this function with interrupts disabled, and the semaphore's
	spinlock held. Note that it will unlock the spinlock itself.
	Since it cannot free() the semaphore's name with interrupts turned off, it
	will return that one in \a name.
	NOTE(review): some lines (e.g. the declaration of 'id') are elided in
	this excerpt.
*/
uninit_sem_locked(struct sem_entry& sem, char** _name)
	KTRACE("delete_sem(sem: %ld)", sem.u.used.id);

	// invalidate any pending select() events on this semaphore
	notify_sem_select_events(&sem, B_EVENT_INVALID);
	sem.u.used.select_infos = NULL;

	// free any threads waiting for this semaphore
	while (queued_thread* entry = sem.queue.RemoveHead()) {
		entry->queued = false;
		thread_unblock(entry->thread, B_BAD_SEM_ID);

	// hand the name to the caller so it can be free()d with interrupts on
	*_name = sem.u.used.name;
	sem.u.used.name = NULL;

	RELEASE_SEM_LOCK(sem);

	// append slot to the free list
	GRAB_SEM_LIST_LOCK();
	free_sem_slot(id % sMaxSems, id + sMaxSems);
	atomic_add(&sUsedSems, -1);
	RELEASE_SEM_LIST_LOCK();
/*!	Deletes the semaphore with ID \a id. With \a checkPermission set, kernel
	team semaphores may not be deleted (used by the syscall path).
	NOTE(review): a few early checks, return statements and the 'name'
	declaration are elided in this excerpt.
*/
delete_sem_internal(sem_id id, bool checkPermission)
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	int32 slot = id % sMaxSems;

	cpu_status state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();
	GRAB_SEM_LOCK(sSems[slot]);

	// slots are reused modulo sMaxSems -- verify the ID actually matches
	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_SEM_LIST_LOCK();
		restore_interrupts(state);
		TRACE(("delete_sem: invalid sem_id %ld\n", id));

	// refuse deleting kernel-owned semaphores on behalf of userland
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_SEM_LIST_LOCK();
		restore_interrupts(state);
		dprintf("thread %" B_PRId32 " tried to delete kernel semaphore "
			"%" B_PRId32 ".\n", thread_get_current_thread_id(), id);
		return B_NOT_ALLOWED;

	// unlink the semaphore from its owning team's list
	if (sSems[slot].u.used.owner >= 0) {
		list_remove_link(&sSems[slot].u.used.team_link);
		sSems[slot].u.used.owner = -1;
		panic("sem %" B_PRId32 " has no owner", id);

	RELEASE_SEM_LIST_LOCK();

	// unlocks the sem's spinlock; 'name' must be freed by the caller side
	uninit_sem_locked(sSems[slot], &name);

	SpinLocker schedulerLocker(thread_get_current_thread()->scheduler_lock);
	scheduler_reschedule_if_necessary_locked();
	schedulerLocker.Unlock();

	restore_interrupts(state);
// #pragma mark - Private Kernel API


// TODO: Name clash with POSIX sem_init()... (we could just use C++)
/*!	One-time initialization of the semaphore subsystem: sizes the table from
	available memory, allocates it as a kernel area, seeds the free list and
	registers the "sems"/"sem" debugger commands.
	NOTE(review): status checks, the table-seeding loop body and the return
	are elided in this excerpt.
*/
haiku_sem_init(kernel_args *args)
	TRACE(("sem_init: entry\n"));

	// compute maximal number of semaphores depending on the available memory
	// 128 MB -> 16384 semaphores, 448 kB fixed array size
	// 256 MB -> 32768, 896 kB
	// 512 MB and more-> 65536, 1.75 MB
	i = vm_page_num_pages() / 2;
	while (sMaxSems < i && sMaxSems < kMaxSemaphores)

	// create and initialize semaphore table
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area = create_area_etc(B_SYSTEM_TEAM, "sem_table",
		sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions, (void**)&sSems);
		panic("unable to allocate semaphore table!\n");

	memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems);
	for (i = 0; i < sMaxSems; i++) {

	// add debugger commands
	add_debugger_command_etc("sems", &dump_sem_list,
		"Dump a list of all active semaphores (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]"
		" | (\"last\" <last acquirer>)\n"
		"Prints a list of all active semaphores meeting the given\n"
		"requirement. If no argument is given, all sems are listed.\n"
		" <team> - The team owning the semaphores.\n"
		" <name> - Part of the name of the semaphores.\n"
		" <last acquirer> - The thread that last acquired the semaphore.\n"
	add_debugger_command_etc("sem", &dump_sem_info,
		"Dump info about a particular semaphore",
		"Prints info about the specified semaphore.\n"
		" <sem> - pointer to the semaphore structure, semaphore ID, or name\n"
		" of the semaphore to print info for.\n", 0);

	TRACE(("sem_init: exit\n"));
/*! Creates a semaphore with the given parameters.

	This function is only available from within the kernel, and
	should not be made public - if possible, we should remove it
	completely (and have only create_sem() exported).

	NOTE(review): owner checks, the free-list-empty path, error handling of
	the malloc failure and the final return are elided in this excerpt.
*/
create_sem_etc(int32 count, const char* name, team_id owner)
	struct sem_entry* sem = NULL;
	sem_id id = B_NO_MORE_SEMS;

	if (sSemsActive == false || sUsedSems == sMaxSems)
		return B_NO_MORE_SEMS;

		name = "unnamed semaphore";

	// get the owning team
	Team* team = Team::Get(owner);
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	// copy the name into a kernel buffer before taking any spinlocks
	nameLength = strlen(name) + 1;
	nameLength = min_c(nameLength, B_OS_NAME_LENGTH);
	tempName = (char*)malloc(nameLength);
	if (tempName == NULL)
	strlcpy(tempName, name, nameLength);

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	// get the first slot from the free list
		// remove it from the free list
		sFreeSemsHead = sem->u.unused.next;
			sFreeSemsTail = NULL;

		// initialize the "used" variant of the entry
		sem->id = sem->u.unused.next_id;
		sem->u.used.count = count;
		sem->u.used.net_count = count;
		new(&sem->queue) ThreadQueue;
			// placement-new: queue has a constructor but lives in a union
		sem->u.used.name = tempName;
		sem->u.used.owner = team->id;
		sem->u.used.select_infos = NULL;

		list_add_item(&team->sem_list, &sem->u.used.team_link);

		RELEASE_SEM_LOCK(*sem);

		atomic_add(&sUsedSems, 1);

		KTRACE("create_sem_etc(count: %ld, name: %s, owner: %ld) -> %ld",
			count, name, owner, id);

		T_SCHEDULING_ANALYSIS(CreateSemaphore(id, name));
		NotifyWaitObjectListeners(&WaitObjectListener::SemaphoreCreated, id,

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);
/*!	Registers \a info for select()-style notification on the semaphore:
	B_EVENT_ACQUIRE_SEMAPHORE / B_EVENT_INVALID.
	NOTE(review): early argument checks and the final return are elided in
	this excerpt.
*/
select_sem(int32 id, struct select_info* info, bool kernel)
	status_t error = B_OK;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		error = B_BAD_SEM_ID;
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		// kernel semaphore, but call from userland
		error = B_NOT_ALLOWED;
		// restrict to the events a semaphore can actually deliver
		info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID;

		if (info->selected_events != 0) {
			// prepend to the sem's select_infos list
			info->next = sSems[slot].u.used.select_infos;
			sSems[slot].u.used.select_infos = info;

			// already acquirable? notify right away
			if (sSems[slot].u.used.count > 0)
				notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE);

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);
/*!	Removes \a info from the semaphore's select_infos list again -- the
	inverse of select_sem().
	NOTE(review): early checks and the final return are elided in this
	excerpt.
*/
deselect_sem(int32 id, struct select_info* info, bool kernel)
	// nothing registered, nothing to remove
	if (info->selected_events == 0)

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id == id) {
		// unlink info from the singly linked select_infos list
		select_info** infoLocation = &sSems[slot].u.used.select_infos;
		while (*infoLocation != NULL && *infoLocation != info)
			infoLocation = &(*infoLocation)->next;

		if (*infoLocation == info)
			*infoLocation = info->next;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);
/*! Forcibly removes a thread from a semaphore's wait queue. May have to wake
	up other threads in the process.
	Must be called with semaphore lock held. The thread lock must not be held.
	NOTE(review): braces and a few lines are elided in this excerpt.
*/
remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
	// take the entry out of the queue and return its count to the sem
	sem->queue.Remove(entry);
	entry->queued = false;
	sem->u.used.count += entry->count;

	// We're done with this entry. We only have to check, if other threads
	// need unblocking, too.

	// Now see if more threads need to be woken up. We get the scheduler lock
	// for that time, so the blocking state of threads won't change (due to
	// interruption or timeout). We need that lock anyway when unblocking a
	while ((entry = sem->queue.Head()) != NULL) {
		SpinLocker schedulerLocker(entry->thread->scheduler_lock);
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied, unblock
			// it. Otherwise we can't unblock any other thread.
			if (entry->count > sem->u.used.net_count)

			thread_unblock_locked(entry->thread, B_OK);
			sem->u.used.net_count -= entry->count;
			// The thread is no longer waiting, but still queued, which means
			// acquisition failed and we can just remove it.
			sem->u.used.count += entry->count;

		sem->queue.Remove(entry);
		entry->queued = false;

	// select notification, if the semaphore is now acquirable
	if (sem->u.used.count > 0)
		notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);
/*! This function deletes all semaphores belonging to a particular team.
	NOTE(review): the enclosing loop, the name free()ing and the 'name'
	declaration are elided in this excerpt.
*/
sem_delete_owned_sems(Team* team)
		// get the next semaphore from the team's sem list
		InterruptsLocker locker;
		SpinLocker semListLocker(sSemsSpinlock);
		sem_entry* sem = (sem_entry*)list_remove_head_item(&team->sem_list);

		// delete the semaphore
		semListLocker.Unlock();
		uninit_sem_locked(*sem, &name);
			// unlocks the sem's own spinlock itself

	scheduler_reschedule_if_necessary();
716 // #pragma mark - Public Kernel API
/*!	Creates a semaphore owned by the kernel team. */
create_sem(int32 count, const char* name)
	return create_sem_etc(count, name, team_get_kernel_team_id());


/*!	Deletes \a id without the userland permission check. */
delete_sem(sem_id id)
	return delete_sem_internal(id, false);


/*!	Acquires the semaphore once, blocking without timeout. */
acquire_sem(sem_id id)
	return switch_sem_etc(-1, id, 1, 0, 0);


/*!	Acquires \a count units with the given flags and timeout. */
acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
	return switch_sem_etc(-1, id, count, flags, timeout);


/*!	Atomically releases \a toBeReleased and acquires \a toBeAcquired once. */
switch_sem(sem_id toBeReleased, sem_id toBeAcquired)
	return switch_sem_etc(toBeReleased, toBeAcquired, 1, 0, 0);
/*!	The central acquire path: optionally releases \a semToBeReleased first,
	then tries to acquire \a count units of \a id, blocking with the given
	flags/timeout when not enough units are available.
	NOTE(review): numerous lines (argument validation, braces, labels and
	several returns) are elided in this excerpt.
*/
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
	int slot = id % sMaxSems;
	status_t status = B_OK;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	// acquiring may block -- calling with interrupts disabled is a bug
	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem "
			"%" B_PRId32 "\n", id);

	// both timeout flavors at once are contradictory
		|| (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
			== (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	// doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %" B_PRId32 " tried to acquire kernel semaphore "
			"%" B_PRId32 ".\n", thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;

	if (sSems[slot].u.used.count - count < 0) {
		// not enough units available right now
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
			status = B_WOULD_BLOCK;
		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
			// absolute negative timeout
			status = B_TIMED_OUT;

	KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
		"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		Thread *thread = thread_get_current_thread();

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// do a quick check to see if the thread has any pending signals
		// this should catch most of the cases where the thread had a signal
		SpinLocker schedulerLocker(thread->scheduler_lock);
		if (thread_is_interrupted(thread, flags)) {
			schedulerLocker.Unlock();
			// undo the count change; we never actually waited
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later

		schedulerLocker.Unlock();

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		// enqueue in the semaphore queue and get ready to wait
		queued_thread queueEntry(thread, count);
		sSems[slot].queue.Add(&queueEntry);
		queueEntry.queued = true;

		thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,

		RELEASE_SEM_LOCK(sSems[slot]);

		// release the other semaphore, if any
		if (semToBeReleased >= 0) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;

		status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
			? thread_block() : thread_block_with_timeout(flags, timeout);

		GRAB_SEM_LOCK(sSems[slot]);

		// If we're still queued, this means the acquisition failed, and we
		// need to remove our entry and (potentially) wake up other threads.
		if (queueEntry.queued)
			remove_thread_from_sem(&queueEntry, &sSems[slot]);

		if (acquireStatus >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
			sSems[slot].u.used.last_acquire_count = count;

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
		KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
		return acquireStatus;

	// fast path: enough units were available, no blocking needed
	sSems[slot].u.used.net_count -= count;
	sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_acquire_count = count;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= B_OK) {
		// depending on when we were interrupted, we need to still
		// release the semaphore to always leave in a consistent
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);

	if (status == B_NOT_ALLOWED)
		_user_debugger("Thread tried to acquire kernel semaphore.");

	KTRACE("switch_sem_etc() done: 0x%lx", status);
/*!	Releases one unit of \a id -- kernel convenience wrapper. */
release_sem(sem_id id)
	return release_sem_etc(id, 1, 0);
/*!	Releases \a count units of \a id, waking queued waiters whose requested
	count can now be satisfied. With B_RELEASE_ALL, exactly the outstanding
	units are released.
	NOTE(review): braces, early returns and the waiter-loop header are
	elided in this excerpt.
*/
release_sem_etc(sem_id id, int32 count, uint32 flags)
	int32 slot = id % sMaxSems;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	// a non-positive count is only meaningful with B_RELEASE_ALL
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)

	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	// doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %" B_PRId32 " tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,

	// negate last_acquirer -- presumably marks "released since the last
	// acquire" for the debugger output; TODO confirm
	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;

	if (flags & B_RELEASE_ALL) {
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;

	// Grab the scheduler lock, so thread_is_blocked() is reliable (due to
	// possible interruptions or timeouts, it wouldn't be otherwise).
		queued_thread* entry = sSems[slot].queue.Head();

		if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
			sSems[slot].u.used.count += count;
			sSems[slot].u.used.net_count += count;

		SpinLocker schedulerLock(entry->thread->scheduler_lock);
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;

			thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			// The thread is no longer waiting, but still queued, which
			// means acquisition failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;

		sSems[slot].queue.Remove(entry);
		entry->queued = false;

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread reschedule, if we've not explicitly
	// been told not to.
	if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
		SpinLocker _(thread_get_current_thread()->scheduler_lock);
		scheduler_reschedule_if_necessary_locked();
/*!	Returns the semaphore's current count in \a _count.
	NOTE(review): a couple of early checks and the final return are elided
	in this excerpt.
*/
get_sem_count(sem_id id, int32 *_count)
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
		return B_BAD_SEM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("sem_get_count: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;

	*_count = sSems[slot].u.used.count;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);
/*! Called by the get_sem_info() macro. Copies the sem's data into \a info
	under the sem lock via fill_sem_info().
	NOTE(review): a few early checks and the return are elided in this
	excerpt.
*/
_get_sem_info(sem_id id, struct sem_info *info, size_t size)
	status_t status = B_OK;

		return B_NO_MORE_SEMS;
		return B_BAD_SEM_ID;
	if (info == NULL || size != sizeof(sem_info))

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		status = B_BAD_SEM_ID;
		TRACE(("get_sem_info: invalid sem_id %ld\n", id));
		fill_sem_info(&sSems[slot], info, size);

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);
/*! Called by the get_next_sem_info() macro. Iterates the team's sem_list
	using \a _cookie as a positional index.
	NOTE(review): parameter tail, 'index' declaration, NULL checks and the
	return are elided in this excerpt.
*/
_get_next_sem_info(team_id teamID, int32 *_cookie, struct sem_info *info,
		return B_NO_MORE_SEMS;
	if (_cookie == NULL || info == NULL || size != sizeof(sem_info))
		return B_BAD_TEAM_ID;

	Team* team = Team::Get(teamID);
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker semListLocker(sSemsSpinlock);

	// TODO: find a way to iterate the list that is more reliable
	sem_entry* sem = (sem_entry*)list_get_first_item(&team->sem_list);
	int32 newIndex = *_cookie;

	// find the next entry to be returned
	while (sem != NULL && index < newIndex) {
		sem = (sem_entry*)list_get_next_item(&team->sem_list, sem);

	GRAB_SEM_LOCK(*sem);

	// only report live semaphores still owned by the team
	if (sem->id != -1 && sem->u.used.owner == team->id) {
		fill_sem_info(sem, info, size);
		newIndex = index + 1;

	RELEASE_SEM_LOCK(*sem);

	*_cookie = newIndex;
/*!	Transfers ownership of semaphore \a id to team \a newTeamID, relinking
	it between the teams' sem_lists.
	NOTE(review): some early checks and the final return are elided in this
	excerpt.
*/
set_sem_owner(sem_id id, team_id newTeamID)
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
		return B_BAD_SEM_ID;
		return B_BAD_TEAM_ID;

	int32 slot = id % sMaxSems;

	Team* newTeam = Team::Get(newTeamID);
	if (newTeam == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> newTeamReference(newTeam, true);

	// both locks required: list lock for team_link, sem lock for owner
	InterruptsSpinLocker semListLocker(sSemsSpinlock);
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("set_sem_owner: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;

	// move the sem from its current owner's list to the new team's list
	list_remove_link(&sSems[slot].u.used.team_link);
	list_add_item(&newTeam->sem_list, &sSems[slot].u.used.team_link);

	sSems[slot].u.used.owner = newTeam->id;
/*! Returns the name of the semaphore. The name is not copied, so the caller
	must make sure that the semaphore remains alive as long as the name is
	used. Performs an unlocked check only -- hence "unsafe".
*/
sem_get_name_unsafe(sem_id id)
	int slot = id % sMaxSems;

	if (sSemsActive == false || id < 0 || sSems[slot].id != id)

	return sSems[slot].u.used.name;
1193 // #pragma mark - Syscalls
/*!	Syscall: creates a semaphore for the current team, copying the name
	from userland first (NULL name is allowed). */
_user_create_sem(int32 count, const char *userName)
	char name[B_OS_NAME_LENGTH];

	if (userName == NULL)
		return create_sem_etc(count, NULL, team_get_current_team_id());

	// validate and copy the name before using it in kernel space
	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	return create_sem_etc(count, name, team_get_current_team_id());


/*!	Syscall: deletes a semaphore, with the kernel-sem permission check. */
_user_delete_sem(sem_id id)
	return delete_sem_internal(id, true);


/*!	Syscall: interruptible, permission-checked single acquire with
	syscall-restart support. */
_user_acquire_sem(sem_id id)
	status_t error = switch_sem_etc(-1, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	return syscall_restart_handle_post(error);


/*!	Syscall: interruptible, permission-checked acquire with count, flags,
	timeout and syscall-restart support. */
_user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(-1, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	return syscall_restart_handle_timeout_post(error, timeout);
/*!	Syscall: atomically release \a releaseSem and acquire \a id once. */
_user_switch_sem(sem_id releaseSem, sem_id id)
	status_t error = switch_sem_etc(releaseSem, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	return syscall_restart_handle_post(error);


/*!	Syscall: switch_sem with count/flags/timeout and restart support.
	NOTE(review): the parameter tail is elided in this excerpt. */
_user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags,
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(releaseSem, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	return syscall_restart_handle_timeout_post(error, timeout);


/*!	Syscall: permission-checked single release. */
_user_release_sem(sem_id id)
	return release_sem_etc(id, 1, B_CHECK_PERMISSION);


/*!	Syscall: permission-checked release of \a count units. */
_user_release_sem_etc(sem_id id, int32 count, uint32 flags)
	return release_sem_etc(id, count, flags | B_CHECK_PERMISSION);
/*!	Syscall: fetches the sem count into the userland buffer \a userCount.
	NOTE(review): declarations of 'count'/'status' and the final return are
	elided in this excerpt. */
_user_get_sem_count(sem_id id, int32 *userCount)
	if (userCount == NULL || !IS_USER_ADDRESS(userCount))
		return B_BAD_ADDRESS;

	status = get_sem_count(id, &count);
	if (status == B_OK && user_memcpy(userCount, &count, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;


/*!	Syscall: fills a userland sem_info via a kernel-side copy. */
_user_get_sem_info(sem_id id, struct sem_info *userInfo, size_t size)
	struct sem_info info;

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_sem_info(id, &info, size);
	if (status == B_OK && user_memcpy(userInfo, &info, size) < B_OK)
		return B_BAD_ADDRESS;


/*!	Syscall: iterates a team's semaphores; cookie and info are copied
	to/from userland around _get_next_sem_info().
	NOTE(review): the parameter tail and 'cookie' declaration are elided in
	this excerpt. */
_user_get_next_sem_info(team_id team, int32 *userCookie, struct sem_info *userInfo,
	struct sem_info info;

	if (userCookie == NULL || userInfo == NULL
		|| !IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_sem_info(team, &cookie, &info, size);

	if (status == B_OK) {
		// copy both the info and the advanced cookie back to userland
		if (user_memcpy(userInfo, &info, size) < B_OK
			|| user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK)
			return B_BAD_ADDRESS;


/*!	Syscall: transfers sem ownership (thin wrapper around set_sem_owner()). */
_user_set_sem_owner(sem_id id, team_id team)
	return set_sem_owner(id, team);