/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Semaphore code */


#include <sem.h>

#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <arch/int.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <ksignal.h>
#include <kscheduler.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <smp.h>
#include <syscall_restart.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vfs.h>
#include <vm/vm_page.h>
#include <wait_for_objects.h>

#include "kernel_debug_config.h"


//#define TRACE_SEM
#ifdef TRACE_SEM
#	define TRACE(x) dprintf_no_syslog x
#else
#	define TRACE(x) ;
#endif

//#define KTRACE_SEM
#ifdef KTRACE_SEM
#	define KTRACE(x...) ktrace_printf(x)
#else
#	define KTRACE(x...) do {} while (false)
#endif


// Locking:
// * sSemsSpinlock: Protects the semaphore free list (sFreeSemsHead,
//   sFreeSemsTail), Team::sem_list, and, together with sem_entry::lock,
//   write access to sem_entry::owner/team_link.
// * sem_entry::lock: Protects all sem_entry members. owner and team_link
//   additionally need sSemsSpinlock for write access.
//   The lock itself doesn't need protection -- sem_entry objects are never
//   deleted.
//
// The locking order is sSemsSpinlock -> sem_entry::lock -> scheduler lock. All
// semaphores are in the sSems array (sem_entry[]). Access by sem_id requires
// computing the object index (id % sMaxSems), locking the respective
// sem_entry::lock and verifying that sem_entry::id matches afterwards.
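//
// To illustrate, this is the lookup-and-verify pattern used throughout this
// file (a sketch of existing code, not an additional API):
//
//	int32 slot = id % sMaxSems;
//	cpu_status state = disable_interrupts();
//	GRAB_SEM_LOCK(sSems[slot]);
//	if (sSems[slot].id != id) {
//		// the slot is unused or was recycled for another semaphore
//		RELEASE_SEM_LOCK(sSems[slot]);
//		restore_interrupts(state);
//		return B_BAD_SEM_ID;
//	}
//	// ... operate on sSems[slot].u.used ...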


struct queued_thread : DoublyLinkedListLinkImpl<queued_thread> {
	queued_thread(Thread *thread, int32 count)
		:
		thread(thread),
		count(count),
		queued(false)
	{
	}

	Thread	*thread;
	int32	count;
	bool	queued;
};

typedef DoublyLinkedList<queued_thread> ThreadQueue;


struct sem_entry {
	union {
		// when slot in use
		struct {
			struct list_link	team_link;
			int32				count;
			int32				net_count;
									// count + acquisition count of all blocked
									// threads
			char*				name;
			team_id				owner;
			select_info*		select_infos;
			thread_id			last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
			int32				last_acquire_count;
			thread_id			last_releaser;
			int32				last_release_count;
#endif
		} used;

		// when slot unused
		struct {
			sem_id				next_id;
			struct sem_entry*	next;
		} unused;
	} u;

	sem_id		id;
	spinlock	lock;	// protects only the id field when unused
	ThreadQueue	queue;	// should be in u.used, but has a constructor
};


static const int32 kMaxSemaphores = 65536;
static int32 sMaxSems = 4096;
	// Final value is computed based on the amount of available memory
static int32 sUsedSems = 0;

static struct sem_entry *sSems = NULL;
static bool sSemsActive = false;
static struct sem_entry *sFreeSemsHead = NULL;
static struct sem_entry *sFreeSemsTail = NULL;

static spinlock sSemsSpinlock = B_SPINLOCK_INITIALIZER;

#define GRAB_SEM_LIST_LOCK()	acquire_spinlock(&sSemsSpinlock)
#define RELEASE_SEM_LIST_LOCK()	release_spinlock(&sSemsSpinlock)
#define GRAB_SEM_LOCK(s)		acquire_spinlock(&(s).lock)
#define RELEASE_SEM_LOCK(s)		release_spinlock(&(s).lock)


static int
dump_sem_list(int argc, char** argv)
{
	const char* name = NULL;
	team_id owner = -1;
	thread_id last = -1;
	int32 i;

	if (argc > 2) {
		if (!strcmp(argv[1], "team") || !strcmp(argv[1], "owner"))
			owner = strtoul(argv[2], NULL, 0);
		else if (!strcmp(argv[1], "name"))
			name = argv[2];
		else if (!strcmp(argv[1], "last"))
			last = strtoul(argv[2], NULL, 0);
	} else if (argc > 1)
		owner = strtoul(argv[1], NULL, 0);

	kprintf("%-*s id count team last name\n", B_PRINTF_POINTER_WIDTH,
		"sem");

	for (i = 0; i < sMaxSems; i++) {
		struct sem_entry* sem = &sSems[i];
		if (sem->id < 0
			|| (last != -1 && sem->u.used.last_acquirer != last)
			|| (name != NULL && strstr(sem->u.used.name, name) == NULL)
			|| (owner != -1 && sem->u.used.owner != owner))
			continue;

		kprintf("%p %6" B_PRId32 " %5" B_PRId32 " %6" B_PRId32 " "
			"%6" B_PRId32 " "
			" %s\n", sem, sem->id, sem->u.used.count,
			sem->u.used.owner,
			sem->u.used.last_acquirer > 0 ? sem->u.used.last_acquirer : 0,
			sem->u.used.name);
	}

	return 0;
}


static void
dump_sem(struct sem_entry* sem)
{
	kprintf("SEM: %p\n", sem);
	kprintf("id: %" B_PRId32 " (%#" B_PRIx32 ")\n", sem->id, sem->id);
	if (sem->id >= 0) {
		kprintf("name: '%s'\n", sem->u.used.name);
		kprintf("owner: %" B_PRId32 "\n", sem->u.used.owner);
		kprintf("count: %" B_PRId32 "\n", sem->u.used.count);
		kprintf("queue: ");
		if (!sem->queue.IsEmpty()) {
			ThreadQueue::Iterator it = sem->queue.GetIterator();
			while (queued_thread* entry = it.Next())
				kprintf(" %" B_PRId32, entry->thread->id);
			kprintf("\n");
		} else
			kprintf(" -\n");

		set_debug_variable("_sem", (addr_t)sem);
		set_debug_variable("_semID", sem->id);
		set_debug_variable("_owner", sem->u.used.owner);

#if DEBUG_SEM_LAST_ACQUIRER
		kprintf("last acquired by: %" B_PRId32 ", count: %" B_PRId32 "\n",
			sem->u.used.last_acquirer, sem->u.used.last_acquire_count);
		kprintf("last released by: %" B_PRId32 ", count: %" B_PRId32 "\n",
			sem->u.used.last_releaser, sem->u.used.last_release_count);

		if (sem->u.used.last_releaser != 0)
			set_debug_variable("_releaser", sem->u.used.last_releaser);
		else
			unset_debug_variable("_releaser");
#else
		kprintf("last acquired by: %" B_PRId32 "\n", sem->u.used.last_acquirer);
#endif

		if (sem->u.used.last_acquirer != 0)
			set_debug_variable("_acquirer", sem->u.used.last_acquirer);
		else
			unset_debug_variable("_acquirer");
	} else {
		kprintf("next: %p\n", sem->u.unused.next);
		kprintf("next_id: %" B_PRId32 "\n", sem->u.unused.next_id);
	}
}


static int
dump_sem_info(int argc, char **argv)
{
	bool found = false;
	addr_t num;
	int32 i;

	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	num = strtoul(argv[1], NULL, 0);

	if (IS_KERNEL_ADDRESS(num)) {
		dump_sem((struct sem_entry *)num);
		return 0;
	} else if (num >= 0) {
		uint32 slot = num % sMaxSems;
		if (sSems[slot].id != (int)num) {
			kprintf("sem %ld (%#lx) doesn't exist!\n", num, num);
			return 0;
		}

		dump_sem(&sSems[slot]);
		return 0;
	}

	// walk through the sem list, trying to match name
	for (i = 0; i < sMaxSems; i++) {
		if (sSems[i].u.used.name != NULL
			&& strcmp(argv[1], sSems[i].u.used.name) == 0) {
			dump_sem(&sSems[i]);
			found = true;
		}
	}

	if (!found)
		kprintf("sem \"%s\" doesn't exist!\n", argv[1]);
	return 0;
}


/*!	\brief Appends a semaphore slot to the free list.

	The semaphore list must be locked.
	The slot's id field is not changed. It should already be set to -1.

	\param slot The index of the semaphore slot.
	\param nextID The ID the slot will get when reused. If < 0, \a slot is
		used as the ID.
*/
static void
free_sem_slot(int slot, sem_id nextID)
{
	struct sem_entry *sem = sSems + slot;
	// set next_id to the next possible value; for sanity check the current ID
	if (nextID < 0)
		sem->u.unused.next_id = slot;
	else
		sem->u.unused.next_id = nextID;
	// append the entry to the list
	if (sFreeSemsTail)
		sFreeSemsTail->u.unused.next = sem;
	else
		sFreeSemsHead = sem;
	sFreeSemsTail = sem;
	sem->u.unused.next = NULL;
}
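
// Example of the resulting ID recycling (a sketch, assuming the default
// sMaxSems of 4096): deleting sem 5 calls free_sem_slot(5 % 4096, 5 + 4096),
// so slot 5 next hands out ID 4101. That ID still maps to the same slot
// (4101 % 4096 == 5), while a stale sem_id of 5 is caught by the
// sem_entry::id comparison.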


static inline void
notify_sem_select_events(struct sem_entry* sem, uint16 events)
{
	if (sem->u.used.select_infos)
		notify_select_events_list(sem->u.used.select_infos, events);
}


/*!	Fills the sem_info structure with information from the given semaphore.
	The semaphore's lock must be held when called.
*/
static void
fill_sem_info(struct sem_entry* sem, sem_info* info, size_t size)
{
	info->sem = sem->id;
	info->team = sem->u.used.owner;
	strlcpy(info->name, sem->u.used.name, sizeof(info->name));
	info->count = sem->u.used.count;
	info->latest_holder = sem->u.used.last_acquirer;
}


/*!	You must call this function with interrupts disabled, and the semaphore's
	spinlock held. Note that it will unlock the spinlock itself.
	Since it cannot free() the semaphore's name with interrupts turned off, it
	will return that one in \a _name.
*/
static void
uninit_sem_locked(struct sem_entry& sem, char** _name)
{
	KTRACE("delete_sem(sem: %ld)", sem.id);

	notify_sem_select_events(&sem, B_EVENT_INVALID);
	sem.u.used.select_infos = NULL;

	// free any threads waiting for this semaphore
	while (queued_thread* entry = sem.queue.RemoveHead()) {
		entry->queued = false;
		thread_unblock(entry->thread, B_BAD_SEM_ID);
	}

	int32 id = sem.id;
	sem.id = -1;
	*_name = sem.u.used.name;
	sem.u.used.name = NULL;

	RELEASE_SEM_LOCK(sem);

	// append slot to the free list
	GRAB_SEM_LIST_LOCK();
	free_sem_slot(id % sMaxSems, id + sMaxSems);
	atomic_add(&sUsedSems, -1);
	RELEASE_SEM_LIST_LOCK();
}


static status_t
delete_sem_internal(sem_id id, bool checkPermission)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;

	int32 slot = id % sMaxSems;

	cpu_status state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_SEM_LIST_LOCK();
		restore_interrupts(state);
		TRACE(("delete_sem: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	if (checkPermission
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_SEM_LIST_LOCK();
		restore_interrupts(state);
		dprintf("thread %" B_PRId32 " tried to delete kernel semaphore "
			"%" B_PRId32 ".\n", thread_get_current_thread_id(), id);
		return B_NOT_ALLOWED;
	}

	if (sSems[slot].u.used.owner >= 0) {
		list_remove_link(&sSems[slot].u.used.team_link);
		sSems[slot].u.used.owner = -1;
	} else
		panic("sem %" B_PRId32 " has no owner", id);

	RELEASE_SEM_LIST_LOCK();

	char* name;
	uninit_sem_locked(sSems[slot], &name);

	SpinLocker schedulerLocker(thread_get_current_thread()->scheduler_lock);
	scheduler_reschedule_if_necessary_locked();
	schedulerLocker.Unlock();

	restore_interrupts(state);

	free(name);
	return B_OK;
}


// #pragma mark - Private Kernel API


// TODO: Name clash with POSIX sem_init()... (we could just use C++)
status_t
haiku_sem_init(kernel_args *args)
{
	area_id area;
	int32 i;

	TRACE(("sem_init: entry\n"));

	// compute maximal number of semaphores depending on the available memory
	// 128 MB -> 16384 semaphores, 448 kB fixed array size
	// 256 MB -> 32768 semaphores, 896 kB
	// 512 MB and more -> 65536 semaphores, 1.75 MB
	i = vm_page_num_pages() / 2;
	while (sMaxSems < i && sMaxSems < kMaxSemaphores)
		sMaxSems <<= 1;
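	// Worked example for the table above (assuming 4 kB pages): with 128 MB
	// of RAM vm_page_num_pages() is 32768, so i == 16384 and sMaxSems
	// doubles twice (4096 -> 8192 -> 16384) before the loop stops.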

	// create and initialize semaphore table
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area = create_area_etc(B_SYSTEM_TEAM, "sem_table",
		sizeof(struct sem_entry) * sMaxSems, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions, (void**)&sSems);
	if (area < 0)
		panic("unable to allocate semaphore table!\n");

	memset(sSems, 0, sizeof(struct sem_entry) * sMaxSems);
	for (i = 0; i < sMaxSems; i++) {
		sSems[i].id = -1;
		free_sem_slot(i, i);
	}

	// add debugger commands
	add_debugger_command_etc("sems", &dump_sem_list,
		"Dump a list of all active semaphores (for team, with name, etc.)",
		"[ ([ \"team\" | \"owner\" ] <team>) | (\"name\" <name>) ]"
			" | (\"last\" <last acquirer>)\n"
		"Prints a list of all active semaphores meeting the given\n"
		"requirement. If no argument is given, all sems are listed.\n"
		"  <team>           - The team owning the semaphores.\n"
		"  <name>           - Part of the name of the semaphores.\n"
		"  <last acquirer>  - The thread that last acquired the semaphore.\n"
		, 0);
	add_debugger_command_etc("sem", &dump_sem_info,
		"Dump info about a particular semaphore",
		"<sem>\n"
		"Prints info about the specified semaphore.\n"
		"  <sem>  - pointer to the semaphore structure, semaphore ID, or name\n"
		"           of the semaphore to print info for.\n", 0);

	TRACE(("sem_init: exit\n"));

	sSemsActive = true;

	return 0;
}


/*!	Creates a semaphore with the given parameters.

	This function is only available from within the kernel, and
	should not be made public - if possible, we should remove it
	completely (and have only create_sem() exported).
*/
sem_id
create_sem_etc(int32 count, const char* name, team_id owner)
{
	struct sem_entry* sem = NULL;
	cpu_status state;
	sem_id id = B_NO_MORE_SEMS;
	char* tempName;
	size_t nameLength;

	if (sSemsActive == false || sUsedSems == sMaxSems)
		return B_NO_MORE_SEMS;

	if (name == NULL)
		name = "unnamed semaphore";

	// get the owning team
	Team* team = Team::Get(owner);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	// clone the name
	nameLength = strlen(name) + 1;
	nameLength = min_c(nameLength, B_OS_NAME_LENGTH);
	tempName = (char*)malloc(nameLength);
	if (tempName == NULL)
		return B_NO_MEMORY;

	strlcpy(tempName, name, nameLength);

	state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();

	// get the first slot from the free list
	sem = sFreeSemsHead;
	if (sem) {
		// remove it from the free list
		sFreeSemsHead = sem->u.unused.next;
		if (!sFreeSemsHead)
			sFreeSemsTail = NULL;

		// init the slot
		GRAB_SEM_LOCK(*sem);
		sem->id = sem->u.unused.next_id;
		sem->u.used.count = count;
		sem->u.used.net_count = count;
		new(&sem->queue) ThreadQueue;
		sem->u.used.name = tempName;
		sem->u.used.owner = team->id;
		sem->u.used.select_infos = NULL;
		id = sem->id;

		list_add_item(&team->sem_list, &sem->u.used.team_link);

		RELEASE_SEM_LOCK(*sem);

		atomic_add(&sUsedSems, 1);

		KTRACE("create_sem_etc(count: %ld, name: %s, owner: %ld) -> %ld",
			count, name, owner, id);

		T_SCHEDULING_ANALYSIS(CreateSemaphore(id, name));
		NotifyWaitObjectListeners(&WaitObjectListener::SemaphoreCreated, id,
			name);
	}

	RELEASE_SEM_LIST_LOCK();
	restore_interrupts(state);

	if (sem == NULL)
		free(tempName);

	return id;
}


status_t
select_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;
	status_t error = B_OK;

	if (id < 0)
		return B_BAD_SEM_ID;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		// bad sem ID
		error = B_BAD_SEM_ID;
	} else if (!kernel
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		// kernel semaphore, but call from userland
		error = B_NOT_ALLOWED;
	} else {
		info->selected_events &= B_EVENT_ACQUIRE_SEMAPHORE | B_EVENT_INVALID;

		if (info->selected_events != 0) {
			info->next = sSems[slot].u.used.select_infos;
			sSems[slot].u.used.select_infos = info;

			if (sSems[slot].u.used.count > 0)
				notify_select_events(info, B_EVENT_ACQUIRE_SEMAPHORE);
		}
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return error;
}


status_t
deselect_sem(int32 id, struct select_info* info, bool kernel)
{
	cpu_status state;
	int32 slot;

	if (id < 0)
		return B_BAD_SEM_ID;

	if (info->selected_events == 0)
		return B_OK;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id == id) {
		select_info** infoLocation = &sSems[slot].u.used.select_infos;
		while (*infoLocation != NULL && *infoLocation != info)
			infoLocation = &(*infoLocation)->next;

		if (*infoLocation == info)
			*infoLocation = info->next;
	}

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}
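
// select_sem()/deselect_sem() back the wait_for_objects() event mechanism.
// A userland sketch (names per the public wait_for_objects() API; purely
// illustrative):
//
//	object_wait_info waitInfo;
//	waitInfo.object = semID;
//	waitInfo.type = B_OBJECT_TYPE_SEMAPHORE;
//	waitInfo.events = B_EVENT_ACQUIRE_SEMAPHORE;
//	wait_for_objects(&waitInfo, 1);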


/*!	Forcibly removes a thread from a semaphore's wait queue. May have to wake
	up other threads in the process.
	Must be called with semaphore lock held. The thread lock must not be held.
*/
static void
remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
{
	if (!entry->queued)
		return;

	sem->queue.Remove(entry);
	entry->queued = false;
	sem->u.used.count += entry->count;

	// We're done with this entry. Now see if more threads need to be woken
	// up. We hold the scheduler lock while checking, so the blocking state of
	// threads won't change (due to interruption or timeout); we need that
	// lock anyway when unblocking a thread.
	while ((entry = sem->queue.Head()) != NULL) {
		SpinLocker schedulerLocker(entry->thread->scheduler_lock);
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied, unblock
			// it. Otherwise we can't unblock any other thread.
			if (entry->count > sem->u.used.net_count)
				break;

			thread_unblock_locked(entry->thread, B_OK);
			sem->u.used.net_count -= entry->count;
		} else {
			// The thread is no longer waiting, but still queued, which means
			// the acquisition failed and we can just remove it.
			sem->u.used.count += entry->count;
		}

		sem->queue.Remove(entry);
		entry->queued = false;
	}

	// select notification, if the semaphore is now acquirable
	if (sem->u.used.count > 0)
		notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);
}


/*!	This function deletes all semaphores belonging to a particular team. */
void
sem_delete_owned_sems(Team* team)
{
	while (true) {
		char* name;

		{
			// get the next semaphore from the team's sem list
			InterruptsLocker locker;
			SpinLocker semListLocker(sSemsSpinlock);
			sem_entry* sem = (sem_entry*)list_remove_head_item(&team->sem_list);
			if (sem == NULL)
				break;

			// delete the semaphore
			GRAB_SEM_LOCK(*sem);
			semListLocker.Unlock();
			uninit_sem_locked(*sem, &name);
		}

		free(name);
	}

	scheduler_reschedule_if_necessary();
}


int32
sem_max_sems(void)
{
	return sMaxSems;
}


int32
sem_used_sems(void)
{
	return sUsedSems;
}


// #pragma mark - Public Kernel API


sem_id
create_sem(int32 count, const char* name)
{
	return create_sem_etc(count, name, team_get_kernel_team_id());
}


status_t
delete_sem(sem_id id)
{
	return delete_sem_internal(id, false);
}


status_t
acquire_sem(sem_id id)
{
	return switch_sem_etc(-1, id, 1, 0, 0);
}


status_t
acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	return switch_sem_etc(-1, id, count, flags, timeout);
}


status_t
switch_sem(sem_id toBeReleased, sem_id toBeAcquired)
{
	return switch_sem_etc(toBeReleased, toBeAcquired, 1, 0, 0);
}
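
// Typical in-kernel usage of the API above (a sketch; error handling omitted
// and all names illustrative):
//
//	sem_id lock = create_sem(1, "my driver lock");
//	if (acquire_sem_etc(lock, 1, B_RELATIVE_TIMEOUT, 1000000) == B_OK) {
//		// ... critical section ...
//		release_sem(lock);
//	}
//	delete_sem(lock);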


status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem "
			"%" B_PRId32 "\n", id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 || (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
			== (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	// doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %" B_PRId32 " tried to acquire kernel semaphore "
			"%" B_PRId32 ".\n", thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	if (sSems[slot].u.used.count - count < 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
			// immediate timeout
			status = B_WOULD_BLOCK;
			goto err;
		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
			// absolute negative timeout
			status = B_TIMED_OUT;
			goto err;
		}
	}

	KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
		"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,
		timeout);

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		Thread *thread = thread_get_current_thread();

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// do a quick check to see if the thread has any pending signals
		// this should catch most of the cases where the thread had a signal
		SpinLocker schedulerLocker(thread->scheduler_lock);
		if (thread_is_interrupted(thread, flags)) {
			schedulerLocker.Unlock();
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later
			goto err;
		}

		schedulerLocker.Unlock();

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		// enqueue in the semaphore queue and get ready to wait
		queued_thread queueEntry(thread, count);
		sSems[slot].queue.Add(&queueEntry);
		queueEntry.queued = true;

		thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
			(void*)(addr_t)id);

		RELEASE_SEM_LOCK(sSems[slot]);

		// release the other semaphore, if any
		if (semToBeReleased >= 0) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
			? thread_block() : thread_block_with_timeout(flags, timeout);

		GRAB_SEM_LOCK(sSems[slot]);

		// If we're still queued, this means the acquisition failed, and we
		// need to remove our entry and (potentially) wake up other threads.
		if (queueEntry.queued)
			remove_thread_from_sem(&queueEntry, &sSems[slot]);

		if (acquireStatus >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
			sSems[slot].u.used.last_acquire_count = count;
#endif
		}

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
		return acquireStatus;
	} else {
		sSems[slot].u.used.net_count -= count;
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= B_OK) {
		// depending on when we were interrupted, we still need to release
		// the other semaphore to always leave things in a consistent state
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
		_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	KTRACE("switch_sem_etc() done: 0x%lx", status);

	return status;
}
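
// A worked example of the count/net_count bookkeeping above (a sketch): a
// semaphore created with count 0 has count == net_count == 0. A thread
// blocking with count 1 leaves count == -1, net_count == 0. A release of 1
// then unblocks it (its count of 1 <= net_count 0 + released 1) and restores
// count == 0, net_count == 0.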


status_t
release_sem(sem_id id)
{
	return release_sem_etc(id, 1, 0);
}


status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	int32 slot = id % sMaxSems;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	// doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %" B_PRId32 " tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	// Grab the scheduler lock, so thread_is_blocked() is reliable (due to
	// possible interruptions or timeouts, it wouldn't be otherwise).
	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		SpinLocker schedulerLock(entry->thread->scheduler_lock);
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
		} else {
			// The thread is no longer waiting, but still queued, which
			// means the acquisition failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread, reschedule unless we've been
	// explicitly told not to.
	if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();

		SpinLocker _(thread_get_current_thread()->scheduler_lock);
		scheduler_reschedule_if_necessary_locked();
	}

	return B_OK;
}
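
// Example (a sketch; "queueSem" is illustrative): waking every currently
// blocked waiter at once, e.g. on teardown, without crediting future
// acquirers:
//
//	release_sem_etc(queueSem, 0, B_RELEASE_ALL);
//
// With B_RELEASE_ALL the count argument is ignored; the code above computes
// exactly how many releases the currently blocked threads need.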


status_t
get_sem_count(sem_id id, int32 *_count)
{
	int slot;
	int state;

	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (_count == NULL)
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);
		TRACE(("sem_get_count: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	*_count = sSems[slot].u.used.count;

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return B_OK;
}


/*!	Called by the get_sem_info() macro. */
status_t
_get_sem_info(sem_id id, struct sem_info *info, size_t size)
{
	status_t status = B_OK;
	int state;
	int slot;

	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;

	slot = id % sMaxSems;

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		status = B_BAD_SEM_ID;
		TRACE(("get_sem_info: invalid sem_id %ld\n", id));
	} else
		fill_sem_info(&sSems[slot], info, size);

	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	return status;
}


/*!	Called by the get_next_sem_info() macro. */
status_t
_get_next_sem_info(team_id teamID, int32 *_cookie, struct sem_info *info,
	size_t size)
{
	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (_cookie == NULL || info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;
	if (teamID < 0)
		return B_BAD_TEAM_ID;

	Team* team = Team::Get(teamID);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker semListLocker(sSemsSpinlock);

	// TODO: find a way to iterate the list that is more reliable
	sem_entry* sem = (sem_entry*)list_get_first_item(&team->sem_list);
	int32 newIndex = *_cookie;
	int32 index = 0;
	bool found = false;

	while (!found) {
		// find the next entry to be returned
		while (sem != NULL && index < newIndex) {
			sem = (sem_entry*)list_get_next_item(&team->sem_list, sem);
			index++;
		}

		if (sem == NULL)
			return B_BAD_VALUE;

		GRAB_SEM_LOCK(*sem);

		if (sem->id != -1 && sem->u.used.owner == team->id) {
			// found one!
			fill_sem_info(sem, info, size);
			newIndex = index + 1;
			found = true;
		} else
			newIndex++;

		RELEASE_SEM_LOCK(*sem);
	}

	if (!found)
		return B_BAD_VALUE;

	*_cookie = newIndex;
	return B_OK;
}
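
// Typical iteration via the public get_next_sem_info() wrapper (a sketch):
//
//	int32 cookie = 0;
//	sem_info info;
//	while (get_next_sem_info(teamID, &cookie, &info) == B_OK)
//		dprintf("sem %" B_PRId32 ": %s\n", info.sem, info.name);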


status_t
set_sem_owner(sem_id id, team_id newTeamID)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (newTeamID < 0)
		return B_BAD_TEAM_ID;

	int32 slot = id % sMaxSems;

	// get the new team
	Team* newTeam = Team::Get(newTeamID);
	if (newTeam == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> newTeamReference(newTeam, true);

	InterruptsSpinLocker semListLocker(sSemsSpinlock);
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("set_sem_owner: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	list_remove_link(&sSems[slot].u.used.team_link);
	list_add_item(&newTeam->sem_list, &sSems[slot].u.used.team_link);

	sSems[slot].u.used.owner = newTeam->id;
	return B_OK;
}


/*!	Returns the name of the semaphore. The name is not copied, so the caller
	must make sure that the semaphore remains alive as long as the name is
	used.
*/
const char*
sem_get_name_unsafe(sem_id id)
{
	int slot = id % sMaxSems;

	if (sSemsActive == false || id < 0 || sSems[slot].id != id)
		return NULL;

	return sSems[slot].u.used.name;
}


// #pragma mark - Syscalls


sem_id
_user_create_sem(int32 count, const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (userName == NULL)
		return create_sem_etc(count, NULL, team_get_current_team_id());

	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	return create_sem_etc(count, name, team_get_current_team_id());
}


status_t
_user_delete_sem(sem_id id)
{
	return delete_sem_internal(id, true);
}


status_t
_user_acquire_sem(sem_id id)
{
	status_t error = switch_sem_etc(-1, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	return syscall_restart_handle_post(error);
}


status_t
_user_acquire_sem_etc(sem_id id, int32 count, uint32 flags, bigtime_t timeout)
{
	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(-1, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	return syscall_restart_handle_timeout_post(error, timeout);
}


status_t
_user_switch_sem(sem_id releaseSem, sem_id id)
{
	status_t error = switch_sem_etc(releaseSem, id, 1,
		B_CAN_INTERRUPT | B_CHECK_PERMISSION, 0);

	if (releaseSem < 0)
		return syscall_restart_handle_post(error);

	return error;
}


status_t
_user_switch_sem_etc(sem_id releaseSem, sem_id id, int32 count, uint32 flags,
	bigtime_t timeout)
{
	if (releaseSem < 0)
		syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = switch_sem_etc(releaseSem, id, count,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, timeout);

	if (releaseSem < 0)
		return syscall_restart_handle_timeout_post(error, timeout);

	return error;
}


status_t
_user_release_sem(sem_id id)
{
	return release_sem_etc(id, 1, B_CHECK_PERMISSION);
}


status_t
_user_release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	return release_sem_etc(id, count, flags | B_CHECK_PERMISSION);
}


status_t
_user_get_sem_count(sem_id id, int32 *userCount)
{
	status_t status;
	int32 count;

	if (userCount == NULL || !IS_USER_ADDRESS(userCount))
		return B_BAD_ADDRESS;

	status = get_sem_count(id, &count);
	if (status == B_OK && user_memcpy(userCount, &count, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_sem_info(sem_id id, struct sem_info *userInfo, size_t size)
{
	struct sem_info info;
	status_t status;

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_sem_info(id, &info, size);
	if (status == B_OK && user_memcpy(userInfo, &info, size) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_next_sem_info(team_id team, int32 *userCookie,
	struct sem_info *userInfo, size_t size)
{
	struct sem_info info;
	int32 cookie;
	status_t status;

	if (userCookie == NULL || userInfo == NULL
		|| !IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_sem_info(team, &cookie, &info, size);

	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, size) < B_OK
			|| user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_set_sem_owner(sem_id id, team_id team)
{
	return set_sem_owner(id, team);
}