// src/system/kernel/debug/system_profiler.cpp
/*
 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <system_profiler.h>

#include <AutoDeleter.h>
#include <Referenceable.h>

#include <util/AutoLock.h>

#include <system_profiler_defs.h>

#include <cpu.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <listeners.h>
#include <Notifications.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
#include <user_debugger.h>
#include <vm/vm.h>

#include <arch/debug.h>

#include "IOSchedulerRoster.h"


// This is the kernel-side implementation of the system profiling support.
// A userland team can register as system profiler, providing an area as buffer
// for events. Those events are team, thread, and image changes (added/removed),
// periodic sampling of the return address stack for each CPU, as well as
// scheduling and I/O scheduling events.
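

// A rough illustration of the userland side (not part of this file): before
// invoking the start syscall, a client team creates the transfer area and
// fills in a system_profiler_parameters structure. The area name and the
// values below are arbitrary assumptions, not Haiku defaults:
//
//	void* address;
//	area_id buffer = create_area("profiling buffer", &address, B_ANY_ADDRESS,
//		1024 * 1024, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
//
//	system_profiler_parameters parameters;
//	parameters.buffer_area = buffer;
//	parameters.flags = B_SYSTEM_PROFILER_TEAM_EVENTS
//		| B_SYSTEM_PROFILER_THREAD_EVENTS | B_SYSTEM_PROFILER_SAMPLING_EVENTS;
//	parameters.locking_lookup_size = 4096;
//	parameters.interval = 1000;
//	parameters.stack_depth = 5;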


class SystemProfiler;


// minimum/maximum size of the table used for wait object caching
#define MIN_WAIT_OBJECT_COUNT	128
#define MAX_WAIT_OBJECT_COUNT	1024


static spinlock sProfilerLock = B_SPINLOCK_INITIALIZER;
static SystemProfiler* sProfiler = NULL;
static struct system_profiler_parameters* sRecordedParameters = NULL;


class SystemProfiler : public BReferenceable, private NotificationListener,
	private SchedulerListener, private WaitObjectListener {
public:
								SystemProfiler(team_id team,
									const area_info& userAreaInfo,
									const system_profiler_parameters&
										parameters);
								~SystemProfiler();

			team_id				TeamID() const	{ return fTeam; }

			status_t			Init();
			status_t			NextBuffer(size_t bytesRead,
									uint64* _droppedEvents);

private:
	virtual	void				EventOccurred(NotificationService& service,
									const KMessage* event);

	virtual	void				ThreadEnqueuedInRunQueue(Thread* thread);
	virtual	void				ThreadRemovedFromRunQueue(Thread* thread);
	virtual	void				ThreadScheduled(Thread* oldThread,
									Thread* newThread);

	virtual	void				SemaphoreCreated(sem_id id,
									const char* name);
	virtual	void				ConditionVariableInitialized(
									ConditionVariable* variable);
	virtual	void				MutexInitialized(mutex* lock);
	virtual	void				RWLockInitialized(rw_lock* lock);

			bool				_TeamAdded(Team* team);
			bool				_TeamRemoved(Team* team);
			bool				_TeamExec(Team* team);

			bool				_ThreadAdded(Thread* thread);
			bool				_ThreadRemoved(Thread* thread);

			bool				_ImageAdded(struct image* image);
			bool				_ImageRemoved(struct image* image);

			bool				_IOSchedulerAdded(IOScheduler* scheduler);
			bool				_IOSchedulerRemoved(IOScheduler* scheduler);
			bool				_IORequestScheduled(IOScheduler* scheduler,
									IORequest* request);
			bool				_IORequestFinished(IOScheduler* scheduler,
									IORequest* request);
			bool				_IOOperationStarted(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);
			bool				_IOOperationFinished(IOScheduler* scheduler,
									IORequest* request, IOOperation* operation);

			void				_WaitObjectCreated(addr_t object, uint32 type);
			void				_WaitObjectUsed(addr_t object, uint32 type);

	inline	void				_MaybeNotifyProfilerThreadLocked();
	inline	void				_MaybeNotifyProfilerThread();

	static	bool				_InitialImageIterator(struct image* image,
									void* cookie);

			void*				_AllocateBuffer(size_t size, int event, int cpu,
									int count);

	static	void				_InitTimers(void* cookie, int cpu);
	static	void				_UninitTimers(void* cookie, int cpu);
			void				_ScheduleTimer(int cpu);

			void				_DoSample();

	static	int32				_ProfilingEvent(struct timer* timer);

private:
			struct CPUProfileData {
				struct timer		timer;
				bigtime_t			timerEnd;
				bool				timerScheduled;
				addr_t				buffer[B_DEBUG_STACK_TRACE_DEPTH];
			};

			struct WaitObjectKey {
				addr_t	object;
				uint32	type;
			};

			struct WaitObject : DoublyLinkedListLinkImpl<WaitObject>,
					WaitObjectKey {
				struct WaitObject*	hash_link;
			};

			struct WaitObjectTableDefinition {
				typedef WaitObjectKey	KeyType;
				typedef	WaitObject		ValueType;

				size_t HashKey(const WaitObjectKey& key) const
				{
					return (size_t)key.object ^ (size_t)key.type;
				}

				size_t Hash(const WaitObject* value) const
				{
					return HashKey(*value);
				}

				bool Compare(const WaitObjectKey& key,
					const WaitObject* value) const
				{
					return value->type == key.type
						&& value->object == key.object;
				}

				WaitObject*& GetLink(WaitObject* value) const
				{
					return value->hash_link;
				}
			};

			typedef DoublyLinkedList<WaitObject> WaitObjectList;
			typedef BOpenHashTable<WaitObjectTableDefinition> WaitObjectTable;

private:
			spinlock			fLock;
			team_id				fTeam;
			area_id				fUserArea;
			area_id				fKernelArea;
			size_t				fAreaSize;
			uint32				fFlags;
			uint32				fStackDepth;
			bigtime_t			fInterval;
			system_profiler_buffer_header* fHeader;
			uint8*				fBufferBase;
			size_t				fBufferCapacity;
			size_t				fBufferStart;
			size_t				fBufferSize;
			uint64				fDroppedEvents;
			int64				fLastTeamAddedSerialNumber;
			int64				fLastThreadAddedSerialNumber;
			bool				fTeamNotificationsRequested;
			bool				fTeamNotificationsEnabled;
			bool				fThreadNotificationsRequested;
			bool				fThreadNotificationsEnabled;
			bool				fImageNotificationsRequested;
			bool				fImageNotificationsEnabled;
			bool				fIONotificationsRequested;
			bool				fIONotificationsEnabled;
			bool				fSchedulerNotificationsRequested;
			bool				fWaitObjectNotificationsRequested;
			Thread* volatile	fWaitingProfilerThread;
			bool				fProfilingActive;
			bool				fReentered[SMP_MAX_CPUS];
			CPUProfileData		fCPUData[SMP_MAX_CPUS];
			WaitObject*			fWaitObjectBuffer;
			int32				fWaitObjectCount;
			WaitObjectList		fUsedWaitObjects;
			WaitObjectList		fFreeWaitObjects;
			WaitObjectTable		fWaitObjectTable;
};
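

// Every record written to the transfer buffer consists of a
// system_profiler_event_header (fields: event, cpu, size; see
// _AllocateBuffer() below), followed by the event-specific payload, padded
// to a multiple of 4 bytes. A minimal sketch of walking one contiguous chunk
// of the buffer (consumer side, not part of this file; ProcessEvent() is a
// hypothetical callback):
//
//	uint8* pos = chunkStart;
//	while (pos < chunkEnd) {
//		system_profiler_event_header* header
//			= (system_profiler_event_header*)pos;
//		if (header->event == B_SYSTEM_PROFILER_BUFFER_END)
//			break;
//				// remainder of the ring buffer is unused -- wrap around
//		ProcessEvent(header->event, header + 1, header->size);
//		pos = (uint8*)(header + 1) + header->size;
//	}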


/*!	Notifies the profiler thread when the profiling buffer is full enough.
	The caller must hold fLock.
*/
inline void
SystemProfiler::_MaybeNotifyProfilerThreadLocked()
{
	// If the buffer is full enough, notify the profiler.
	if (fWaitingProfilerThread != NULL && fBufferSize > fBufferCapacity / 2) {
		int cpu = smp_get_current_cpu();
		fReentered[cpu] = true;
			// The unblock below can re-enter the scheduler listener hooks on
			// this CPU; the flag tells them that fLock is already held.

		Thread* profilerThread = fWaitingProfilerThread;
		fWaitingProfilerThread = NULL;

		SpinLocker _(profilerThread->scheduler_lock);
		thread_unblock_locked(profilerThread, B_OK);

		fReentered[cpu] = false;
	}
}


inline void
SystemProfiler::_MaybeNotifyProfilerThread()
{
	if (fWaitingProfilerThread == NULL)
		return;

	InterruptsSpinLocker locker(fLock);

	_MaybeNotifyProfilerThreadLocked();
}


// #pragma mark - SystemProfiler public


SystemProfiler::SystemProfiler(team_id team, const area_info& userAreaInfo,
	const system_profiler_parameters& parameters)
	:
	fTeam(team),
	fUserArea(userAreaInfo.area),
	fKernelArea(-1),
	fAreaSize(userAreaInfo.size),
	fFlags(parameters.flags),
	fStackDepth(parameters.stack_depth),
	fInterval(parameters.interval),
	fHeader(NULL),
	fBufferBase(NULL),
	fBufferCapacity(0),
	fBufferStart(0),
	fBufferSize(0),
	fDroppedEvents(0),
	fLastTeamAddedSerialNumber(0),
	fLastThreadAddedSerialNumber(0),
	fTeamNotificationsRequested(false),
	fTeamNotificationsEnabled(false),
	fThreadNotificationsRequested(false),
	fThreadNotificationsEnabled(false),
	fImageNotificationsRequested(false),
	fImageNotificationsEnabled(false),
	fIONotificationsRequested(false),
	fIONotificationsEnabled(false),
	fSchedulerNotificationsRequested(false),
	fWaitObjectNotificationsRequested(false),
	fWaitingProfilerThread(NULL),
	fProfilingActive(false),
	fWaitObjectBuffer(NULL),
	fWaitObjectCount(0),
	fUsedWaitObjects(),
	fFreeWaitObjects(),
	fWaitObjectTable()
{
	B_INITIALIZE_SPINLOCK(&fLock);

	memset(fReentered, 0, sizeof(fReentered));

	// compute the number of wait objects we want to cache
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		fWaitObjectCount = parameters.locking_lookup_size
			/ (sizeof(WaitObject) + (sizeof(void*) * 3 / 2));
			// Each cached entry costs a WaitObject plus, on average, 1.5
			// pointer-sized hash table slots (the table is sized to
			// fWaitObjectCount * 3 / 2 in Init()).
		if (fWaitObjectCount < MIN_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MIN_WAIT_OBJECT_COUNT;
		if (fWaitObjectCount > MAX_WAIT_OBJECT_COUNT)
			fWaitObjectCount = MAX_WAIT_OBJECT_COUNT;
	}
}


SystemProfiler::~SystemProfiler()
{
	// Wake up the user thread, if it is waiting, and mark profiling
	// inactive.
	InterruptsSpinLocker locker(fLock);
	if (fWaitingProfilerThread != NULL) {
		thread_unblock(fWaitingProfilerThread, B_OK);
		fWaitingProfilerThread = NULL;
	}
	fProfilingActive = false;
	locker.Unlock();

	// stop scheduler listening
	if (fSchedulerNotificationsRequested)
		scheduler_remove_listener(this);

	// stop wait object listening
	if (fWaitObjectNotificationsRequested) {
		InterruptsSpinLocker locker(gWaitObjectListenerLock);
		remove_wait_object_listener(this);
	}

	// deactivate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_UninitTimers, this);

	// cancel notifications
	NotificationManager& notificationManager
		= NotificationManager::Manager();

	// images
	if (fImageNotificationsRequested) {
		fImageNotificationsRequested = false;
		notificationManager.RemoveListener("images", NULL, *this);
	}

	// threads
	if (fThreadNotificationsRequested) {
		fThreadNotificationsRequested = false;
		notificationManager.RemoveListener("threads", NULL, *this);
	}

	// teams
	if (fTeamNotificationsRequested) {
		fTeamNotificationsRequested = false;
		notificationManager.RemoveListener("teams", NULL, *this);
	}

	// I/O
	if (fIONotificationsRequested) {
		fIONotificationsRequested = false;
		notificationManager.RemoveListener("I/O", NULL, *this);
	}

	// delete wait object related allocations
	fWaitObjectTable.Clear();
	delete[] fWaitObjectBuffer;

	// unlock the memory and delete the area
	if (fKernelArea >= 0) {
		unlock_memory(fHeader, fAreaSize, B_READ_DEVICE);
		delete_area(fKernelArea);
		fKernelArea = -1;
	}
}


status_t
SystemProfiler::Init()
{
	// clone the user area
	void* areaBase;
	fKernelArea = clone_area("profiling samples", &areaBase,
		B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA,
		fUserArea);
	if (fKernelArea < 0)
		return fKernelArea;

	// we need the memory locked
	status_t error = lock_memory(areaBase, fAreaSize, B_READ_DEVICE);
	if (error != B_OK) {
		delete_area(fKernelArea);
		fKernelArea = -1;
		return error;
	}

	// the buffer is ready for use
	fHeader = (system_profiler_buffer_header*)areaBase;
	fBufferBase = (uint8*)(fHeader + 1);
	fBufferCapacity = fAreaSize - (fBufferBase - (uint8*)areaBase);
	fHeader->start = 0;
	fHeader->size = 0;

	// allocate the wait object buffer and init the hash table
	if (fWaitObjectCount > 0) {
		fWaitObjectBuffer = new(std::nothrow) WaitObject[fWaitObjectCount];
		if (fWaitObjectBuffer == NULL)
			return B_NO_MEMORY;

		for (int32 i = 0; i < fWaitObjectCount; i++)
			fFreeWaitObjects.Add(fWaitObjectBuffer + i);

		error = fWaitObjectTable.Init(fWaitObjectCount * 3 / 2);
		if (error != B_OK)
			return error;
	}

	// start listening for notifications

	// teams
	NotificationManager& notificationManager
		= NotificationManager::Manager();
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		error = notificationManager.AddListener("teams",
			TEAM_ADDED | TEAM_REMOVED | TEAM_EXEC, *this);
		if (error != B_OK)
			return error;
		fTeamNotificationsRequested = true;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		error = notificationManager.AddListener("threads",
			THREAD_ADDED | THREAD_REMOVED, *this);
		if (error != B_OK)
			return error;
		fThreadNotificationsRequested = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		error = notificationManager.AddListener("images",
			IMAGE_ADDED | IMAGE_REMOVED, *this);
		if (error != B_OK)
			return error;
		fImageNotificationsRequested = true;
	}

	// I/O events
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		error = notificationManager.AddListener("I/O",
			IO_SCHEDULER_ADDED | IO_SCHEDULER_REMOVED
				| IO_SCHEDULER_REQUEST_SCHEDULED | IO_SCHEDULER_REQUEST_FINISHED
				| IO_SCHEDULER_OPERATION_STARTED
				| IO_SCHEDULER_OPERATION_FINISHED,
			*this);
		if (error != B_OK)
			return error;
		fIONotificationsRequested = true;
	}

	// We need to fill the buffer with the initial state of teams, threads,
	// and images.

	// teams
	if ((fFlags & B_SYSTEM_PROFILER_TEAM_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		TeamListIterator iterator;
		while (Team* team = iterator.Next()) {
			locker.Unlock();

			bool added = _TeamAdded(team);

			// release the reference returned by the iterator
			team->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fTeamNotificationsEnabled = true;
	}

	// images
	if ((fFlags & B_SYSTEM_PROFILER_IMAGE_EVENTS) != 0) {
		if (image_iterate_through_images(&_InitialImageIterator, this) != NULL)
			return B_BUFFER_OVERFLOW;
	}

	// threads
	if ((fFlags & B_SYSTEM_PROFILER_THREAD_EVENTS) != 0) {
		InterruptsSpinLocker locker(fLock);

		ThreadListIterator iterator;
		while (Thread* thread = iterator.Next()) {
			locker.Unlock();

			bool added = _ThreadAdded(thread);

			// release the reference returned by the iterator
			thread->ReleaseReference();

			if (!added)
				return B_BUFFER_OVERFLOW;

			locker.Lock();
		}

		fThreadNotificationsEnabled = true;
	}

	fProfilingActive = true;

	// start scheduler and wait object listening
	if ((fFlags & B_SYSTEM_PROFILER_SCHEDULING_EVENTS) != 0) {
		scheduler_add_listener(this);
		fSchedulerNotificationsRequested = true;

		InterruptsSpinLocker waitObjectLocker(gWaitObjectListenerLock);
		add_wait_object_listener(this);
		fWaitObjectNotificationsRequested = true;
		waitObjectLocker.Unlock();

		// fake schedule events for the initially running threads
		int32 cpuCount = smp_get_num_cpus();
		for (int32 i = 0; i < cpuCount; i++) {
			Thread* thread = gCPU[i].running_thread;
			if (thread != NULL)
				ThreadScheduled(thread, thread);
		}
	}

	// I/O scheduling
	if ((fFlags & B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS) != 0) {
		IOSchedulerRoster* roster = IOSchedulerRoster::Default();
		AutoLocker<IOSchedulerRoster> rosterLocker(roster);

		for (IOSchedulerList::ConstIterator it
				= roster->SchedulerList().GetIterator();
			IOScheduler* scheduler = it.Next();) {
			_IOSchedulerAdded(scheduler);
		}

		fIONotificationsEnabled = true;
	}

	// activate the profiling timers on all CPUs
	if ((fFlags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0)
		call_all_cpus(_InitTimers, this);

	return B_OK;
}


status_t
SystemProfiler::NextBuffer(size_t bytesRead, uint64* _droppedEvents)
{
	InterruptsSpinLocker locker(fLock);

	if (fWaitingProfilerThread != NULL || !fProfilingActive
		|| bytesRead > fBufferSize) {
		return B_BAD_VALUE;
	}

	fBufferSize -= bytesRead;
	fBufferStart += bytesRead;
	if (fBufferStart > fBufferCapacity)
		fBufferStart -= fBufferCapacity;
	fHeader->size = fBufferSize;
	fHeader->start = fBufferStart;

	// already enough data in the buffer to return?
	if (fBufferSize > fBufferCapacity / 2)
		return B_OK;

	// Wait until the buffer gets too full or an error or a timeout occurs.
	while (true) {
		Thread* thread = thread_get_current_thread();
		fWaitingProfilerThread = thread;

		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");

		locker.Unlock();

		status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 1000000);

		locker.Lock();

		if (error == B_OK) {
			// the caller has unset fWaitingProfilerThread for us
			break;
		}

		fWaitingProfilerThread = NULL;

		if (error != B_TIMED_OUT)
			return error;

		// just the timeout -- return, if the buffer is not empty
		if (fBufferSize > 0)
			break;
	}

	if (_droppedEvents != NULL) {
		*_droppedEvents = fDroppedEvents;
		fDroppedEvents = 0;
	}

	return B_OK;
}
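

/*	A sketch of the consumer loop this protocol implies (userland side, not
	part of this file). The syscall stub name is assumed from Haiku's
	_user_/_kern_ naming convention; error handling and the helpers are
	hypothetical:

		size_t bytesRead = 0;
		while (keepProfiling) {
			uint64 dropped;
			if (_kern_system_profiler_next_buffer(bytesRead, &dropped) != B_OK)
				break;

			// header->start and header->size (updated by the kernel under
			// fLock) delimit the valid region of the ring buffer; process
			// those events, then report how much was consumed on the next
			// call.
			bytesRead = processEvents(header);
		}
*/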


// #pragma mark - NotificationListener interface


void
SystemProfiler::EventOccurred(NotificationService& service,
	const KMessage* event)
{
	int32 eventCode;
	if (event->FindInt32("event", &eventCode) != B_OK)
		return;

	if (strcmp(service.Name(), "teams") == 0) {
		Team* team = (Team*)event->GetPointer("teamStruct", NULL);
		if (team == NULL)
			return;

		switch (eventCode) {
			case TEAM_ADDED:
				if (fTeamNotificationsEnabled)
					_TeamAdded(team);
				break;

			case TEAM_REMOVED:
				if (team->id == fTeam) {
					// The profiling team is gone -- uninstall the profiler!
					InterruptsSpinLocker locker(sProfilerLock);
					if (sProfiler != this)
						return;

					sProfiler = NULL;
					locker.Unlock();

					ReleaseReference();
					return;
				}

				// When we're still doing the initial team list scan, we are
				// also interested in removals that happened to teams we have
				// already seen.
				if (fTeamNotificationsEnabled
					|| team->serial_number <= fLastTeamAddedSerialNumber) {
					_TeamRemoved(team);
				}
				break;

			case TEAM_EXEC:
				if (fTeamNotificationsEnabled)
					_TeamExec(team);
				break;
		}
	} else if (strcmp(service.Name(), "threads") == 0) {
		Thread* thread = (Thread*)event->GetPointer("threadStruct", NULL);
		if (thread == NULL)
			return;

		switch (eventCode) {
			case THREAD_ADDED:
				if (fThreadNotificationsEnabled)
					_ThreadAdded(thread);
				break;

			case THREAD_REMOVED:
				// When we're still doing the initial thread list scan, we are
				// also interested in removals that happened to threads we have
				// already seen.
				if (fThreadNotificationsEnabled
					|| thread->serial_number <= fLastThreadAddedSerialNumber) {
					_ThreadRemoved(thread);
				}
				break;
		}
	} else if (strcmp(service.Name(), "images") == 0) {
		if (!fImageNotificationsEnabled)
			return;

		struct image* image = (struct image*)event->GetPointer(
			"imageStruct", NULL);
		if (image == NULL)
			return;

		switch (eventCode) {
			case IMAGE_ADDED:
				_ImageAdded(image);
				break;

			case IMAGE_REMOVED:
				_ImageRemoved(image);
				break;
		}
	} else if (strcmp(service.Name(), "I/O") == 0) {
		if (!fIONotificationsEnabled)
			return;

		IOScheduler* scheduler = (IOScheduler*)event->GetPointer("scheduler",
			NULL);
		if (scheduler == NULL)
			return;

		IORequest* request = (IORequest*)event->GetPointer("request", NULL);
		IOOperation* operation = (IOOperation*)event->GetPointer("operation",
			NULL);

		switch (eventCode) {
			case IO_SCHEDULER_ADDED:
				_IOSchedulerAdded(scheduler);
				break;

			case IO_SCHEDULER_REMOVED:
				_IOSchedulerRemoved(scheduler);
				break;

			case IO_SCHEDULER_REQUEST_SCHEDULED:
				_IORequestScheduled(scheduler, request);
				break;

			case IO_SCHEDULER_REQUEST_FINISHED:
				_IORequestFinished(scheduler, request);
				break;

			case IO_SCHEDULER_OPERATION_STARTED:
				_IOOperationStarted(scheduler, request, operation);
				break;

			case IO_SCHEDULER_OPERATION_FINISHED:
				_IOOperationFinished(scheduler, request, operation);
				break;
		}
	}

	_MaybeNotifyProfilerThread();
}


// #pragma mark - SchedulerListener interface


void
SystemProfiler::ThreadEnqueuedInRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_enqueued_in_run_queue* event
		= (system_profiler_thread_enqueued_in_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_enqueued_in_run_queue),
				B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;
	event->priority = thread->priority;

	fHeader->size = fBufferSize;

	// Unblock the profiler thread, if necessary, but don't unblock the thread,
	// if it had been waiting on a condition variable, since then we'd likely
	// deadlock in ConditionVariable::NotifyOne(), as it acquires a static
	// spinlock.
	if (thread->wait.type != THREAD_BLOCK_TYPE_CONDITION_VARIABLE)
		_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadRemovedFromRunQueue(Thread* thread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	system_profiler_thread_removed_from_run_queue* event
		= (system_profiler_thread_removed_from_run_queue*)
			_AllocateBuffer(
				sizeof(system_profiler_thread_removed_from_run_queue),
				B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


void
SystemProfiler::ThreadScheduled(Thread* oldThread, Thread* newThread)
{
	int cpu = smp_get_current_cpu();

	InterruptsSpinLocker locker(fLock, false, !fReentered[cpu]);
		// When re-entering, we already hold the lock.

	// If the old thread starts waiting, handle the wait object.
	if (oldThread->state == B_THREAD_WAITING)
		_WaitObjectUsed((addr_t)oldThread->wait.object, oldThread->wait.type);

	system_profiler_thread_scheduled* event
		= (system_profiler_thread_scheduled*)
			_AllocateBuffer(sizeof(system_profiler_thread_scheduled),
				B_SYSTEM_PROFILER_THREAD_SCHEDULED, cpu, 0);
	if (event == NULL)
		return;

	event->time = system_time_nsecs();
	event->thread = newThread->id;
	event->previous_thread = oldThread->id;
	event->previous_thread_state = oldThread->state;
	event->previous_thread_wait_object_type = oldThread->wait.type;
	event->previous_thread_wait_object = (addr_t)oldThread->wait.object;

	fHeader->size = fBufferSize;

	// unblock the profiler thread, if necessary
	_MaybeNotifyProfilerThreadLocked();
}


// #pragma mark - WaitObjectListener interface


void
SystemProfiler::SemaphoreCreated(sem_id id, const char* name)
{
	_WaitObjectCreated((addr_t)id, THREAD_BLOCK_TYPE_SEMAPHORE);
}


void
SystemProfiler::ConditionVariableInitialized(ConditionVariable* variable)
{
	_WaitObjectCreated((addr_t)variable, THREAD_BLOCK_TYPE_CONDITION_VARIABLE);
}


void
SystemProfiler::MutexInitialized(mutex* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_MUTEX);
}


void
SystemProfiler::RWLockInitialized(rw_lock* lock)
{
	_WaitObjectCreated((addr_t)lock, THREAD_BLOCK_TYPE_RW_LOCK);
}


// #pragma mark - SystemProfiler private


bool
SystemProfiler::_TeamAdded(Team* team)
{
	TeamLocker teamLocker(team);

	size_t nameLen = strlen(team->Name());
	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the team is already gone again.
	// Later this cannot happen, since the team creator notifies us before
	// actually starting the team.
	if (!fTeamNotificationsEnabled && team->state >= TEAM_STATE_DEATH)
		return true;

	if (team->serial_number > fLastTeamAddedSerialNumber)
		fLastTeamAddedSerialNumber = team->serial_number;

	system_profiler_team_added* event = (system_profiler_team_added*)
		_AllocateBuffer(
			sizeof(system_profiler_team_added) + nameLen + 1 + argsLen,
			B_SYSTEM_PROFILER_TEAM_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strcpy(event->name, team->Name());
	event->args_offset = nameLen + 1;
	strcpy(event->name + nameLen + 1, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamRemoved(Team* team)
{
	// TODO: It is possible that we get remove notifications for teams that
	// had already been removed from the global team list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	TeamLocker teamLocker(team);
	InterruptsSpinLocker locker(fLock);

	system_profiler_team_removed* event = (system_profiler_team_removed*)
		_AllocateBuffer(sizeof(system_profiler_team_removed),
			B_SYSTEM_PROFILER_TEAM_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_TeamExec(Team* team)
{
	TeamLocker teamLocker(team);

	size_t argsLen = strlen(team->Args());

	InterruptsSpinLocker locker(fLock);

	system_profiler_team_exec* event = (system_profiler_team_exec*)
		_AllocateBuffer(sizeof(system_profiler_team_exec) + argsLen,
			B_SYSTEM_PROFILER_TEAM_EXEC, 0, 0);
	if (event == NULL)
		return false;

	event->team = team->id;
	strlcpy(event->thread_name, team->main_thread->name,
		sizeof(event->thread_name));
	strcpy(event->args, team->Args());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadAdded(Thread* thread)
{
	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	// During the initial scan check whether the thread is already gone again.
	// Later this cannot happen, since the thread creator notifies us before
	// actually starting the thread.
	if (!fThreadNotificationsEnabled && !thread->IsAlive())
		return true;

	if (thread->serial_number > fLastThreadAddedSerialNumber)
		fLastThreadAddedSerialNumber = thread->serial_number;

	system_profiler_thread_added* event = (system_profiler_thread_added*)
		_AllocateBuffer(sizeof(system_profiler_thread_added),
			B_SYSTEM_PROFILER_THREAD_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;
	strlcpy(event->name, thread->name, sizeof(event->name));

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ThreadRemoved(Thread* thread)
{
	// TODO: It is possible that we get remove notifications for threads that
	// had already been removed from the global thread list when we did the
	// initial scan, but were still in the process of dying. ATM it is not
	// really possible to identify such a case.

	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(fLock);

	system_profiler_thread_removed* event
		= (system_profiler_thread_removed*)
			_AllocateBuffer(sizeof(system_profiler_thread_removed),
				B_SYSTEM_PROFILER_THREAD_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = thread->team->id;
	event->thread = thread->id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageAdded(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_added* event = (system_profiler_image_added*)
		_AllocateBuffer(sizeof(system_profiler_image_added),
			B_SYSTEM_PROFILER_IMAGE_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->info = image->info.basic_info;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_ImageRemoved(struct image* image)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_image_removed* event = (system_profiler_image_removed*)
		_AllocateBuffer(sizeof(system_profiler_image_removed),
			B_SYSTEM_PROFILER_IMAGE_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->team = image->team;
	event->image = image->info.basic_info.id;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerAdded(IOScheduler* scheduler)
{
	size_t nameLen = strlen(scheduler->Name());

	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_added* event
		= (system_profiler_io_scheduler_added*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_added) + nameLen,
			B_SYSTEM_PROFILER_IO_SCHEDULER_ADDED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();
	strcpy(event->name, scheduler->Name());

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOSchedulerRemoved(IOScheduler* scheduler)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_scheduler_removed* event
		= (system_profiler_io_scheduler_removed*)_AllocateBuffer(
			sizeof(system_profiler_io_scheduler_removed),
			B_SYSTEM_PROFILER_IO_SCHEDULER_REMOVED, 0, 0);
	if (event == NULL)
		return false;

	event->scheduler = scheduler->ID();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestScheduled(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_scheduled* event
		= (system_profiler_io_request_scheduled*)_AllocateBuffer(
			sizeof(system_profiler_io_request_scheduled),
			B_SYSTEM_PROFILER_IO_REQUEST_SCHEDULED, 0, 0);
	if (event == NULL)
		return false;

	IORequestOwner* owner = request->Owner();

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->team = owner->team;
	event->thread = owner->thread;
	event->request = request;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();
	event->priority = owner->priority;

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IORequestFinished(IOScheduler* scheduler, IORequest* request)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_request_finished* event
		= (system_profiler_io_request_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_request_finished),
			B_SYSTEM_PROFILER_IO_REQUEST_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationStarted(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_started* event
		= (system_profiler_io_operation_started*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_started),
			B_SYSTEM_PROFILER_IO_OPERATION_STARTED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->offset = request->Offset();
	event->length = request->Length();
	event->write = request->IsWrite();

	fHeader->size = fBufferSize;

	return true;
}


bool
SystemProfiler::_IOOperationFinished(IOScheduler* scheduler, IORequest* request,
	IOOperation* operation)
{
	InterruptsSpinLocker locker(fLock);

	system_profiler_io_operation_finished* event
		= (system_profiler_io_operation_finished*)_AllocateBuffer(
			sizeof(system_profiler_io_operation_finished),
			B_SYSTEM_PROFILER_IO_OPERATION_FINISHED, 0, 0);
	if (event == NULL)
		return false;

	event->time = system_time_nsecs();
	event->scheduler = scheduler->ID();
	event->request = request;
	event->operation = operation;
	event->status = request->Status();
	event->transferred = request->TransferredBytes();

	fHeader->size = fBufferSize;

	return true;
}


void
SystemProfiler::_WaitObjectCreated(addr_t object, uint32 type)
{
	SpinLocker locker(fLock);

	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If found, remove it and add it to the free list. This might sound weird,
	// but it makes sense, since we lazily track *used* wait objects only.
	// I.e. the object in the table is now guaranteed to be obsolete.
	if (waitObject) {
		fWaitObjectTable.RemoveUnchecked(waitObject);
		fUsedWaitObjects.Remove(waitObject);
		fFreeWaitObjects.Add(waitObject, false);
	}
}


void
SystemProfiler::_WaitObjectUsed(addr_t object, uint32 type)
{
	// look up the object
	WaitObjectKey key;
	key.object = object;
	key.type = type;
	WaitObject* waitObject = fWaitObjectTable.Lookup(key);

	// If already known, re-queue it as most recently used and be done.
	if (waitObject != NULL) {
		fUsedWaitObjects.Remove(waitObject);
		fUsedWaitObjects.Add(waitObject);
		return;
	}

	// not known yet -- get the info
	const char* name = NULL;
	const void* referencedObject = NULL;

	switch (type) {
		case THREAD_BLOCK_TYPE_SEMAPHORE:
		{
			name = sem_get_name_unsafe((sem_id)object);
			break;
		}

		case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
		{
			ConditionVariable* variable = (ConditionVariable*)object;
			name = variable->ObjectType();
			referencedObject = variable->Object();
			break;
		}

		case THREAD_BLOCK_TYPE_MUTEX:
		{
			mutex* lock = (mutex*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_RW_LOCK:
		{
			rw_lock* lock = (rw_lock*)object;
			name = lock->name;
			break;
		}

		case THREAD_BLOCK_TYPE_OTHER:
		{
			name = (const char*)(void*)object;
			break;
		}

		case THREAD_BLOCK_TYPE_SNOOZE:
		case THREAD_BLOCK_TYPE_SIGNAL:
		default:
			return;
	}

	// add the event
	size_t nameLen = name != NULL ? strlen(name) : 0;

	system_profiler_wait_object_info* event
		= (system_profiler_wait_object_info*)
			_AllocateBuffer(sizeof(system_profiler_wait_object_info) + nameLen,
				B_SYSTEM_PROFILER_WAIT_OBJECT_INFO, 0, 0);
	if (event == NULL)
		return;

	event->type = type;
	event->object = object;
	event->referenced_object = (addr_t)referencedObject;
	if (name != NULL)
		strcpy(event->name, name);
	else
		event->name[0] = '\0';

	fHeader->size = fBufferSize;

	// add the wait object

	// get a free one or steal the least recently used one
	waitObject = fFreeWaitObjects.RemoveHead();
	if (waitObject == NULL) {
		waitObject = fUsedWaitObjects.RemoveHead();
		fWaitObjectTable.RemoveUnchecked(waitObject);
	}

	waitObject->object = object;
	waitObject->type = type;
	fWaitObjectTable.InsertUnchecked(waitObject);
	fUsedWaitObjects.Add(waitObject);
}


/*static*/ bool
SystemProfiler::_InitialImageIterator(struct image* image, void* cookie)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->fImageNotificationsEnabled = true;
		// Set that here, since the image lock is being held now.

	return !self->_ImageAdded(image);
}


void*
SystemProfiler::_AllocateBuffer(size_t size, int event, int cpu, int count)
{
	size = (size + 3) / 4 * 4;
	size += sizeof(system_profiler_event_header);

	size_t end = fBufferStart + fBufferSize;
	if (end + size > fBufferCapacity) {
		// Buffer is wrapped or needs wrapping.
		if (end < fBufferCapacity) {
			// not wrapped yet, but needed
			system_profiler_event_header* header
				= (system_profiler_event_header*)(fBufferBase + end);
			header->event = B_SYSTEM_PROFILER_BUFFER_END;
			fBufferSize = fBufferCapacity - fBufferStart;
			end = 0;
		} else
			end -= fBufferCapacity;

		if (end + size > fBufferStart) {
			fDroppedEvents++;
			return NULL;
		}
	}

	system_profiler_event_header* header
		= (system_profiler_event_header*)(fBufferBase + end);
	header->event = event;
	header->cpu = cpu;
	header->size = size - sizeof(system_profiler_event_header);

	fBufferSize += size;

	return header + 1;
}
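

// A worked example of the wrap logic above, with illustrative numbers:
// assume fBufferCapacity = 1000, fBufferStart = 900, fBufferSize = 80 and a
// padded allocation size of 40. Then end = 980, and end + size = 1020
// exceeds the capacity, so a B_SYSTEM_PROFILER_BUFFER_END marker is written
// at offset 980, fBufferSize grows to 100 to cover the skipped tail, and the
// event lands at offset 0, leaving fBufferSize at 140. Had offset 0 plus the
// allocation overrun fBufferStart (the oldest unread byte), the event would
// have been counted in fDroppedEvents instead.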


/*static*/ void
SystemProfiler::_InitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;
	self->_ScheduleTimer(cpu);
}


/*static*/ void
SystemProfiler::_UninitTimers(void* cookie, int cpu)
{
	SystemProfiler* self = (SystemProfiler*)cookie;

	CPUProfileData& cpuData = self->fCPUData[cpu];
	cancel_timer(&cpuData.timer);
	cpuData.timerScheduled = false;
}


void
SystemProfiler::_ScheduleTimer(int cpu)
{
	CPUProfileData& cpuData = fCPUData[cpu];
	cpuData.timerEnd = system_time() + fInterval;
	cpuData.timer.user_data = this;
	add_timer(&cpuData.timer, &_ProfilingEvent, fInterval,
		B_ONE_SHOT_RELATIVE_TIMER);
	cpuData.timerScheduled = true;
}


void
SystemProfiler::_DoSample()
{
	Thread* thread = thread_get_current_thread();
	int cpu = thread->cpu->cpu_num;
	CPUProfileData& cpuData = fCPUData[cpu];

	// get the samples
	int32 count = arch_debug_get_stack_trace(cpuData.buffer, fStackDepth, 1,
		0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

	InterruptsSpinLocker locker(fLock);

	system_profiler_samples* event = (system_profiler_samples*)
		_AllocateBuffer(sizeof(system_profiler_samples)
				+ count * sizeof(addr_t),
			B_SYSTEM_PROFILER_SAMPLES, cpu, count);
	if (event == NULL)
		return;

	event->thread = thread->id;
	memcpy(event->samples, cpuData.buffer, count * sizeof(addr_t));

	fHeader->size = fBufferSize;
}


/*static*/ int32
SystemProfiler::_ProfilingEvent(struct timer* timer)
{
	SystemProfiler* self = (SystemProfiler*)timer->user_data;

	self->_DoSample();
	self->_ScheduleTimer(timer->cpu);
		// the timer is one-shot, so re-arm it for the next sample

	return B_HANDLED_INTERRUPT;
}


// #pragma mark - private kernel API


#if SYSTEM_PROFILER

status_t
start_system_profiler(size_t areaSize, uint32 stackDepth, bigtime_t interval)
{
	struct ParameterDeleter {
		ParameterDeleter(area_id area)
			:
			fArea(area),
			fDetached(false)
		{
		}

		~ParameterDeleter()
		{
			if (!fDetached) {
				delete_area(fArea);
				delete sRecordedParameters;
				sRecordedParameters = NULL;
			}
		}

		void Detach()
		{
			fDetached = true;
		}

	private:
		area_id	fArea;
		bool	fDetached;
	};

	void* address;
	area_id area = create_area("kernel profile data", &address,
		B_ANY_KERNEL_ADDRESS, areaSize, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		return area;

	ParameterDeleter parameterDeleter(area);

	sRecordedParameters = new(std::nothrow) system_profiler_parameters;
	if (sRecordedParameters == NULL)
		return B_NO_MEMORY;

	sRecordedParameters->buffer_area = area;
	sRecordedParameters->flags = B_SYSTEM_PROFILER_TEAM_EVENTS
		| B_SYSTEM_PROFILER_THREAD_EVENTS | B_SYSTEM_PROFILER_IMAGE_EVENTS
		| B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS
		| B_SYSTEM_PROFILER_SAMPLING_EVENTS;
	sRecordedParameters->locking_lookup_size = 4096;
	sRecordedParameters->interval = interval;
	sRecordedParameters->stack_depth = stackDepth;

	area_info areaInfo;
	get_area_info(area, &areaInfo);

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(B_SYSTEM_TEAM,
		areaInfo, *sRecordedParameters);
	if (profiler == NULL)
		return B_NO_MEMORY;

	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	status_t error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;

	parameterDeleter.Detach();
	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


void
stop_system_profiler()
{
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL)
		return;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();
}

#endif	// SYSTEM_PROFILER


// #pragma mark - syscalls


status_t
_user_system_profiler_start(struct system_profiler_parameters* userParameters)
{
	// copy params to the kernel
	struct system_profiler_parameters parameters;
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters)
		|| user_memcpy(&parameters, userParameters, sizeof(parameters))
			!= B_OK) {
		return B_BAD_ADDRESS;
	}

	// check the parameters
	team_id team = thread_get_current_thread()->team->id;

	area_info areaInfo;
	status_t error = get_area_info(parameters.buffer_area, &areaInfo);
	if (error != B_OK)
		return error;

	if (areaInfo.team != team)
		return B_BAD_VALUE;

	if ((parameters.flags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0) {
		if (parameters.stack_depth < 1)
			return B_BAD_VALUE;

		if (parameters.interval < B_DEBUG_MIN_PROFILE_INTERVAL)
			parameters.interval = B_DEBUG_MIN_PROFILE_INTERVAL;

		if (parameters.stack_depth > B_DEBUG_STACK_TRACE_DEPTH)
			parameters.stack_depth = B_DEBUG_STACK_TRACE_DEPTH;
	}

	// quick check to see whether we do already have a profiler installed
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;
	locker.Unlock();

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(team, areaInfo,
		parameters);
	if (profiler == NULL)
		return B_NO_MEMORY;
	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler
	locker.Lock();
	if (sProfiler != NULL)
		return B_BUSY;

	profilerDeleter.Detach();
	sProfiler = profiler;
	locker.Unlock();

	return B_OK;
}


status_t
_user_system_profiler_next_buffer(size_t bytesRead, uint64* _droppedEvents)
{
	if (_droppedEvents != NULL && !IS_USER_ADDRESS(_droppedEvents))
		return B_BAD_ADDRESS;

	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	// get a reference to the profiler
	SystemProfiler* profiler = sProfiler;
	BReference<SystemProfiler> reference(profiler);
	locker.Unlock();

	uint64 droppedEvents;
	status_t error = profiler->NextBuffer(bytesRead,
		_droppedEvents != NULL ? &droppedEvents : NULL);
	if (error == B_OK && _droppedEvents != NULL)
		user_memcpy(_droppedEvents, &droppedEvents, sizeof(droppedEvents));

	return error;
}


status_t
_user_system_profiler_stop()
{
	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	profiler->ReleaseReference();

	return B_OK;
}


status_t
_user_system_profiler_recorded(system_profiler_parameters* userParameters)
{
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters))
		return B_BAD_ADDRESS;
	if (sRecordedParameters == NULL)
		return B_ERROR;

#if SYSTEM_PROFILER
	stop_system_profiler();

	// Transfer the area to the userland process

	void* address;
	area_id newArea = transfer_area(sRecordedParameters->buffer_area, &address,
		B_ANY_ADDRESS, team_get_current_team_id(), true);
	if (newArea < 0)
		return newArea;

	status_t status = set_area_protection(newArea, B_READ_AREA);
	if (status == B_OK) {
		sRecordedParameters->buffer_area = newArea;

		status = user_memcpy(userParameters, sRecordedParameters,
			sizeof(system_profiler_parameters));
	}

	if (status != B_OK)
		delete_area(newArea);

	delete sRecordedParameters;
	sRecordedParameters = NULL;

	return status;
#else
	return B_NOT_SUPPORTED;
#endif	// SYSTEM_PROFILER
}