2 * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3 * Distributed under the terms of the MIT License.
7 #include <system_profiler.h>
9 #include <AutoDeleter.h>
10 #include <Referenceable.h>
12 #include <util/AutoLock.h>
14 #include <system_profiler_defs.h>
19 #include <kscheduler.h>
20 #include <listeners.h>
21 #include <Notifications.h>
25 #include <user_debugger.h>
28 #include <arch/debug.h>
30 #include "IOSchedulerRoster.h"
33 // This is the kernel-side implementation of the system profiling support.
34 // A userland team can register as system profiler, providing an area as buffer
35 // for events. Those events are team, thread, and image changes (added/removed),
36 // periodic sampling of the return address stack for each CPU, as well as
37 // scheduling and I/O scheduling events.
43 // minimum/maximum size of the table used for wait object caching
44 #define MIN_WAIT_OBJECT_COUNT 128
45 #define MAX_WAIT_OBJECT_COUNT 1024
// Protects sProfiler (taken e.g. in EventOccurred when the profiling team
// dies) -- presumably also guards sRecordedParameters; confirm against the
// elided portions of this file.
48 static spinlock sProfilerLock
= B_SPINLOCK_INITIALIZER
;
// The single active profiler instance, or NULL when none is installed.
49 static SystemProfiler
* sProfiler
= NULL
;
// Parameters recorded for a kernel-started profiling session (see
// start_system_profiler()).
50 static struct system_profiler_parameters
* sRecordedParameters
= NULL
;
// Kernel-side system profiler. One instance is registered by a userland
// team and fills a user-supplied area with profiling events (team/thread/
// image changes, scheduling, I/O scheduling, periodic stack samples).
// Reference counted (BReferenceable); plugs into the notification,
// scheduler and wait-object listener interfaces.
// NOTE(review): this extraction elides many declaration lines (access
// specifiers, several members such as fTeam/fLock/fBufferStart/fBufferSize)
// -- do not treat the member list below as complete.
53 class SystemProfiler
: public BReferenceable
, private NotificationListener
,
54 private SchedulerListener
, private WaitObjectListener
{
56 SystemProfiler(team_id team
,
57 const area_info
& userAreaInfo
,
58 const system_profiler_parameters
&
62 team_id
TeamID() const { return fTeam
; }
65 status_t
NextBuffer(size_t bytesRead
,
66 uint64
* _droppedEvents
);
69 virtual void EventOccurred(NotificationService
& service
,
70 const KMessage
* event
);
72 virtual void ThreadEnqueuedInRunQueue(Thread
* thread
);
73 virtual void ThreadRemovedFromRunQueue(Thread
* thread
);
74 virtual void ThreadScheduled(Thread
* oldThread
,
77 virtual void SemaphoreCreated(sem_id id
,
79 virtual void ConditionVariableInitialized(
80 ConditionVariable
* variable
);
81 virtual void MutexInitialized(mutex
* lock
);
82 virtual void RWLockInitialized(rw_lock
* lock
);
84 bool _TeamAdded(Team
* team
);
85 bool _TeamRemoved(Team
* team
);
86 bool _TeamExec(Team
* team
);
88 bool _ThreadAdded(Thread
* thread
);
89 bool _ThreadRemoved(Thread
* thread
);
91 bool _ImageAdded(struct image
* image
);
92 bool _ImageRemoved(struct image
* image
);
94 bool _IOSchedulerAdded(IOScheduler
* scheduler
);
95 bool _IOSchedulerRemoved(IOScheduler
* scheduler
);
96 bool _IORequestScheduled(IOScheduler
* scheduler
,
98 bool _IORequestFinished(IOScheduler
* scheduler
,
100 bool _IOOperationStarted(IOScheduler
* scheduler
,
101 IORequest
* request
, IOOperation
* operation
);
102 bool _IOOperationFinished(IOScheduler
* scheduler
,
103 IORequest
* request
, IOOperation
* operation
);
105 void _WaitObjectCreated(addr_t object
, uint32 type
);
106 void _WaitObjectUsed(addr_t object
, uint32 type
);
108 inline void _MaybeNotifyProfilerThreadLocked();
109 inline void _MaybeNotifyProfilerThread();
111 static bool _InitialImageIterator(struct image
* image
,
114 void* _AllocateBuffer(size_t size
, int event
, int cpu
,
117 static void _InitTimers(void* cookie
, int cpu
);
118 static void _UninitTimers(void* cookie
, int cpu
);
119 void _ScheduleTimer(int cpu
);
123 static int32
_ProfilingEvent(struct timer
* timer
);
// Per-CPU sampling state: the stack-trace scratch buffer plus timer
// bookkeeping (timer/timerEnd/timerScheduled members partly elided here).
126 struct CPUProfileData
{
130 addr_t buffer
[B_DEBUG_STACK_TRACE_DEPTH
];
// Key for the wait-object cache: (object address, block type) pair.
133 struct WaitObjectKey
{
138 struct WaitObject
: DoublyLinkedListLinkImpl
<WaitObject
>,
140 struct WaitObject
* hash_link
;
// Hash-table glue for BOpenHashTable<> over WaitObject entries.
143 struct WaitObjectTableDefinition
{
144 typedef WaitObjectKey KeyType
;
145 typedef WaitObject ValueType
;
147 size_t HashKey(const WaitObjectKey
& key
) const
149 return (size_t)key
.object
^ (size_t)key
.type
;
152 size_t Hash(const WaitObject
* value
) const
154 return HashKey(*value
);
157 bool Compare(const WaitObjectKey
& key
,
158 const WaitObject
* value
) const
160 return value
->type
== key
.type
161 && value
->object
== key
.object
;
164 WaitObject
*& GetLink(WaitObject
* value
) const
166 return value
->hash_link
;
170 typedef DoublyLinkedList
<WaitObject
> WaitObjectList
;
171 typedef BOpenHashTable
<WaitObjectTableDefinition
> WaitObjectTable
;
// Buffer/state members. fHeader points into the cloned user area;
// the ring buffer proper starts right after it (see Init()).
182 system_profiler_buffer_header
* fHeader
;
184 size_t fBufferCapacity
;
187 uint64 fDroppedEvents
;
188 int64 fLastTeamAddedSerialNumber
;
189 int64 fLastThreadAddedSerialNumber
;
190 bool fTeamNotificationsRequested
;
191 bool fTeamNotificationsEnabled
;
192 bool fThreadNotificationsRequested
;
193 bool fThreadNotificationsEnabled
;
194 bool fImageNotificationsRequested
;
195 bool fImageNotificationsEnabled
;
196 bool fIONotificationsRequested
;
197 bool fIONotificationsEnabled
;
198 bool fSchedulerNotificationsRequested
;
199 bool fWaitObjectNotificationsRequested
;
// Thread blocked in NextBuffer() waiting for the buffer to fill, or NULL.
200 Thread
* volatile fWaitingProfilerThread
;
201 bool fProfilingActive
;
// Per-CPU re-entrancy flags: set while unblocking the profiler thread so
// the scheduler hooks triggered by that unblock skip re-locking fLock.
202 bool fReentered
[SMP_MAX_CPUS
];
203 CPUProfileData fCPUData
[SMP_MAX_CPUS
];
204 WaitObject
* fWaitObjectBuffer
;
205 int32 fWaitObjectCount
;
206 WaitObjectList fUsedWaitObjects
;
207 WaitObjectList fFreeWaitObjects
;
208 WaitObjectTable fWaitObjectTable
;
212 /*! Notifies the profiler thread when the profiling buffer is full enough.
213 The caller must hold fLock.
216 SystemProfiler::_MaybeNotifyProfilerThreadLocked()
218 // If the buffer is full enough, notify the profiler.
219 if (fWaitingProfilerThread
!= NULL
&& fBufferSize
> fBufferCapacity
/ 2) {
220 int cpu
= smp_get_current_cpu();
// Flag this CPU as re-entered: the unblock below can invoke our own
// scheduler hooks, which must not try to re-acquire fLock (they check
// fReentered[cpu] -- see ThreadEnqueuedInRunQueue()).
221 fReentered
[cpu
] = true;
223 Thread
* profilerThread
= fWaitingProfilerThread
;
224 fWaitingProfilerThread
= NULL
;
// Wake the profiler thread under its scheduler lock.
226 SpinLocker
_(profilerThread
->scheduler_lock
);
227 thread_unblock_locked(profilerThread
, B_OK
);
229 fReentered
[cpu
] = false;
// Unlocked variant: bails out early when no profiler thread is waiting
// (NOTE(review): the early-return line after the NULL check is elided in
// this extraction), otherwise acquires fLock and delegates to the locked
// helper above.
235 SystemProfiler::_MaybeNotifyProfilerThread()
237 if (fWaitingProfilerThread
== NULL
)
240 InterruptsSpinLocker
locker(fLock
);
242 _MaybeNotifyProfilerThreadLocked();
246 // #pragma mark - SystemProfiler public
// Constructor: records the target team, the user-supplied area and the
// requested profiling parameters; all heavy setup (area cloning, listener
// registration) happens later in Init().
249 SystemProfiler::SystemProfiler(team_id team
, const area_info
& userAreaInfo
,
250 const system_profiler_parameters
& parameters
)
253 fUserArea(userAreaInfo
.area
),
255 fAreaSize(userAreaInfo
.size
),
256 fFlags(parameters
.flags
),
257 fStackDepth(parameters
.stack_depth
),
258 fInterval(parameters
.interval
),
265 fLastTeamAddedSerialNumber(0),
266 fLastThreadAddedSerialNumber(0),
267 fTeamNotificationsRequested(false),
268 fTeamNotificationsEnabled(false),
269 fThreadNotificationsRequested(false),
270 fThreadNotificationsEnabled(false),
271 fImageNotificationsRequested(false),
272 fImageNotificationsEnabled(false),
273 fIONotificationsRequested(false),
274 fIONotificationsEnabled(false),
275 fSchedulerNotificationsRequested(false),
276 fWaitObjectNotificationsRequested(false),
277 fWaitingProfilerThread(NULL
),
278 fWaitObjectBuffer(NULL
),
284 B_INITIALIZE_SPINLOCK(&fLock
);
286 memset(fReentered
, 0, sizeof(fReentered
));
288 // compute the number of wait objects we want to cache
289 if ((fFlags
& B_SYSTEM_PROFILER_SCHEDULING_EVENTS
) != 0) {
// Size the cache from the user's lookup budget, accounting for the
// hash table overhead (table is sized at count * 3 / 2 in Init()).
290 fWaitObjectCount
= parameters
.locking_lookup_size
291 / (sizeof(WaitObject
) + (sizeof(void*) * 3 / 2));
292 if (fWaitObjectCount
< MIN_WAIT_OBJECT_COUNT
)
293 fWaitObjectCount
= MIN_WAIT_OBJECT_COUNT
;
294 if (fWaitObjectCount
> MAX_WAIT_OBJECT_COUNT
)
295 fWaitObjectCount
= MAX_WAIT_OBJECT_COUNT
;
// Destructor: tears down everything Init() set up -- wakes a waiting
// profiler thread, unregisters all listeners/timers, frees the wait-object
// cache and releases the cloned area.
300 SystemProfiler::~SystemProfiler()
302 // Wake up the user thread, if it is waiting, and mark profiling
// inactive (fProfilingActive is cleared below).
304 InterruptsSpinLocker
locker(fLock
);
305 if (fWaitingProfilerThread
!= NULL
) {
306 thread_unblock(fWaitingProfilerThread
, B_OK
);
307 fWaitingProfilerThread
= NULL
;
309 fProfilingActive
= false;
312 // stop scheduler listening
313 if (fSchedulerNotificationsRequested
)
314 scheduler_remove_listener(this);
316 // stop wait object listening
317 if (fWaitObjectNotificationsRequested
) {
318 InterruptsSpinLocker
locker(gWaitObjectListenerLock
);
319 remove_wait_object_listener(this);
322 // deactivate the profiling timers on all CPUs
323 if ((fFlags
& B_SYSTEM_PROFILER_SAMPLING_EVENTS
) != 0)
324 call_all_cpus(_UninitTimers
, this);
326 // cancel notifications
327 NotificationManager
& notificationManager
328 = NotificationManager::Manager();
331 if (fImageNotificationsRequested
) {
332 fImageNotificationsRequested
= false;
333 notificationManager
.RemoveListener("images", NULL
, *this);
337 if (fThreadNotificationsRequested
) {
338 fThreadNotificationsRequested
= false;
339 notificationManager
.RemoveListener("threads", NULL
, *this);
343 if (fTeamNotificationsRequested
) {
344 fTeamNotificationsRequested
= false;
345 notificationManager
.RemoveListener("teams", NULL
, *this);
349 if (fIONotificationsRequested
) {
350 fIONotificationsRequested
= false;
351 notificationManager
.RemoveListener("I/O", NULL
, *this);
354 // delete wait object related allocations
355 fWaitObjectTable
.Clear();
356 delete[] fWaitObjectBuffer
;
358 // unlock the memory and delete the area
359 if (fKernelArea
>= 0) {
360 unlock_memory(fHeader
, fAreaSize
, B_READ_DEVICE
);
361 delete_area(fKernelArea
);
// Second-stage setup: clones and locks the user area, initializes the
// ring buffer and wait-object cache, subscribes to all requested
// notification services, dumps the initial team/image/thread state into
// the buffer, and finally starts scheduler/wait-object listening and the
// per-CPU sampling timers. Returns a status_t (error-return lines are
// largely elided in this extraction).
368 SystemProfiler::Init()
370 // clone the user area
372 fKernelArea
= clone_area("profiling samples", &areaBase
,
373 B_ANY_KERNEL_ADDRESS
, B_READ_AREA
| B_WRITE_AREA
,
378 // we need the memory locked
379 status_t error
= lock_memory(areaBase
, fAreaSize
, B_READ_DEVICE
);
381 delete_area(fKernelArea
);
386 // the buffer is ready for use
387 fHeader
= (system_profiler_buffer_header
*)areaBase
;
388 fBufferBase
= (uint8
*)(fHeader
+ 1);
389 fBufferCapacity
= fAreaSize
- (fBufferBase
- (uint8
*)areaBase
);
393 // allocate the wait object buffer and init the hash table
394 if (fWaitObjectCount
> 0) {
395 fWaitObjectBuffer
= new(std::nothrow
) WaitObject
[fWaitObjectCount
];
396 if (fWaitObjectBuffer
== NULL
)
399 for (int32 i
= 0; i
< fWaitObjectCount
; i
++)
400 fFreeWaitObjects
.Add(fWaitObjectBuffer
+ i
);
402 error
= fWaitObjectTable
.Init(fWaitObjectCount
* 3 / 2);
407 // start listening for notifications
410 NotificationManager
& notificationManager
411 = NotificationManager::Manager();
412 if ((fFlags
& B_SYSTEM_PROFILER_TEAM_EVENTS
) != 0) {
413 error
= notificationManager
.AddListener("teams",
414 TEAM_ADDED
| TEAM_REMOVED
| TEAM_EXEC
, *this);
417 fTeamNotificationsRequested
= true;
421 if ((fFlags
& B_SYSTEM_PROFILER_THREAD_EVENTS
) != 0) {
422 error
= notificationManager
.AddListener("threads",
423 THREAD_ADDED
| THREAD_REMOVED
, *this);
426 fThreadNotificationsRequested
= true;
430 if ((fFlags
& B_SYSTEM_PROFILER_IMAGE_EVENTS
) != 0) {
431 error
= notificationManager
.AddListener("images",
432 IMAGE_ADDED
| IMAGE_REMOVED
, *this);
435 fImageNotificationsRequested
= true;
439 if ((fFlags
& B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS
) != 0) {
440 error
= notificationManager
.AddListener("I/O",
441 IO_SCHEDULER_ADDED
| IO_SCHEDULER_REMOVED
442 | IO_SCHEDULER_REQUEST_SCHEDULED
| IO_SCHEDULER_REQUEST_FINISHED
443 | IO_SCHEDULER_OPERATION_STARTED
444 | IO_SCHEDULER_OPERATION_FINISHED
,
448 fIONotificationsRequested
= true;
451 // We need to fill the buffer with the initial state of teams, threads,
// and images; notifications are only enabled afterwards, so the serial
// numbers recorded during the scan disambiguate add/remove races.
455 if ((fFlags
& B_SYSTEM_PROFILER_TEAM_EVENTS
) != 0) {
456 InterruptsSpinLocker
locker(fLock
);
458 TeamListIterator iterator
;
459 while (Team
* team
= iterator
.Next()) {
462 bool added
= _TeamAdded(team
);
464 // release the reference returned by the iterator
465 team
->ReleaseReference();
468 return B_BUFFER_OVERFLOW
;
473 fTeamNotificationsEnabled
= true;
477 if ((fFlags
& B_SYSTEM_PROFILER_IMAGE_EVENTS
) != 0) {
478 if (image_iterate_through_images(&_InitialImageIterator
, this) != NULL
)
479 return B_BUFFER_OVERFLOW
;
483 if ((fFlags
& B_SYSTEM_PROFILER_THREAD_EVENTS
) != 0) {
484 InterruptsSpinLocker
locker(fLock
);
486 ThreadListIterator iterator
;
487 while (Thread
* thread
= iterator
.Next()) {
490 bool added
= _ThreadAdded(thread
);
492 // release the reference returned by the iterator
493 thread
->ReleaseReference();
496 return B_BUFFER_OVERFLOW
;
501 fThreadNotificationsEnabled
= true;
504 fProfilingActive
= true;
506 // start scheduler and wait object listening
507 if ((fFlags
& B_SYSTEM_PROFILER_SCHEDULING_EVENTS
) != 0) {
508 scheduler_add_listener(this);
509 fSchedulerNotificationsRequested
= true;
511 InterruptsSpinLocker
waitObjectLocker(gWaitObjectListenerLock
);
512 add_wait_object_listener(this);
513 fWaitObjectNotificationsRequested
= true;
514 waitObjectLocker
.Unlock();
516 // fake schedule events for the initially running threads
517 int32 cpuCount
= smp_get_num_cpus();
518 for (int32 i
= 0; i
< cpuCount
; i
++) {
519 Thread
* thread
= gCPU
[i
].running_thread
;
521 ThreadScheduled(thread
, thread
);
526 if ((fFlags
& B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS
) != 0) {
527 IOSchedulerRoster
* roster
= IOSchedulerRoster::Default();
528 AutoLocker
<IOSchedulerRoster
> rosterLocker(roster
);
530 for (IOSchedulerList::ConstIterator it
531 = roster
->SchedulerList().GetIterator();
532 IOScheduler
* scheduler
= it
.Next();) {
533 _IOSchedulerAdded(scheduler
);
536 fIONotificationsEnabled
= true;
539 // activate the profiling timers on all CPUs
540 if ((fFlags
& B_SYSTEM_PROFILER_SAMPLING_EVENTS
) != 0)
541 call_all_cpus(_InitTimers
, this);
// Consumes bytesRead from the ring buffer on behalf of the userland
// profiler and blocks (interruptibly, 1s timeout) until the buffer is at
// least half full again. Optionally reports the dropped-event count via
// _droppedEvents. Error-return lines are elided in this extraction.
548 SystemProfiler::NextBuffer(size_t bytesRead
, uint64
* _droppedEvents
)
550 InterruptsSpinLocker
locker(fLock
);
// Reject concurrent readers, a stopped profiler, or an over-consume.
552 if (fWaitingProfilerThread
!= NULL
|| !fProfilingActive
553 || bytesRead
> fBufferSize
) {
// Advance the ring buffer's read position and mirror it to the
// user-visible header.
557 fBufferSize
-= bytesRead
;
558 fBufferStart
+= bytesRead
;
559 if (fBufferStart
> fBufferCapacity
)
560 fBufferStart
-= fBufferCapacity
;
561 fHeader
->size
= fBufferSize
;
562 fHeader
->start
= fBufferStart
;
564 // already enough data in the buffer to return?
565 if (fBufferSize
> fBufferCapacity
/ 2)
568 // Wait until the buffer gets too full or an error or a timeout occurs.
570 Thread
* thread
= thread_get_current_thread();
571 fWaitingProfilerThread
= thread
;
573 thread_prepare_to_block(thread
, B_CAN_INTERRUPT
,
574 THREAD_BLOCK_TYPE_OTHER
, "system profiler buffer");
578 status_t error
= thread_block_with_timeout(B_RELATIVE_TIMEOUT
, 1000000);
583 // the caller has unset fWaitingProfilerThread for us
587 fWaitingProfilerThread
= NULL
;
589 if (error
!= B_TIMED_OUT
)
592 // just the timeout -- return, if the buffer is not empty
597 if (_droppedEvents
!= NULL
) {
598 *_droppedEvents
= fDroppedEvents
;
606 // #pragma mark - NotificationListener interface
// NotificationListener hook: dispatches "teams"/"threads"/"images"/"I/O"
// service events to the matching _XxxAdded/_XxxRemoved emitters, then
// wakes the profiler thread if the buffer is full enough.
610 SystemProfiler::EventOccurred(NotificationService
& service
,
611 const KMessage
* event
)
614 if (event
->FindInt32("event", &eventCode
) != B_OK
)
617 if (strcmp(service
.Name(), "teams") == 0) {
618 Team
* team
= (Team
*)event
->GetPointer("teamStruct", NULL
);
624 if (fTeamNotificationsEnabled
)
629 if (team
->id
== fTeam
) {
630 // The profiling team is gone -- uninstall the profiler!
631 InterruptsSpinLocker
locker(sProfilerLock
);
632 if (sProfiler
!= this)
642 // When we're still doing the initial team list scan, we are
643 // also interested in removals that happened to teams we have
// already recorded (identified by serial number).
645 if (fTeamNotificationsEnabled
646 || team
->serial_number
<= fLastTeamAddedSerialNumber
) {
652 if (fTeamNotificationsEnabled
)
656 } else if (strcmp(service
.Name(), "threads") == 0) {
657 Thread
* thread
= (Thread
*)event
->GetPointer("threadStruct", NULL
);
663 if (fThreadNotificationsEnabled
)
664 _ThreadAdded(thread
);
668 // When we're still doing the initial thread list scan, we are
669 // also interested in removals that happened to threads we have
// already recorded (identified by serial number).
671 if (fThreadNotificationsEnabled
672 || thread
->serial_number
<= fLastThreadAddedSerialNumber
) {
673 _ThreadRemoved(thread
);
677 } else if (strcmp(service
.Name(), "images") == 0) {
678 if (!fImageNotificationsEnabled
)
681 struct image
* image
= (struct image
*)event
->GetPointer(
682 "imageStruct", NULL
);
692 _ImageRemoved(image
);
695 } else if (strcmp(service
.Name(), "I/O") == 0) {
696 if (!fIONotificationsEnabled
)
699 IOScheduler
* scheduler
= (IOScheduler
*)event
->GetPointer("scheduler",
701 if (scheduler
== NULL
)
// request/operation may legitimately be NULL for scheduler-level events.
704 IORequest
* request
= (IORequest
*)event
->GetPointer("request", NULL
);
705 IOOperation
* operation
= (IOOperation
*)event
->GetPointer("operation",
709 case IO_SCHEDULER_ADDED
:
710 _IOSchedulerAdded(scheduler
);
713 case IO_SCHEDULER_REMOVED
:
714 _IOSchedulerRemoved(scheduler
);
717 case IO_SCHEDULER_REQUEST_SCHEDULED
:
718 _IORequestScheduled(scheduler
, request
);
721 case IO_SCHEDULER_REQUEST_FINISHED
:
722 _IORequestFinished(scheduler
, request
);
725 case IO_SCHEDULER_OPERATION_STARTED
:
726 _IOOperationStarted(scheduler
, request
, operation
);
729 case IO_SCHEDULER_OPERATION_FINISHED
:
730 _IOOperationFinished(scheduler
, request
, operation
);
// Notify outside the event-specific handling; takes fLock itself.
735 _MaybeNotifyProfilerThread();
739 // #pragma mark - SchedulerListener interface
// SchedulerListener hook: emits a "thread enqueued in run queue" event.
// May be called re-entrantly from _MaybeNotifyProfilerThreadLocked()'s
// unblock, hence the conditional locking on fReentered[cpu].
743 SystemProfiler::ThreadEnqueuedInRunQueue(Thread
* thread
)
745 int cpu
= smp_get_current_cpu();
747 InterruptsSpinLocker
locker(fLock
, false, !fReentered
[cpu
]);
748 // When re-entering, we already hold the lock.
750 system_profiler_thread_enqueued_in_run_queue
* event
751 = (system_profiler_thread_enqueued_in_run_queue
*)
753 sizeof(system_profiler_thread_enqueued_in_run_queue
),
754 B_SYSTEM_PROFILER_THREAD_ENQUEUED_IN_RUN_QUEUE
, cpu
, 0);
758 event
->time
= system_time_nsecs();
759 event
->thread
= thread
->id
;
760 event
->priority
= thread
->priority
;
// Publish the new buffer fill level to userland.
762 fHeader
->size
= fBufferSize
;
764 // Unblock the profiler thread, if necessary, but don't unblock the thread,
765 // if it had been waiting on a condition variable, since then we'd likely
766 // deadlock in ConditionVariable::NotifyOne(), as it acquires a static
// spinlock.
768 if (thread
->wait
.type
!= THREAD_BLOCK_TYPE_CONDITION_VARIABLE
)
769 _MaybeNotifyProfilerThreadLocked();
// SchedulerListener hook: emits a "thread removed from run queue" event.
// Same re-entrancy-aware locking as ThreadEnqueuedInRunQueue().
774 SystemProfiler::ThreadRemovedFromRunQueue(Thread
* thread
)
776 int cpu
= smp_get_current_cpu();
778 InterruptsSpinLocker
locker(fLock
, false, !fReentered
[cpu
]);
779 // When re-entering, we already hold the lock.
781 system_profiler_thread_removed_from_run_queue
* event
782 = (system_profiler_thread_removed_from_run_queue
*)
784 sizeof(system_profiler_thread_removed_from_run_queue
),
785 B_SYSTEM_PROFILER_THREAD_REMOVED_FROM_RUN_QUEUE
, cpu
, 0);
789 event
->time
= system_time_nsecs();
790 event
->thread
= thread
->id
;
// Publish the new buffer fill level to userland.
792 fHeader
->size
= fBufferSize
;
794 // unblock the profiler thread, if necessary
795 _MaybeNotifyProfilerThreadLocked();
// SchedulerListener hook: emits a context-switch event (oldThread ->
// newThread), recording the old thread's state and wait object so the
// analyzer can attribute wait time.
800 SystemProfiler::ThreadScheduled(Thread
* oldThread
, Thread
* newThread
)
802 int cpu
= smp_get_current_cpu();
804 InterruptsSpinLocker
locker(fLock
, false, !fReentered
[cpu
]);
805 // When re-entering, we already hold the lock.
807 // If the old thread starts waiting, handle the wait object.
808 if (oldThread
->state
== B_THREAD_WAITING
)
809 _WaitObjectUsed((addr_t
)oldThread
->wait
.object
, oldThread
->wait
.type
);
811 system_profiler_thread_scheduled
* event
812 = (system_profiler_thread_scheduled
*)
813 _AllocateBuffer(sizeof(system_profiler_thread_scheduled
),
814 B_SYSTEM_PROFILER_THREAD_SCHEDULED
, cpu
, 0);
818 event
->time
= system_time_nsecs();
819 event
->thread
= newThread
->id
;
820 event
->previous_thread
= oldThread
->id
;
821 event
->previous_thread_state
= oldThread
->state
;
822 event
->previous_thread_wait_object_type
= oldThread
->wait
.type
;
823 event
->previous_thread_wait_object
= (addr_t
)oldThread
->wait
.object
;
// Publish the new buffer fill level to userland.
825 fHeader
->size
= fBufferSize
;
827 // unblock the profiler thread, if necessary
828 _MaybeNotifyProfilerThreadLocked();
832 // #pragma mark - WaitObjectListener interface
836 SystemProfiler::SemaphoreCreated(sem_id id
, const char* name
)
838 _WaitObjectCreated((addr_t
)id
, THREAD_BLOCK_TYPE_SEMAPHORE
);
843 SystemProfiler::ConditionVariableInitialized(ConditionVariable
* variable
)
845 _WaitObjectCreated((addr_t
)variable
, THREAD_BLOCK_TYPE_CONDITION_VARIABLE
);
850 SystemProfiler::MutexInitialized(mutex
* lock
)
852 _WaitObjectCreated((addr_t
)lock
, THREAD_BLOCK_TYPE_MUTEX
);
857 SystemProfiler::RWLockInitialized(rw_lock
* lock
)
859 _WaitObjectCreated((addr_t
)lock
, THREAD_BLOCK_TYPE_RW_LOCK
);
863 // #pragma mark - SystemProfiler private
// Emits a "team added" event carrying the team's name and arguments
// (packed into one variable-length name field; args start at
// args_offset). Returns a bool (success line elided in this extraction).
867 SystemProfiler::_TeamAdded(Team
* team
)
869 TeamLocker
teamLocker(team
);
// Compute string lengths before taking fLock to keep the critical
// section short.
871 size_t nameLen
= strlen(team
->Name());
872 size_t argsLen
= strlen(team
->Args());
874 InterruptsSpinLocker
locker(fLock
);
876 // During the initial scan check whether the team is already gone again.
877 // Later this cannot happen, since the team creator notifies us before
878 // actually starting the team.
879 if (!fTeamNotificationsEnabled
&& team
->state
>= TEAM_STATE_DEATH
)
// Track the highest serial number seen so removals during the initial
// scan can be matched (see EventOccurred()).
882 if (team
->serial_number
> fLastTeamAddedSerialNumber
)
883 fLastTeamAddedSerialNumber
= team
->serial_number
;
885 system_profiler_team_added
* event
= (system_profiler_team_added
*)
887 sizeof(system_profiler_team_added
) + nameLen
+ 1 + argsLen
,
888 B_SYSTEM_PROFILER_TEAM_ADDED
, 0, 0);
892 event
->team
= team
->id
;
893 strcpy(event
->name
, team
->Name());
894 event
->args_offset
= nameLen
+ 1;
895 strcpy(event
->name
+ nameLen
+ 1, team
->Args());
// Publish the new buffer fill level to userland.
897 fHeader
->size
= fBufferSize
;
// Emits a "team removed" event for the given team.
904 SystemProfiler::_TeamRemoved(Team
* team
)
906 // TODO: It is possible that we get remove notifications for teams that
907 // had already been removed from the global team list when we did the
908 // initial scan, but were still in the process of dying. ATM it is not
909 // really possible to identify such a case.
911 TeamLocker
teamLocker(team
);
912 InterruptsSpinLocker
locker(fLock
);
914 system_profiler_team_removed
* event
= (system_profiler_team_removed
*)
915 _AllocateBuffer(sizeof(system_profiler_team_removed
),
916 B_SYSTEM_PROFILER_TEAM_REMOVED
, 0, 0);
920 event
->team
= team
->id
;
// Publish the new buffer fill level to userland.
922 fHeader
->size
= fBufferSize
;
// Emits a "team exec" event: the team replaced its image, so its main
// thread name and arguments changed.
929 SystemProfiler::_TeamExec(Team
* team
)
931 TeamLocker
teamLocker(team
);
933 size_t argsLen
= strlen(team
->Args());
935 InterruptsSpinLocker
locker(fLock
);
937 system_profiler_team_exec
* event
= (system_profiler_team_exec
*)
938 _AllocateBuffer(sizeof(system_profiler_team_exec
) + argsLen
,
939 B_SYSTEM_PROFILER_TEAM_EXEC
, 0, 0);
943 event
->team
= team
->id
;
// thread_name is a fixed-size field -- truncate safely with strlcpy.
944 strlcpy(event
->thread_name
, team
->main_thread
->name
,
945 sizeof(event
->thread_name
));
946 strcpy(event
->args
, team
->Args());
// Publish the new buffer fill level to userland.
948 fHeader
->size
= fBufferSize
;
// Emits a "thread added" event; mirrors _TeamAdded()'s initial-scan and
// serial-number handling for threads.
955 SystemProfiler::_ThreadAdded(Thread
* thread
)
957 ThreadLocker
threadLocker(thread
);
958 InterruptsSpinLocker
locker(fLock
);
960 // During the initial scan check whether the team is already gone again.
961 // Later this cannot happen, since the team creator notifies us before
962 // actually starting the thread.
963 if (!fThreadNotificationsEnabled
&& !thread
->IsAlive())
966 if (thread
->serial_number
> fLastThreadAddedSerialNumber
)
967 fLastThreadAddedSerialNumber
= thread
->serial_number
;
969 system_profiler_thread_added
* event
= (system_profiler_thread_added
*)
970 _AllocateBuffer(sizeof(system_profiler_thread_added
),
971 B_SYSTEM_PROFILER_THREAD_ADDED
, 0, 0);
975 event
->team
= thread
->team
->id
;
976 event
->thread
= thread
->id
;
977 strlcpy(event
->name
, thread
->name
, sizeof(event
->name
));
// Publish the new buffer fill level to userland.
979 fHeader
->size
= fBufferSize
;
// Emits a "thread removed" event for the given thread.
986 SystemProfiler::_ThreadRemoved(Thread
* thread
)
988 // TODO: It is possible that we get remove notifications for threads that
989 // had already been removed from the global thread list when we did the
990 // initial scan, but were still in the process of dying. ATM it is not
991 // really possible to identify such a case.
993 ThreadLocker
threadLocker(thread
);
994 InterruptsSpinLocker
locker(fLock
);
996 system_profiler_thread_removed
* event
997 = (system_profiler_thread_removed
*)
998 _AllocateBuffer(sizeof(system_profiler_thread_removed
),
999 B_SYSTEM_PROFILER_THREAD_REMOVED
, 0, 0);
1003 event
->team
= thread
->team
->id
;
1004 event
->thread
= thread
->id
;
// Publish the new buffer fill level to userland.
1006 fHeader
->size
= fBufferSize
;
// Emits an "image added" event carrying the image's basic_info.
1013 SystemProfiler::_ImageAdded(struct image
* image
)
1015 InterruptsSpinLocker
locker(fLock
);
1017 system_profiler_image_added
* event
= (system_profiler_image_added
*)
1018 _AllocateBuffer(sizeof(system_profiler_image_added
),
1019 B_SYSTEM_PROFILER_IMAGE_ADDED
, 0, 0);
1023 event
->team
= image
->team
;
1024 event
->info
= image
->info
.basic_info
;
// Publish the new buffer fill level to userland.
1026 fHeader
->size
= fBufferSize
;
// Emits an "image removed" event; only the image id is recorded.
1033 SystemProfiler::_ImageRemoved(struct image
* image
)
1035 InterruptsSpinLocker
locker(fLock
);
1037 system_profiler_image_removed
* event
= (system_profiler_image_removed
*)
1038 _AllocateBuffer(sizeof(system_profiler_image_removed
),
1039 B_SYSTEM_PROFILER_IMAGE_REMOVED
, 0, 0);
1043 event
->team
= image
->team
;
1044 event
->image
= image
->info
.basic_info
.id
;
// Publish the new buffer fill level to userland.
1046 fHeader
->size
= fBufferSize
;
// Emits an "I/O scheduler added" event with the scheduler's id and name.
1053 SystemProfiler::_IOSchedulerAdded(IOScheduler
* scheduler
)
// Measure the name before taking fLock.
1055 size_t nameLen
= strlen(scheduler
->Name());
1057 InterruptsSpinLocker
locker(fLock
);
1059 system_profiler_io_scheduler_added
* event
1060 = (system_profiler_io_scheduler_added
*)_AllocateBuffer(
1061 sizeof(system_profiler_io_scheduler_added
) + nameLen
,
1062 B_SYSTEM_PROFILER_IO_SCHEDULER_ADDED
, 0, 0);
1066 event
->scheduler
= scheduler
->ID();
1067 strcpy(event
->name
, scheduler
->Name());
// Publish the new buffer fill level to userland.
1069 fHeader
->size
= fBufferSize
;
// Emits an "I/O scheduler removed" event for the given scheduler.
1076 SystemProfiler::_IOSchedulerRemoved(IOScheduler
* scheduler
)
1078 InterruptsSpinLocker
locker(fLock
);
1080 system_profiler_io_scheduler_removed
* event
1081 = (system_profiler_io_scheduler_removed
*)_AllocateBuffer(
1082 sizeof(system_profiler_io_scheduler_removed
),
1083 B_SYSTEM_PROFILER_IO_SCHEDULER_REMOVED
, 0, 0);
1087 event
->scheduler
= scheduler
->ID();
// Publish the new buffer fill level to userland.
1089 fHeader
->size
= fBufferSize
;
// Emits an "I/O request scheduled" event: records the owner (team/thread/
// priority) and the request's offset/length/direction. The request
// pointer itself is stored as an opaque identifier for later matching.
1096 SystemProfiler::_IORequestScheduled(IOScheduler
* scheduler
, IORequest
* request
)
1098 InterruptsSpinLocker
locker(fLock
);
1100 system_profiler_io_request_scheduled
* event
1101 = (system_profiler_io_request_scheduled
*)_AllocateBuffer(
1102 sizeof(system_profiler_io_request_scheduled
),
1103 B_SYSTEM_PROFILER_IO_REQUEST_SCHEDULED
, 0, 0);
1107 IORequestOwner
* owner
= request
->Owner();
1109 event
->time
= system_time_nsecs();
1110 event
->scheduler
= scheduler
->ID();
1111 event
->team
= owner
->team
;
1112 event
->thread
= owner
->thread
;
1113 event
->request
= request
;
1114 event
->offset
= request
->Offset();
1115 event
->length
= request
->Length();
1116 event
->write
= request
->IsWrite();
1117 event
->priority
= owner
->priority
;
// Publish the new buffer fill level to userland.
1119 fHeader
->size
= fBufferSize
;
// Emits an "I/O request finished" event with final status and bytes
// transferred.
1126 SystemProfiler::_IORequestFinished(IOScheduler
* scheduler
, IORequest
* request
)
1128 InterruptsSpinLocker
locker(fLock
);
1130 system_profiler_io_request_finished
* event
1131 = (system_profiler_io_request_finished
*)_AllocateBuffer(
1132 sizeof(system_profiler_io_request_finished
),
1133 B_SYSTEM_PROFILER_IO_REQUEST_FINISHED
, 0, 0);
1137 event
->time
= system_time_nsecs();
1138 event
->scheduler
= scheduler
->ID();
1139 event
->request
= request
;
1140 event
->status
= request
->Status();
1141 event
->transferred
= request
->TransferredBytes();
// Publish the new buffer fill level to userland.
1143 fHeader
->size
= fBufferSize
;
// Emits an "I/O operation started" event: one request may be split into
// several operations; both pointers are stored as opaque identifiers.
1150 SystemProfiler::_IOOperationStarted(IOScheduler
* scheduler
, IORequest
* request
,
1151 IOOperation
* operation
)
1153 InterruptsSpinLocker
locker(fLock
);
1155 system_profiler_io_operation_started
* event
1156 = (system_profiler_io_operation_started
*)_AllocateBuffer(
1157 sizeof(system_profiler_io_operation_started
),
1158 B_SYSTEM_PROFILER_IO_OPERATION_STARTED
, 0, 0);
1162 event
->time
= system_time_nsecs();
1163 event
->scheduler
= scheduler
->ID();
1164 event
->request
= request
;
1165 event
->operation
= operation
;
1166 event
->offset
= request
->Offset();
1167 event
->length
= request
->Length();
1168 event
->write
= request
->IsWrite();
// Publish the new buffer fill level to userland.
1170 fHeader
->size
= fBufferSize
;
// Emits an "I/O operation finished" event with the request's current
// status and transfer count.
1177 SystemProfiler::_IOOperationFinished(IOScheduler
* scheduler
, IORequest
* request
,
1178 IOOperation
* operation
)
1180 InterruptsSpinLocker
locker(fLock
);
1182 system_profiler_io_operation_finished
* event
1183 = (system_profiler_io_operation_finished
*)_AllocateBuffer(
1184 sizeof(system_profiler_io_operation_finished
),
1185 B_SYSTEM_PROFILER_IO_OPERATION_FINISHED
, 0, 0);
1189 event
->time
= system_time_nsecs();
1190 event
->scheduler
= scheduler
->ID();
1191 event
->request
= request
;
1192 event
->operation
= operation
;
1193 event
->status
= request
->Status();
1194 event
->transferred
= request
->TransferredBytes();
// Publish the new buffer fill level to userland.
1196 fHeader
->size
= fBufferSize
;
// A wait object (semaphore/condvar/mutex/rw_lock) was (re)created at
// `object`. Invalidate any cached entry for that address/type so the next
// use re-reports fresh info.
1203 SystemProfiler::_WaitObjectCreated(addr_t object
, uint32 type
)
1205 SpinLocker
locker(fLock
);
1207 // look up the object
1209 key
.object
= object
;
1211 WaitObject
* waitObject
= fWaitObjectTable
.Lookup(key
);
1213 // If found, remove it and add it to the free list. This might sound weird,
1214 // but it makes sense, since we lazily track *used* wait objects only.
1215 // I.e. the object in the table is now guaranteedly obsolete.
1217 fWaitObjectTable
.RemoveUnchecked(waitObject
);
1218 fUsedWaitObjects
.Remove(waitObject
);
1219 fFreeWaitObjects
.Add(waitObject
, false);
// A thread started waiting on `object` of the given block type. If the
// object is not yet in the LRU cache, emit a one-time "wait object info"
// event (name + referenced object) and cache it; otherwise just re-queue
// it as most recently used. Caller holds fLock (called from
// ThreadScheduled()).
1224 SystemProfiler::_WaitObjectUsed(addr_t object
, uint32 type
)
1226 // look up the object
1228 key
.object
= object
;
1230 WaitObject
* waitObject
= fWaitObjectTable
.Lookup(key
);
1232 // If already known, re-queue it as most recently used and be done.
1233 if (waitObject
!= NULL
) {
1234 fUsedWaitObjects
.Remove(waitObject
);
1235 fUsedWaitObjects
.Add(waitObject
);
1239 // not known yet -- get the info
1240 const char* name
= NULL
;
1241 const void* referencedObject
= NULL
;
1244 case THREAD_BLOCK_TYPE_SEMAPHORE
:
1246 name
= sem_get_name_unsafe((sem_id
)object
);
1250 case THREAD_BLOCK_TYPE_CONDITION_VARIABLE
:
1252 ConditionVariable
* variable
= (ConditionVariable
*)object
;
1253 name
= variable
->ObjectType();
1254 referencedObject
= variable
->Object();
1258 case THREAD_BLOCK_TYPE_MUTEX
:
1260 mutex
* lock
= (mutex
*)object
;
1265 case THREAD_BLOCK_TYPE_RW_LOCK
:
1267 rw_lock
* lock
= (rw_lock
*)object
;
1272 case THREAD_BLOCK_TYPE_OTHER
:
// For "other" blocks the wait object is the block description string
// itself (see thread_prepare_to_block() usage elsewhere).
1274 name
= (const char*)(void*)object
;
1278 case THREAD_BLOCK_TYPE_SNOOZE
:
1279 case THREAD_BLOCK_TYPE_SIGNAL
:
1285 size_t nameLen
= name
!= NULL
? strlen(name
) : 0;
1287 system_profiler_wait_object_info
* event
1288 = (system_profiler_wait_object_info
*)
1289 _AllocateBuffer(sizeof(system_profiler_wait_object_info
) + nameLen
,
1290 B_SYSTEM_PROFILER_WAIT_OBJECT_INFO
, 0, 0);
1295 event
->object
= object
;
1296 event
->referenced_object
= (addr_t
)referencedObject
;
1298 strcpy(event
->name
, name
);
1300 event
->name
[0] = '\0';
// Publish the new buffer fill level to userland.
1302 fHeader
->size
= fBufferSize
;
1304 // add the wait object
1306 // get a free one or steal the least recently used one
1307 waitObject
= fFreeWaitObjects
.RemoveHead();
1308 if (waitObject
== NULL
) {
1309 waitObject
= fUsedWaitObjects
.RemoveHead();
1310 fWaitObjectTable
.RemoveUnchecked(waitObject
);
1313 waitObject
->object
= object
;
1314 waitObject
->type
= type
;
1315 fWaitObjectTable
.InsertUnchecked(waitObject
);
1316 fUsedWaitObjects
.Add(waitObject
);
1321 SystemProfiler::_InitialImageIterator(struct image
* image
, void* cookie
)
1323 SystemProfiler
* self
= (SystemProfiler
*)cookie
;
1324 self
->fImageNotificationsEnabled
= true;
1325 // Set that here, since the image lock is being held now.
1326 return !self
->_ImageAdded(image
);
// Reserves space for one event in the ring buffer and writes its header.
// `size` is the payload size; it is rounded up to 4 bytes and extended by
// the event header. Handles wrapping by emitting a BUFFER_END marker at
// the physical end. Returns a pointer to the payload, or NULL on overflow
// (overflow/dropped-event handling lines are elided in this extraction).
// Caller must hold fLock.
1331 SystemProfiler::_AllocateBuffer(size_t size
, int event
, int cpu
, int count
)
// Round the payload up to a multiple of 4 bytes.
1333 size
= (size
+ 3) / 4 * 4;
1334 size
+= sizeof(system_profiler_event_header
);
1336 size_t end
= fBufferStart
+ fBufferSize
;
1337 if (end
+ size
> fBufferCapacity
) {
1338 // Buffer is wrapped or needs wrapping.
1339 if (end
< fBufferCapacity
) {
1340 // not wrapped yet, but needed
1341 system_profiler_event_header
* header
1342 = (system_profiler_event_header
*)(fBufferBase
+ end
);
1343 header
->event
= B_SYSTEM_PROFILER_BUFFER_END
;
1344 fBufferSize
= fBufferCapacity
- fBufferStart
;
1347 end
-= fBufferCapacity
;
// Wrapped: would the new event collide with unread data?
1349 if (end
+ size
> fBufferStart
) {
1355 system_profiler_event_header
* header
1356 = (system_profiler_event_header
*)(fBufferBase
+ end
);
1357 header
->event
= event
;
1359 header
->size
= size
- sizeof(system_profiler_event_header
);
1361 fBufferSize
+= size
;
1368 SystemProfiler::_InitTimers(void* cookie
, int cpu
)
1370 SystemProfiler
* self
= (SystemProfiler
*)cookie
;
1371 self
->_ScheduleTimer(cpu
);
1376 SystemProfiler::_UninitTimers(void* cookie
, int cpu
)
1378 SystemProfiler
* self
= (SystemProfiler
*)cookie
;
1380 CPUProfileData
& cpuData
= self
->fCPUData
[cpu
];
1381 cancel_timer(&cpuData
.timer
);
1382 cpuData
.timerScheduled
= false;
1387 SystemProfiler::_ScheduleTimer(int cpu
)
1389 CPUProfileData
& cpuData
= fCPUData
[cpu
];
1390 cpuData
.timerEnd
= system_time() + fInterval
;
1391 cpuData
.timer
.user_data
= this;
1392 add_timer(&cpuData
.timer
, &_ProfilingEvent
, fInterval
,
1393 B_ONE_SHOT_RELATIVE_TIMER
);
1394 cpuData
.timerScheduled
= true;
void
SystemProfiler::_DoSample()
{
	// Take one stack-trace sample of whatever runs on the current CPU and
	// append it to the profiling buffer as a B_SYSTEM_PROFILER_SAMPLES event.
	Thread* thread = thread_get_current_thread();
	int cpu = thread->cpu->cpu_num;
	CPUProfileData& cpuData = fCPUData[cpu];

	// get the samples (both kernel and userland return addresses; the "1"
	// presumably skips the interrupt frame we are called from — confirm
	// against arch_debug_get_stack_trace())
	int32 count = arch_debug_get_stack_trace(cpuData.buffer, fStackDepth, 1,
		0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

	InterruptsSpinLocker locker(fLock);

	system_profiler_samples* event = (system_profiler_samples*)
		_AllocateBuffer(sizeof(system_profiler_samples)
				+ count * sizeof(addr_t),
			B_SYSTEM_PROFILER_SAMPLES, cpu, count);
	if (event == NULL)
		return;

	event->thread = thread->id;
	memcpy(event->samples, cpuData.buffer, count * sizeof(addr_t));

	// publish the new buffer size to the userland-visible header
	fHeader->size = fBufferSize;
}
/*static*/ int32
SystemProfiler::_ProfilingEvent(struct timer* timer)
{
	// Timer hook: take a sample, then re-arm the timer for the next
	// sampling interval on the same CPU.
	SystemProfiler* self = (SystemProfiler*)timer->user_data;

	self->_DoSample();
	self->_ScheduleTimer(timer->cpu);

	return B_HANDLED_INTERRUPT;
}
1437 // #pragma mark - private kernel API
status_t
start_system_profiler(size_t areaSize, uint32 stackDepth, bigtime_t interval)
{
	// Start the kernel-driven system profiler (e.g. for boot profiling):
	// creates the buffer area itself, records the parameters in
	// sRecordedParameters so userland can pick the data up later via
	// _user_system_profiler_recorded(), and installs a SystemProfiler on
	// behalf of the kernel team.
	struct ParameterDeleter {
		ParameterDeleter(area_id area)
			:
			fArea(area),
			fDetached(false)
		{
		}

		~ParameterDeleter()
		{
			// error path: undo area creation and parameter allocation
			if (!fDetached) {
				delete_area(fArea);
				delete sRecordedParameters;
				sRecordedParameters = NULL;
			}
		}

		void Detach()
		{
			fDetached = true;
		}

	private:
		area_id	fArea;
		bool	fDetached;
	};

	void* address;
	area_id area = create_area("kernel profile data", &address,
		B_ANY_KERNEL_ADDRESS, areaSize, B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (area < 0)
		return area;

	ParameterDeleter parameterDeleter(area);

	sRecordedParameters = new(std::nothrow) system_profiler_parameters;
	if (sRecordedParameters == NULL)
		return B_NO_MEMORY;

	sRecordedParameters->buffer_area = area;
	sRecordedParameters->flags = B_SYSTEM_PROFILER_TEAM_EVENTS
		| B_SYSTEM_PROFILER_THREAD_EVENTS | B_SYSTEM_PROFILER_IMAGE_EVENTS
		| B_SYSTEM_PROFILER_IO_SCHEDULING_EVENTS
		| B_SYSTEM_PROFILER_SAMPLING_EVENTS;
	sRecordedParameters->locking_lookup_size = 4096;
	sRecordedParameters->interval = interval;
	sRecordedParameters->stack_depth = stackDepth;

	area_info areaInfo;
	get_area_info(area, &areaInfo);

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(B_SYSTEM_TEAM,
		areaInfo, *sRecordedParameters);
	if (profiler == NULL)
		return B_NO_MEMORY;

	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	status_t error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler — fails if one is already installed
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;

	// success: hand ownership over to the globals
	parameterDeleter.Detach();
	profilerDeleter.Detach();
	sProfiler = profiler;

	return B_OK;
}
void
stop_system_profiler()
{
	// Stop a profiler previously installed via start_system_profiler().
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL)
		return;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	// drop the reference after unlocking — ReleaseReference() may delete
	// the profiler
	profiler->ReleaseReference();
}
1537 #endif // SYSTEM_PROFILER
1540 // #pragma mark - syscalls
status_t
_user_system_profiler_start(struct system_profiler_parameters* userParameters)
{
	// Syscall: install a system profiler for the calling team, using the
	// caller-supplied buffer area and parameters.

	// copy params to the kernel
	struct system_profiler_parameters parameters;
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters)
		|| user_memcpy(&parameters, userParameters, sizeof(parameters))
			!= B_OK) {
		return B_BAD_ADDRESS;
	}

	// check the parameters
	team_id team = thread_get_current_thread()->team->id;

	area_info areaInfo;
	status_t error = get_area_info(parameters.buffer_area, &areaInfo);
	if (error != B_OK)
		return error;

	// the buffer area must belong to the calling team
	if (areaInfo.team != team)
		return B_BAD_VALUE;

	if ((parameters.flags & B_SYSTEM_PROFILER_SAMPLING_EVENTS) != 0) {
		if (parameters.stack_depth < 1)
			return B_BAD_VALUE;

		// clamp interval and stack depth to the supported ranges
		if (parameters.interval < B_DEBUG_MIN_PROFILE_INTERVAL)
			parameters.interval = B_DEBUG_MIN_PROFILE_INTERVAL;

		if (parameters.stack_depth > B_DEBUG_STACK_TRACE_DEPTH)
			parameters.stack_depth = B_DEBUG_STACK_TRACE_DEPTH;
	}

	// quick check to see whether we do already have a profiler installed
	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler != NULL)
		return B_BUSY;
	locker.Unlock();

	// initialize the profiler
	SystemProfiler* profiler = new(std::nothrow) SystemProfiler(team, areaInfo,
		parameters);
	if (profiler == NULL)
		return B_NO_MEMORY;

	ObjectDeleter<SystemProfiler> profilerDeleter(profiler);

	error = profiler->Init();
	if (error != B_OK)
		return error;

	// set the new profiler — re-check under the lock, since another thread
	// may have installed one in the meantime
	locker.Lock();
	if (sProfiler != NULL)
		return B_BUSY;

	profilerDeleter.Detach();
	sProfiler = profiler;

	return B_OK;
}
1607 _user_system_profiler_next_buffer(size_t bytesRead
, uint64
* _droppedEvents
)
1609 if (_droppedEvents
!= NULL
&& !IS_USER_ADDRESS(_droppedEvents
))
1610 return B_BAD_ADDRESS
;
1612 team_id team
= thread_get_current_thread()->team
->id
;
1614 InterruptsSpinLocker
locker(sProfilerLock
);
1615 if (sProfiler
== NULL
|| sProfiler
->TeamID() != team
)
1618 // get a reference to the profiler
1619 SystemProfiler
* profiler
= sProfiler
;
1620 BReference
<SystemProfiler
> reference(profiler
);
1623 uint64 droppedEvents
;
1624 status_t error
= profiler
->NextBuffer(bytesRead
,
1625 _droppedEvents
!= NULL
? &droppedEvents
: NULL
);
1626 if (error
== B_OK
&& _droppedEvents
!= NULL
)
1627 user_memcpy(_droppedEvents
, &droppedEvents
, sizeof(droppedEvents
));
status_t
_user_system_profiler_stop()
{
	// Syscall: uninstall the profiler, but only if it was installed by the
	// calling team.
	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	SystemProfiler* profiler = sProfiler;
	sProfiler = NULL;
	locker.Unlock();

	// drop the reference after unlocking — ReleaseReference() may delete
	// the profiler
	profiler->ReleaseReference();

	return B_OK;
}
status_t
_user_system_profiler_recorded(system_profiler_parameters* userParameters)
{
#if SYSTEM_PROFILER
	// Syscall: hand the data recorded by the kernel-started profiler
	// (cf. start_system_profiler()) over to the calling userland team.
	if (userParameters == NULL || !IS_USER_ADDRESS(userParameters))
		return B_BAD_ADDRESS;
	if (sRecordedParameters == NULL)
		return B_ERROR;

	// stop the profiler first, so the buffer contents are final
	stop_system_profiler();

	// Transfer the area to the userland process

	void* address;
	area_id newArea = transfer_area(sRecordedParameters->buffer_area, &address,
		B_ANY_ADDRESS, team_get_current_team_id(), true);
	if (newArea < 0)
		return newArea;

	// the team only needs read access to the recorded data
	status_t status = set_area_protection(newArea, B_READ_AREA);
	if (status == B_OK) {
		sRecordedParameters->buffer_area = newArea;

		status = user_memcpy(userParameters, sRecordedParameters,
			sizeof(system_profiler_parameters));
	}

	// on failure the transferred area is useless to the team — delete it
	if (status != B_OK)
		delete_area(newArea);

	// the recorded parameters are one-shot; free them either way
	delete sRecordedParameters;
	sRecordedParameters = NULL;

	return status;
#else
	return B_NOT_SUPPORTED;
#endif	// SYSTEM_PROFILER
}