src/system/kernel/scheduler/scheduling_analysis.cpp

/*
 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */

#include <scheduling_analysis.h>

#include <elf.h>
#include <kernel.h>
#include <scheduler_defs.h>
#include <tracing.h>
#include <util/AutoLock.h>

#include "scheduler_tracing.h"


#if SCHEDULER_TRACING

namespace SchedulingAnalysis {

using namespace SchedulerTracing;

#if SCHEDULING_ANALYSIS_TRACING
using namespace SchedulingAnalysisTracing;
#endif

struct ThreadWaitObject;

struct HashObjectKey {
    virtual ~HashObjectKey()
    {
    }

    virtual uint32 HashKey() const = 0;
};


struct HashObject {
    HashObject* next;

    virtual ~HashObject()
    {
    }

    virtual uint32 HashKey() const = 0;
    virtual bool Equals(const HashObjectKey* key) const = 0;
};


struct ThreadKey : HashObjectKey {
    thread_id id;

    ThreadKey(thread_id id)
        :
        id(id)
    {
    }

    virtual uint32 HashKey() const
    {
        return id;
    }
};


struct Thread : HashObject, scheduling_analysis_thread {
    ScheduleState state;
    bigtime_t lastTime;

    ThreadWaitObject* waitObject;

    Thread(thread_id id)
        :
        state(UNKNOWN),
        lastTime(0),
        waitObject(NULL)
    {
        this->id = id;
        name[0] = '\0';

        runs = 0;
        total_run_time = 0;
        min_run_time = -1;
        max_run_time = -1;

        latencies = 0;
        total_latency = 0;
        min_latency = -1;
        max_latency = -1;

        reruns = 0;
        total_rerun_time = 0;
        min_rerun_time = -1;
        max_rerun_time = -1;

        unspecified_wait_time = 0;

        preemptions = 0;

        wait_objects = NULL;
    }

    virtual uint32 HashKey() const
    {
        return id;
    }

    virtual bool Equals(const HashObjectKey* _key) const
    {
        const ThreadKey* key = dynamic_cast<const ThreadKey*>(_key);
        if (key == NULL)
            return false;
        return key->id == id;
    }
};


struct WaitObjectKey : HashObjectKey {
    uint32 type;
    void* object;

    WaitObjectKey(uint32 type, void* object)
        :
        type(type),
        object(object)
    {
    }

    virtual uint32 HashKey() const
    {
        return type ^ (uint32)(addr_t)object;
    }
};


struct WaitObject : HashObject, scheduling_analysis_wait_object {
    WaitObject(uint32 type, void* object)
    {
        this->type = type;
        this->object = object;
        name[0] = '\0';
        referenced_object = NULL;
    }

    virtual uint32 HashKey() const
    {
        return type ^ (uint32)(addr_t)object;
    }

    virtual bool Equals(const HashObjectKey* _key) const
    {
        const WaitObjectKey* key = dynamic_cast<const WaitObjectKey*>(_key);
        if (key == NULL)
            return false;
        return key->type == type && key->object == object;
    }
};


struct ThreadWaitObjectKey : HashObjectKey {
    thread_id thread;
    uint32 type;
    void* object;

    ThreadWaitObjectKey(thread_id thread, uint32 type, void* object)
        :
        thread(thread),
        type(type),
        object(object)
    {
    }

    virtual uint32 HashKey() const
    {
        return thread ^ type ^ (uint32)(addr_t)object;
    }
};


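// A ThreadWaitObject links one thread to one WaitObject and accumulates the
// number of waits and the total time the thread spent waiting on that object.
// All ThreadWaitObjects of a thread are chained via next_in_list.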
struct ThreadWaitObject : HashObject, scheduling_analysis_thread_wait_object {
    ThreadWaitObject(thread_id thread, WaitObject* waitObject)
    {
        this->thread = thread;
        wait_object = waitObject;
        wait_time = 0;
        waits = 0;
        next_in_list = NULL;
    }

    virtual uint32 HashKey() const
    {
        return thread ^ wait_object->type ^ (uint32)(addr_t)wait_object->object;
    }

    virtual bool Equals(const HashObjectKey* _key) const
    {
        const ThreadWaitObjectKey* key
            = dynamic_cast<const ThreadWaitObjectKey*>(_key);
        if (key == NULL)
            return false;
        return key->thread == thread && key->type == wait_object->type
            && key->object == wait_object->object;
    }
};


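// Manages all analysis data within a single user-supplied buffer: the hash
// table occupies the top end of the buffer, while Thread, WaitObject, and
// ThreadWaitObject instances are allocated from the bottom end upwards. The
// table size is chosen so the buffer can hold one table slot plus one object
// of the largest type per slot.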
class SchedulingAnalysisManager {
public:
    SchedulingAnalysisManager(void* buffer, size_t size)
        :
        fBuffer(buffer),
        fSize(size),
        fHashTable(),
        fHashTableSize(0)
    {
        fAnalysis.thread_count = 0;
        fAnalysis.threads = 0;
        fAnalysis.wait_object_count = 0;
        fAnalysis.thread_wait_object_count = 0;

        size_t maxObjectSize = max_c(max_c(sizeof(Thread), sizeof(WaitObject)),
            sizeof(ThreadWaitObject));
        fHashTableSize = size / (maxObjectSize + sizeof(HashObject*));
        fHashTable = (HashObject**)((uint8*)fBuffer + fSize) - fHashTableSize;
        fNextAllocation = (uint8*)fBuffer;
        fRemainingBytes = (addr_t)fHashTable - (addr_t)fBuffer;

        image_info info;
        if (elf_get_image_info_for_address((addr_t)&scheduler_init, &info)
                == B_OK) {
            fKernelStart = (addr_t)info.text;
            fKernelEnd = (addr_t)info.data + info.data_size;
        } else {
            fKernelStart = 0;
            fKernelEnd = 0;
        }
    }

    const scheduling_analysis* Analysis() const
    {
        return &fAnalysis;
    }

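    // Simple bump allocator over the buffer: requests are rounded up to a
    // multiple of 8 bytes, and allocation fails with NULL once the space
    // below the hash table is used up. Individual objects are never freed.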
    void* Allocate(size_t size)
    {
        size = (size + 7) & ~(size_t)7;
        if (size > fRemainingBytes)
            return NULL;

        void* address = fNextAllocation;
        fNextAllocation += size;
        fRemainingBytes -= size;
        return address;
    }

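    // The hash table resolves collisions by chaining. Thread, WaitObject, and
    // ThreadWaitObject entries share the same table and are told apart via
    // dynamic_cast in the typed lookup methods below.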
    void Insert(HashObject* object)
    {
        uint32 index = object->HashKey() % fHashTableSize;
        object->next = fHashTable[index];
        fHashTable[index] = object;
    }

    void Remove(HashObject* object)
    {
        uint32 index = object->HashKey() % fHashTableSize;
        HashObject** slot = &fHashTable[index];
        while (*slot != object)
            slot = &(*slot)->next;

        *slot = object->next;
    }

    HashObject* Lookup(const HashObjectKey& key) const
    {
        uint32 index = key.HashKey() % fHashTableSize;
        HashObject* object = fHashTable[index];
        while (object != NULL && !object->Equals(&key))
            object = object->next;
        return object;
    }

    Thread* ThreadFor(thread_id id) const
    {
        return dynamic_cast<Thread*>(Lookup(ThreadKey(id)));
    }

    WaitObject* WaitObjectFor(uint32 type, void* object) const
    {
        return dynamic_cast<WaitObject*>(Lookup(WaitObjectKey(type, object)));
    }

    ThreadWaitObject* ThreadWaitObjectFor(thread_id thread, uint32 type,
        void* object) const
    {
        return dynamic_cast<ThreadWaitObject*>(
            Lookup(ThreadWaitObjectKey(thread, type, object)));
    }

    status_t AddThread(thread_id id, const char* name)
    {
        Thread* thread = ThreadFor(id);
        if (thread == NULL) {
            void* memory = Allocate(sizeof(Thread));
            if (memory == NULL)
                return B_NO_MEMORY;

            thread = new(memory) Thread(id);
            Insert(thread);
            fAnalysis.thread_count++;
        }

        if (name != NULL && thread->name[0] == '\0')
            strlcpy(thread->name, name, sizeof(thread->name));

        return B_OK;
    }

    status_t AddWaitObject(uint32 type, void* object,
        WaitObject** _waitObject = NULL)
    {
        if (WaitObjectFor(type, object) != NULL)
            return B_OK;

        void* memory = Allocate(sizeof(WaitObject));
        if (memory == NULL)
            return B_NO_MEMORY;

        WaitObject* waitObject = new(memory) WaitObject(type, object);
        Insert(waitObject);
        fAnalysis.wait_object_count++;

        // Set a dummy name for snooze() and waiting for signals, so we don't
        // try to update them later on.
        if (type == THREAD_BLOCK_TYPE_SNOOZE
            || type == THREAD_BLOCK_TYPE_SIGNAL) {
            strcpy(waitObject->name, "?");
        }

        if (_waitObject != NULL)
            *_waitObject = waitObject;

        return B_OK;
    }

    status_t UpdateWaitObject(uint32 type, void* object, const char* name,
        void* referencedObject)
    {
        WaitObject* waitObject = WaitObjectFor(type, object);
        if (waitObject == NULL)
            return B_OK;

        if (waitObject->name[0] != '\0') {
            // This is a new object at the same address. Replace the old one.
            Remove(waitObject);
            status_t error = AddWaitObject(type, object, &waitObject);
            if (error != B_OK)
                return error;
        }

        if (name == NULL)
            name = "?";

        strlcpy(waitObject->name, name, sizeof(waitObject->name));
        waitObject->referenced_object = referencedObject;

        return B_OK;
    }

    bool UpdateWaitObjectDontAdd(uint32 type, void* object, const char* name,
        void* referencedObject)
    {
        WaitObject* waitObject = WaitObjectFor(type, object);
        if (waitObject == NULL || waitObject->name[0] != '\0')
            return false;

        if (name == NULL)
            name = "?";

        strlcpy(waitObject->name, name, sizeof(waitObject->name));
        waitObject->referenced_object = referencedObject;

        return true;
    }

    status_t AddThreadWaitObject(Thread* thread, uint32 type, void* object)
    {
        WaitObject* waitObject = WaitObjectFor(type, object);
        if (waitObject == NULL) {
            // The algorithm should prevent this case.
            return B_ERROR;
        }

        ThreadWaitObject* threadWaitObject = ThreadWaitObjectFor(thread->id,
            type, object);
        if (threadWaitObject == NULL
            || threadWaitObject->wait_object != waitObject) {
            if (threadWaitObject != NULL)
                Remove(threadWaitObject);

            void* memory = Allocate(sizeof(ThreadWaitObject));
            if (memory == NULL)
                return B_NO_MEMORY;

            threadWaitObject = new(memory) ThreadWaitObject(thread->id,
                waitObject);
            Insert(threadWaitObject);
            fAnalysis.thread_wait_object_count++;

            threadWaitObject->next_in_list = thread->wait_objects;
            thread->wait_objects = threadWaitObject;
        }

        thread->waitObject = threadWaitObject;

        return B_OK;
    }

    int32 MissingWaitObjects() const
    {
        // Iterate through the hash table and count the wait objects that
        // don't have a name yet.
        int32 count = 0;
        for (uint32 i = 0; i < fHashTableSize; i++) {
            HashObject* object = fHashTable[i];
            while (object != NULL) {
                WaitObject* waitObject = dynamic_cast<WaitObject*>(object);
                if (waitObject != NULL && waitObject->name[0] == '\0')
                    count++;

                object = object->next;
            }
        }

        return count;
    }

    status_t FinishAnalysis()
    {
        // allocate the thread array
        scheduling_analysis_thread** threads
            = (scheduling_analysis_thread**)Allocate(
                sizeof(Thread*) * fAnalysis.thread_count);
        if (threads == NULL)
            return B_NO_MEMORY;

        // Iterate through the hash table and collect all threads. Also polish
        // all wait objects that haven't been updated yet.
        int32 index = 0;
        for (uint32 i = 0; i < fHashTableSize; i++) {
            HashObject* object = fHashTable[i];
            while (object != NULL) {
                Thread* thread = dynamic_cast<Thread*>(object);
                if (thread != NULL) {
                    threads[index++] = thread;
                } else if (WaitObject* waitObject
                        = dynamic_cast<WaitObject*>(object)) {
                    _PolishWaitObject(waitObject);
                }

                object = object->next;
            }
        }

        fAnalysis.threads = threads;
        dprintf("scheduling analysis: free bytes: %lu/%lu\n", fRemainingBytes,
            fSize);
        return B_OK;
    }

private:
    void _PolishWaitObject(WaitObject* waitObject)
    {
        if (waitObject->name[0] != '\0')
            return;

        switch (waitObject->type) {
            case THREAD_BLOCK_TYPE_SEMAPHORE:
            {
                sem_info info;
                if (get_sem_info((sem_id)(addr_t)waitObject->object, &info)
                        == B_OK) {
                    strlcpy(waitObject->name, info.name,
                        sizeof(waitObject->name));
                }
                break;
            }

            case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
            {
                // If the condition variable object is in the kernel image,
                // assume it is still initialized.
                ConditionVariable* variable
                    = (ConditionVariable*)waitObject->object;
                if (!_IsInKernelImage(variable))
                    break;

                waitObject->referenced_object = (void*)variable->Object();
                strlcpy(waitObject->name, variable->ObjectType(),
                    sizeof(waitObject->name));
                break;
            }

            case THREAD_BLOCK_TYPE_MUTEX:
            {
                // If the mutex object is in the kernel image, assume it is
                // still initialized.
                mutex* lock = (mutex*)waitObject->object;
                if (!_IsInKernelImage(lock))
                    break;

                strlcpy(waitObject->name, lock->name,
                    sizeof(waitObject->name));
                break;
            }

            case THREAD_BLOCK_TYPE_RW_LOCK:
            {
                // If the rw_lock object is in the kernel image, assume it is
                // still initialized.
                rw_lock* lock = (rw_lock*)waitObject->object;
                if (!_IsInKernelImage(lock))
                    break;

                strlcpy(waitObject->name, lock->name,
                    sizeof(waitObject->name));
                break;
            }

            case THREAD_BLOCK_TYPE_OTHER:
            {
                const char* name = (const char*)waitObject->object;
                if (name == NULL || _IsInKernelImage(name))
                    return;

                strlcpy(waitObject->name, name, sizeof(waitObject->name));
                break;
            }

            case THREAD_BLOCK_TYPE_SNOOZE:
            case THREAD_BLOCK_TYPE_SIGNAL:
            default:
                break;
        }

        if (waitObject->name[0] != '\0')
            return;

        strcpy(waitObject->name, "?");
    }

    bool _IsInKernelImage(const void* _address)
    {
        addr_t address = (addr_t)_address;
        return address >= fKernelStart && address < fKernelEnd;
    }

private:
    scheduling_analysis fAnalysis;
    void* fBuffer;
    size_t fSize;
    HashObject** fHashTable;
    uint32 fHashTableSize;
    uint8* fNextAllocation;
    size_t fRemainingBytes;
    addr_t fKernelStart;
    addr_t fKernelEnd;
};


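// Performs the analysis in two passes over the tracing buffer: a backward
// pass over [from, until) that registers all threads and wait objects that
// occur (so all allocations happen up front), followed by a forward pass that
// replays the entries and accumulates run times, latencies, and wait times.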
static status_t
analyze_scheduling(bigtime_t from, bigtime_t until,
    SchedulingAnalysisManager& manager)
{
    // analyze how many threads and locking primitives we're talking about
    TraceEntryIterator iterator;
    iterator.MoveTo(INT_MAX);
    while (TraceEntry* _entry = iterator.Previous()) {
        SchedulerTraceEntry* baseEntry
            = dynamic_cast<SchedulerTraceEntry*>(_entry);
        if (baseEntry == NULL || baseEntry->Time() >= until)
            continue;
        if (baseEntry->Time() < from)
            break;

        status_t error = manager.AddThread(baseEntry->ThreadID(),
            baseEntry->Name());
        if (error != B_OK)
            return error;

        if (ScheduleThread* entry = dynamic_cast<ScheduleThread*>(_entry)) {
            error = manager.AddThread(entry->PreviousThreadID(), NULL);
            if (error != B_OK)
                return error;

            if (entry->PreviousState() == B_THREAD_WAITING) {
                void* waitObject = (void*)entry->PreviousWaitObject();
                switch (entry->PreviousWaitObjectType()) {
                    case THREAD_BLOCK_TYPE_SNOOZE:
                    case THREAD_BLOCK_TYPE_SIGNAL:
                        waitObject = NULL;
                        break;
                    case THREAD_BLOCK_TYPE_SEMAPHORE:
                    case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
                    case THREAD_BLOCK_TYPE_MUTEX:
                    case THREAD_BLOCK_TYPE_RW_LOCK:
                    case THREAD_BLOCK_TYPE_OTHER:
                    default:
                        break;
                }

                error = manager.AddWaitObject(entry->PreviousWaitObjectType(),
                    waitObject);
                if (error != B_OK)
                    return error;
            }
        }
    }

#if SCHEDULING_ANALYSIS_TRACING
    int32 startEntryIndex = iterator.Index();
#endif

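    // Second pass: replay the entries in chronological order, driving a small
    // per-thread state machine (UNKNOWN, RUNNING, STILL_RUNNING, PREEMPTED,
    // READY, WAITING) and charging the time between state changes to the
    // matching statistic.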
    while (TraceEntry* _entry = iterator.Next()) {
#if SCHEDULING_ANALYSIS_TRACING
        // might be info on a wait object
        if (WaitObjectTraceEntry* waitObjectEntry
                = dynamic_cast<WaitObjectTraceEntry*>(_entry)) {
            status_t error = manager.UpdateWaitObject(waitObjectEntry->Type(),
                waitObjectEntry->Object(), waitObjectEntry->Name(),
                waitObjectEntry->ReferencedObject());
            if (error != B_OK)
                return error;
            continue;
        }
#endif

        SchedulerTraceEntry* baseEntry
            = dynamic_cast<SchedulerTraceEntry*>(_entry);
        if (baseEntry == NULL)
            continue;
        if (baseEntry->Time() >= until)
            break;

        if (ScheduleThread* entry = dynamic_cast<ScheduleThread*>(_entry)) {
            // scheduled thread
            Thread* thread = manager.ThreadFor(entry->ThreadID());

            bigtime_t diffTime = entry->Time() - thread->lastTime;

            if (thread->state == READY) {
                // thread scheduled after having been woken up
                thread->latencies++;
                thread->total_latency += diffTime;
                if (thread->min_latency < 0 || diffTime < thread->min_latency)
                    thread->min_latency = diffTime;
                if (diffTime > thread->max_latency)
                    thread->max_latency = diffTime;
            } else if (thread->state == PREEMPTED) {
                // thread scheduled after having been preempted before
                thread->reruns++;
                thread->total_rerun_time += diffTime;
                if (thread->min_rerun_time < 0
                        || diffTime < thread->min_rerun_time) {
                    thread->min_rerun_time = diffTime;
                }
                if (diffTime > thread->max_rerun_time)
                    thread->max_rerun_time = diffTime;
            }

            if (thread->state == STILL_RUNNING) {
                // Thread was running and continues to run.
                thread->state = RUNNING;
            }

            if (thread->state != RUNNING) {
                thread->lastTime = entry->Time();
                thread->state = RUNNING;
            }

            // unscheduled thread

            if (entry->ThreadID() == entry->PreviousThreadID())
                continue;

            thread = manager.ThreadFor(entry->PreviousThreadID());

            diffTime = entry->Time() - thread->lastTime;

            if (thread->state == STILL_RUNNING) {
                // thread preempted
                thread->runs++;
                thread->preemptions++;
                thread->total_run_time += diffTime;
                if (thread->min_run_time < 0 || diffTime < thread->min_run_time)
                    thread->min_run_time = diffTime;
                if (diffTime > thread->max_run_time)
                    thread->max_run_time = diffTime;

                thread->lastTime = entry->Time();
                thread->state = PREEMPTED;
            } else if (thread->state == RUNNING) {
                // thread starts waiting (it hadn't been added to the run
                // queue before being unscheduled)
                thread->runs++;
                thread->total_run_time += diffTime;
                if (thread->min_run_time < 0 || diffTime < thread->min_run_time)
                    thread->min_run_time = diffTime;
                if (diffTime > thread->max_run_time)
                    thread->max_run_time = diffTime;

                if (entry->PreviousState() == B_THREAD_WAITING) {
                    void* waitObject = (void*)entry->PreviousWaitObject();
                    switch (entry->PreviousWaitObjectType()) {
                        case THREAD_BLOCK_TYPE_SNOOZE:
                        case THREAD_BLOCK_TYPE_SIGNAL:
                            waitObject = NULL;
                            break;
                        case THREAD_BLOCK_TYPE_SEMAPHORE:
                        case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
                        case THREAD_BLOCK_TYPE_MUTEX:
                        case THREAD_BLOCK_TYPE_RW_LOCK:
                        case THREAD_BLOCK_TYPE_OTHER:
                        default:
                            break;
                    }

                    status_t error = manager.AddThreadWaitObject(thread,
                        entry->PreviousWaitObjectType(), waitObject);
                    if (error != B_OK)
                        return error;
                }

                thread->lastTime = entry->Time();
                thread->state = WAITING;
            } else if (thread->state == UNKNOWN) {
                uint32 threadState = entry->PreviousState();
                if (threadState == B_THREAD_WAITING
                    || threadState == B_THREAD_SUSPENDED) {
                    thread->lastTime = entry->Time();
                    thread->state = WAITING;
                } else if (threadState == B_THREAD_READY) {
                    thread->lastTime = entry->Time();
                    thread->state = PREEMPTED;
                }
            }
        } else if (EnqueueThread* entry
                = dynamic_cast<EnqueueThread*>(_entry)) {
            // thread enqueued in run queue
            Thread* thread = manager.ThreadFor(entry->ThreadID());

            if (thread->state == RUNNING || thread->state == STILL_RUNNING) {
                // Thread was running and is reentered into the run queue.
                // This is done by the scheduler if the thread remains ready.
                thread->state = STILL_RUNNING;
            } else {
                // Thread was waiting and is ready now.
                bigtime_t diffTime = entry->Time() - thread->lastTime;
                if (thread->waitObject != NULL) {
                    thread->waitObject->wait_time += diffTime;
                    thread->waitObject->waits++;
                    thread->waitObject = NULL;
                } else if (thread->state != UNKNOWN)
                    thread->unspecified_wait_time += diffTime;

                thread->lastTime = entry->Time();
                thread->state = READY;
            }
        } else if (RemoveThread* entry = dynamic_cast<RemoveThread*>(_entry)) {
            // thread removed from run queue
            Thread* thread = manager.ThreadFor(entry->ThreadID());

            // This really only happens when the thread priority is changed
            // while the thread is ready.

            bigtime_t diffTime = entry->Time() - thread->lastTime;
            if (thread->state == RUNNING) {
                // This should never happen.
                thread->runs++;
                thread->total_run_time += diffTime;
                if (thread->min_run_time < 0 || diffTime < thread->min_run_time)
                    thread->min_run_time = diffTime;
                if (diffTime > thread->max_run_time)
                    thread->max_run_time = diffTime;
            } else if (thread->state == READY || thread->state == PREEMPTED) {
                // Not really correct, but the case is rare and we keep it
                // simple.
                thread->unspecified_wait_time += diffTime;
            }

            thread->lastTime = entry->Time();
            thread->state = WAITING;
        }
    }

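    // Wait objects created before the analysis window may still lack names,
    // since their init entries were never seen in the forward pass. Walk
    // backwards from the window's start to pick up names for those that are
    // still anonymous.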
#if SCHEDULING_ANALYSIS_TRACING
    int32 missingWaitObjects = manager.MissingWaitObjects();
    if (missingWaitObjects > 0) {
        iterator.MoveTo(startEntryIndex + 1);
        while (TraceEntry* _entry = iterator.Previous()) {
            if (WaitObjectTraceEntry* waitObjectEntry
                    = dynamic_cast<WaitObjectTraceEntry*>(_entry)) {
                if (manager.UpdateWaitObjectDontAdd(
                        waitObjectEntry->Type(), waitObjectEntry->Object(),
                        waitObjectEntry->Name(),
                        waitObjectEntry->ReferencedObject())) {
                    if (--missingWaitObjects == 0)
                        break;
                }
            }
        }
    }
#endif

    return B_OK;
}

}   // namespace SchedulingAnalysis

#endif  // SCHEDULER_TRACING


status_t
_user_analyze_scheduling(bigtime_t from, bigtime_t until, void* buffer,
    size_t size, scheduling_analysis* analysis)
{
#if SCHEDULER_TRACING
    using namespace SchedulingAnalysis;

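    // The manager's allocator assumes 8 byte alignment; round the start of
    // the user buffer up and its size down accordingly.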
    if ((addr_t)buffer & 0x7) {
        addr_t diff = (addr_t)buffer & 0x7;
        buffer = (void*)((addr_t)buffer + 8 - diff);
        size -= 8 - diff;
    }
    size &= ~(size_t)0x7;

    if (buffer == NULL || !IS_USER_ADDRESS(buffer) || size == 0)
        return B_BAD_VALUE;

    status_t error = lock_memory(buffer, size, B_READ_DEVICE);
    if (error != B_OK)
        return error;

    SchedulingAnalysisManager manager(buffer, size);

    InterruptsLocker locker;
    lock_tracing_buffer();

    error = analyze_scheduling(from, until, manager);

    unlock_tracing_buffer();
    locker.Unlock();

    if (error == B_OK)
        error = manager.FinishAnalysis();

    unlock_memory(buffer, size, B_READ_DEVICE);

    if (error == B_OK) {
        error = user_memcpy(analysis, manager.Analysis(),
            sizeof(scheduling_analysis));
    }

    return error;
#else
    return B_BAD_VALUE;
#endif
}