2 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
3 * Copyright 2008-2009, Axel Dörfler, axeld@pinc-software.de.
4 * Copyright 2012, Rene Gollent, rene@gollent.com.
5 * Distributed under the terms of the MIT License.
15 #include <arch/debug.h>
17 #include <debug_heap.h>
23 #include <util/AutoLock.h>
29 //#define TRACE_TRACING
31 # define TRACE(x) dprintf_no_syslog x
39 ENTRY_INITIALIZED
= 0x02,
47 static const size_t kTraceOutputBufferSize
= 10240;
48 static const size_t kBufferSize
= MAX_TRACE_SIZE
/ sizeof(trace_entry
);
50 static const uint32 kMaxRecoveringErrorCount
= 100;
51 static const addr_t kMetaDataBaseAddress
= 32 * 1024 * 1024;
52 static const addr_t kMetaDataBaseEndAddress
= 128 * 1024 * 1024;
53 static const addr_t kMetaDataAddressIncrement
= 8 * 1024 * 1024;
54 static const uint32 kMetaDataMagic1
= 'Vali';
55 static const uint32 kMetaDataMagic2
= 'dTra';
56 static const uint32 kMetaDataMagic3
= 'cing';
58 // the maximum we can address with the trace_entry::[previous_]size fields
59 static const size_t kMaxTracingEntryByteSize
60 = ((1 << 13) - 1) * sizeof(trace_entry
);
63 struct TraceOutputPrint
{
64 TraceOutputPrint(TraceOutput
& output
)
70 void operator()(const char* format
,...) const
73 va_start(args
, format
);
74 fOutput
.PrintArgs(format
, args
);
83 class TracingMetaData
{
85 static status_t
Create(TracingMetaData
*& _metaData
);
90 inline trace_entry
* FirstEntry() const;
91 inline trace_entry
* AfterLastEntry() const;
93 inline uint32
Entries() const;
94 inline uint32
EntriesEver() const;
96 inline void IncrementEntriesEver();
98 inline char* TraceOutputBuffer() const;
100 trace_entry
* NextEntry(trace_entry
* entry
);
101 trace_entry
* PreviousEntry(trace_entry
* entry
);
103 trace_entry
* AllocateEntry(size_t size
, uint16 flags
);
105 bool IsInBuffer(void* address
, size_t size
);
108 bool _FreeFirstEntry();
109 bool _MakeSpace(size_t needed
);
111 static status_t
_CreateMetaDataArea(bool findPrevious
,
113 TracingMetaData
*& _metaData
);
114 bool _InitPreviousTracingData();
118 trace_entry
* fBuffer
;
119 trace_entry
* fFirstEntry
;
120 trace_entry
* fAfterLastEntry
;
125 char* fTraceOutputBuffer
;
126 phys_addr_t fPhysicalAddress
;
130 static TracingMetaData sFallbackTracingMetaData
;
131 static TracingMetaData
* sTracingMetaData
= &sFallbackTracingMetaData
;
132 static bool sTracingDataRecovered
= false;
138 template<typename Print
>
140 print_stack_trace(struct tracing_stack_trace
* stackTrace
,
143 if (stackTrace
== NULL
|| stackTrace
->depth
<= 0)
146 static const size_t kBufferSize
= 256;
147 char* buffer
= (char*)debug_malloc(kBufferSize
);
149 for (int32 i
= 0; i
< stackTrace
->depth
; i
++) {
150 addr_t address
= stackTrace
->return_addresses
[i
];
153 const char* demangledName
= NULL
;
154 const char* imageName
;
158 if (elf_debug_lookup_symbol_address(address
, &baseAddress
, &symbol
,
159 &imageName
, &exactMatch
) == B_OK
) {
161 if (buffer
!= NULL
) {
163 demangledName
= debug_demangle_symbol(symbol
, buffer
,
164 kBufferSize
, &isObjectMethod
);
167 print(" %p %s + 0x%lx (%s)%s\n", (void*)address
,
168 demangledName
!= NULL
? demangledName
: symbol
,
169 address
- baseAddress
, imageName
,
170 exactMatch
? "" : " (nearest)");
172 print(" %p\n", (void*)address
);
180 // #pragma mark - TracingMetaData
184 TracingMetaData::Lock()
186 acquire_spinlock(&fLock
);
192 TracingMetaData::Unlock()
194 release_spinlock(&fLock
);
199 TracingMetaData::FirstEntry() const
206 TracingMetaData::AfterLastEntry() const
208 return fAfterLastEntry
;
213 TracingMetaData::Entries() const
220 TracingMetaData::EntriesEver() const
227 TracingMetaData::IncrementEntriesEver()
230 // NOTE: Race condition on SMP machines! We should use atomic_add(),
231 // though that costs some performance and the information is for
232 // informational purpose anyway.
237 TracingMetaData::TraceOutputBuffer() const
239 return fTraceOutputBuffer
;
244 TracingMetaData::NextEntry(trace_entry
* entry
)
246 entry
+= entry
->size
;
247 if ((entry
->flags
& WRAP_ENTRY
) != 0)
250 if (entry
== fAfterLastEntry
)
258 TracingMetaData::PreviousEntry(trace_entry
* entry
)
260 if (entry
== fFirstEntry
)
263 if (entry
== fBuffer
) {
264 // beginning of buffer -- previous entry is a wrap entry
265 entry
= fBuffer
+ kBufferSize
- entry
->previous_size
;
268 return entry
- entry
->previous_size
;
273 TracingMetaData::AllocateEntry(size_t size
, uint16 flags
)
275 if (fAfterLastEntry
== NULL
|| size
== 0
276 || size
>= kMaxTracingEntryByteSize
) {
280 InterruptsSpinLocker
_(fLock
);
282 size
= (size
+ 3) >> 2;
283 // 4 byte aligned, don't store the lower 2 bits
285 TRACE(("AllocateEntry(%lu), start %p, end %p, buffer %p\n", size
* 4,
286 fFirstEntry
, fAfterLastEntry
, fBuffer
));
288 if (!_MakeSpace(size
))
291 trace_entry
* entry
= fAfterLastEntry
;
293 entry
->flags
= flags
;
294 fAfterLastEntry
+= size
;
295 fAfterLastEntry
->previous_size
= size
;
297 if (!(flags
& BUFFER_ENTRY
))
300 TRACE((" entry: %p, end %p, start %p, entries %ld\n", entry
,
301 fAfterLastEntry
, fFirstEntry
, fEntries
));
308 TracingMetaData::IsInBuffer(void* address
, size_t size
)
313 addr_t start
= (addr_t
)address
;
314 addr_t end
= start
+ size
;
316 if (start
< (addr_t
)fBuffer
|| end
> (addr_t
)(fBuffer
+ kBufferSize
))
319 if (fFirstEntry
> fAfterLastEntry
)
320 return start
>= (addr_t
)fFirstEntry
|| end
<= (addr_t
)fAfterLastEntry
;
322 return start
>= (addr_t
)fFirstEntry
&& end
<= (addr_t
)fAfterLastEntry
;
327 TracingMetaData::_FreeFirstEntry()
329 TRACE((" skip start %p, %lu*4 bytes\n", fFirstEntry
, fFirstEntry
->size
));
331 trace_entry
* newFirst
= NextEntry(fFirstEntry
);
333 if (fFirstEntry
->flags
& BUFFER_ENTRY
) {
334 // a buffer entry -- just skip it
335 } else if (fFirstEntry
->flags
& ENTRY_INITIALIZED
) {
336 // Fully initialized TraceEntry: We could destroy it, but don't do so
337 // for sake of robustness. The destructors of tracing entry classes
338 // should be empty anyway.
341 // Not fully initialized TraceEntry. We can't free it, since
342 // then it's constructor might still write into the memory and
343 // overwrite data of the entry we're going to allocate.
344 // We can't do anything until this entry can be discarded.
348 if (newFirst
== NULL
) {
349 // everything is freed -- practically this can't happen, if
350 // the buffer is large enough to hold three max-sized entries
351 fFirstEntry
= fAfterLastEntry
= fBuffer
;
352 TRACE(("_FreeFirstEntry(): all entries freed!\n"));
354 fFirstEntry
= newFirst
;
360 /*! Makes sure we have needed * 4 bytes of memory at fAfterLastEntry.
361 Returns \c false, if unable to free that much.
364 TracingMetaData::_MakeSpace(size_t needed
)
366 // we need space for fAfterLastEntry, too (in case we need to wrap around
370 // If there's not enough space (free or occupied) after fAfterLastEntry,
371 // we free all entries in that region and wrap around.
372 if (fAfterLastEntry
+ needed
> fBuffer
+ kBufferSize
) {
373 TRACE(("_MakeSpace(%lu), wrapping around: after last: %p\n", needed
,
376 // Free all entries after fAfterLastEntry and one more at the beginning
378 while (fFirstEntry
> fAfterLastEntry
) {
379 if (!_FreeFirstEntry())
382 if (fAfterLastEntry
!= fBuffer
&& !_FreeFirstEntry())
385 // just in case _FreeFirstEntry() freed the very last existing entry
386 if (fAfterLastEntry
== fBuffer
)
389 // mark as wrap entry and actually wrap around
390 trace_entry
* wrapEntry
= fAfterLastEntry
;
392 wrapEntry
->flags
= WRAP_ENTRY
;
393 fAfterLastEntry
= fBuffer
;
394 fAfterLastEntry
->previous_size
= fBuffer
+ kBufferSize
- wrapEntry
;
397 if (fFirstEntry
<= fAfterLastEntry
) {
398 // buffer is empty or the space after fAfterLastEntry is unoccupied
402 // free the first entries, until there's enough space
403 size_t space
= fFirstEntry
- fAfterLastEntry
;
405 if (space
< needed
) {
406 TRACE(("_MakeSpace(%lu), left %ld\n", needed
, space
));
409 while (space
< needed
) {
410 space
+= fFirstEntry
->size
;
412 if (!_FreeFirstEntry())
416 TRACE((" out: start %p, entries %ld\n", fFirstEntry
, fEntries
));
423 TracingMetaData::Create(TracingMetaData
*& _metaData
)
425 // search meta data in memory (from previous session)
427 TracingMetaData
* metaData
;
428 status_t error
= _CreateMetaDataArea(true, area
, metaData
);
430 if (metaData
->_InitPreviousTracingData()) {
431 _metaData
= metaData
;
435 dprintf("Found previous tracing meta data, but failed to init.\n");
437 // invalidate the meta data
438 metaData
->fMagic1
= 0;
439 metaData
->fMagic2
= 0;
440 metaData
->fMagic3
= 0;
443 dprintf("No previous tracing meta data found.\n");
445 // no previous tracing data found -- create new one
446 error
= _CreateMetaDataArea(false, area
, metaData
);
450 virtual_address_restrictions virtualRestrictions
= {};
451 virtualRestrictions
.address_specification
= B_ANY_KERNEL_ADDRESS
;
452 physical_address_restrictions physicalRestrictions
= {};
453 area
= create_area_etc(B_SYSTEM_TEAM
, "tracing log",
454 kTraceOutputBufferSize
+ MAX_TRACE_SIZE
, B_CONTIGUOUS
,
455 B_KERNEL_READ_AREA
| B_KERNEL_WRITE_AREA
, CREATE_AREA_DONT_WAIT
, 0,
456 &virtualRestrictions
, &physicalRestrictions
,
457 (void**)&metaData
->fTraceOutputBuffer
);
461 // get the physical address
462 physical_entry physicalEntry
;
463 if (get_memory_map(metaData
->fTraceOutputBuffer
, B_PAGE_SIZE
,
464 &physicalEntry
, 1) == B_OK
) {
465 metaData
->fPhysicalAddress
= physicalEntry
.address
;
467 dprintf("TracingMetaData::Create(): failed to get physical address "
468 "of tracing buffer\n");
469 metaData
->fPhysicalAddress
= 0;
472 metaData
->fBuffer
= (trace_entry
*)(metaData
->fTraceOutputBuffer
473 + kTraceOutputBufferSize
);
474 metaData
->fFirstEntry
= metaData
->fBuffer
;
475 metaData
->fAfterLastEntry
= metaData
->fBuffer
;
477 metaData
->fEntries
= 0;
478 metaData
->fEntriesEver
= 0;
479 B_INITIALIZE_SPINLOCK(&metaData
->fLock
);
481 metaData
->fMagic1
= kMetaDataMagic1
;
482 metaData
->fMagic2
= kMetaDataMagic2
;
483 metaData
->fMagic3
= kMetaDataMagic3
;
485 _metaData
= metaData
;
491 TracingMetaData::_CreateMetaDataArea(bool findPrevious
, area_id
& _area
,
492 TracingMetaData
*& _metaData
)
494 // search meta data in memory (from previous session)
495 TracingMetaData
* metaData
;
496 phys_addr_t metaDataAddress
= kMetaDataBaseAddress
;
497 for (; metaDataAddress
<= kMetaDataBaseEndAddress
;
498 metaDataAddress
+= kMetaDataAddressIncrement
) {
499 virtual_address_restrictions virtualRestrictions
= {};
500 virtualRestrictions
.address_specification
= B_ANY_KERNEL_ADDRESS
;
501 physical_address_restrictions physicalRestrictions
= {};
502 physicalRestrictions
.low_address
= metaDataAddress
;
503 physicalRestrictions
.high_address
= metaDataAddress
+ B_PAGE_SIZE
;
504 area_id area
= create_area_etc(B_SYSTEM_TEAM
, "tracing metadata",
505 B_PAGE_SIZE
, B_FULL_LOCK
, B_KERNEL_READ_AREA
| B_KERNEL_WRITE_AREA
,
506 CREATE_AREA_DONT_CLEAR
, 0, &virtualRestrictions
,
507 &physicalRestrictions
, (void**)&metaData
);
513 _metaData
= metaData
;
517 if (metaData
->fMagic1
== kMetaDataMagic1
518 && metaData
->fMagic2
== kMetaDataMagic2
519 && metaData
->fMagic3
== kMetaDataMagic3
) {
521 _metaData
= metaData
;
529 return B_ENTRY_NOT_FOUND
;
531 // We could allocate any of the standard locations. Instead of failing
532 // entirely, we use the static meta data. The tracing buffer won't be
533 // reattachable in the next session, but at least we can use it in this
535 _metaData
= &sFallbackTracingMetaData
;
541 TracingMetaData::_InitPreviousTracingData()
543 // TODO: ATM re-attaching the previous tracing buffer doesn't work very
544 // well. The entries should be checked more thoroughly for validity -- e.g.
545 // the pointers to the entries' vtable pointers could be invalid, which can
546 // make the "traced" command quite unusable. The validity of the entries
547 // could be checked in a safe environment (i.e. with a fault handler) with
548 // typeid() and call of a virtual function.
552 = (addr_t
)fTraceOutputBuffer
+ kTraceOutputBufferSize
;
553 addr_t bufferEnd
= bufferStart
+ MAX_TRACE_SIZE
;
555 if (bufferStart
> bufferEnd
|| (addr_t
)fBuffer
!= bufferStart
556 || (addr_t
)fFirstEntry
% sizeof(trace_entry
) != 0
557 || (addr_t
)fFirstEntry
< bufferStart
558 || (addr_t
)fFirstEntry
+ sizeof(trace_entry
) >= bufferEnd
559 || (addr_t
)fAfterLastEntry
% sizeof(trace_entry
) != 0
560 || (addr_t
)fAfterLastEntry
< bufferStart
561 || (addr_t
)fAfterLastEntry
> bufferEnd
562 || fPhysicalAddress
== 0) {
563 dprintf("Failed to init tracing meta data: Sanity checks "
568 // re-map the previous tracing buffer
569 virtual_address_restrictions virtualRestrictions
= {};
570 virtualRestrictions
.address
= fTraceOutputBuffer
;
571 virtualRestrictions
.address_specification
= B_EXACT_ADDRESS
;
572 physical_address_restrictions physicalRestrictions
= {};
573 physicalRestrictions
.low_address
= fPhysicalAddress
;
574 physicalRestrictions
.high_address
= fPhysicalAddress
575 + ROUNDUP(kTraceOutputBufferSize
+ MAX_TRACE_SIZE
, B_PAGE_SIZE
);
576 area_id area
= create_area_etc(B_SYSTEM_TEAM
, "tracing log",
577 kTraceOutputBufferSize
+ MAX_TRACE_SIZE
, B_CONTIGUOUS
,
578 B_KERNEL_READ_AREA
| B_KERNEL_WRITE_AREA
, CREATE_AREA_DONT_CLEAR
, 0,
579 &virtualRestrictions
, &physicalRestrictions
, NULL
);
581 dprintf("Failed to init tracing meta data: Mapping tracing log "
582 "buffer failed: %s\n", strerror(area
));
586 dprintf("ktrace: Remapped tracing buffer at %p, size: %" B_PRIuSIZE
"\n",
587 fTraceOutputBuffer
, kTraceOutputBufferSize
+ MAX_TRACE_SIZE
);
589 // verify/repair the tracing entry list
590 uint32 errorCount
= 0;
591 uint32 entryCount
= 0;
592 uint32 nonBufferEntryCount
= 0;
593 uint32 previousEntrySize
= 0;
594 trace_entry
* entry
= fFirstEntry
;
595 while (errorCount
<= kMaxRecoveringErrorCount
) {
596 // check previous entry size
597 if (entry
->previous_size
!= previousEntrySize
) {
598 if (entry
!= fFirstEntry
) {
599 dprintf("ktrace recovering: entry %p: fixing previous_size "
600 "size: %" B_PRIu32
" (should be %" B_PRIu32
")\n", entry
,
601 entry
->previous_size
, previousEntrySize
);
604 entry
->previous_size
= previousEntrySize
;
607 if (entry
== fAfterLastEntry
)
611 if ((entry
->flags
& WRAP_ENTRY
) == 0 && entry
->size
== 0) {
612 dprintf("ktrace recovering: entry %p: non-wrap entry size is 0\n",
615 fAfterLastEntry
= entry
;
619 if (entry
->size
> uint32(fBuffer
+ kBufferSize
- entry
)) {
620 dprintf("ktrace recovering: entry %p: size too big: %" B_PRIu32
"\n",
623 fAfterLastEntry
= entry
;
627 if (entry
< fAfterLastEntry
&& entry
+ entry
->size
> fAfterLastEntry
) {
628 dprintf("ktrace recovering: entry %p: entry crosses "
629 "fAfterLastEntry (%p)\n", entry
, fAfterLastEntry
);
631 fAfterLastEntry
= entry
;
635 // check for wrap entry
636 if ((entry
->flags
& WRAP_ENTRY
) != 0) {
637 if ((uint32
)(fBuffer
+ kBufferSize
- entry
)
638 > kMaxTracingEntryByteSize
/ sizeof(trace_entry
)) {
639 dprintf("ktrace recovering: entry %p: wrap entry at invalid "
640 "buffer location\n", entry
);
644 if (entry
->size
!= 0) {
645 dprintf("ktrace recovering: entry %p: invalid wrap entry "
646 "size: %" B_PRIu32
"\n", entry
, entry
->size
);
651 previousEntrySize
= fBuffer
+ kBufferSize
- entry
;
656 if ((entry
->flags
& BUFFER_ENTRY
) == 0) {
657 entry
->flags
|= CHECK_ENTRY
;
658 nonBufferEntryCount
++;
662 previousEntrySize
= entry
->size
;
664 entry
+= entry
->size
;
667 if (errorCount
> kMaxRecoveringErrorCount
) {
668 dprintf("ktrace recovering: Too many errors.\n");
669 fAfterLastEntry
= entry
;
670 fAfterLastEntry
->previous_size
= previousEntrySize
;
673 dprintf("ktrace recovering: Recovered %" B_PRIu32
" entries + %" B_PRIu32
674 " buffer entries from previous session. Expected %" B_PRIu32
675 " entries.\n", nonBufferEntryCount
, entryCount
- nonBufferEntryCount
,
677 fEntries
= nonBufferEntryCount
;
679 B_INITIALIZE_SPINLOCK(&fLock
);
681 // TODO: Actually check the entries! Do that when first accessing the
682 // tracing buffer from the kernel debugger (when sTracingDataRecovered is
684 sTracingDataRecovered
= true;
689 #endif // ENABLE_TRACING
695 TraceOutput::TraceOutput(char* buffer
, size_t bufferSize
, uint32 flags
)
697 fCapacity(bufferSize
),
714 TraceOutput::PrintArgs(const char* format
, va_list args
)
720 size_t length
= vsnprintf(fBuffer
+ fSize
, fCapacity
- fSize
, format
, args
);
721 fSize
+= std::min(length
, fCapacity
- fSize
- 1);
727 TraceOutput::PrintStackTrace(tracing_stack_trace
* stackTrace
)
730 print_stack_trace(stackTrace
, TraceOutputPrint(*this));
736 TraceOutput::SetLastEntryTime(bigtime_t time
)
738 fLastEntryTime
= time
;
743 TraceOutput::LastEntryTime() const
745 return fLastEntryTime
;
752 TraceEntry::TraceEntry()
757 TraceEntry::~TraceEntry()
763 TraceEntry::Dump(TraceOutput
& out
)
766 // to be overridden by subclasses
767 out
.Print("ENTRY %p", this);
773 TraceEntry::DumpStackTrace(TraceOutput
& out
)
779 TraceEntry::Initialized()
782 ToTraceEntry()->flags
|= ENTRY_INITIALIZED
;
783 sTracingMetaData
->IncrementEntriesEver();
789 TraceEntry::operator new(size_t size
, const std::nothrow_t
&) throw()
792 trace_entry
* entry
= sTracingMetaData
->AllocateEntry(
793 size
+ sizeof(trace_entry
), 0);
794 return entry
!= NULL
? entry
+ 1 : NULL
;
803 AbstractTraceEntry::~AbstractTraceEntry()
809 AbstractTraceEntry::Dump(TraceOutput
& out
)
811 bigtime_t time
= (out
.Flags() & TRACE_OUTPUT_DIFF_TIME
)
812 ? fTime
- out
.LastEntryTime()
815 if (out
.Flags() & TRACE_OUTPUT_TEAM_ID
) {
816 out
.Print("[%6" B_PRId32
":%6" B_PRId32
"] %10" B_PRId64
": ", fThread
,
819 out
.Print("[%6" B_PRId32
"] %10" B_PRId64
": ", fThread
, time
);
823 out
.SetLastEntryTime(fTime
);
828 AbstractTraceEntry::AddDump(TraceOutput
& out
)
834 AbstractTraceEntry::_Init()
836 Thread
* thread
= thread_get_current_thread();
837 if (thread
!= NULL
) {
838 fThread
= thread
->id
;
840 fTeam
= thread
->team
->id
;
842 fTime
= system_time();
846 // #pragma mark - AbstractTraceEntryWithStackTrace
850 AbstractTraceEntryWithStackTrace::AbstractTraceEntryWithStackTrace(
851 size_t stackTraceDepth
, size_t skipFrames
, bool kernelOnly
)
853 fStackTrace
= capture_tracing_stack_trace(stackTraceDepth
, skipFrames
+ 1,
859 AbstractTraceEntryWithStackTrace::DumpStackTrace(TraceOutput
& out
)
861 out
.PrintStackTrace(fStackTrace
);
870 class KernelTraceEntry
: public AbstractTraceEntry
{
872 KernelTraceEntry(const char* message
)
874 fMessage
= alloc_tracing_buffer_strcpy(message
, 256, false);
876 #if KTRACE_PRINTF_STACK_TRACE
877 fStackTrace
= capture_tracing_stack_trace(
878 KTRACE_PRINTF_STACK_TRACE
, 1, false);
883 virtual void AddDump(TraceOutput
& out
)
885 out
.Print("kern: %s", fMessage
);
888 #if KTRACE_PRINTF_STACK_TRACE
889 virtual void DumpStackTrace(TraceOutput
& out
)
891 out
.PrintStackTrace(fStackTrace
);
897 #if KTRACE_PRINTF_STACK_TRACE
898 tracing_stack_trace
* fStackTrace
;
903 class UserTraceEntry
: public AbstractTraceEntry
{
905 UserTraceEntry(const char* message
)
907 fMessage
= alloc_tracing_buffer_strcpy(message
, 256, true);
909 #if KTRACE_PRINTF_STACK_TRACE
910 fStackTrace
= capture_tracing_stack_trace(
911 KTRACE_PRINTF_STACK_TRACE
, 1, false);
916 virtual void AddDump(TraceOutput
& out
)
918 out
.Print("user: %s", fMessage
);
921 #if KTRACE_PRINTF_STACK_TRACE
922 virtual void DumpStackTrace(TraceOutput
& out
)
924 out
.PrintStackTrace(fStackTrace
);
930 #if KTRACE_PRINTF_STACK_TRACE
931 tracing_stack_trace
* fStackTrace
;
936 class TracingLogStartEntry
: public AbstractTraceEntry
{
938 TracingLogStartEntry()
943 virtual void AddDump(TraceOutput
& out
)
945 out
.Print("ktrace start");
949 #endif // ENABLE_TRACING
952 // #pragma mark - trace filters
955 TraceFilter::~TraceFilter()
961 TraceFilter::Filter(const TraceEntry
* entry
, LazyTraceOutput
& out
)
968 class ThreadTraceFilter
: public TraceFilter
{
970 virtual bool Filter(const TraceEntry
* _entry
, LazyTraceOutput
& out
)
972 const AbstractTraceEntry
* entry
973 = dynamic_cast<const AbstractTraceEntry
*>(_entry
);
974 return (entry
!= NULL
&& entry
->ThreadID() == fThread
);
979 class TeamTraceFilter
: public TraceFilter
{
981 virtual bool Filter(const TraceEntry
* _entry
, LazyTraceOutput
& out
)
983 const AbstractTraceEntry
* entry
984 = dynamic_cast<const AbstractTraceEntry
*>(_entry
);
985 return (entry
!= NULL
&& entry
->TeamID() == fTeam
);
990 class PatternTraceFilter
: public TraceFilter
{
992 virtual bool Filter(const TraceEntry
* entry
, LazyTraceOutput
& out
)
994 return strstr(out
.DumpEntry(entry
), fString
) != NULL
;
999 class DecimalPatternTraceFilter
: public TraceFilter
{
1001 virtual bool Filter(const TraceEntry
* entry
, LazyTraceOutput
& out
)
1003 // TODO: this is *very* slow
1005 snprintf(buffer
, sizeof(buffer
), "%" B_PRId64
, fValue
);
1006 return strstr(out
.DumpEntry(entry
), buffer
) != NULL
;
1010 class HexPatternTraceFilter
: public TraceFilter
{
1012 virtual bool Filter(const TraceEntry
* entry
, LazyTraceOutput
& out
)
1014 // TODO: this is *very* slow
1016 snprintf(buffer
, sizeof(buffer
), "%" B_PRIx64
, fValue
);
1017 return strstr(out
.DumpEntry(entry
), buffer
) != NULL
;
1021 class StringPatternTraceFilter
: public TraceFilter
{
1023 virtual bool Filter(const TraceEntry
* entry
, LazyTraceOutput
& out
)
1025 if (IS_KERNEL_ADDRESS(fValue
))
1026 return strstr(out
.DumpEntry(entry
), (const char*)fValue
) != NULL
;
1028 // TODO: this is *very* slow
1030 user_strlcpy(buffer
, (const char*)fValue
, sizeof(buffer
));
1031 return strstr(out
.DumpEntry(entry
), buffer
) != NULL
;
1035 class NotTraceFilter
: public TraceFilter
{
1037 virtual bool Filter(const TraceEntry
* entry
, LazyTraceOutput
& out
)
1039 return !fSubFilters
.first
->Filter(entry
, out
);
1044 class AndTraceFilter
: public TraceFilter
{
1046 virtual bool Filter(const TraceEntry
* entry
, LazyTraceOutput
& out
)
1048 return fSubFilters
.first
->Filter(entry
, out
)
1049 && fSubFilters
.second
->Filter(entry
, out
);
1054 class OrTraceFilter
: public TraceFilter
{
1056 virtual bool Filter(const TraceEntry
* entry
, LazyTraceOutput
& out
)
1058 return fSubFilters
.first
->Filter(entry
, out
)
1059 || fSubFilters
.second
->Filter(entry
, out
);
1064 class TraceFilterParser
{
1066 static TraceFilterParser
* Default()
1071 bool Parse(int argc
, const char* const* argv
)
1078 TraceFilter
* filter
= _ParseExpression();
1079 return fTokenIndex
== fTokenCount
&& filter
!= NULL
;
1082 TraceFilter
* Filter()
1084 return &fFilters
[0];
1088 TraceFilter
* _ParseExpression()
1090 const char* token
= _NextToken();
1092 // unexpected end of expression
1096 if (fFilterCount
== MAX_FILTERS
) {
1101 if (token
[0] == '#') {
1102 TraceFilter
* filter
= new(&fFilters
[fFilterCount
++])
1104 filter
->fString
= token
+ 1;
1106 } else if (token
[0] == 'd' && token
[1] == '#') {
1107 TraceFilter
* filter
= new(&fFilters
[fFilterCount
++])
1108 DecimalPatternTraceFilter
;
1109 filter
->fValue
= parse_expression(token
+ 2);
1111 } else if (token
[0] == 'x' && token
[1] == '#') {
1112 TraceFilter
* filter
= new(&fFilters
[fFilterCount
++])
1113 HexPatternTraceFilter
;
1114 filter
->fValue
= parse_expression(token
+ 2);
1116 } else if (token
[0] == 's' && token
[1] == '#') {
1117 TraceFilter
* filter
= new(&fFilters
[fFilterCount
++])
1118 StringPatternTraceFilter
;
1119 filter
->fValue
= parse_expression(token
+ 2);
1121 } else if (strcmp(token
, "not") == 0) {
1122 TraceFilter
* filter
= new(&fFilters
[fFilterCount
++]) NotTraceFilter
;
1123 if ((filter
->fSubFilters
.first
= _ParseExpression()) != NULL
)
1126 } else if (strcmp(token
, "and") == 0) {
1127 TraceFilter
* filter
= new(&fFilters
[fFilterCount
++]) AndTraceFilter
;
1128 if ((filter
->fSubFilters
.first
= _ParseExpression()) != NULL
1129 && (filter
->fSubFilters
.second
= _ParseExpression()) != NULL
) {
1133 } else if (strcmp(token
, "or") == 0) {
1134 TraceFilter
* filter
= new(&fFilters
[fFilterCount
++]) OrTraceFilter
;
1135 if ((filter
->fSubFilters
.first
= _ParseExpression()) != NULL
1136 && (filter
->fSubFilters
.second
= _ParseExpression()) != NULL
) {
1140 } else if (strcmp(token
, "thread") == 0) {
1141 const char* arg
= _NextToken();
1143 // unexpected end of expression
1147 TraceFilter
* filter
= new(&fFilters
[fFilterCount
++])
1149 filter
->fThread
= strtol(arg
, NULL
, 0);
1151 } else if (strcmp(token
, "team") == 0) {
1152 const char* arg
= _NextToken();
1154 // unexpected end of expression
1158 TraceFilter
* filter
= new(&fFilters
[fFilterCount
++])
1160 filter
->fTeam
= strtol(arg
, NULL
, 0);
1168 const char* _CurrentToken() const
1170 if (fTokenIndex
>= 1 && fTokenIndex
<= fTokenCount
)
1171 return fTokens
[fTokenIndex
- 1];
1175 const char* _NextToken()
1177 if (fTokenIndex
>= fTokenCount
)
1179 return fTokens
[fTokenIndex
++];
1183 enum { MAX_FILTERS
= 32 };
1185 const char* const* fTokens
;
1188 TraceFilter fFilters
[MAX_FILTERS
];
1191 static TraceFilterParser sParser
;
1195 TraceFilterParser
TraceFilterParser::sParser
;
1205 TraceEntryIterator::Next()
1208 fEntry
= _NextNonBufferEntry(sTracingMetaData
->FirstEntry());
1210 } else if (fEntry
!= NULL
) {
1211 fEntry
= _NextNonBufferEntry(sTracingMetaData
->NextEntry(fEntry
));
1220 TraceEntryIterator::Previous()
1222 if (fIndex
== (int32
)sTracingMetaData
->Entries() + 1)
1223 fEntry
= sTracingMetaData
->AfterLastEntry();
1225 if (fEntry
!= NULL
) {
1226 fEntry
= _PreviousNonBufferEntry(
1227 sTracingMetaData
->PreviousEntry(fEntry
));
1236 TraceEntryIterator::MoveTo(int32 index
)
1238 if (index
== fIndex
)
1241 if (index
<= 0 || index
> (int32
)sTracingMetaData
->Entries()) {
1242 fIndex
= (index
<= 0 ? 0 : sTracingMetaData
->Entries() + 1);
1247 // get the shortest iteration path
1248 int32 distance
= index
- fIndex
;
1249 int32 direction
= distance
< 0 ? -1 : 1;
1250 distance
*= direction
;
1252 if (index
< distance
) {
1258 if ((int32
)sTracingMetaData
->Entries() + 1 - fIndex
< distance
) {
1259 distance
= sTracingMetaData
->Entries() + 1 - fIndex
;
1262 fIndex
= sTracingMetaData
->Entries() + 1;
1265 // iterate to the index
1266 if (direction
< 0) {
1267 while (fIndex
!= index
)
1270 while (fIndex
!= index
)
1279 TraceEntryIterator::_NextNonBufferEntry(trace_entry
* entry
)
1281 while (entry
!= NULL
&& (entry
->flags
& BUFFER_ENTRY
) != 0)
1282 entry
= sTracingMetaData
->NextEntry(entry
);
1289 TraceEntryIterator::_PreviousNonBufferEntry(trace_entry
* entry
)
1291 while (entry
!= NULL
&& (entry
->flags
& BUFFER_ENTRY
) != 0)
1292 entry
= sTracingMetaData
->PreviousEntry(entry
);
1299 dump_tracing_internal(int argc
, char** argv
, WrapperTraceFilter
* wrapperFilter
)
1303 // variables in which we store our state to be continuable
1304 static int32 _previousCount
= 0;
1305 static bool _previousHasFilter
= false;
1306 static bool _previousPrintStackTrace
= false;
1307 static int32 _previousMaxToCheck
= 0;
1308 static int32 _previousFirstChecked
= 1;
1309 static int32 _previousLastChecked
= -1;
1310 static int32 _previousDirection
= 1;
1311 static uint32 _previousEntriesEver
= 0;
1312 static uint32 _previousEntries
= 0;
1313 static uint32 _previousOutputFlags
= 0;
1314 static TraceEntryIterator iterator
;
1316 uint32 entriesEver
= sTracingMetaData
->EntriesEver();
1318 // Note: start and index are Pascal-like indices (i.e. in [1, Entries()]).
1319 int32 start
= 0; // special index: print the last count entries
1321 int32 maxToCheck
= 0;
1324 bool hasFilter
= false;
1325 bool printStackTrace
= false;
1327 uint32 outputFlags
= 0;
1328 while (argi
< argc
) {
1329 if (strcmp(argv
[argi
], "--difftime") == 0) {
1330 outputFlags
|= TRACE_OUTPUT_DIFF_TIME
;
1332 } else if (strcmp(argv
[argi
], "--printteam") == 0) {
1333 outputFlags
|= TRACE_OUTPUT_TEAM_ID
;
1335 } else if (strcmp(argv
[argi
], "--stacktrace") == 0) {
1336 printStackTrace
= true;
1343 if (strcmp(argv
[argi
], "forward") == 0) {
1346 } else if (strcmp(argv
[argi
], "backward") == 0) {
1351 cont
= _previousDirection
;
1355 print_debugger_command_usage(argv
[0]);
1358 if (entriesEver
== 0 || entriesEver
!= _previousEntriesEver
1359 || sTracingMetaData
->Entries() != _previousEntries
) {
1360 kprintf("Can't continue iteration. \"%s\" has not been invoked "
1361 "before, or there were new entries written since the last "
1362 "invocation.\n", argv
[0]);
1367 // get start, count, maxToCheck
1368 int32
* params
[3] = { &start
, &count
, &maxToCheck
};
1369 for (int i
= 0; i
< 3 && !hasFilter
&& argi
< argc
; i
++) {
1370 if (strcmp(argv
[argi
], "filter") == 0) {
1373 } else if (argv
[argi
][0] == '#') {
1376 *params
[i
] = parse_expression(argv
[argi
]);
1381 // filter specification
1384 if (strcmp(argv
[argi
], "filter") == 0)
1387 if (!TraceFilterParser::Default()->Parse(argc
- argi
, argv
+ argi
)) {
1388 print_debugger_command_usage(argv
[0]);
1398 // get values from the previous iteration
1400 count
= _previousCount
;
1401 maxToCheck
= _previousMaxToCheck
;
1402 hasFilter
= _previousHasFilter
;
1403 outputFlags
= _previousOutputFlags
;
1404 printStackTrace
= _previousPrintStackTrace
;
1407 start
= _previousFirstChecked
- 1;
1409 start
= _previousLastChecked
+ 1;
1411 // defaults for count and maxToCheck
1414 if (maxToCheck
== 0 || !hasFilter
)
1416 else if (maxToCheck
< 0)
1417 maxToCheck
= sTracingMetaData
->Entries();
1419 // determine iteration direction
1420 direction
= (start
<= 0 || count
< 0 ? -1 : 1);
1422 // validate count and maxToCheck
1426 maxToCheck
= -maxToCheck
;
1427 if (maxToCheck
> (int32
)sTracingMetaData
->Entries())
1428 maxToCheck
= sTracingMetaData
->Entries();
1429 if (count
> maxToCheck
)
1433 if (start
<= 0 || start
> (int32
)sTracingMetaData
->Entries())
1434 start
= max_c(1, sTracingMetaData
->Entries());
1437 if (direction
< 0) {
1438 firstToCheck
= max_c(1, start
- maxToCheck
+ 1);
1439 lastToCheck
= start
;
1441 firstToCheck
= start
;
1442 lastToCheck
= min_c((int32
)sTracingMetaData
->Entries(),
1443 start
+ maxToCheck
- 1);
1446 // reset the iterator, if something changed in the meantime
1447 if (entriesEver
== 0 || entriesEver
!= _previousEntriesEver
1448 || sTracingMetaData
->Entries() != _previousEntries
) {
1452 LazyTraceOutput
out(sTracingMetaData
->TraceOutputBuffer(),
1453 kTraceOutputBufferSize
, outputFlags
);
1455 bool markedMatching
= false;
1456 int32 firstToDump
= firstToCheck
;
1457 int32 lastToDump
= lastToCheck
;
1459 TraceFilter
* filter
= NULL
;
1461 filter
= TraceFilterParser::Default()->Filter();
1463 if (wrapperFilter
!= NULL
) {
1464 wrapperFilter
->Init(filter
, direction
, cont
!= 0);
1465 filter
= wrapperFilter
;
1468 if (direction
< 0 && filter
&& lastToCheck
- firstToCheck
>= count
) {
1469 // iteration direction is backwards
1470 markedMatching
= true;
1472 // From the last entry to check iterate backwards to check filter
1476 // move to the entry after the last entry to check
1477 iterator
.MoveTo(lastToCheck
+ 1);
1479 // iterate backwards
1482 while (iterator
.Index() > firstToCheck
) {
1483 TraceEntry
* entry
= iterator
.Previous();
1484 if ((entry
->Flags() & ENTRY_INITIALIZED
) != 0) {
1486 if (filter
->Filter(entry
, out
)) {
1487 entry
->ToTraceEntry()->flags
|= FILTER_MATCH
;
1488 if (lastToDump
== -1)
1489 lastToDump
= iterator
.Index();
1490 firstToDump
= iterator
.Index();
1493 if (matching
>= count
)
1496 entry
->ToTraceEntry()->flags
&= ~FILTER_MATCH
;
1500 firstToCheck
= iterator
.Index();
1502 // iterate to the previous entry, so that the next loop starts at the
1504 iterator
.Previous();
1507 out
.SetLastEntryTime(0);
1509 // set the iterator to the entry before the first one to dump
1510 iterator
.MoveTo(firstToDump
- 1);
1512 // dump the entries matching the filter in the range
1513 // [firstToDump, lastToDump]
1516 while (TraceEntry
* entry
= iterator
.Next()) {
1517 int32 index
= iterator
.Index();
1518 if (index
< firstToDump
)
1520 if (index
> lastToDump
|| dumped
>= count
) {
1522 lastToCheck
= index
- 1;
1526 if ((entry
->Flags() & ENTRY_INITIALIZED
) != 0) {
1528 if (filter
&& (markedMatching
1529 ? (entry
->Flags() & FILTER_MATCH
) == 0
1530 : !filter
->Filter(entry
, out
))) {
1534 // don't print trailing new line
1535 const char* dump
= out
.DumpEntry(entry
);
1536 int len
= strlen(dump
);
1537 if (len
> 0 && dump
[len
- 1] == '\n')
1540 kprintf("%5" B_PRId32
". %.*s\n", index
, len
, dump
);
1542 if (printStackTrace
) {
1544 entry
->DumpStackTrace(out
);
1546 kputs(out
.Buffer());
1549 kprintf("%5" B_PRId32
". ** uninitialized entry **\n", index
);
1554 kprintf("printed %" B_PRId32
" entries within range %" B_PRId32
" to %"
1555 B_PRId32
" (%" B_PRId32
" of %" B_PRId32
" total, %" B_PRId32
" ever)\n",
1556 dumped
, firstToCheck
, lastToCheck
, lastToCheck
- firstToCheck
+ 1,
1557 sTracingMetaData
->Entries(), entriesEver
);
1559 // store iteration state
1560 _previousCount
= count
;
1561 _previousMaxToCheck
= maxToCheck
;
1562 _previousHasFilter
= hasFilter
;
1563 _previousPrintStackTrace
= printStackTrace
;
1564 _previousFirstChecked
= firstToCheck
;
1565 _previousLastChecked
= lastToCheck
;
1566 _previousDirection
= direction
;
1567 _previousEntriesEver
= entriesEver
;
1568 _previousEntries
= sTracingMetaData
->Entries();
1569 _previousOutputFlags
= outputFlags
;
1571 return cont
!= 0 ? B_KDEBUG_CONT
: 0;
1576 dump_tracing_command(int argc
, char** argv
)
1578 return dump_tracing_internal(argc
, argv
, NULL
);
1582 #endif // ENABLE_TRACING
1586 alloc_tracing_buffer(size_t size
)
1589 trace_entry
* entry
= sTracingMetaData
->AllocateEntry(
1590 size
+ sizeof(trace_entry
), BUFFER_ENTRY
);
1594 return (uint8
*)(entry
+ 1);
1602 alloc_tracing_buffer_memcpy(const void* source
, size_t size
, bool user
)
1604 if (user
&& !IS_USER_ADDRESS(source
))
1607 uint8
* buffer
= alloc_tracing_buffer(size
);
1612 if (user_memcpy(buffer
, source
, size
) != B_OK
)
1615 memcpy(buffer
, source
, size
);
1622 alloc_tracing_buffer_strcpy(const char* source
, size_t maxSize
, bool user
)
1624 if (source
== NULL
|| maxSize
== 0)
1627 if (user
&& !IS_USER_ADDRESS(source
))
1630 // limit maxSize to the actual source string len
1632 ssize_t size
= user_strlcpy(NULL
, source
, 0);
1633 // there's no user_strnlen()
1636 maxSize
= min_c(maxSize
, (size_t)size
+ 1);
1638 maxSize
= strnlen(source
, maxSize
- 1) + 1;
1640 char* buffer
= (char*)alloc_tracing_buffer(maxSize
);
1645 if (user_strlcpy(buffer
, source
, maxSize
) < B_OK
)
1648 strlcpy(buffer
, source
, maxSize
);
1654 tracing_stack_trace
*
1655 capture_tracing_stack_trace(int32 maxCount
, int32 skipFrames
, bool kernelOnly
)
1658 // page_fault_exception() doesn't allow us to gracefully handle a bad
1659 // address in the stack trace, if interrupts are disabled, so we always
1660 // restrict the stack traces to the kernel only in this case. A bad address
1661 // in the kernel stack trace would still cause a panic(), but this is
1662 // probably even desired.
1663 if (!are_interrupts_enabled())
1666 tracing_stack_trace
* stackTrace
1667 = (tracing_stack_trace
*)alloc_tracing_buffer(
1668 sizeof(tracing_stack_trace
) + maxCount
* sizeof(addr_t
));
1670 if (stackTrace
!= NULL
) {
1671 stackTrace
->depth
= arch_debug_get_stack_trace(
1672 stackTrace
->return_addresses
, maxCount
, 0, skipFrames
+ 1,
1673 STACK_TRACE_KERNEL
| (kernelOnly
? 0 : STACK_TRACE_USER
));
1684 tracing_find_caller_in_stack_trace(struct tracing_stack_trace
* stackTrace
,
1685 const addr_t excludeRanges
[], uint32 excludeRangeCount
)
1687 for (int32 i
= 0; i
< stackTrace
->depth
; i
++) {
1688 addr_t returnAddress
= stackTrace
->return_addresses
[i
];
1690 bool inRange
= false;
1691 for (uint32 j
= 0; j
< excludeRangeCount
; j
++) {
1692 if (returnAddress
>= excludeRanges
[j
* 2 + 0]
1693 && returnAddress
< excludeRanges
[j
* 2 + 1]) {
1700 return returnAddress
;
/*!	Prints \a stackTrace using the generic stack trace printer, with the
	kernel debugger's kprintf as the output function.
*/
void
tracing_print_stack_trace(struct tracing_stack_trace* stackTrace)
{
#if	ENABLE_TRACING
	print_stack_trace(stackTrace, kprintf);
#endif
}
1717 dump_tracing(int argc
, char** argv
, WrapperTraceFilter
* wrapperFilter
)
1720 return dump_tracing_internal(argc
, argv
, wrapperFilter
);
1728 tracing_is_entry_valid(AbstractTraceEntry
* candidate
, bigtime_t entryTime
)
1731 if (!sTracingMetaData
->IsInBuffer(candidate
, sizeof(*candidate
)))
1737 TraceEntryIterator iterator
;
1738 while (TraceEntry
* entry
= iterator
.Next()) {
1739 AbstractTraceEntry
* abstract
= dynamic_cast<AbstractTraceEntry
*>(entry
);
1740 if (abstract
== NULL
)
1743 if (abstract
!= candidate
&& abstract
->Time() > entryTime
)
1746 return candidate
->Time() == entryTime
;
/*!	Acquires the tracing buffer lock, protecting it against concurrent
	modification while the caller inspects it.
*/
void
lock_tracing_buffer()
{
#if	ENABLE_TRACING
	sTracingMetaData->Lock();
#endif
}
/*!	Releases the tracing buffer lock acquired via lock_tracing_buffer().
*/
void
unlock_tracing_buffer()
{
#if	ENABLE_TRACING
	sTracingMetaData->Unlock();
#endif
}
// tracing_init: kernel init hook for the tracing subsystem. Creates (or
// recovers) the tracing metadata and registers the "traced" debugger
// command with its usage text.
// NOTE(review): this view of the file is garbled/truncated — several
// lines (the early return in the error branch, parts of the help string,
// the closing of add_debugger_command_etc() and the final return) are
// not visible here; verify any change against the original file.
1776 status_t result
= TracingMetaData::Create(sTracingMetaData
);
// On failure, fall back to a zero-initialized, inert metadata instance
// so the tracing API remains safely callable.
1777 if (result
!= B_OK
) {
1778 memset(&sFallbackTracingMetaData
, 0, sizeof(sFallbackTracingMetaData
));
1779 sTracingMetaData
= &sFallbackTracingMetaData
;
// Log a start marker so readers can tell where this boot's entries begin.
1783 new(nothrow
) TracingLogStartEntry();
// Register the "traced" command; the long literal below is the
// user-facing usage/help text and must not be edited casually.
1785 add_debugger_command_etc("traced", &dump_tracing_command
,
1786 "Dump recorded trace entries",
1787 "[ --printteam ] [ --difftime ] [ --stacktrace ] "
1788 "(\"forward\" | \"backward\") "
1789 "| ([ <start> [ <count> [ <range> ] ] ] "
1790 "[ #<pattern> | (\"filter\" <filter>) ])\n"
1791 "Prints recorded trace entries. If \"backward\" or \"forward\" is\n"
1792 "specified, the command continues where the previous invocation left\n"
1793 "off, i.e. printing the previous respectively next entries (as many\n"
1794 "as printed before). In this case the command is continuable, that is\n"
1795 "afterwards entering an empty line in the debugger will reinvoke it.\n"
1796 "If no arguments are given, the command continues in the direction\n"
1797 "of the last invocation.\n"
1798 "--printteam - enables printing the entries' team IDs.\n"
1799 "--difftime - print difference times for all but the first entry.\n"
1800 "--stacktrace - print stack traces for entries that captured one.\n"
1801 " <start> - The base index of the entries to print. Depending on\n"
1802 " whether the iteration direction is forward or\n"
1803 " backward this will be the first or last entry printed\n"
1804 " (potentially, if a filter is specified). The index of\n"
1805 " the first entry in the trace buffer is 1. If 0 is\n"
1806 " specified, the last <count> recorded entries are\n"
1807 " printed (iteration direction is backward). Defaults \n"
1809 " <count> - The number of entries to be printed. Defaults to 30.\n"
1810 " If negative, the -<count> entries before and\n"
1811 " including <start> will be printed.\n"
1812 " <range> - Only relevant if a filter is specified. Specifies the\n"
1813 " number of entries to be filtered -- depending on the\n"
1814 " iteration direction the entries before or after\n"
1815 " <start>. If more than <count> entries match the\n"
1816 " filter, only the first (forward) or last (backward)\n"
1817 " <count> matching entries will be printed. If 0 is\n"
1818 " specified <range> will be set to <count>. If -1,\n"
1819 " <range> will be set to the number of recorded\n"
1821 " <pattern> - If specified only entries containing this string are\n"
1823 " <filter> - If specified only entries matching this filter\n"
1824 " expression are printed. The expression can consist of\n"
1825 " prefix operators \"not\", \"and\", \"or\", and\n"
1826 " filters \"'thread' <thread>\" (matching entries\n"
1827 " with the given thread ID), \"'team' <team>\"\n"
1828 "(matching entries with the given team ID), and\n"
1829 " \"#<pattern>\" (matching entries containing the given\n"
1831 #endif // ENABLE_TRACING
/*!	printf()-style helper that records the formatted message as a kernel
	trace entry. Output is truncated to the size of the local buffer.
*/
void
ktrace_printf(const char *format, ...)
{
#if	ENABLE_TRACING
	va_list list;
	va_start(list, format);

	char buffer[256];
	vsnprintf(buffer, sizeof(buffer), format, list);
	va_end(list);

	// the entry keeps its own copy of the formatted text
	new(nothrow) KernelTraceEntry(buffer);
#endif	// ENABLE_TRACING
}
/*!	Syscall backend: records \a message from userland as a trace entry.
	UserTraceEntry is responsible for safely copying the user string.
*/
void
_user_ktrace_output(const char *message)
{
#if	ENABLE_TRACING
	new(nothrow) UserTraceEntry(message);
#endif	// ENABLE_TRACING
}