//===-- xray_fdr_log_writer.h ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a function call tracing system.
//
//===----------------------------------------------------------------------===//
#ifndef COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_
#define COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_

#include "xray_buffer_queue.h"
#include "xray_fdr_log_records.h"

#include <tuple>
#include <type_traits>
#include <utility>

namespace __xray {
template <size_t Index> struct SerializerImpl {
  template <class Tuple,
            typename std::enable_if<
                Index<std::tuple_size<
                          typename std::remove_reference<Tuple>::type>::value,
                      int>::type = 0>
  static void serializeTo(char *Buffer, Tuple &&T) {
    auto P = reinterpret_cast<const char *>(&std::get<Index>(T));
    constexpr auto Size = sizeof(std::get<Index>(T));
    internal_memcpy(Buffer, P, Size);
    SerializerImpl<Index + 1>::serializeTo(Buffer + Size,
                                           std::forward<Tuple>(T));
  }

  template <class Tuple,
            typename std::enable_if<
                Index >= std::tuple_size<typename std::remove_reference<
                             Tuple>::type>::value,
                int>::type = 0>
  static void serializeTo(char *, Tuple &&) {}
};

using Serializer = SerializerImpl<0>;
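
// `Serializer` copies the raw bytes of each tuple element into a buffer,
// back-to-back, starting at index 0. A minimal sketch (names here are
// illustrative only):
//
//   char Buf[10];
//   Serializer::serializeTo(Buf, std::make_tuple(uint16_t{1}, uint64_t{2}));
//   // Buf[0..1] holds the uint16_t, Buf[2..9] the uint64_t.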
template <class Tuple, size_t Index> struct AggregateSizesImpl {
  static constexpr size_t value =
      sizeof(typename std::tuple_element<Index, Tuple>::type) +
      AggregateSizesImpl<Tuple, Index - 1>::value;
};

template <class Tuple> struct AggregateSizesImpl<Tuple, 0> {
  static constexpr size_t value =
      sizeof(typename std::tuple_element<0, Tuple>::type);
};

template <class Tuple> struct AggregateSizes {
  static constexpr size_t value =
      AggregateSizesImpl<Tuple, std::tuple_size<Tuple>::value - 1>::value;
};
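
// For example, AggregateSizes<std::tuple<uint16_t, uint64_t>>::value is
// 2 + 8 = 10: the packed sum of the element sizes, independent of any
// padding the tuple type itself may carry.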
template <MetadataRecord::RecordKinds Kind, class... DataTypes>
MetadataRecord createMetadataRecord(DataTypes &&... Ds) {
  static_assert(AggregateSizes<std::tuple<DataTypes...>>::value <=
                    sizeof(MetadataRecord) - 1,
                "Metadata payload longer than metadata buffer!");
  MetadataRecord R;
  R.Type = 1;
  R.RecordKind = static_cast<uint8_t>(Kind);
  Serializer::serializeTo(R.Data,
                          std::make_tuple(std::forward<DataTypes>(Ds)...));
  return R;
}
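
// A usage sketch, using the CallArgument kind (which carries a single
// 64-bit payload, as in writeFunctionWithArg below):
//
//   auto R = createMetadataRecord<MetadataRecord::RecordKinds::CallArgument>(
//       uint64_t{42});
//   // R.Type == 1, R.RecordKind == CallArgument, R.Data holds the bytes.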
class FDRLogWriter {
  BufferQueue::Buffer &Buffer;
  char *NextRecord = nullptr;

  template <class T> void writeRecord(const T &R) {
    internal_memcpy(NextRecord, reinterpret_cast<const char *>(&R), sizeof(T));
    NextRecord += sizeof(T);
    // We need this atomic fence here to ensure that other threads attempting to
    // read the bytes in the buffer will see the writes committed before the
    // extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(T), memory_order_acq_rel);
  }
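
  // Reader-side sketch (not code from this file): a consumer pairs the
  // release fence above with an acquire load of the extents, so that every
  // byte it counts is also guaranteed visible:
  //
  //   auto Extents = atomic_load(Buffer.Extents, memory_order_acquire);
  //   // Bytes in [Buffer.Data, Buffer.Data + Extents) are safe to read.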

public:
  explicit FDRLogWriter(BufferQueue::Buffer &B, char *P)
      : Buffer(B), NextRecord(P) {
    DCHECK_NE(Buffer.Data, nullptr);
    DCHECK_NE(NextRecord, nullptr);
  }

  explicit FDRLogWriter(BufferQueue::Buffer &B)
      : FDRLogWriter(B, static_cast<char *>(B.Data)) {}
  template <MetadataRecord::RecordKinds Kind, class... Data>
  bool writeMetadata(Data &&... Ds) {
    // TODO: Check boundary conditions:
    // 1) Buffer is full, and cannot handle one metadata record.
    // 2) Buffer queue is finalising.
    writeRecord(createMetadataRecord<Kind>(std::forward<Data>(Ds)...));
    return true;
  }
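
  // A usage sketch (`W` is an illustrative FDRLogWriter; NewCPUId is assumed
  // to be one of the kinds declared in xray_fdr_log_records.h):
  //
  //   W.writeMetadata<MetadataRecord::RecordKinds::NewCPUId>(CPUId, TSC);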
  template <size_t N> size_t writeMetadataRecords(MetadataRecord (&Recs)[N]) {
    constexpr auto Size = sizeof(MetadataRecord) * N;
    internal_memcpy(NextRecord, reinterpret_cast<const char *>(Recs), Size);
    NextRecord += Size;
    // We need this atomic fence here to ensure that other threads attempting to
    // read the bytes in the buffer will see the writes committed before the
    // extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, Size, memory_order_acq_rel);
    return Size;
  }
  enum class FunctionRecordKind : uint8_t {
    Enter = 0x00,
    Exit = 0x01,
    TailExit = 0x02,
    EnterArg = 0x03,
  };

  bool writeFunction(FunctionRecordKind Kind, int32_t FuncId, int32_t Delta) {
    FunctionRecord R;
    R.Type = 0;
    R.RecordKind = uint8_t(Kind);
    R.FuncId = FuncId;
    R.TSCDelta = Delta;
    writeRecord(R);
    return true;
  }
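
  // Example (illustrative values): record entry into function id 1, 100 TSC
  // ticks after the previous record.
  //
  //   W.writeFunction(FDRLogWriter::FunctionRecordKind::Enter, 1, 100);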
  bool writeFunctionWithArg(FunctionRecordKind Kind, int32_t FuncId,
                            int32_t Delta, uint64_t Arg) {
    // We need to write the function with arg into the buffer, and then
    // atomically update the buffer extents. This ensures that any reads
    // synchronised on the buffer extents record will always see the writes
    // that happen before the atomic update.
    FunctionRecord R;
    R.Type = 0;
    R.RecordKind = uint8_t(Kind);
    R.FuncId = FuncId;
    R.TSCDelta = Delta;
    MetadataRecord A =
        createMetadataRecord<MetadataRecord::RecordKinds::CallArgument>(Arg);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&A), sizeof(A))) +
                 sizeof(A);
    // We need this atomic fence here to ensure that other threads attempting to
    // read the bytes in the buffer will see the writes committed before the
    // extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(R) + sizeof(A),
                     memory_order_acq_rel);
    return true;
  }
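
  // The resulting layout is a FunctionRecord immediately followed by a
  // CallArgument MetadataRecord; readers decode the argument as a companion
  // to the function record that precedes it.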
  bool writeCustomEvent(int32_t Delta, const void *Event, int32_t EventSize) {
    // We write the metadata record and the custom event data into the buffer
    // first, before we atomically update the extents for the buffer. This
    // allows us to ensure that any threads reading the extents of the buffer
    // will only ever see the full metadata and custom event payload accounted
    // (no partial writes accounted).
    MetadataRecord R =
        createMetadataRecord<MetadataRecord::RecordKinds::CustomEventMarker>(
            EventSize, Delta);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(
                     internal_memcpy(NextRecord, Event, EventSize)) +
                 EventSize;

    // We need this atomic fence here to ensure that other threads attempting to
    // read the bytes in the buffer will see the writes committed before the
    // extents are updated.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(R) + EventSize,
                     memory_order_acq_rel);
    return true;
  }
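
  // A usage sketch (`W` and the payload are illustrative):
  //
  //   const char Payload[] = "version=1";
  //   W.writeCustomEvent(Delta, Payload, sizeof(Payload));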
  bool writeTypedEvent(int32_t Delta, uint16_t EventType, const void *Event,
                       int32_t EventSize) {
    // We do something similar when writing out typed events, see
    // writeCustomEvent(...) above for details.
    MetadataRecord R =
        createMetadataRecord<MetadataRecord::RecordKinds::TypedEventMarker>(
            EventSize, Delta, EventType);
    NextRecord = reinterpret_cast<char *>(internal_memcpy(
                     NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
                 sizeof(R);
    NextRecord = reinterpret_cast<char *>(
                     internal_memcpy(NextRecord, Event, EventSize)) +
                 EventSize;

    // We need this atomic fence here to ensure that other threads attempting to
    // read the bytes in the buffer will see the writes committed before the
    // extents are updated. The extents must account for the marker record as
    // well as the payload, matching the bytes advanced above.
    atomic_thread_fence(memory_order_release);
    atomic_fetch_add(Buffer.Extents, sizeof(R) + EventSize,
                     memory_order_acq_rel);
    return true;
  }
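
  // As with writeCustomEvent, the marker record precedes the payload. A
  // sketch (illustrative values):
  //
  //   W.writeTypedEvent(Delta, /*EventType=*/7, Payload, sizeof(Payload));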
  char *getNextRecord() const { return NextRecord; }
  void resetRecord() {
    NextRecord = reinterpret_cast<char *>(Buffer.Data);
    atomic_store(Buffer.Extents, 0, memory_order_release);
  }

  void undoWrites(size_t B) {
    DCHECK_GE(NextRecord - B, reinterpret_cast<char *>(Buffer.Data));
    NextRecord -= B;
    atomic_fetch_sub(Buffer.Extents, B, memory_order_acq_rel);
  }
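
  // A usage sketch (`W` is illustrative): retract a record that should not
  // have been committed.
  //
  //   W.writeFunction(FDRLogWriter::FunctionRecordKind::Enter, 1, 100);
  //   W.undoWrites(sizeof(FunctionRecord));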
}; // class FDRLogWriter

} // namespace __xray

#endif // COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_