1 //===-- DNBArchImplX86_64.cpp -----------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // Created by Greg Clayton on 6/25/07.
11 //===----------------------------------------------------------------------===//
13 #if defined(__i386__) || defined(__x86_64__)
15 #include <sys/cdefs.h>
16 #include <sys/sysctl.h>
17 #include <sys/types.h>
20 #include "MacOSX/x86_64/DNBArchImplX86_64.h"
21 #include "MachProcess.h"
22 #include "MachThread.h"
24 #include <mach/mach.h>
26 #if defined(LLDB_DEBUGSERVER_RELEASE) || defined(LLDB_DEBUGSERVER_DEBUG)
// Tri-state for lazily-resolved, environment-driven debug switches:
// "unknown" until the corresponding getenv() check has run once.
enum debugState { debugStateUnknown, debugStateOff, debugStateOn };

// Cached result of the DNB_DEBUG_FPU_REGS environment check.
static debugState sFPUDebugState = debugStateUnknown;
// Cached result of the DNB_DEBUG_X86_FORCE_AVX_REGS environment check.
static debugState sAVXForceState = debugStateUnknown;
32 static bool DebugFPURegs() {
33 if (sFPUDebugState
== debugStateUnknown
) {
34 if (getenv("DNB_DEBUG_FPU_REGS"))
35 sFPUDebugState
= debugStateOn
;
37 sFPUDebugState
= debugStateOff
;
40 return (sFPUDebugState
== debugStateOn
);
43 static bool ForceAVXRegs() {
44 if (sFPUDebugState
== debugStateUnknown
) {
45 if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
46 sAVXForceState
= debugStateOn
;
48 sAVXForceState
= debugStateOff
;
51 return (sAVXForceState
== debugStateOn
);
54 #define DEBUG_FPU_REGS (DebugFPURegs())
55 #define FORCE_AVX_REGS (ForceAVXRegs())
57 #define DEBUG_FPU_REGS (0)
58 #define FORCE_AVX_REGS (0)
61 bool DetectHardwareFeature(const char *feature
) {
63 size_t answer_size
= sizeof(answer
);
64 int error
= ::sysctlbyname(feature
, &answer
, &answer_size
, NULL
, 0);
65 return error
== 0 && answer
!= 0;
// Tri-state cache value for CPU capability probes: unknown until the
// first query, then latched to present/not-present for process lifetime.
enum AVXPresence { eAVXUnknown = -1, eAVXNotPresent = 0, eAVXPresent = 1 };
70 bool LogAVXAndReturn(AVXPresence has_avx
, int err
, const char * os_ver
) {
71 DNBLogThreadedIf(LOG_THREAD
,
72 "CPUHasAVX(): g_has_avx = %i (err = %i, os_ver = %s)",
73 has_avx
, err
, os_ver
);
74 return (has_avx
== eAVXPresent
);
77 extern "C" bool CPUHasAVX() {
78 static AVXPresence g_has_avx
= eAVXUnknown
;
79 if (g_has_avx
!= eAVXUnknown
)
80 return LogAVXAndReturn(g_has_avx
, 0, "");
82 g_has_avx
= eAVXNotPresent
;
84 // OS X 10.7.3 and earlier have a bug in thread_get_state that truncated the
85 // size of the return. To work around this we have to disable AVX debugging
86 // on hosts prior to 10.7.3 (<rdar://problem/10122874>).
89 size_t length
= sizeof(buffer
);
91 mib
[1] = KERN_OSVERSION
;
93 // KERN_OSVERSION returns the build number which is a number signifying the
94 // major version, a capitol letter signifying the minor version, and numbers
95 // signifying the build (ex: on 10.12.3, the returned value is 16D32).
96 int err
= ::sysctl(mib
, 2, &buffer
, &length
, NULL
, 0);
98 return LogAVXAndReturn(g_has_avx
, err
, "");
100 size_t first_letter
= 0;
101 for (; first_letter
< length
; ++first_letter
) {
102 // This is looking for the first uppercase letter
103 if (isupper(buffer
[first_letter
]))
106 char letter
= buffer
[first_letter
];
107 buffer
[first_letter
] = '\0';
108 auto major_ver
= strtoull(buffer
, NULL
, 0);
109 buffer
[first_letter
] = letter
;
111 // In this check we're looking to see that our major and minor version numer
112 // was >= 11E, which is the 10.7.4 release.
113 if (major_ver
< 11 || (major_ver
== 11 && letter
< 'E'))
114 return LogAVXAndReturn(g_has_avx
, err
, buffer
);
115 if (DetectHardwareFeature("hw.optional.avx1_0"))
116 g_has_avx
= eAVXPresent
;
118 return LogAVXAndReturn(g_has_avx
, err
, buffer
);
121 extern "C" bool CPUHasAVX512f() {
122 static AVXPresence g_has_avx512f
= eAVXUnknown
;
123 if (g_has_avx512f
!= eAVXUnknown
)
124 return g_has_avx512f
== eAVXPresent
;
126 g_has_avx512f
= DetectHardwareFeature("hw.optional.avx512f") ? eAVXPresent
129 return (g_has_avx512f
== eAVXPresent
);
132 uint64_t DNBArchImplX86_64::GetPC(uint64_t failValue
) {
133 // Get program counter
134 if (GetGPRState(false) == KERN_SUCCESS
)
135 return m_state
.context
.gpr
.__rip
;
139 kern_return_t
DNBArchImplX86_64::SetPC(uint64_t value
) {
140 // Get program counter
141 kern_return_t err
= GetGPRState(false);
142 if (err
== KERN_SUCCESS
) {
143 m_state
.context
.gpr
.__rip
= value
;
146 return err
== KERN_SUCCESS
;
149 uint64_t DNBArchImplX86_64::GetSP(uint64_t failValue
) {
151 if (GetGPRState(false) == KERN_SUCCESS
)
152 return m_state
.context
.gpr
.__rsp
;
156 // Uncomment the value below to verify the values in the debugger.
157 //#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED
159 kern_return_t
DNBArchImplX86_64::GetGPRState(bool force
) {
160 if (force
|| m_state
.GetError(e_regSetGPR
, Read
)) {
162 m_state
.context
.gpr
.__rax
= ('a' << 8) + 'x';
163 m_state
.context
.gpr
.__rbx
= ('b' << 8) + 'x';
164 m_state
.context
.gpr
.__rcx
= ('c' << 8) + 'x';
165 m_state
.context
.gpr
.__rdx
= ('d' << 8) + 'x';
166 m_state
.context
.gpr
.__rdi
= ('d' << 8) + 'i';
167 m_state
.context
.gpr
.__rsi
= ('s' << 8) + 'i';
168 m_state
.context
.gpr
.__rbp
= ('b' << 8) + 'p';
169 m_state
.context
.gpr
.__rsp
= ('s' << 8) + 'p';
170 m_state
.context
.gpr
.__r8
= ('r' << 8) + '8';
171 m_state
.context
.gpr
.__r9
= ('r' << 8) + '9';
172 m_state
.context
.gpr
.__r10
= ('r' << 8) + 'a';
173 m_state
.context
.gpr
.__r11
= ('r' << 8) + 'b';
174 m_state
.context
.gpr
.__r12
= ('r' << 8) + 'c';
175 m_state
.context
.gpr
.__r13
= ('r' << 8) + 'd';
176 m_state
.context
.gpr
.__r14
= ('r' << 8) + 'e';
177 m_state
.context
.gpr
.__r15
= ('r' << 8) + 'f';
178 m_state
.context
.gpr
.__rip
= ('i' << 8) + 'p';
179 m_state
.context
.gpr
.__rflags
= ('f' << 8) + 'l';
180 m_state
.context
.gpr
.__cs
= ('c' << 8) + 's';
181 m_state
.context
.gpr
.__fs
= ('f' << 8) + 's';
182 m_state
.context
.gpr
.__gs
= ('g' << 8) + 's';
183 m_state
.SetError(e_regSetGPR
, Read
, 0);
185 mach_msg_type_number_t count
= e_regSetWordSizeGPRFull
;
186 int flavor
= __x86_64_THREAD_FULL_STATE
;
189 ::thread_get_state(m_thread
->MachPortNumber(), flavor
,
190 (thread_state_t
)&m_state
.context
.gpr
, &count
));
192 if (!m_state
.GetError(e_regSetGPR
, Read
)) {
193 m_state
.hasFullGPRState
= true;
195 m_state
.hasFullGPRState
= false;
196 count
= e_regSetWordSizeGPR
;
197 flavor
= __x86_64_THREAD_STATE
;
200 ::thread_get_state(m_thread
->MachPortNumber(), flavor
,
201 (thread_state_t
)&m_state
.context
.gpr
, &count
));
205 "::thread_get_state (0x%4.4x, %u (%s), &gpr, %u) => 0x%8.8x"
206 "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
207 "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
208 "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
209 "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
210 "\n\trip = %16.16llx"
211 "\n\tflg = %16.16llx cs = %16.16llx fs = %16.16llx gs = %16.16llx"
212 "\n\t ds = %16.16llx es = %16.16llx ss = %16.16llx gsB = %16.16llx",
213 m_thread
->MachPortNumber(), flavor
,
214 m_state
.hasFullGPRState
? "full" : "non-full",
215 m_state
.hasFullGPRState
? e_regSetWordSizeGPRFull
216 : e_regSetWordSizeGPR
,
217 m_state
.GetError(e_regSetGPR
, Read
),
218 m_state
.context
.gpr
.__rax
, m_state
.context
.gpr
.__rbx
,
219 m_state
.context
.gpr
.__rcx
, m_state
.context
.gpr
.__rdx
,
220 m_state
.context
.gpr
.__rdi
, m_state
.context
.gpr
.__rsi
,
221 m_state
.context
.gpr
.__rbp
, m_state
.context
.gpr
.__rsp
,
222 m_state
.context
.gpr
.__r8
, m_state
.context
.gpr
.__r9
,
223 m_state
.context
.gpr
.__r10
, m_state
.context
.gpr
.__r11
,
224 m_state
.context
.gpr
.__r12
, m_state
.context
.gpr
.__r13
,
225 m_state
.context
.gpr
.__r14
, m_state
.context
.gpr
.__r15
,
226 m_state
.context
.gpr
.__rip
, m_state
.context
.gpr
.__rflags
,
227 m_state
.context
.gpr
.__cs
, m_state
.context
.gpr
.__fs
,
228 m_state
.context
.gpr
.__gs
, m_state
.context
.gpr
.__ds
,
229 m_state
.context
.gpr
.__es
, m_state
.context
.gpr
.__ss
,
230 m_state
.context
.gpr
.__gsbase
);
232 // DNBLogThreadedIf (LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u)
234 // "\n\trax = %16.16llx"
235 // "\n\trbx = %16.16llx"
236 // "\n\trcx = %16.16llx"
237 // "\n\trdx = %16.16llx"
238 // "\n\trdi = %16.16llx"
239 // "\n\trsi = %16.16llx"
240 // "\n\trbp = %16.16llx"
241 // "\n\trsp = %16.16llx"
242 // "\n\t r8 = %16.16llx"
243 // "\n\t r9 = %16.16llx"
244 // "\n\tr10 = %16.16llx"
245 // "\n\tr11 = %16.16llx"
246 // "\n\tr12 = %16.16llx"
247 // "\n\tr13 = %16.16llx"
248 // "\n\tr14 = %16.16llx"
249 // "\n\tr15 = %16.16llx"
250 // "\n\trip = %16.16llx"
251 // "\n\tflg = %16.16llx"
252 // "\n\t cs = %16.16llx"
253 // "\n\t fs = %16.16llx"
254 // "\n\t gs = %16.16llx",
255 // m_thread->MachPortNumber(),
256 // x86_THREAD_STATE64,
257 // x86_THREAD_STATE64_COUNT,
258 // m_state.GetError(e_regSetGPR, Read),
259 // m_state.context.gpr.__rax,
260 // m_state.context.gpr.__rbx,
261 // m_state.context.gpr.__rcx,
262 // m_state.context.gpr.__rdx,
263 // m_state.context.gpr.__rdi,
264 // m_state.context.gpr.__rsi,
265 // m_state.context.gpr.__rbp,
266 // m_state.context.gpr.__rsp,
267 // m_state.context.gpr.__r8,
268 // m_state.context.gpr.__r9,
269 // m_state.context.gpr.__r10,
270 // m_state.context.gpr.__r11,
271 // m_state.context.gpr.__r12,
272 // m_state.context.gpr.__r13,
273 // m_state.context.gpr.__r14,
274 // m_state.context.gpr.__r15,
275 // m_state.context.gpr.__rip,
276 // m_state.context.gpr.__rflags,
277 // m_state.context.gpr.__cs,
278 // m_state.context.gpr.__fs,
279 // m_state.context.gpr.__gs);
282 return m_state
.GetError(e_regSetGPR
, Read
);
285 // Uncomment the value below to verify the values in the debugger.
286 //#define DEBUG_FPU_REGS 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED
288 kern_return_t
DNBArchImplX86_64::GetFPUState(bool force
) {
289 if (force
|| m_state
.GetError(e_regSetFPU
, Read
)) {
290 if (DEBUG_FPU_REGS
) {
291 m_state
.context
.fpu
.no_avx
.__fpu_reserved
[0] = -1;
292 m_state
.context
.fpu
.no_avx
.__fpu_reserved
[1] = -1;
293 *(uint16_t *)&(m_state
.context
.fpu
.no_avx
.__fpu_fcw
) = 0x1234;
294 *(uint16_t *)&(m_state
.context
.fpu
.no_avx
.__fpu_fsw
) = 0x5678;
295 m_state
.context
.fpu
.no_avx
.__fpu_ftw
= 1;
296 m_state
.context
.fpu
.no_avx
.__fpu_rsrv1
= UINT8_MAX
;
297 m_state
.context
.fpu
.no_avx
.__fpu_fop
= 2;
298 m_state
.context
.fpu
.no_avx
.__fpu_ip
= 3;
299 m_state
.context
.fpu
.no_avx
.__fpu_cs
= 4;
300 m_state
.context
.fpu
.no_avx
.__fpu_rsrv2
= 5;
301 m_state
.context
.fpu
.no_avx
.__fpu_dp
= 6;
302 m_state
.context
.fpu
.no_avx
.__fpu_ds
= 7;
303 m_state
.context
.fpu
.no_avx
.__fpu_rsrv3
= UINT16_MAX
;
304 m_state
.context
.fpu
.no_avx
.__fpu_mxcsr
= 8;
305 m_state
.context
.fpu
.no_avx
.__fpu_mxcsrmask
= 9;
306 for (int i
= 0; i
< 16; ++i
) {
308 m_state
.context
.fpu
.no_avx
.__fpu_stmm0
.__mmst_reg
[i
] = 'a';
309 m_state
.context
.fpu
.no_avx
.__fpu_stmm1
.__mmst_reg
[i
] = 'b';
310 m_state
.context
.fpu
.no_avx
.__fpu_stmm2
.__mmst_reg
[i
] = 'c';
311 m_state
.context
.fpu
.no_avx
.__fpu_stmm3
.__mmst_reg
[i
] = 'd';
312 m_state
.context
.fpu
.no_avx
.__fpu_stmm4
.__mmst_reg
[i
] = 'e';
313 m_state
.context
.fpu
.no_avx
.__fpu_stmm5
.__mmst_reg
[i
] = 'f';
314 m_state
.context
.fpu
.no_avx
.__fpu_stmm6
.__mmst_reg
[i
] = 'g';
315 m_state
.context
.fpu
.no_avx
.__fpu_stmm7
.__mmst_reg
[i
] = 'h';
317 m_state
.context
.fpu
.no_avx
.__fpu_stmm0
.__mmst_reg
[i
] = INT8_MIN
;
318 m_state
.context
.fpu
.no_avx
.__fpu_stmm1
.__mmst_reg
[i
] = INT8_MIN
;
319 m_state
.context
.fpu
.no_avx
.__fpu_stmm2
.__mmst_reg
[i
] = INT8_MIN
;
320 m_state
.context
.fpu
.no_avx
.__fpu_stmm3
.__mmst_reg
[i
] = INT8_MIN
;
321 m_state
.context
.fpu
.no_avx
.__fpu_stmm4
.__mmst_reg
[i
] = INT8_MIN
;
322 m_state
.context
.fpu
.no_avx
.__fpu_stmm5
.__mmst_reg
[i
] = INT8_MIN
;
323 m_state
.context
.fpu
.no_avx
.__fpu_stmm6
.__mmst_reg
[i
] = INT8_MIN
;
324 m_state
.context
.fpu
.no_avx
.__fpu_stmm7
.__mmst_reg
[i
] = INT8_MIN
;
327 m_state
.context
.fpu
.no_avx
.__fpu_xmm0
.__xmm_reg
[i
] = '0';
328 m_state
.context
.fpu
.no_avx
.__fpu_xmm1
.__xmm_reg
[i
] = '1';
329 m_state
.context
.fpu
.no_avx
.__fpu_xmm2
.__xmm_reg
[i
] = '2';
330 m_state
.context
.fpu
.no_avx
.__fpu_xmm3
.__xmm_reg
[i
] = '3';
331 m_state
.context
.fpu
.no_avx
.__fpu_xmm4
.__xmm_reg
[i
] = '4';
332 m_state
.context
.fpu
.no_avx
.__fpu_xmm5
.__xmm_reg
[i
] = '5';
333 m_state
.context
.fpu
.no_avx
.__fpu_xmm6
.__xmm_reg
[i
] = '6';
334 m_state
.context
.fpu
.no_avx
.__fpu_xmm7
.__xmm_reg
[i
] = '7';
335 m_state
.context
.fpu
.no_avx
.__fpu_xmm8
.__xmm_reg
[i
] = '8';
336 m_state
.context
.fpu
.no_avx
.__fpu_xmm9
.__xmm_reg
[i
] = '9';
337 m_state
.context
.fpu
.no_avx
.__fpu_xmm10
.__xmm_reg
[i
] = 'A';
338 m_state
.context
.fpu
.no_avx
.__fpu_xmm11
.__xmm_reg
[i
] = 'B';
339 m_state
.context
.fpu
.no_avx
.__fpu_xmm12
.__xmm_reg
[i
] = 'C';
340 m_state
.context
.fpu
.no_avx
.__fpu_xmm13
.__xmm_reg
[i
] = 'D';
341 m_state
.context
.fpu
.no_avx
.__fpu_xmm14
.__xmm_reg
[i
] = 'E';
342 m_state
.context
.fpu
.no_avx
.__fpu_xmm15
.__xmm_reg
[i
] = 'F';
344 for (int i
= 0; i
< sizeof(m_state
.context
.fpu
.no_avx
.__fpu_rsrv4
); ++i
)
345 m_state
.context
.fpu
.no_avx
.__fpu_rsrv4
[i
] = INT8_MIN
;
346 m_state
.context
.fpu
.no_avx
.__fpu_reserved1
= -1;
348 if (CPUHasAVX() || FORCE_AVX_REGS
) {
349 for (int i
= 0; i
< 16; ++i
) {
350 m_state
.context
.fpu
.avx
.__fpu_ymmh0
.__xmm_reg
[i
] = '0' + i
;
351 m_state
.context
.fpu
.avx
.__fpu_ymmh1
.__xmm_reg
[i
] = '1' + i
;
352 m_state
.context
.fpu
.avx
.__fpu_ymmh2
.__xmm_reg
[i
] = '2' + i
;
353 m_state
.context
.fpu
.avx
.__fpu_ymmh3
.__xmm_reg
[i
] = '3' + i
;
354 m_state
.context
.fpu
.avx
.__fpu_ymmh4
.__xmm_reg
[i
] = '4' + i
;
355 m_state
.context
.fpu
.avx
.__fpu_ymmh5
.__xmm_reg
[i
] = '5' + i
;
356 m_state
.context
.fpu
.avx
.__fpu_ymmh6
.__xmm_reg
[i
] = '6' + i
;
357 m_state
.context
.fpu
.avx
.__fpu_ymmh7
.__xmm_reg
[i
] = '7' + i
;
358 m_state
.context
.fpu
.avx
.__fpu_ymmh8
.__xmm_reg
[i
] = '8' + i
;
359 m_state
.context
.fpu
.avx
.__fpu_ymmh9
.__xmm_reg
[i
] = '9' + i
;
360 m_state
.context
.fpu
.avx
.__fpu_ymmh10
.__xmm_reg
[i
] = 'A' + i
;
361 m_state
.context
.fpu
.avx
.__fpu_ymmh11
.__xmm_reg
[i
] = 'B' + i
;
362 m_state
.context
.fpu
.avx
.__fpu_ymmh12
.__xmm_reg
[i
] = 'C' + i
;
363 m_state
.context
.fpu
.avx
.__fpu_ymmh13
.__xmm_reg
[i
] = 'D' + i
;
364 m_state
.context
.fpu
.avx
.__fpu_ymmh14
.__xmm_reg
[i
] = 'E' + i
;
365 m_state
.context
.fpu
.avx
.__fpu_ymmh15
.__xmm_reg
[i
] = 'F' + i
;
367 for (int i
= 0; i
< sizeof(m_state
.context
.fpu
.avx
.__avx_reserved1
); ++i
)
368 m_state
.context
.fpu
.avx
.__avx_reserved1
[i
] = INT8_MIN
;
370 if (CPUHasAVX512f() || FORCE_AVX_REGS
) {
371 for (int i
= 0; i
< 8; ++i
) {
372 m_state
.context
.fpu
.avx512f
.__fpu_k0
.__opmask_reg
[i
] = '0';
373 m_state
.context
.fpu
.avx512f
.__fpu_k1
.__opmask_reg
[i
] = '1';
374 m_state
.context
.fpu
.avx512f
.__fpu_k2
.__opmask_reg
[i
] = '2';
375 m_state
.context
.fpu
.avx512f
.__fpu_k3
.__opmask_reg
[i
] = '3';
376 m_state
.context
.fpu
.avx512f
.__fpu_k4
.__opmask_reg
[i
] = '4';
377 m_state
.context
.fpu
.avx512f
.__fpu_k5
.__opmask_reg
[i
] = '5';
378 m_state
.context
.fpu
.avx512f
.__fpu_k6
.__opmask_reg
[i
] = '6';
379 m_state
.context
.fpu
.avx512f
.__fpu_k7
.__opmask_reg
[i
] = '7';
382 for (int i
= 0; i
< 32; ++i
) {
383 m_state
.context
.fpu
.avx512f
.__fpu_zmmh0
.__ymm_reg
[i
] = '0';
384 m_state
.context
.fpu
.avx512f
.__fpu_zmmh1
.__ymm_reg
[i
] = '1';
385 m_state
.context
.fpu
.avx512f
.__fpu_zmmh2
.__ymm_reg
[i
] = '2';
386 m_state
.context
.fpu
.avx512f
.__fpu_zmmh3
.__ymm_reg
[i
] = '3';
387 m_state
.context
.fpu
.avx512f
.__fpu_zmmh4
.__ymm_reg
[i
] = '4';
388 m_state
.context
.fpu
.avx512f
.__fpu_zmmh5
.__ymm_reg
[i
] = '5';
389 m_state
.context
.fpu
.avx512f
.__fpu_zmmh6
.__ymm_reg
[i
] = '6';
390 m_state
.context
.fpu
.avx512f
.__fpu_zmmh7
.__ymm_reg
[i
] = '7';
391 m_state
.context
.fpu
.avx512f
.__fpu_zmmh8
.__ymm_reg
[i
] = '8';
392 m_state
.context
.fpu
.avx512f
.__fpu_zmmh9
.__ymm_reg
[i
] = '9';
393 m_state
.context
.fpu
.avx512f
.__fpu_zmmh10
.__ymm_reg
[i
] = 'A';
394 m_state
.context
.fpu
.avx512f
.__fpu_zmmh11
.__ymm_reg
[i
] = 'B';
395 m_state
.context
.fpu
.avx512f
.__fpu_zmmh12
.__ymm_reg
[i
] = 'C';
396 m_state
.context
.fpu
.avx512f
.__fpu_zmmh13
.__ymm_reg
[i
] = 'D';
397 m_state
.context
.fpu
.avx512f
.__fpu_zmmh14
.__ymm_reg
[i
] = 'E';
398 m_state
.context
.fpu
.avx512f
.__fpu_zmmh15
.__ymm_reg
[i
] = 'F';
400 for (int i
= 0; i
< 64; ++i
) {
401 m_state
.context
.fpu
.avx512f
.__fpu_zmm16
.__zmm_reg
[i
] = 'G';
402 m_state
.context
.fpu
.avx512f
.__fpu_zmm17
.__zmm_reg
[i
] = 'H';
403 m_state
.context
.fpu
.avx512f
.__fpu_zmm18
.__zmm_reg
[i
] = 'I';
404 m_state
.context
.fpu
.avx512f
.__fpu_zmm19
.__zmm_reg
[i
] = 'J';
405 m_state
.context
.fpu
.avx512f
.__fpu_zmm20
.__zmm_reg
[i
] = 'K';
406 m_state
.context
.fpu
.avx512f
.__fpu_zmm21
.__zmm_reg
[i
] = 'L';
407 m_state
.context
.fpu
.avx512f
.__fpu_zmm22
.__zmm_reg
[i
] = 'M';
408 m_state
.context
.fpu
.avx512f
.__fpu_zmm23
.__zmm_reg
[i
] = 'N';
409 m_state
.context
.fpu
.avx512f
.__fpu_zmm24
.__zmm_reg
[i
] = 'O';
410 m_state
.context
.fpu
.avx512f
.__fpu_zmm25
.__zmm_reg
[i
] = 'P';
411 m_state
.context
.fpu
.avx512f
.__fpu_zmm26
.__zmm_reg
[i
] = 'Q';
412 m_state
.context
.fpu
.avx512f
.__fpu_zmm27
.__zmm_reg
[i
] = 'R';
413 m_state
.context
.fpu
.avx512f
.__fpu_zmm28
.__zmm_reg
[i
] = 'S';
414 m_state
.context
.fpu
.avx512f
.__fpu_zmm29
.__zmm_reg
[i
] = 'T';
415 m_state
.context
.fpu
.avx512f
.__fpu_zmm30
.__zmm_reg
[i
] = 'U';
416 m_state
.context
.fpu
.avx512f
.__fpu_zmm31
.__zmm_reg
[i
] = 'V';
419 m_state
.SetError(e_regSetFPU
, Read
, 0);
421 mach_msg_type_number_t count
= e_regSetWordSizeFPU
;
422 int flavor
= __x86_64_FLOAT_STATE
;
423 // On a machine with the AVX512 register set, a process only gets a
424 // full AVX512 register context after it uses the AVX512 registers;
425 // if the process has not yet triggered this change, trying to fetch
426 // the AVX512 registers will fail. Fall through to fetching the AVX
428 if (CPUHasAVX512f() || FORCE_AVX_REGS
) {
429 count
= e_regSetWordSizeAVX512f
;
430 flavor
= __x86_64_AVX512F_STATE
;
431 m_state
.SetError(e_regSetFPU
, Read
,
432 ::thread_get_state(m_thread
->MachPortNumber(), flavor
,
433 (thread_state_t
)&m_state
.context
.fpu
,
435 DNBLogThreadedIf(LOG_THREAD
,
436 "::thread_get_state (0x%4.4x, %u, &fpu, %u => 0x%8.8x",
437 m_thread
->MachPortNumber(), flavor
, (uint32_t)count
,
438 m_state
.GetError(e_regSetFPU
, Read
));
440 if (m_state
.GetError(e_regSetFPU
, Read
) == KERN_SUCCESS
)
441 return m_state
.GetError(e_regSetFPU
, Read
);
443 DNBLogThreadedIf(LOG_THREAD
,
444 "::thread_get_state attempted fetch of avx512 fpu regctx failed, will try fetching avx");
446 if (CPUHasAVX() || FORCE_AVX_REGS
) {
447 count
= e_regSetWordSizeAVX
;
448 flavor
= __x86_64_AVX_STATE
;
450 m_state
.SetError(e_regSetFPU
, Read
,
451 ::thread_get_state(m_thread
->MachPortNumber(), flavor
,
452 (thread_state_t
)&m_state
.context
.fpu
,
454 DNBLogThreadedIf(LOG_THREAD
,
455 "::thread_get_state (0x%4.4x, %u, &fpu, %u => 0x%8.8x",
456 m_thread
->MachPortNumber(), flavor
, (uint32_t)count
,
457 m_state
.GetError(e_regSetFPU
, Read
));
460 return m_state
.GetError(e_regSetFPU
, Read
);
463 kern_return_t
DNBArchImplX86_64::GetEXCState(bool force
) {
464 if (force
|| m_state
.GetError(e_regSetEXC
, Read
)) {
465 mach_msg_type_number_t count
= e_regSetWordSizeEXC
;
468 ::thread_get_state(m_thread
->MachPortNumber(), __x86_64_EXCEPTION_STATE
,
469 (thread_state_t
)&m_state
.context
.exc
, &count
));
471 return m_state
.GetError(e_regSetEXC
, Read
);
474 kern_return_t
DNBArchImplX86_64::SetGPRState() {
475 kern_return_t kret
= ::thread_abort_safely(m_thread
->MachPortNumber());
477 LOG_THREAD
, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
478 "(SetGPRState() for stop_count = %u)",
479 m_thread
->MachPortNumber(), kret
, m_thread
->Process()->StopCount());
481 mach_msg_type_number_t count
=
482 m_state
.hasFullGPRState
? e_regSetWordSizeGPRFull
: e_regSetWordSizeGPR
;
483 int flavor
= m_state
.hasFullGPRState
? __x86_64_THREAD_FULL_STATE
484 : __x86_64_THREAD_STATE
;
485 m_state
.SetError(e_regSetGPR
, Write
,
486 ::thread_set_state(m_thread
->MachPortNumber(), flavor
,
487 (thread_state_t
)&m_state
.context
.gpr
,
491 "::thread_set_state (0x%4.4x, %u (%s), &gpr, %u) => 0x%8.8x"
492 "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
493 "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
494 "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
495 "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
496 "\n\trip = %16.16llx"
497 "\n\tflg = %16.16llx cs = %16.16llx fs = %16.16llx gs = %16.16llx"
498 "\n\t ds = %16.16llx es = %16.16llx ss = %16.16llx gsB = %16.16llx",
499 m_thread
->MachPortNumber(), flavor
,
500 m_state
.hasFullGPRState
? "full" : "non-full", count
,
501 m_state
.GetError(e_regSetGPR
, Write
), m_state
.context
.gpr
.__rax
,
502 m_state
.context
.gpr
.__rbx
, m_state
.context
.gpr
.__rcx
,
503 m_state
.context
.gpr
.__rdx
, m_state
.context
.gpr
.__rdi
,
504 m_state
.context
.gpr
.__rsi
, m_state
.context
.gpr
.__rbp
,
505 m_state
.context
.gpr
.__rsp
, m_state
.context
.gpr
.__r8
,
506 m_state
.context
.gpr
.__r9
, m_state
.context
.gpr
.__r10
,
507 m_state
.context
.gpr
.__r11
, m_state
.context
.gpr
.__r12
,
508 m_state
.context
.gpr
.__r13
, m_state
.context
.gpr
.__r14
,
509 m_state
.context
.gpr
.__r15
, m_state
.context
.gpr
.__rip
,
510 m_state
.context
.gpr
.__rflags
, m_state
.context
.gpr
.__cs
,
511 m_state
.context
.gpr
.__fs
, m_state
.context
.gpr
.__gs
,
512 m_state
.context
.gpr
.__ds
, m_state
.context
.gpr
.__es
,
513 m_state
.context
.gpr
.__ss
, m_state
.context
.gpr
.__gsbase
);
514 return m_state
.GetError(e_regSetGPR
, Write
);
517 kern_return_t
DNBArchImplX86_64::SetFPUState() {
518 if (DEBUG_FPU_REGS
) {
519 m_state
.SetError(e_regSetFPU
, Write
, 0);
520 return m_state
.GetError(e_regSetFPU
, Write
);
522 int flavor
= __x86_64_FLOAT_STATE
;
523 mach_msg_type_number_t count
= e_regSetWordSizeFPU
;
524 if (CPUHasAVX512f() || FORCE_AVX_REGS
) {
525 count
= e_regSetWordSizeAVX512f
;
526 flavor
= __x86_64_AVX512F_STATE
;
529 ::thread_set_state(m_thread
->MachPortNumber(), flavor
,
530 (thread_state_t
)&m_state
.context
.fpu
, count
));
531 if (m_state
.GetError(e_regSetFPU
, Write
) == KERN_SUCCESS
)
532 return m_state
.GetError(e_regSetFPU
, Write
);
534 DNBLogThreadedIf(LOG_THREAD
,
535 "::thread_get_state attempted save of avx512 fpu regctx failed, will try saving avx regctx");
538 if (CPUHasAVX() || FORCE_AVX_REGS
) {
539 flavor
= __x86_64_AVX_STATE
;
540 count
= e_regSetWordSizeAVX
;
544 ::thread_set_state(m_thread
->MachPortNumber(), flavor
,
545 (thread_state_t
)&m_state
.context
.fpu
, count
));
546 return m_state
.GetError(e_regSetFPU
, Write
);
550 kern_return_t
DNBArchImplX86_64::SetEXCState() {
551 m_state
.SetError(e_regSetEXC
, Write
,
552 ::thread_set_state(m_thread
->MachPortNumber(),
553 __x86_64_EXCEPTION_STATE
,
554 (thread_state_t
)&m_state
.context
.exc
,
555 e_regSetWordSizeEXC
));
556 return m_state
.GetError(e_regSetEXC
, Write
);
559 kern_return_t
DNBArchImplX86_64::GetDBGState(bool force
) {
560 if (force
|| m_state
.GetError(e_regSetDBG
, Read
)) {
561 mach_msg_type_number_t count
= e_regSetWordSizeDBG
;
564 ::thread_get_state(m_thread
->MachPortNumber(), __x86_64_DEBUG_STATE
,
565 (thread_state_t
)&m_state
.context
.dbg
, &count
));
567 return m_state
.GetError(e_regSetDBG
, Read
);
570 kern_return_t
DNBArchImplX86_64::SetDBGState(bool also_set_on_task
) {
571 m_state
.SetError(e_regSetDBG
, Write
,
572 ::thread_set_state(m_thread
->MachPortNumber(),
573 __x86_64_DEBUG_STATE
,
574 (thread_state_t
)&m_state
.context
.dbg
,
575 e_regSetWordSizeDBG
));
576 if (also_set_on_task
) {
577 kern_return_t kret
= ::task_set_state(
578 m_thread
->Process()->Task().TaskPort(), __x86_64_DEBUG_STATE
,
579 (thread_state_t
)&m_state
.context
.dbg
, e_regSetWordSizeDBG
);
580 if (kret
!= KERN_SUCCESS
)
581 DNBLogThreadedIf(LOG_WATCHPOINTS
, "DNBArchImplX86_64::SetDBGState failed "
582 "to set debug control register state: "
586 return m_state
.GetError(e_regSetDBG
, Write
);
589 void DNBArchImplX86_64::ThreadWillResume() {
590 // Do we need to step this thread? If so, let the mach thread tell us so.
591 if (m_thread
->IsStepping()) {
592 // This is the primary thread, let the arch do anything it needs
593 EnableHardwareSingleStep(true);
596 // Reset the debug status register, if necessary, before we resume.
597 kern_return_t kret
= GetDBGState(false);
600 "DNBArchImplX86_64::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret
);
601 if (kret
!= KERN_SUCCESS
)
604 DBG
&debug_state
= m_state
.context
.dbg
;
605 bool need_reset
= false;
606 uint32_t i
, num
= NumSupportedHardwareWatchpoints();
607 for (i
= 0; i
< num
; ++i
)
608 if (IsWatchpointHit(debug_state
, i
))
612 ClearWatchpointHits(debug_state
);
613 kret
= SetDBGState(false);
616 "DNBArchImplX86_64::ThreadWillResume() SetDBGState() => 0x%8.8x.",
621 bool DNBArchImplX86_64::ThreadDidStop() {
624 m_state
.InvalidateAllRegisterStates();
626 // Are we stepping a single instruction?
627 if (GetGPRState(true) == KERN_SUCCESS
) {
628 // We are single stepping, was this the primary thread?
629 if (m_thread
->IsStepping()) {
630 // This was the primary thread, we need to clear the trace
632 success
= EnableHardwareSingleStep(false) == KERN_SUCCESS
;
634 // The MachThread will automatically restore the suspend count
635 // in ThreadDidStop(), so we don't need to do anything here if
636 // we weren't the primary thread the last time
642 bool DNBArchImplX86_64::NotifyException(MachException::Data
&exc
) {
643 switch (exc
.exc_type
) {
646 case EXC_BAD_INSTRUCTION
:
655 if (exc
.exc_data
.size() >= 2 && exc
.exc_data
[0] == 2) {
656 // exc_code = EXC_I386_BPT
658 nub_addr_t pc
= GetPC(INVALID_NUB_ADDRESS
);
659 if (pc
!= INVALID_NUB_ADDRESS
&& pc
> 0) {
661 // Check for a breakpoint at one byte prior to the current PC value
662 // since the PC will be just past the trap.
665 m_thread
->Process()->Breakpoints().FindByAddress(pc
);
667 // Backup the PC for i386 since the trap was taken and the PC
668 // is at the address following the single byte trap instruction.
669 if (m_state
.context
.gpr
.__rip
> 0) {
670 m_state
.context
.gpr
.__rip
= pc
;
671 // Write the new PC back out
677 } else if (exc
.exc_data
.size() >= 2 && exc
.exc_data
[0] == 1) {
678 // exc_code = EXC_I386_SGL
680 // Check whether this corresponds to a watchpoint hit event.
681 // If yes, set the exc_sub_code to the data break address.
683 uint32_t hw_index
= GetHardwareWatchpointHit(addr
);
684 if (hw_index
!= INVALID_NUB_HW_INDEX
) {
685 exc
.exc_data
[1] = addr
;
686 // Piggyback the hw_index in the exc.data.
687 exc
.exc_data
.push_back(hw_index
);
695 case EXC_MACH_SYSCALL
:
703 uint32_t DNBArchImplX86_64::NumSupportedHardwareWatchpoints() {
704 // Available debug address registers: dr0, dr1, dr2, dr3.
708 uint32_t DNBArchImplX86_64::NumSupportedHardwareBreakpoints() {
709 DNBLogThreadedIf(LOG_BREAKPOINTS
,
710 "DNBArchImplX86_64::NumSupportedHardwareBreakpoints");
714 static uint32_t size_and_rw_bits(nub_size_t size
, bool read
, bool write
) {
717 rw
= 0x3; // READ or READ/WRITE
721 assert(0 && "read and write cannot both be false");
728 return (0x1 << 2) | rw
;
730 return (0x3 << 2) | rw
;
732 return (0x2 << 2) | rw
;
734 assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
737 void DNBArchImplX86_64::SetWatchpoint(DBG
&debug_state
, uint32_t hw_index
,
738 nub_addr_t addr
, nub_size_t size
,
739 bool read
, bool write
) {
740 // Set both dr7 (debug control register) and dri (debug address register).
742 // dr7{7-0} encodes the local/gloabl enable bits:
743 // global enable --. .-- local enable
751 // dr7{31-16} encodes the rw/len bits:
752 // b_x+3, b_x+2, b_x+1, b_x
753 // where bits{x+1, x} => rw
754 // 0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io
755 // read-or-write (unused)
756 // and bits{x+3, x+2} => len
757 // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
759 // dr0 -> bits{19-16}
760 // dr1 -> bits{23-20}
761 // dr2 -> bits{27-24}
762 // dr3 -> bits{31-28}
764 (1 << (2 * hw_index
) |
765 size_and_rw_bits(size
, read
, write
) << (16 + 4 * hw_index
));
768 debug_state
.__dr0
= addr
;
771 debug_state
.__dr1
= addr
;
774 debug_state
.__dr2
= addr
;
777 debug_state
.__dr3
= addr
;
781 "invalid hardware register index, must be one of 0, 1, 2, or 3");
786 void DNBArchImplX86_64::ClearWatchpoint(DBG
&debug_state
, uint32_t hw_index
) {
787 debug_state
.__dr7
&= ~(3 << (2 * hw_index
));
790 debug_state
.__dr0
= 0;
793 debug_state
.__dr1
= 0;
796 debug_state
.__dr2
= 0;
799 debug_state
.__dr3
= 0;
803 "invalid hardware register index, must be one of 0, 1, 2, or 3");
808 bool DNBArchImplX86_64::IsWatchpointVacant(const DBG
&debug_state
,
810 // Check dr7 (debug control register) for local/global enable bits:
811 // global enable --. .-- local enable
818 return (debug_state
.__dr7
& (3 << (2 * hw_index
))) == 0;
821 // Resets local copy of debug status register to wait for the next debug
823 void DNBArchImplX86_64::ClearWatchpointHits(DBG
&debug_state
) {
824 // See also IsWatchpointHit().
825 debug_state
.__dr6
= 0;
829 bool DNBArchImplX86_64::IsWatchpointHit(const DBG
&debug_state
,
831 // Check dr6 (debug status register) whether a watchpoint hits:
832 // is watchpoint hit?
839 return (debug_state
.__dr6
& (1 << hw_index
));
842 nub_addr_t
DNBArchImplX86_64::GetWatchAddress(const DBG
&debug_state
,
846 return debug_state
.__dr0
;
848 return debug_state
.__dr1
;
850 return debug_state
.__dr2
;
852 return debug_state
.__dr3
;
854 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
858 bool DNBArchImplX86_64::StartTransForHWP() {
859 if (m_2pc_trans_state
!= Trans_Done
&& m_2pc_trans_state
!= Trans_Rolled_Back
)
860 DNBLogError("%s inconsistent state detected, expected %d or %d, got: %d",
861 __FUNCTION__
, Trans_Done
, Trans_Rolled_Back
, m_2pc_trans_state
);
862 m_2pc_dbg_checkpoint
= m_state
.context
.dbg
;
863 m_2pc_trans_state
= Trans_Pending
;
866 bool DNBArchImplX86_64::RollbackTransForHWP() {
867 m_state
.context
.dbg
= m_2pc_dbg_checkpoint
;
868 if (m_2pc_trans_state
!= Trans_Pending
)
869 DNBLogError("%s inconsistent state detected, expected %d, got: %d",
870 __FUNCTION__
, Trans_Pending
, m_2pc_trans_state
);
871 m_2pc_trans_state
= Trans_Rolled_Back
;
872 kern_return_t kret
= SetDBGState(false);
875 "DNBArchImplX86_64::RollbackTransForHWP() SetDBGState() => 0x%8.8x.",
878 return kret
== KERN_SUCCESS
;
880 bool DNBArchImplX86_64::FinishTransForHWP() {
881 m_2pc_trans_state
= Trans_Done
;
884 DNBArchImplX86_64::DBG
DNBArchImplX86_64::GetDBGCheckpoint() {
885 return m_2pc_dbg_checkpoint
;
888 void DNBArchImplX86_64::SetHardwareBreakpoint(DBG
&debug_state
,
892 // Set both dr7 (debug control register) and dri (debug address register).
894 // dr7{7-0} encodes the local/gloabl enable bits:
895 // global enable --. .-- local enable
903 // dr7{31-16} encodes the rw/len bits:
904 // b_x+3, b_x+2, b_x+1, b_x
905 // where bits{x+1, x} => rw
906 // 0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io
907 // read-or-write (unused)
908 // and bits{x+3, x+2} => len
909 // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
911 // dr0 -> bits{19-16}
912 // dr1 -> bits{23-20}
913 // dr2 -> bits{27-24}
914 // dr3 -> bits{31-28}
915 debug_state
.__dr7
|= (1 << (2 * hw_index
) | 0 << (16 + 4 * hw_index
));
919 debug_state
.__dr0
= addr
;
922 debug_state
.__dr1
= addr
;
925 debug_state
.__dr2
= addr
;
928 debug_state
.__dr3
= addr
;
932 "invalid hardware register index, must be one of 0, 1, 2, or 3");
937 uint32_t DNBArchImplX86_64::EnableHardwareBreakpoint(nub_addr_t addr
,
939 bool also_set_on_task
) {
940 DNBLogThreadedIf(LOG_BREAKPOINTS
,
941 "DNBArchImplX86_64::EnableHardwareBreakpoint( addr = "
942 "0x%8.8llx, size = %llu )",
943 (uint64_t)addr
, (uint64_t)size
);
945 const uint32_t num_hw_breakpoints
= NumSupportedHardwareBreakpoints();
946 // Read the debug state
947 kern_return_t kret
= GetDBGState(false);
949 if (kret
!= KERN_SUCCESS
) {
950 return INVALID_NUB_HW_INDEX
;
953 // Check to make sure we have the needed hardware support
956 DBG
&debug_state
= m_state
.context
.dbg
;
957 for (i
= 0; i
< num_hw_breakpoints
; ++i
) {
958 if (IsWatchpointVacant(debug_state
, i
)) {
963 // See if we found an available hw breakpoint slot above
964 if (i
< num_hw_breakpoints
) {
967 "DNBArchImplX86_64::EnableHardwareBreakpoint( free slot = %u )", i
);
971 // Modify our local copy of the debug state, first.
972 SetHardwareBreakpoint(debug_state
, i
, addr
, size
);
973 // Now set the watch point in the inferior.
974 kret
= SetDBGState(also_set_on_task
);
976 DNBLogThreadedIf(LOG_BREAKPOINTS
,
977 "DNBArchImplX86_64::"
978 "EnableHardwareBreakpoint() "
979 "SetDBGState() => 0x%8.8x.",
982 if (kret
== KERN_SUCCESS
) {
985 "DNBArchImplX86_64::EnableHardwareBreakpoint( enabled at slot = %u)",
989 // Revert to the previous debug state voluntarily. The transaction
990 // coordinator knows that we have failed.
992 m_state
.context
.dbg
= GetDBGCheckpoint();
995 DNBLogThreadedIf(LOG_BREAKPOINTS
,
996 "DNBArchImplX86_64::EnableHardwareBreakpoint(addr = "
997 "0x%8.8llx, size = %llu) => all hardware breakpoint "
998 "resources are being used.",
999 (uint64_t)addr
, (uint64_t)size
);
1002 return INVALID_NUB_HW_INDEX
;
1005 bool DNBArchImplX86_64::DisableHardwareBreakpoint(uint32_t hw_index
,
1006 bool also_set_on_task
) {
1007 kern_return_t kret
= GetDBGState(false);
1009 const uint32_t num_hw_points
= NumSupportedHardwareBreakpoints();
1010 if (kret
== KERN_SUCCESS
) {
1011 DBG
&debug_state
= m_state
.context
.dbg
;
1012 if (hw_index
< num_hw_points
&&
1013 !IsWatchpointVacant(debug_state
, hw_index
)) {
1017 // Modify our local copy of the debug state, first.
1018 ClearWatchpoint(debug_state
, hw_index
);
1019 // Now disable the watch point in the inferior.
1020 kret
= SetDBGState(true);
1021 DNBLogThreadedIf(LOG_WATCHPOINTS
,
1022 "DNBArchImplX86_64::DisableHardwareBreakpoint( %u )",
1025 if (kret
== KERN_SUCCESS
)
1027 else // Revert to the previous debug state voluntarily. The transaction
1028 // coordinator knows that we have failed.
1029 m_state
.context
.dbg
= GetDBGCheckpoint();
1035 uint32_t DNBArchImplX86_64::EnableHardwareWatchpoint(nub_addr_t addr
,
1036 nub_size_t size
, bool read
,
1038 bool also_set_on_task
) {
1039 DNBLogThreadedIf(LOG_WATCHPOINTS
, "DNBArchImplX86_64::"
1040 "EnableHardwareWatchpoint(addr = 0x%llx, "
1041 "size = %llu, read = %u, write = %u)",
1042 (uint64_t)addr
, (uint64_t)size
, read
, write
);
1044 const uint32_t num_hw_watchpoints
= NumSupportedHardwareWatchpoints();
1046 // Can only watch 1, 2, 4, or 8 bytes.
1047 if (!(size
== 1 || size
== 2 || size
== 4 || size
== 8))
1048 return INVALID_NUB_HW_INDEX
;
1050 // We must watch for either read or write
1051 if (!read
&& !write
)
1052 return INVALID_NUB_HW_INDEX
;
1054 // Read the debug state
1055 kern_return_t kret
= GetDBGState(false);
1057 if (kret
== KERN_SUCCESS
) {
1058 // Check to make sure we have the needed hardware support
1061 DBG
&debug_state
= m_state
.context
.dbg
;
1062 for (i
= 0; i
< num_hw_watchpoints
; ++i
) {
1063 if (IsWatchpointVacant(debug_state
, i
))
1067 // See if we found an available hw breakpoint slot above
1068 if (i
< num_hw_watchpoints
) {
1071 // Modify our local copy of the debug state, first.
1072 SetWatchpoint(debug_state
, i
, addr
, size
, read
, write
);
1073 // Now set the watch point in the inferior.
1074 kret
= SetDBGState(also_set_on_task
);
1075 DNBLogThreadedIf(LOG_WATCHPOINTS
, "DNBArchImplX86_64::"
1076 "EnableHardwareWatchpoint() "
1077 "SetDBGState() => 0x%8.8x.",
1080 if (kret
== KERN_SUCCESS
)
1082 else // Revert to the previous debug state voluntarily. The transaction
1083 // coordinator knows that we have failed.
1084 m_state
.context
.dbg
= GetDBGCheckpoint();
1086 DNBLogThreadedIf(LOG_WATCHPOINTS
, "DNBArchImplX86_64::"
1087 "EnableHardwareWatchpoint(): All "
1088 "hardware resources (%u) are in use.",
1089 num_hw_watchpoints
);
1092 return INVALID_NUB_HW_INDEX
;
1095 bool DNBArchImplX86_64::DisableHardwareWatchpoint(uint32_t hw_index
,
1096 bool also_set_on_task
) {
1097 kern_return_t kret
= GetDBGState(false);
1099 const uint32_t num_hw_points
= NumSupportedHardwareWatchpoints();
1100 if (kret
== KERN_SUCCESS
) {
1101 DBG
&debug_state
= m_state
.context
.dbg
;
1102 if (hw_index
< num_hw_points
&&
1103 !IsWatchpointVacant(debug_state
, hw_index
)) {
1106 // Modify our local copy of the debug state, first.
1107 ClearWatchpoint(debug_state
, hw_index
);
1108 // Now disable the watch point in the inferior.
1109 kret
= SetDBGState(also_set_on_task
);
1110 DNBLogThreadedIf(LOG_WATCHPOINTS
,
1111 "DNBArchImplX86_64::DisableHardwareWatchpoint( %u )",
1114 if (kret
== KERN_SUCCESS
)
1116 else // Revert to the previous debug state voluntarily. The transaction
1117 // coordinator knows that we have failed.
1118 m_state
.context
.dbg
= GetDBGCheckpoint();
1124 // Iterate through the debug status register; return the index of the first hit.
1125 uint32_t DNBArchImplX86_64::GetHardwareWatchpointHit(nub_addr_t
&addr
) {
1126 // Read the debug state
1127 kern_return_t kret
= GetDBGState(true);
1130 "DNBArchImplX86_64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.",
1132 if (kret
== KERN_SUCCESS
) {
1133 DBG
&debug_state
= m_state
.context
.dbg
;
1134 uint32_t i
, num
= NumSupportedHardwareWatchpoints();
1135 for (i
= 0; i
< num
; ++i
) {
1136 if (IsWatchpointHit(debug_state
, i
)) {
1137 addr
= GetWatchAddress(debug_state
, i
);
1138 DNBLogThreadedIf(LOG_WATCHPOINTS
, "DNBArchImplX86_64::"
1139 "GetHardwareWatchpointHit() found => "
1140 "%u (addr = 0x%llx).",
1146 return INVALID_NUB_HW_INDEX
;
1149 // Set the single step bit in the processor status register.
1150 kern_return_t
DNBArchImplX86_64::EnableHardwareSingleStep(bool enable
) {
1151 if (GetGPRState(false) == KERN_SUCCESS
) {
1152 const uint32_t trace_bit
= 0x100u
;
1154 m_state
.context
.gpr
.__rflags
|= trace_bit
;
1156 m_state
.context
.gpr
.__rflags
&= ~trace_bit
;
1157 return SetGPRState();
1159 return m_state
.GetError(e_regSetGPR
, Read
);
1162 // Register information definitions
1198 gpr_r8d
, // Low 32 bits or r8
1199 gpr_r9d
, // Low 32 bits or r9
1200 gpr_r10d
, // Low 32 bits or r10
1201 gpr_r11d
, // Low 32 bits or r11
1202 gpr_r12d
, // Low 32 bits or r12
1203 gpr_r13d
, // Low 32 bits or r13
1204 gpr_r14d
, // Low 32 bits or r14
1205 gpr_r15d
, // Low 32 bits or r15
1214 gpr_r8w
, // Low 16 bits or r8
1215 gpr_r9w
, // Low 16 bits or r9
1216 gpr_r10w
, // Low 16 bits or r10
1217 gpr_r11w
, // Low 16 bits or r11
1218 gpr_r12w
, // Low 16 bits or r12
1219 gpr_r13w
, // Low 16 bits or r13
1220 gpr_r14w
, // Low 16 bits or r14
1221 gpr_r15w
, // Low 16 bits or r15
1234 gpr_r8l
, // Low 8 bits or r8
1235 gpr_r9l
, // Low 8 bits or r9
1236 gpr_r10l
, // Low 8 bits or r10
1237 gpr_r11l
, // Low 8 bits or r11
1238 gpr_r12l
, // Low 8 bits or r12
1239 gpr_r13l
, // Low 8 bits or r13
1240 gpr_r14l
, // Low 8 bits or r14
1241 gpr_r15l
, // Low 8 bits or r15
1339 fpu_fctrl
= fpu_fcw
,
1340 fpu_fstat
= fpu_fsw
,
1355 enum ehframe_dwarf_regnums
{
1356 ehframe_dwarf_rax
= 0,
1357 ehframe_dwarf_rdx
= 1,
1358 ehframe_dwarf_rcx
= 2,
1359 ehframe_dwarf_rbx
= 3,
1360 ehframe_dwarf_rsi
= 4,
1361 ehframe_dwarf_rdi
= 5,
1362 ehframe_dwarf_rbp
= 6,
1363 ehframe_dwarf_rsp
= 7,
1383 ehframe_dwarf_xmm10
,
1384 ehframe_dwarf_xmm11
,
1385 ehframe_dwarf_xmm12
,
1386 ehframe_dwarf_xmm13
,
1387 ehframe_dwarf_xmm14
,
1388 ehframe_dwarf_xmm15
,
1389 ehframe_dwarf_stmm0
,
1390 ehframe_dwarf_stmm1
,
1391 ehframe_dwarf_stmm2
,
1392 ehframe_dwarf_stmm3
,
1393 ehframe_dwarf_stmm4
,
1394 ehframe_dwarf_stmm5
,
1395 ehframe_dwarf_stmm6
,
1396 ehframe_dwarf_stmm7
,
1397 ehframe_dwarf_ymm0
= ehframe_dwarf_xmm0
,
1398 ehframe_dwarf_ymm1
= ehframe_dwarf_xmm1
,
1399 ehframe_dwarf_ymm2
= ehframe_dwarf_xmm2
,
1400 ehframe_dwarf_ymm3
= ehframe_dwarf_xmm3
,
1401 ehframe_dwarf_ymm4
= ehframe_dwarf_xmm4
,
1402 ehframe_dwarf_ymm5
= ehframe_dwarf_xmm5
,
1403 ehframe_dwarf_ymm6
= ehframe_dwarf_xmm6
,
1404 ehframe_dwarf_ymm7
= ehframe_dwarf_xmm7
,
1405 ehframe_dwarf_ymm8
= ehframe_dwarf_xmm8
,
1406 ehframe_dwarf_ymm9
= ehframe_dwarf_xmm9
,
1407 ehframe_dwarf_ymm10
= ehframe_dwarf_xmm10
,
1408 ehframe_dwarf_ymm11
= ehframe_dwarf_xmm11
,
1409 ehframe_dwarf_ymm12
= ehframe_dwarf_xmm12
,
1410 ehframe_dwarf_ymm13
= ehframe_dwarf_xmm13
,
1411 ehframe_dwarf_ymm14
= ehframe_dwarf_xmm14
,
1412 ehframe_dwarf_ymm15
= ehframe_dwarf_xmm15
,
1413 ehframe_dwarf_zmm0
= ehframe_dwarf_xmm0
,
1414 ehframe_dwarf_zmm1
= ehframe_dwarf_xmm1
,
1415 ehframe_dwarf_zmm2
= ehframe_dwarf_xmm2
,
1416 ehframe_dwarf_zmm3
= ehframe_dwarf_xmm3
,
1417 ehframe_dwarf_zmm4
= ehframe_dwarf_xmm4
,
1418 ehframe_dwarf_zmm5
= ehframe_dwarf_xmm5
,
1419 ehframe_dwarf_zmm6
= ehframe_dwarf_xmm6
,
1420 ehframe_dwarf_zmm7
= ehframe_dwarf_xmm7
,
1421 ehframe_dwarf_zmm8
= ehframe_dwarf_xmm8
,
1422 ehframe_dwarf_zmm9
= ehframe_dwarf_xmm9
,
1423 ehframe_dwarf_zmm10
= ehframe_dwarf_xmm10
,
1424 ehframe_dwarf_zmm11
= ehframe_dwarf_xmm11
,
1425 ehframe_dwarf_zmm12
= ehframe_dwarf_xmm12
,
1426 ehframe_dwarf_zmm13
= ehframe_dwarf_xmm13
,
1427 ehframe_dwarf_zmm14
= ehframe_dwarf_xmm14
,
1428 ehframe_dwarf_zmm15
= ehframe_dwarf_xmm15
,
1429 ehframe_dwarf_zmm16
= 67,
1430 ehframe_dwarf_zmm17
,
1431 ehframe_dwarf_zmm18
,
1432 ehframe_dwarf_zmm19
,
1433 ehframe_dwarf_zmm20
,
1434 ehframe_dwarf_zmm21
,
1435 ehframe_dwarf_zmm22
,
1436 ehframe_dwarf_zmm23
,
1437 ehframe_dwarf_zmm24
,
1438 ehframe_dwarf_zmm25
,
1439 ehframe_dwarf_zmm26
,
1440 ehframe_dwarf_zmm27
,
1441 ehframe_dwarf_zmm28
,
1442 ehframe_dwarf_zmm29
,
1443 ehframe_dwarf_zmm30
,
1444 ehframe_dwarf_zmm31
,
1445 ehframe_dwarf_k0
= 118,
1455 enum debugserver_regnums
{
1456 debugserver_rax
= 0,
1457 debugserver_rbx
= 1,
1458 debugserver_rcx
= 2,
1459 debugserver_rdx
= 3,
1460 debugserver_rsi
= 4,
1461 debugserver_rdi
= 5,
1462 debugserver_rbp
= 6,
1463 debugserver_rsp
= 7,
1466 debugserver_r10
= 10,
1467 debugserver_r11
= 11,
1468 debugserver_r12
= 12,
1469 debugserver_r13
= 13,
1470 debugserver_r14
= 14,
1471 debugserver_r15
= 15,
1472 debugserver_rip
= 16,
1473 debugserver_rflags
= 17,
1474 debugserver_cs
= 18,
1475 debugserver_ss
= 19,
1476 debugserver_ds
= 20,
1477 debugserver_es
= 21,
1478 debugserver_fs
= 22,
1479 debugserver_gs
= 23,
1480 debugserver_stmm0
= 24,
1481 debugserver_stmm1
= 25,
1482 debugserver_stmm2
= 26,
1483 debugserver_stmm3
= 27,
1484 debugserver_stmm4
= 28,
1485 debugserver_stmm5
= 29,
1486 debugserver_stmm6
= 30,
1487 debugserver_stmm7
= 31,
1488 debugserver_fctrl
= 32,
1489 debugserver_fcw
= debugserver_fctrl
,
1490 debugserver_fstat
= 33,
1491 debugserver_fsw
= debugserver_fstat
,
1492 debugserver_ftag
= 34,
1493 debugserver_ftw
= debugserver_ftag
,
1494 debugserver_fiseg
= 35,
1495 debugserver_fpu_cs
= debugserver_fiseg
,
1496 debugserver_fioff
= 36,
1497 debugserver_ip
= debugserver_fioff
,
1498 debugserver_foseg
= 37,
1499 debugserver_fpu_ds
= debugserver_foseg
,
1500 debugserver_fooff
= 38,
1501 debugserver_dp
= debugserver_fooff
,
1502 debugserver_fop
= 39,
1503 debugserver_xmm0
= 40,
1504 debugserver_xmm1
= 41,
1505 debugserver_xmm2
= 42,
1506 debugserver_xmm3
= 43,
1507 debugserver_xmm4
= 44,
1508 debugserver_xmm5
= 45,
1509 debugserver_xmm6
= 46,
1510 debugserver_xmm7
= 47,
1511 debugserver_xmm8
= 48,
1512 debugserver_xmm9
= 49,
1513 debugserver_xmm10
= 50,
1514 debugserver_xmm11
= 51,
1515 debugserver_xmm12
= 52,
1516 debugserver_xmm13
= 53,
1517 debugserver_xmm14
= 54,
1518 debugserver_xmm15
= 55,
1519 debugserver_mxcsr
= 56,
1520 debugserver_ymm0
= debugserver_xmm0
,
1521 debugserver_ymm1
= debugserver_xmm1
,
1522 debugserver_ymm2
= debugserver_xmm2
,
1523 debugserver_ymm3
= debugserver_xmm3
,
1524 debugserver_ymm4
= debugserver_xmm4
,
1525 debugserver_ymm5
= debugserver_xmm5
,
1526 debugserver_ymm6
= debugserver_xmm6
,
1527 debugserver_ymm7
= debugserver_xmm7
,
1528 debugserver_ymm8
= debugserver_xmm8
,
1529 debugserver_ymm9
= debugserver_xmm9
,
1530 debugserver_ymm10
= debugserver_xmm10
,
1531 debugserver_ymm11
= debugserver_xmm11
,
1532 debugserver_ymm12
= debugserver_xmm12
,
1533 debugserver_ymm13
= debugserver_xmm13
,
1534 debugserver_ymm14
= debugserver_xmm14
,
1535 debugserver_ymm15
= debugserver_xmm15
,
1536 debugserver_zmm0
= debugserver_xmm0
,
1537 debugserver_zmm1
= debugserver_xmm1
,
1538 debugserver_zmm2
= debugserver_xmm2
,
1539 debugserver_zmm3
= debugserver_xmm3
,
1540 debugserver_zmm4
= debugserver_xmm4
,
1541 debugserver_zmm5
= debugserver_xmm5
,
1542 debugserver_zmm6
= debugserver_xmm6
,
1543 debugserver_zmm7
= debugserver_xmm7
,
1544 debugserver_zmm8
= debugserver_xmm8
,
1545 debugserver_zmm9
= debugserver_xmm9
,
1546 debugserver_zmm10
= debugserver_xmm10
,
1547 debugserver_zmm11
= debugserver_xmm11
,
1548 debugserver_zmm12
= debugserver_xmm12
,
1549 debugserver_zmm13
= debugserver_xmm13
,
1550 debugserver_zmm14
= debugserver_xmm14
,
1551 debugserver_zmm15
= debugserver_xmm15
,
1552 debugserver_zmm16
= 67,
1553 debugserver_zmm17
= 68,
1554 debugserver_zmm18
= 69,
1555 debugserver_zmm19
= 70,
1556 debugserver_zmm20
= 71,
1557 debugserver_zmm21
= 72,
1558 debugserver_zmm22
= 73,
1559 debugserver_zmm23
= 74,
1560 debugserver_zmm24
= 75,
1561 debugserver_zmm25
= 76,
1562 debugserver_zmm26
= 77,
1563 debugserver_zmm27
= 78,
1564 debugserver_zmm28
= 79,
1565 debugserver_zmm29
= 80,
1566 debugserver_zmm30
= 81,
1567 debugserver_zmm31
= 82,
1568 debugserver_k0
= 118,
1569 debugserver_k1
= 119,
1570 debugserver_k2
= 120,
1571 debugserver_k3
= 121,
1572 debugserver_k4
= 122,
1573 debugserver_k5
= 123,
1574 debugserver_k6
= 124,
1575 debugserver_k7
= 125,
1576 debugserver_gsbase
= 126,
// Byte-offset and byte-size helpers for the register-info tables below.
// Offsets are computed within DNBArchImplX86_64::Context; the FPU variants
// differ only in which union member (no_avx / avx / avx512f) they address.
#define GPR_OFFSET(reg) (offsetof(DNBArchImplX86_64::GPR, __##reg))
#define FPU_OFFSET(reg)                                                        \
  (offsetof(DNBArchImplX86_64::FPU, __fpu_##reg) +                             \
   offsetof(DNBArchImplX86_64::Context, fpu.no_avx))
#define AVX_OFFSET(reg)                                                        \
  (offsetof(DNBArchImplX86_64::AVX, __fpu_##reg) +                             \
   offsetof(DNBArchImplX86_64::Context, fpu.avx))
#define AVX512F_OFFSET(reg)                                                    \
  (offsetof(DNBArchImplX86_64::AVX512F, __fpu_##reg) +                         \
   offsetof(DNBArchImplX86_64::Context, fpu.avx512f))
#define EXC_OFFSET(reg)                                                        \
  (offsetof(DNBArchImplX86_64::EXC, __##reg) +                                 \
   offsetof(DNBArchImplX86_64::Context, exc))
// Per-register strides: 32 bytes of ymm-high state, 64 bytes of zmm state.
#define AVX_OFFSET_YMM(n) (AVX_OFFSET(ymmh0) + (32 * n))
#define AVX512F_OFFSET_ZMM(n) (AVX512F_OFFSET(zmmh0) + (64 * n))

#define GPR_SIZE(reg) (sizeof(((DNBArchImplX86_64::GPR *)NULL)->__##reg))
#define FPU_SIZE_UINT(reg)                                                     \
  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg))
#define FPU_SIZE_MMST(reg)                                                     \
  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__mmst_reg))
#define FPU_SIZE_XMM(reg)                                                      \
  (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__xmm_reg))
#define FPU_SIZE_YMM(reg) (32)
#define FPU_SIZE_ZMM(reg) (64)
#define EXC_SIZE(reg) (sizeof(((DNBArchImplX86_64::EXC *)NULL)->__##reg))
1606 // These macros will auto define the register name, alt name, register size,
1607 // register offset, encoding, format and native register. This ensures that
1608 // the register state structures are defined correctly and have the correct
1609 // sizes and offsets.
1610 #define DEFINE_GPR(reg) \
1612 e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, GPR_SIZE(reg), \
1613 GPR_OFFSET(reg), ehframe_dwarf_##reg, ehframe_dwarf_##reg, \
1614 INVALID_NUB_REGNUM, debugserver_##reg, NULL, g_invalidate_##reg \
1616 #define DEFINE_GPR_ALT(reg, alt, gen) \
1618 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), \
1619 GPR_OFFSET(reg), ehframe_dwarf_##reg, ehframe_dwarf_##reg, gen, \
1620 debugserver_##reg, NULL, g_invalidate_##reg \
1622 #define DEFINE_GPR_ALT2(reg, alt) \
1624 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), \
1625 GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
1626 INVALID_NUB_REGNUM, debugserver_##reg, NULL, NULL \
1628 #define DEFINE_GPR_ALT3(reg, alt, gen) \
1630 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), \
1631 GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gen, \
1632 debugserver_##reg, NULL, NULL \
1634 #define DEFINE_GPR_ALT4(reg, alt, gen) \
1636 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), \
1637 GPR_OFFSET(reg), ehframe_dwarf_##reg, ehframe_dwarf_##reg, gen, \
1638 debugserver_##reg, NULL, NULL \
1641 #define DEFINE_GPR_PSEUDO_32(reg32, reg64) \
1643 e_regSetGPR, gpr_##reg32, #reg32, NULL, Uint, Hex, 4, 0, \
1644 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
1645 INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 \
1647 #define DEFINE_GPR_PSEUDO_16(reg16, reg64) \
1649 e_regSetGPR, gpr_##reg16, #reg16, NULL, Uint, Hex, 2, 0, \
1650 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
1651 INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 \
1653 #define DEFINE_GPR_PSEUDO_8H(reg8, reg64) \
1655 e_regSetGPR, gpr_##reg8, #reg8, NULL, Uint, Hex, 1, 1, INVALID_NUB_REGNUM, \
1656 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
1657 g_contained_##reg64, g_invalidate_##reg64 \
1659 #define DEFINE_GPR_PSEUDO_8L(reg8, reg64) \
1661 e_regSetGPR, gpr_##reg8, #reg8, NULL, Uint, Hex, 1, 0, INVALID_NUB_REGNUM, \
1662 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
1663 g_contained_##reg64, g_invalidate_##reg64 \
1666 // General purpose registers for 64 bit
// General purpose registers for 64 bit
//
// Pseudo-register bookkeeping tables, all NULL-terminated:
//  - g_contained_<r64>: the 64-bit register that physically contains a
//    pseudo register's bits (e.g. eax/ax/ah/al live inside rax).
//  - g_invalidate_<r64>: every cached register value that must be
//    invalidated when <r64> (or one of its sub-registers) is written.
const char *g_contained_rax[] = {"rax", NULL};
const char *g_contained_rbx[] = {"rbx", NULL};
const char *g_contained_rcx[] = {"rcx", NULL};
const char *g_contained_rdx[] = {"rdx", NULL};
const char *g_contained_rdi[] = {"rdi", NULL};
const char *g_contained_rsi[] = {"rsi", NULL};
const char *g_contained_rbp[] = {"rbp", NULL};
const char *g_contained_rsp[] = {"rsp", NULL};
const char *g_contained_r8[] = {"r8", NULL};
const char *g_contained_r9[] = {"r9", NULL};
const char *g_contained_r10[] = {"r10", NULL};
const char *g_contained_r11[] = {"r11", NULL};
const char *g_contained_r12[] = {"r12", NULL};
const char *g_contained_r13[] = {"r13", NULL};
const char *g_contained_r14[] = {"r14", NULL};
const char *g_contained_r15[] = {"r15", NULL};

const char *g_invalidate_rax[] = {"rax", "eax", "ax", "ah", "al", NULL};
const char *g_invalidate_rbx[] = {"rbx", "ebx", "bx", "bh", "bl", NULL};
const char *g_invalidate_rcx[] = {"rcx", "ecx", "cx", "ch", "cl", NULL};
const char *g_invalidate_rdx[] = {"rdx", "edx", "dx", "dh", "dl", NULL};
const char *g_invalidate_rdi[] = {"rdi", "edi", "di", "dil", NULL};
const char *g_invalidate_rsi[] = {"rsi", "esi", "si", "sil", NULL};
const char *g_invalidate_rbp[] = {"rbp", "ebp", "bp", "bpl", NULL};
const char *g_invalidate_rsp[] = {"rsp", "esp", "sp", "spl", NULL};
const char *g_invalidate_r8[] = {"r8", "r8d", "r8w", "r8l", NULL};
const char *g_invalidate_r9[] = {"r9", "r9d", "r9w", "r9l", NULL};
const char *g_invalidate_r10[] = {"r10", "r10d", "r10w", "r10l", NULL};
const char *g_invalidate_r11[] = {"r11", "r11d", "r11w", "r11l", NULL};
const char *g_invalidate_r12[] = {"r12", "r12d", "r12w", "r12l", NULL};
const char *g_invalidate_r13[] = {"r13", "r13d", "r13w", "r13l", NULL};
const char *g_invalidate_r14[] = {"r14", "r14d", "r14w", "r14l", NULL};
const char *g_invalidate_r15[] = {"r15", "r15d", "r15w", "r15l", NULL};
1702 const DNBRegisterInfo
DNBArchImplX86_64::g_gpr_registers
[] = {
1705 DEFINE_GPR_ALT(rcx
, "arg4", GENERIC_REGNUM_ARG4
),
1706 DEFINE_GPR_ALT(rdx
, "arg3", GENERIC_REGNUM_ARG3
),
1707 DEFINE_GPR_ALT(rdi
, "arg1", GENERIC_REGNUM_ARG1
),
1708 DEFINE_GPR_ALT(rsi
, "arg2", GENERIC_REGNUM_ARG2
),
1709 DEFINE_GPR_ALT(rbp
, "fp", GENERIC_REGNUM_FP
),
1710 DEFINE_GPR_ALT(rsp
, "sp", GENERIC_REGNUM_SP
),
1711 DEFINE_GPR_ALT(r8
, "arg5", GENERIC_REGNUM_ARG5
),
1712 DEFINE_GPR_ALT(r9
, "arg6", GENERIC_REGNUM_ARG6
),
1719 DEFINE_GPR_ALT4(rip
, "pc", GENERIC_REGNUM_PC
),
1720 DEFINE_GPR_ALT3(rflags
, "flags", GENERIC_REGNUM_FLAGS
),
1721 DEFINE_GPR_ALT2(cs
, NULL
),
1722 DEFINE_GPR_ALT2(fs
, NULL
),
1723 DEFINE_GPR_ALT2(gs
, NULL
),
1724 DEFINE_GPR_ALT2(ds
, NULL
),
1725 DEFINE_GPR_ALT2(es
, NULL
),
1726 DEFINE_GPR_ALT2(ss
, NULL
),
1727 DEFINE_GPR_ALT2(gsbase
, NULL
),
1728 DEFINE_GPR_PSEUDO_32(eax
, rax
),
1729 DEFINE_GPR_PSEUDO_32(ebx
, rbx
),
1730 DEFINE_GPR_PSEUDO_32(ecx
, rcx
),
1731 DEFINE_GPR_PSEUDO_32(edx
, rdx
),
1732 DEFINE_GPR_PSEUDO_32(edi
, rdi
),
1733 DEFINE_GPR_PSEUDO_32(esi
, rsi
),
1734 DEFINE_GPR_PSEUDO_32(ebp
, rbp
),
1735 DEFINE_GPR_PSEUDO_32(esp
, rsp
),
1736 DEFINE_GPR_PSEUDO_32(r8d
, r8
),
1737 DEFINE_GPR_PSEUDO_32(r9d
, r9
),
1738 DEFINE_GPR_PSEUDO_32(r10d
, r10
),
1739 DEFINE_GPR_PSEUDO_32(r11d
, r11
),
1740 DEFINE_GPR_PSEUDO_32(r12d
, r12
),
1741 DEFINE_GPR_PSEUDO_32(r13d
, r13
),
1742 DEFINE_GPR_PSEUDO_32(r14d
, r14
),
1743 DEFINE_GPR_PSEUDO_32(r15d
, r15
),
1744 DEFINE_GPR_PSEUDO_16(ax
, rax
),
1745 DEFINE_GPR_PSEUDO_16(bx
, rbx
),
1746 DEFINE_GPR_PSEUDO_16(cx
, rcx
),
1747 DEFINE_GPR_PSEUDO_16(dx
, rdx
),
1748 DEFINE_GPR_PSEUDO_16(di
, rdi
),
1749 DEFINE_GPR_PSEUDO_16(si
, rsi
),
1750 DEFINE_GPR_PSEUDO_16(bp
, rbp
),
1751 DEFINE_GPR_PSEUDO_16(sp
, rsp
),
1752 DEFINE_GPR_PSEUDO_16(r8w
, r8
),
1753 DEFINE_GPR_PSEUDO_16(r9w
, r9
),
1754 DEFINE_GPR_PSEUDO_16(r10w
, r10
),
1755 DEFINE_GPR_PSEUDO_16(r11w
, r11
),
1756 DEFINE_GPR_PSEUDO_16(r12w
, r12
),
1757 DEFINE_GPR_PSEUDO_16(r13w
, r13
),
1758 DEFINE_GPR_PSEUDO_16(r14w
, r14
),
1759 DEFINE_GPR_PSEUDO_16(r15w
, r15
),
1760 DEFINE_GPR_PSEUDO_8H(ah
, rax
),
1761 DEFINE_GPR_PSEUDO_8H(bh
, rbx
),
1762 DEFINE_GPR_PSEUDO_8H(ch
, rcx
),
1763 DEFINE_GPR_PSEUDO_8H(dh
, rdx
),
1764 DEFINE_GPR_PSEUDO_8L(al
, rax
),
1765 DEFINE_GPR_PSEUDO_8L(bl
, rbx
),
1766 DEFINE_GPR_PSEUDO_8L(cl
, rcx
),
1767 DEFINE_GPR_PSEUDO_8L(dl
, rdx
),
1768 DEFINE_GPR_PSEUDO_8L(dil
, rdi
),
1769 DEFINE_GPR_PSEUDO_8L(sil
, rsi
),
1770 DEFINE_GPR_PSEUDO_8L(bpl
, rbp
),
1771 DEFINE_GPR_PSEUDO_8L(spl
, rsp
),
1772 DEFINE_GPR_PSEUDO_8L(r8l
, r8
),
1773 DEFINE_GPR_PSEUDO_8L(r9l
, r9
),
1774 DEFINE_GPR_PSEUDO_8L(r10l
, r10
),
1775 DEFINE_GPR_PSEUDO_8L(r11l
, r11
),
1776 DEFINE_GPR_PSEUDO_8L(r12l
, r12
),
1777 DEFINE_GPR_PSEUDO_8L(r13l
, r13
),
1778 DEFINE_GPR_PSEUDO_8L(r14l
, r14
),
1779 DEFINE_GPR_PSEUDO_8L(r15l
, r15
)};
1781 // Floating point registers 64 bit
1782 const DNBRegisterInfo
DNBArchImplX86_64::g_fpu_registers_no_avx
[] = {
1783 {e_regSetFPU
, fpu_fcw
, "fctrl", NULL
, Uint
, Hex
, FPU_SIZE_UINT(fcw
),
1784 FPU_OFFSET(fcw
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1785 {e_regSetFPU
, fpu_fsw
, "fstat", NULL
, Uint
, Hex
, FPU_SIZE_UINT(fsw
),
1786 FPU_OFFSET(fsw
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1787 {e_regSetFPU
, fpu_ftw
, "ftag", NULL
, Uint
, Hex
, 2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */,
1788 FPU_OFFSET(ftw
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1789 {e_regSetFPU
, fpu_fop
, "fop", NULL
, Uint
, Hex
, FPU_SIZE_UINT(fop
),
1790 FPU_OFFSET(fop
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1791 {e_regSetFPU
, fpu_ip
, "fioff", NULL
, Uint
, Hex
, FPU_SIZE_UINT(ip
),
1792 FPU_OFFSET(ip
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1793 {e_regSetFPU
, fpu_cs
, "fiseg", NULL
, Uint
, Hex
, FPU_SIZE_UINT(cs
),
1794 FPU_OFFSET(cs
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1795 {e_regSetFPU
, fpu_dp
, "fooff", NULL
, Uint
, Hex
, FPU_SIZE_UINT(dp
),
1796 FPU_OFFSET(dp
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1797 {e_regSetFPU
, fpu_ds
, "foseg", NULL
, Uint
, Hex
, FPU_SIZE_UINT(ds
),
1798 FPU_OFFSET(ds
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1799 {e_regSetFPU
, fpu_mxcsr
, "mxcsr", NULL
, Uint
, Hex
, FPU_SIZE_UINT(mxcsr
),
1800 FPU_OFFSET(mxcsr
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1801 {e_regSetFPU
, fpu_mxcsrmask
, "mxcsrmask", NULL
, Uint
, Hex
,
1802 FPU_SIZE_UINT(mxcsrmask
), FPU_OFFSET(mxcsrmask
), -1U, -1U, -1U, -1U, NULL
,
1805 {e_regSetFPU
, fpu_stmm0
, "stmm0", "st0", Vector
, VectorOfUInt8
,
1806 FPU_SIZE_MMST(stmm0
), FPU_OFFSET(stmm0
), ehframe_dwarf_stmm0
,
1807 ehframe_dwarf_stmm0
, -1U, debugserver_stmm0
, NULL
, NULL
},
1808 {e_regSetFPU
, fpu_stmm1
, "stmm1", "st1", Vector
, VectorOfUInt8
,
1809 FPU_SIZE_MMST(stmm1
), FPU_OFFSET(stmm1
), ehframe_dwarf_stmm1
,
1810 ehframe_dwarf_stmm1
, -1U, debugserver_stmm1
, NULL
, NULL
},
1811 {e_regSetFPU
, fpu_stmm2
, "stmm2", "st2", Vector
, VectorOfUInt8
,
1812 FPU_SIZE_MMST(stmm2
), FPU_OFFSET(stmm2
), ehframe_dwarf_stmm2
,
1813 ehframe_dwarf_stmm2
, -1U, debugserver_stmm2
, NULL
, NULL
},
1814 {e_regSetFPU
, fpu_stmm3
, "stmm3", "st3", Vector
, VectorOfUInt8
,
1815 FPU_SIZE_MMST(stmm3
), FPU_OFFSET(stmm3
), ehframe_dwarf_stmm3
,
1816 ehframe_dwarf_stmm3
, -1U, debugserver_stmm3
, NULL
, NULL
},
1817 {e_regSetFPU
, fpu_stmm4
, "stmm4", "st4", Vector
, VectorOfUInt8
,
1818 FPU_SIZE_MMST(stmm4
), FPU_OFFSET(stmm4
), ehframe_dwarf_stmm4
,
1819 ehframe_dwarf_stmm4
, -1U, debugserver_stmm4
, NULL
, NULL
},
1820 {e_regSetFPU
, fpu_stmm5
, "stmm5", "st5", Vector
, VectorOfUInt8
,
1821 FPU_SIZE_MMST(stmm5
), FPU_OFFSET(stmm5
), ehframe_dwarf_stmm5
,
1822 ehframe_dwarf_stmm5
, -1U, debugserver_stmm5
, NULL
, NULL
},
1823 {e_regSetFPU
, fpu_stmm6
, "stmm6", "st6", Vector
, VectorOfUInt8
,
1824 FPU_SIZE_MMST(stmm6
), FPU_OFFSET(stmm6
), ehframe_dwarf_stmm6
,
1825 ehframe_dwarf_stmm6
, -1U, debugserver_stmm6
, NULL
, NULL
},
1826 {e_regSetFPU
, fpu_stmm7
, "stmm7", "st7", Vector
, VectorOfUInt8
,
1827 FPU_SIZE_MMST(stmm7
), FPU_OFFSET(stmm7
), ehframe_dwarf_stmm7
,
1828 ehframe_dwarf_stmm7
, -1U, debugserver_stmm7
, NULL
, NULL
},
1830 {e_regSetFPU
, fpu_xmm0
, "xmm0", NULL
, Vector
, VectorOfUInt8
,
1831 FPU_SIZE_XMM(xmm0
), FPU_OFFSET(xmm0
), ehframe_dwarf_xmm0
,
1832 ehframe_dwarf_xmm0
, -1U, debugserver_xmm0
, NULL
, NULL
},
1833 {e_regSetFPU
, fpu_xmm1
, "xmm1", NULL
, Vector
, VectorOfUInt8
,
1834 FPU_SIZE_XMM(xmm1
), FPU_OFFSET(xmm1
), ehframe_dwarf_xmm1
,
1835 ehframe_dwarf_xmm1
, -1U, debugserver_xmm1
, NULL
, NULL
},
1836 {e_regSetFPU
, fpu_xmm2
, "xmm2", NULL
, Vector
, VectorOfUInt8
,
1837 FPU_SIZE_XMM(xmm2
), FPU_OFFSET(xmm2
), ehframe_dwarf_xmm2
,
1838 ehframe_dwarf_xmm2
, -1U, debugserver_xmm2
, NULL
, NULL
},
1839 {e_regSetFPU
, fpu_xmm3
, "xmm3", NULL
, Vector
, VectorOfUInt8
,
1840 FPU_SIZE_XMM(xmm3
), FPU_OFFSET(xmm3
), ehframe_dwarf_xmm3
,
1841 ehframe_dwarf_xmm3
, -1U, debugserver_xmm3
, NULL
, NULL
},
1842 {e_regSetFPU
, fpu_xmm4
, "xmm4", NULL
, Vector
, VectorOfUInt8
,
1843 FPU_SIZE_XMM(xmm4
), FPU_OFFSET(xmm4
), ehframe_dwarf_xmm4
,
1844 ehframe_dwarf_xmm4
, -1U, debugserver_xmm4
, NULL
, NULL
},
1845 {e_regSetFPU
, fpu_xmm5
, "xmm5", NULL
, Vector
, VectorOfUInt8
,
1846 FPU_SIZE_XMM(xmm5
), FPU_OFFSET(xmm5
), ehframe_dwarf_xmm5
,
1847 ehframe_dwarf_xmm5
, -1U, debugserver_xmm5
, NULL
, NULL
},
1848 {e_regSetFPU
, fpu_xmm6
, "xmm6", NULL
, Vector
, VectorOfUInt8
,
1849 FPU_SIZE_XMM(xmm6
), FPU_OFFSET(xmm6
), ehframe_dwarf_xmm6
,
1850 ehframe_dwarf_xmm6
, -1U, debugserver_xmm6
, NULL
, NULL
},
1851 {e_regSetFPU
, fpu_xmm7
, "xmm7", NULL
, Vector
, VectorOfUInt8
,
1852 FPU_SIZE_XMM(xmm7
), FPU_OFFSET(xmm7
), ehframe_dwarf_xmm7
,
1853 ehframe_dwarf_xmm7
, -1U, debugserver_xmm7
, NULL
, NULL
},
1854 {e_regSetFPU
, fpu_xmm8
, "xmm8", NULL
, Vector
, VectorOfUInt8
,
1855 FPU_SIZE_XMM(xmm8
), FPU_OFFSET(xmm8
), ehframe_dwarf_xmm8
,
1856 ehframe_dwarf_xmm8
, -1U, debugserver_xmm8
, NULL
, NULL
},
1857 {e_regSetFPU
, fpu_xmm9
, "xmm9", NULL
, Vector
, VectorOfUInt8
,
1858 FPU_SIZE_XMM(xmm9
), FPU_OFFSET(xmm9
), ehframe_dwarf_xmm9
,
1859 ehframe_dwarf_xmm9
, -1U, debugserver_xmm9
, NULL
, NULL
},
1860 {e_regSetFPU
, fpu_xmm10
, "xmm10", NULL
, Vector
, VectorOfUInt8
,
1861 FPU_SIZE_XMM(xmm10
), FPU_OFFSET(xmm10
), ehframe_dwarf_xmm10
,
1862 ehframe_dwarf_xmm10
, -1U, debugserver_xmm10
, NULL
, NULL
},
1863 {e_regSetFPU
, fpu_xmm11
, "xmm11", NULL
, Vector
, VectorOfUInt8
,
1864 FPU_SIZE_XMM(xmm11
), FPU_OFFSET(xmm11
), ehframe_dwarf_xmm11
,
1865 ehframe_dwarf_xmm11
, -1U, debugserver_xmm11
, NULL
, NULL
},
1866 {e_regSetFPU
, fpu_xmm12
, "xmm12", NULL
, Vector
, VectorOfUInt8
,
1867 FPU_SIZE_XMM(xmm12
), FPU_OFFSET(xmm12
), ehframe_dwarf_xmm12
,
1868 ehframe_dwarf_xmm12
, -1U, debugserver_xmm12
, NULL
, NULL
},
1869 {e_regSetFPU
, fpu_xmm13
, "xmm13", NULL
, Vector
, VectorOfUInt8
,
1870 FPU_SIZE_XMM(xmm13
), FPU_OFFSET(xmm13
), ehframe_dwarf_xmm13
,
1871 ehframe_dwarf_xmm13
, -1U, debugserver_xmm13
, NULL
, NULL
},
1872 {e_regSetFPU
, fpu_xmm14
, "xmm14", NULL
, Vector
, VectorOfUInt8
,
1873 FPU_SIZE_XMM(xmm14
), FPU_OFFSET(xmm14
), ehframe_dwarf_xmm14
,
1874 ehframe_dwarf_xmm14
, -1U, debugserver_xmm14
, NULL
, NULL
},
1875 {e_regSetFPU
, fpu_xmm15
, "xmm15", NULL
, Vector
, VectorOfUInt8
,
1876 FPU_SIZE_XMM(xmm15
), FPU_OFFSET(xmm15
), ehframe_dwarf_xmm15
,
1877 ehframe_dwarf_xmm15
, -1U, debugserver_xmm15
, NULL
, NULL
},
// Containment tables for the AVX registers: each xmmN pseudo register's
// bits live inside the corresponding ymmN register.  NULL-terminated.
static const char *g_contained_ymm0[] = {"ymm0", NULL};
static const char *g_contained_ymm1[] = {"ymm1", NULL};
static const char *g_contained_ymm2[] = {"ymm2", NULL};
static const char *g_contained_ymm3[] = {"ymm3", NULL};
static const char *g_contained_ymm4[] = {"ymm4", NULL};
static const char *g_contained_ymm5[] = {"ymm5", NULL};
static const char *g_contained_ymm6[] = {"ymm6", NULL};
static const char *g_contained_ymm7[] = {"ymm7", NULL};
static const char *g_contained_ymm8[] = {"ymm8", NULL};
static const char *g_contained_ymm9[] = {"ymm9", NULL};
static const char *g_contained_ymm10[] = {"ymm10", NULL};
static const char *g_contained_ymm11[] = {"ymm11", NULL};
static const char *g_contained_ymm12[] = {"ymm12", NULL};
static const char *g_contained_ymm13[] = {"ymm13", NULL};
static const char *g_contained_ymm14[] = {"ymm14", NULL};
static const char *g_contained_ymm15[] = {"ymm15", NULL};
1897 const DNBRegisterInfo
DNBArchImplX86_64::g_fpu_registers_avx
[] = {
1898 {e_regSetFPU
, fpu_fcw
, "fctrl", NULL
, Uint
, Hex
, FPU_SIZE_UINT(fcw
),
1899 AVX_OFFSET(fcw
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1900 {e_regSetFPU
, fpu_fsw
, "fstat", NULL
, Uint
, Hex
, FPU_SIZE_UINT(fsw
),
1901 AVX_OFFSET(fsw
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1902 {e_regSetFPU
, fpu_ftw
, "ftag", NULL
, Uint
, Hex
, 2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */,
1903 AVX_OFFSET(ftw
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1904 {e_regSetFPU
, fpu_fop
, "fop", NULL
, Uint
, Hex
, FPU_SIZE_UINT(fop
),
1905 AVX_OFFSET(fop
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1906 {e_regSetFPU
, fpu_ip
, "fioff", NULL
, Uint
, Hex
, FPU_SIZE_UINT(ip
),
1907 AVX_OFFSET(ip
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1908 {e_regSetFPU
, fpu_cs
, "fiseg", NULL
, Uint
, Hex
, FPU_SIZE_UINT(cs
),
1909 AVX_OFFSET(cs
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1910 {e_regSetFPU
, fpu_dp
, "fooff", NULL
, Uint
, Hex
, FPU_SIZE_UINT(dp
),
1911 AVX_OFFSET(dp
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1912 {e_regSetFPU
, fpu_ds
, "foseg", NULL
, Uint
, Hex
, FPU_SIZE_UINT(ds
),
1913 AVX_OFFSET(ds
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1914 {e_regSetFPU
, fpu_mxcsr
, "mxcsr", NULL
, Uint
, Hex
, FPU_SIZE_UINT(mxcsr
),
1915 AVX_OFFSET(mxcsr
), -1U, -1U, -1U, -1U, NULL
, NULL
},
1916 {e_regSetFPU
, fpu_mxcsrmask
, "mxcsrmask", NULL
, Uint
, Hex
,
1917 FPU_SIZE_UINT(mxcsrmask
), AVX_OFFSET(mxcsrmask
), -1U, -1U, -1U, -1U, NULL
,
1920 {e_regSetFPU
, fpu_stmm0
, "stmm0", "st0", Vector
, VectorOfUInt8
,
1921 FPU_SIZE_MMST(stmm0
), AVX_OFFSET(stmm0
), ehframe_dwarf_stmm0
,
1922 ehframe_dwarf_stmm0
, -1U, debugserver_stmm0
, NULL
, NULL
},
1923 {e_regSetFPU
, fpu_stmm1
, "stmm1", "st1", Vector
, VectorOfUInt8
,
1924 FPU_SIZE_MMST(stmm1
), AVX_OFFSET(stmm1
), ehframe_dwarf_stmm1
,
1925 ehframe_dwarf_stmm1
, -1U, debugserver_stmm1
, NULL
, NULL
},
1926 {e_regSetFPU
, fpu_stmm2
, "stmm2", "st2", Vector
, VectorOfUInt8
,
1927 FPU_SIZE_MMST(stmm2
), AVX_OFFSET(stmm2
), ehframe_dwarf_stmm2
,
1928 ehframe_dwarf_stmm2
, -1U, debugserver_stmm2
, NULL
, NULL
},
1929 {e_regSetFPU
, fpu_stmm3
, "stmm3", "st3", Vector
, VectorOfUInt8
,
1930 FPU_SIZE_MMST(stmm3
), AVX_OFFSET(stmm3
), ehframe_dwarf_stmm3
,
1931 ehframe_dwarf_stmm3
, -1U, debugserver_stmm3
, NULL
, NULL
},
1932 {e_regSetFPU
, fpu_stmm4
, "stmm4", "st4", Vector
, VectorOfUInt8
,
1933 FPU_SIZE_MMST(stmm4
), AVX_OFFSET(stmm4
), ehframe_dwarf_stmm4
,
1934 ehframe_dwarf_stmm4
, -1U, debugserver_stmm4
, NULL
, NULL
},
1935 {e_regSetFPU
, fpu_stmm5
, "stmm5", "st5", Vector
, VectorOfUInt8
,
1936 FPU_SIZE_MMST(stmm5
), AVX_OFFSET(stmm5
), ehframe_dwarf_stmm5
,
1937 ehframe_dwarf_stmm5
, -1U, debugserver_stmm5
, NULL
, NULL
},
1938 {e_regSetFPU
, fpu_stmm6
, "stmm6", "st6", Vector
, VectorOfUInt8
,
1939 FPU_SIZE_MMST(stmm6
), AVX_OFFSET(stmm6
), ehframe_dwarf_stmm6
,
1940 ehframe_dwarf_stmm6
, -1U, debugserver_stmm6
, NULL
, NULL
},
1941 {e_regSetFPU
, fpu_stmm7
, "stmm7", "st7", Vector
, VectorOfUInt8
,
1942 FPU_SIZE_MMST(stmm7
), AVX_OFFSET(stmm7
), ehframe_dwarf_stmm7
,
1943 ehframe_dwarf_stmm7
, -1U, debugserver_stmm7
, NULL
, NULL
},
1945 {e_regSetFPU
, fpu_ymm0
, "ymm0", NULL
, Vector
, VectorOfUInt8
,
1946 FPU_SIZE_YMM(ymm0
), AVX_OFFSET_YMM(0), ehframe_dwarf_ymm0
,
1947 ehframe_dwarf_ymm0
, -1U, debugserver_ymm0
, NULL
, NULL
},
1948 {e_regSetFPU
, fpu_ymm1
, "ymm1", NULL
, Vector
, VectorOfUInt8
,
1949 FPU_SIZE_YMM(ymm1
), AVX_OFFSET_YMM(1), ehframe_dwarf_ymm1
,
1950 ehframe_dwarf_ymm1
, -1U, debugserver_ymm1
, NULL
, NULL
},
1951 {e_regSetFPU
, fpu_ymm2
, "ymm2", NULL
, Vector
, VectorOfUInt8
,
1952 FPU_SIZE_YMM(ymm2
), AVX_OFFSET_YMM(2), ehframe_dwarf_ymm2
,
1953 ehframe_dwarf_ymm2
, -1U, debugserver_ymm2
, NULL
, NULL
},
1954 {e_regSetFPU
, fpu_ymm3
, "ymm3", NULL
, Vector
, VectorOfUInt8
,
1955 FPU_SIZE_YMM(ymm3
), AVX_OFFSET_YMM(3), ehframe_dwarf_ymm3
,
1956 ehframe_dwarf_ymm3
, -1U, debugserver_ymm3
, NULL
, NULL
},
1957 {e_regSetFPU
, fpu_ymm4
, "ymm4", NULL
, Vector
, VectorOfUInt8
,
1958 FPU_SIZE_YMM(ymm4
), AVX_OFFSET_YMM(4), ehframe_dwarf_ymm4
,
1959 ehframe_dwarf_ymm4
, -1U, debugserver_ymm4
, NULL
, NULL
},
1960 {e_regSetFPU
, fpu_ymm5
, "ymm5", NULL
, Vector
, VectorOfUInt8
,
1961 FPU_SIZE_YMM(ymm5
), AVX_OFFSET_YMM(5), ehframe_dwarf_ymm5
,
1962 ehframe_dwarf_ymm5
, -1U, debugserver_ymm5
, NULL
, NULL
},
1963 {e_regSetFPU
, fpu_ymm6
, "ymm6", NULL
, Vector
, VectorOfUInt8
,
1964 FPU_SIZE_YMM(ymm6
), AVX_OFFSET_YMM(6), ehframe_dwarf_ymm6
,
1965 ehframe_dwarf_ymm6
, -1U, debugserver_ymm6
, NULL
, NULL
},
1966 {e_regSetFPU
, fpu_ymm7
, "ymm7", NULL
, Vector
, VectorOfUInt8
,
1967 FPU_SIZE_YMM(ymm7
), AVX_OFFSET_YMM(7), ehframe_dwarf_ymm7
,
1968 ehframe_dwarf_ymm7
, -1U, debugserver_ymm7
, NULL
, NULL
},
1969 {e_regSetFPU
, fpu_ymm8
, "ymm8", NULL
, Vector
, VectorOfUInt8
,
1970 FPU_SIZE_YMM(ymm8
), AVX_OFFSET_YMM(8), ehframe_dwarf_ymm8
,
1971 ehframe_dwarf_ymm8
, -1U, debugserver_ymm8
, NULL
, NULL
},
1972 {e_regSetFPU
, fpu_ymm9
, "ymm9", NULL
, Vector
, VectorOfUInt8
,
1973 FPU_SIZE_YMM(ymm9
), AVX_OFFSET_YMM(9), ehframe_dwarf_ymm9
,
1974 ehframe_dwarf_ymm9
, -1U, debugserver_ymm9
, NULL
, NULL
},
1975 {e_regSetFPU
, fpu_ymm10
, "ymm10", NULL
, Vector
, VectorOfUInt8
,
1976 FPU_SIZE_YMM(ymm10
), AVX_OFFSET_YMM(10), ehframe_dwarf_ymm10
,
1977 ehframe_dwarf_ymm10
, -1U, debugserver_ymm10
, NULL
, NULL
},
1978 {e_regSetFPU
, fpu_ymm11
, "ymm11", NULL
, Vector
, VectorOfUInt8
,
1979 FPU_SIZE_YMM(ymm11
), AVX_OFFSET_YMM(11), ehframe_dwarf_ymm11
,
1980 ehframe_dwarf_ymm11
, -1U, debugserver_ymm11
, NULL
, NULL
},
1981 {e_regSetFPU
, fpu_ymm12
, "ymm12", NULL
, Vector
, VectorOfUInt8
,
1982 FPU_SIZE_YMM(ymm12
), AVX_OFFSET_YMM(12), ehframe_dwarf_ymm12
,
1983 ehframe_dwarf_ymm12
, -1U, debugserver_ymm12
, NULL
, NULL
},
1984 {e_regSetFPU
, fpu_ymm13
, "ymm13", NULL
, Vector
, VectorOfUInt8
,
1985 FPU_SIZE_YMM(ymm13
), AVX_OFFSET_YMM(13), ehframe_dwarf_ymm13
,
1986 ehframe_dwarf_ymm13
, -1U, debugserver_ymm13
, NULL
, NULL
},
1987 {e_regSetFPU
, fpu_ymm14
, "ymm14", NULL
, Vector
, VectorOfUInt8
,
1988 FPU_SIZE_YMM(ymm14
), AVX_OFFSET_YMM(14), ehframe_dwarf_ymm14
,
1989 ehframe_dwarf_ymm14
, -1U, debugserver_ymm14
, NULL
, NULL
},
1990 {e_regSetFPU
, fpu_ymm15
, "ymm15", NULL
, Vector
, VectorOfUInt8
,
1991 FPU_SIZE_YMM(ymm15
), AVX_OFFSET_YMM(15), ehframe_dwarf_ymm15
,
1992 ehframe_dwarf_ymm15
, -1U, debugserver_ymm15
, NULL
, NULL
},
1994 {e_regSetFPU
, fpu_xmm0
, "xmm0", NULL
, Vector
, VectorOfUInt8
,
1995 FPU_SIZE_XMM(xmm0
), 0, ehframe_dwarf_xmm0
, ehframe_dwarf_xmm0
, -1U,
1996 debugserver_xmm0
, g_contained_ymm0
, NULL
},
1997 {e_regSetFPU
, fpu_xmm1
, "xmm1", NULL
, Vector
, VectorOfUInt8
,
1998 FPU_SIZE_XMM(xmm1
), 0, ehframe_dwarf_xmm1
, ehframe_dwarf_xmm1
, -1U,
1999 debugserver_xmm1
, g_contained_ymm1
, NULL
},
2000 {e_regSetFPU
, fpu_xmm2
, "xmm2", NULL
, Vector
, VectorOfUInt8
,
2001 FPU_SIZE_XMM(xmm2
), 0, ehframe_dwarf_xmm2
, ehframe_dwarf_xmm2
, -1U,
2002 debugserver_xmm2
, g_contained_ymm2
, NULL
},
2003 {e_regSetFPU
, fpu_xmm3
, "xmm3", NULL
, Vector
, VectorOfUInt8
,
2004 FPU_SIZE_XMM(xmm3
), 0, ehframe_dwarf_xmm3
, ehframe_dwarf_xmm3
, -1U,
2005 debugserver_xmm3
, g_contained_ymm3
, NULL
},
2006 {e_regSetFPU
, fpu_xmm4
, "xmm4", NULL
, Vector
, VectorOfUInt8
,
2007 FPU_SIZE_XMM(xmm4
), 0, ehframe_dwarf_xmm4
, ehframe_dwarf_xmm4
, -1U,
2008 debugserver_xmm4
, g_contained_ymm4
, NULL
},
2009 {e_regSetFPU
, fpu_xmm5
, "xmm5", NULL
, Vector
, VectorOfUInt8
,
2010 FPU_SIZE_XMM(xmm5
), 0, ehframe_dwarf_xmm5
, ehframe_dwarf_xmm5
, -1U,
2011 debugserver_xmm5
, g_contained_ymm5
, NULL
},
2012 {e_regSetFPU
, fpu_xmm6
, "xmm6", NULL
, Vector
, VectorOfUInt8
,
2013 FPU_SIZE_XMM(xmm6
), 0, ehframe_dwarf_xmm6
, ehframe_dwarf_xmm6
, -1U,
2014 debugserver_xmm6
, g_contained_ymm6
, NULL
},
2015 {e_regSetFPU
, fpu_xmm7
, "xmm7", NULL
, Vector
, VectorOfUInt8
,
2016 FPU_SIZE_XMM(xmm7
), 0, ehframe_dwarf_xmm7
, ehframe_dwarf_xmm7
, -1U,
2017 debugserver_xmm7
, g_contained_ymm7
, NULL
},
2018 {e_regSetFPU
, fpu_xmm8
, "xmm8", NULL
, Vector
, VectorOfUInt8
,
2019 FPU_SIZE_XMM(xmm8
), 0, ehframe_dwarf_xmm8
, ehframe_dwarf_xmm8
, -1U,
2020 debugserver_xmm8
, g_contained_ymm8
, NULL
},
2021 {e_regSetFPU
, fpu_xmm9
, "xmm9", NULL
, Vector
, VectorOfUInt8
,
2022 FPU_SIZE_XMM(xmm9
), 0, ehframe_dwarf_xmm9
, ehframe_dwarf_xmm9
, -1U,
2023 debugserver_xmm9
, g_contained_ymm9
, NULL
},
2024 {e_regSetFPU
, fpu_xmm10
, "xmm10", NULL
, Vector
, VectorOfUInt8
,
2025 FPU_SIZE_XMM(xmm10
), 0, ehframe_dwarf_xmm10
, ehframe_dwarf_xmm10
, -1U,
2026 debugserver_xmm10
, g_contained_ymm10
, NULL
},
2027 {e_regSetFPU
, fpu_xmm11
, "xmm11", NULL
, Vector
, VectorOfUInt8
,
2028 FPU_SIZE_XMM(xmm11
), 0, ehframe_dwarf_xmm11
, ehframe_dwarf_xmm11
, -1U,
2029 debugserver_xmm11
, g_contained_ymm11
, NULL
},
2030 {e_regSetFPU
, fpu_xmm12
, "xmm12", NULL
, Vector
, VectorOfUInt8
,
2031 FPU_SIZE_XMM(xmm12
), 0, ehframe_dwarf_xmm12
, ehframe_dwarf_xmm12
, -1U,
2032 debugserver_xmm12
, g_contained_ymm12
, NULL
},
2033 {e_regSetFPU
, fpu_xmm13
, "xmm13", NULL
, Vector
, VectorOfUInt8
,
2034 FPU_SIZE_XMM(xmm13
), 0, ehframe_dwarf_xmm13
, ehframe_dwarf_xmm13
, -1U,
2035 debugserver_xmm13
, g_contained_ymm13
, NULL
},
2036 {e_regSetFPU
, fpu_xmm14
, "xmm14", NULL
, Vector
, VectorOfUInt8
,
2037 FPU_SIZE_XMM(xmm14
), 0, ehframe_dwarf_xmm14
, ehframe_dwarf_xmm14
, -1U,
2038 debugserver_xmm14
, g_contained_ymm14
, NULL
},
2039 {e_regSetFPU
, fpu_xmm15
, "xmm15", NULL
, Vector
, VectorOfUInt8
,
2040 FPU_SIZE_XMM(xmm15
), 0, ehframe_dwarf_xmm15
, ehframe_dwarf_xmm15
, -1U,
2041 debugserver_xmm15
, g_contained_ymm15
, NULL
}
// NULL-terminated lists naming the register that "contains" each
// xmm/ymm register under AVX-512: both are sub-ranges of zmm<N>.
// Referenced by the YMM_REG_ALIAS / XMM_REG_ALIAS macros below.
static const char *g_contained_zmm0[] = {"zmm0", NULL};
static const char *g_contained_zmm1[] = {"zmm1", NULL};
static const char *g_contained_zmm2[] = {"zmm2", NULL};
static const char *g_contained_zmm3[] = {"zmm3", NULL};
static const char *g_contained_zmm4[] = {"zmm4", NULL};
static const char *g_contained_zmm5[] = {"zmm5", NULL};
static const char *g_contained_zmm6[] = {"zmm6", NULL};
static const char *g_contained_zmm7[] = {"zmm7", NULL};
static const char *g_contained_zmm8[] = {"zmm8", NULL};
static const char *g_contained_zmm9[] = {"zmm9", NULL};
static const char *g_contained_zmm10[] = {"zmm10", NULL};
static const char *g_contained_zmm11[] = {"zmm11", NULL};
static const char *g_contained_zmm12[] = {"zmm12", NULL};
static const char *g_contained_zmm13[] = {"zmm13", NULL};
static const char *g_contained_zmm14[] = {"zmm14", NULL};
static const char *g_contained_zmm15[] = {"zmm15", NULL};
// Helper macros that expand to one DNBRegisterInfo initializer each, for
// the AVX-512 register table. NOTE(review): the brace lines of these
// macros were dropped in extraction; restored to match the upstream llvm
// debugserver source — verify against VCS.

// A 512-bit zmm register stored at its AVX512F save-area offset.
#define ZMM_REG_DEF(reg)                                                       \
  {                                                                            \
    e_regSetFPU, fpu_zmm##reg, STR(zmm##reg), NULL, Vector, VectorOfUInt8,     \
        FPU_SIZE_ZMM(zmm##reg), AVX512F_OFFSET_ZMM(reg),                       \
        ehframe_dwarf_zmm##reg, ehframe_dwarf_zmm##reg, -1U,                   \
        debugserver_zmm##reg, NULL, NULL                                       \
  }

// A ymm register exposed as a pseudo-register contained in zmm<reg>
// (offset 0: its bytes live inside the containing register's value).
#define YMM_REG_ALIAS(reg)                                                     \
  {                                                                            \
    e_regSetFPU, fpu_ymm##reg, STR(ymm##reg), NULL, Vector, VectorOfUInt8,     \
        FPU_SIZE_YMM(ymm##reg), 0, ehframe_dwarf_ymm##reg,                     \
        ehframe_dwarf_ymm##reg, -1U, debugserver_ymm##reg,                     \
        g_contained_zmm##reg, NULL                                             \
  }

// An xmm register exposed as a pseudo-register contained in zmm<reg>.
#define XMM_REG_ALIAS(reg)                                                     \
  {                                                                            \
    e_regSetFPU, fpu_xmm##reg, STR(xmm##reg), NULL, Vector, VectorOfUInt8,     \
        FPU_SIZE_XMM(xmm##reg), 0, ehframe_dwarf_xmm##reg,                     \
        ehframe_dwarf_xmm##reg, -1U, debugserver_xmm##reg,                     \
        g_contained_zmm##reg, NULL                                             \
  }

// An 8-byte AVX-512 opmask register k0..k7.
#define AVX512_K_REG_DEF(reg)                                                  \
  {                                                                            \
    e_regSetFPU, fpu_k##reg, STR(k##reg), NULL, Vector, VectorOfUInt8, 8,      \
        AVX512F_OFFSET(k##reg), ehframe_dwarf_k##reg, ehframe_dwarf_k##reg,    \
        -1U, debugserver_k##reg, NULL, NULL                                    \
  }
2095 const DNBRegisterInfo
DNBArchImplX86_64::g_fpu_registers_avx512f
[] = {
2096 {e_regSetFPU
, fpu_fcw
, "fctrl", NULL
, Uint
, Hex
, FPU_SIZE_UINT(fcw
),
2097 AVX_OFFSET(fcw
), -1U, -1U, -1U, -1U, NULL
, NULL
},
2098 {e_regSetFPU
, fpu_fsw
, "fstat", NULL
, Uint
, Hex
, FPU_SIZE_UINT(fsw
),
2099 AVX_OFFSET(fsw
), -1U, -1U, -1U, -1U, NULL
, NULL
},
2100 {e_regSetFPU
, fpu_ftw
, "ftag", NULL
, Uint
, Hex
, 2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */,
2101 AVX_OFFSET(ftw
), -1U, -1U, -1U, -1U, NULL
, NULL
},
2102 {e_regSetFPU
, fpu_fop
, "fop", NULL
, Uint
, Hex
, FPU_SIZE_UINT(fop
),
2103 AVX_OFFSET(fop
), -1U, -1U, -1U, -1U, NULL
, NULL
},
2104 {e_regSetFPU
, fpu_ip
, "fioff", NULL
, Uint
, Hex
, FPU_SIZE_UINT(ip
),
2105 AVX_OFFSET(ip
), -1U, -1U, -1U, -1U, NULL
, NULL
},
2106 {e_regSetFPU
, fpu_cs
, "fiseg", NULL
, Uint
, Hex
, FPU_SIZE_UINT(cs
),
2107 AVX_OFFSET(cs
), -1U, -1U, -1U, -1U, NULL
, NULL
},
2108 {e_regSetFPU
, fpu_dp
, "fooff", NULL
, Uint
, Hex
, FPU_SIZE_UINT(dp
),
2109 AVX_OFFSET(dp
), -1U, -1U, -1U, -1U, NULL
, NULL
},
2110 {e_regSetFPU
, fpu_ds
, "foseg", NULL
, Uint
, Hex
, FPU_SIZE_UINT(ds
),
2111 AVX_OFFSET(ds
), -1U, -1U, -1U, -1U, NULL
, NULL
},
2112 {e_regSetFPU
, fpu_mxcsr
, "mxcsr", NULL
, Uint
, Hex
, FPU_SIZE_UINT(mxcsr
),
2113 AVX_OFFSET(mxcsr
), -1U, -1U, -1U, -1U, NULL
, NULL
},
2114 {e_regSetFPU
, fpu_mxcsrmask
, "mxcsrmask", NULL
, Uint
, Hex
,
2115 FPU_SIZE_UINT(mxcsrmask
), AVX_OFFSET(mxcsrmask
), -1U, -1U, -1U, -1U, NULL
,
2118 {e_regSetFPU
, fpu_stmm0
, "stmm0", "st0", Vector
, VectorOfUInt8
,
2119 FPU_SIZE_MMST(stmm0
), AVX_OFFSET(stmm0
), ehframe_dwarf_stmm0
,
2120 ehframe_dwarf_stmm0
, -1U, debugserver_stmm0
, NULL
, NULL
},
2121 {e_regSetFPU
, fpu_stmm1
, "stmm1", "st1", Vector
, VectorOfUInt8
,
2122 FPU_SIZE_MMST(stmm1
), AVX_OFFSET(stmm1
), ehframe_dwarf_stmm1
,
2123 ehframe_dwarf_stmm1
, -1U, debugserver_stmm1
, NULL
, NULL
},
2124 {e_regSetFPU
, fpu_stmm2
, "stmm2", "st2", Vector
, VectorOfUInt8
,
2125 FPU_SIZE_MMST(stmm2
), AVX_OFFSET(stmm2
), ehframe_dwarf_stmm2
,
2126 ehframe_dwarf_stmm2
, -1U, debugserver_stmm2
, NULL
, NULL
},
2127 {e_regSetFPU
, fpu_stmm3
, "stmm3", "st3", Vector
, VectorOfUInt8
,
2128 FPU_SIZE_MMST(stmm3
), AVX_OFFSET(stmm3
), ehframe_dwarf_stmm3
,
2129 ehframe_dwarf_stmm3
, -1U, debugserver_stmm3
, NULL
, NULL
},
2130 {e_regSetFPU
, fpu_stmm4
, "stmm4", "st4", Vector
, VectorOfUInt8
,
2131 FPU_SIZE_MMST(stmm4
), AVX_OFFSET(stmm4
), ehframe_dwarf_stmm4
,
2132 ehframe_dwarf_stmm4
, -1U, debugserver_stmm4
, NULL
, NULL
},
2133 {e_regSetFPU
, fpu_stmm5
, "stmm5", "st5", Vector
, VectorOfUInt8
,
2134 FPU_SIZE_MMST(stmm5
), AVX_OFFSET(stmm5
), ehframe_dwarf_stmm5
,
2135 ehframe_dwarf_stmm5
, -1U, debugserver_stmm5
, NULL
, NULL
},
2136 {e_regSetFPU
, fpu_stmm6
, "stmm6", "st6", Vector
, VectorOfUInt8
,
2137 FPU_SIZE_MMST(stmm6
), AVX_OFFSET(stmm6
), ehframe_dwarf_stmm6
,
2138 ehframe_dwarf_stmm6
, -1U, debugserver_stmm6
, NULL
, NULL
},
2139 {e_regSetFPU
, fpu_stmm7
, "stmm7", "st7", Vector
, VectorOfUInt8
,
2140 FPU_SIZE_MMST(stmm7
), AVX_OFFSET(stmm7
), ehframe_dwarf_stmm7
,
2141 ehframe_dwarf_stmm7
, -1U, debugserver_stmm7
, NULL
, NULL
},
2143 AVX512_K_REG_DEF(0),
2144 AVX512_K_REG_DEF(1),
2145 AVX512_K_REG_DEF(2),
2146 AVX512_K_REG_DEF(3),
2147 AVX512_K_REG_DEF(4),
2148 AVX512_K_REG_DEF(5),
2149 AVX512_K_REG_DEF(6),
2150 AVX512_K_REG_DEF(7),
2222 // Exception registers
2224 const DNBRegisterInfo
DNBArchImplX86_64::g_exc_registers
[] = {
2225 {e_regSetEXC
, exc_trapno
, "trapno", NULL
, Uint
, Hex
, EXC_SIZE(trapno
),
2226 EXC_OFFSET(trapno
), -1U, -1U, -1U, -1U, NULL
, NULL
},
2227 {e_regSetEXC
, exc_err
, "err", NULL
, Uint
, Hex
, EXC_SIZE(err
),
2228 EXC_OFFSET(err
), -1U, -1U, -1U, -1U, NULL
, NULL
},
2229 {e_regSetEXC
, exc_faultvaddr
, "faultvaddr", NULL
, Uint
, Hex
,
2230 EXC_SIZE(faultvaddr
), EXC_OFFSET(faultvaddr
), -1U, -1U, -1U, -1U, NULL
,
2233 // Number of registers in each register set
2234 const size_t DNBArchImplX86_64::k_num_gpr_registers
=
2235 sizeof(g_gpr_registers
) / sizeof(DNBRegisterInfo
);
2236 const size_t DNBArchImplX86_64::k_num_fpu_registers_no_avx
=
2237 sizeof(g_fpu_registers_no_avx
) / sizeof(DNBRegisterInfo
);
2238 const size_t DNBArchImplX86_64::k_num_fpu_registers_avx
=
2239 sizeof(g_fpu_registers_avx
) / sizeof(DNBRegisterInfo
);
2240 const size_t DNBArchImplX86_64::k_num_exc_registers
=
2241 sizeof(g_exc_registers
) / sizeof(DNBRegisterInfo
);
2242 const size_t DNBArchImplX86_64::k_num_all_registers_no_avx
=
2243 k_num_gpr_registers
+ k_num_fpu_registers_no_avx
+ k_num_exc_registers
;
2244 const size_t DNBArchImplX86_64::k_num_all_registers_avx
=
2245 k_num_gpr_registers
+ k_num_fpu_registers_avx
+ k_num_exc_registers
;
2246 const size_t DNBArchImplX86_64::k_num_fpu_registers_avx512f
=
2247 sizeof(g_fpu_registers_avx512f
) / sizeof(DNBRegisterInfo
);
2248 const size_t DNBArchImplX86_64::k_num_all_registers_avx512f
=
2249 k_num_gpr_registers
+ k_num_fpu_registers_avx512f
+ k_num_exc_registers
;
2251 // Register set definitions. The first definitions at register set index
2252 // of zero is for all registers, followed by other registers sets. The
2253 // register information for the all register set need not be filled in.
2254 const DNBRegisterSetInfo
DNBArchImplX86_64::g_reg_sets_no_avx
[] = {
2255 {"x86_64 Registers", NULL
, k_num_all_registers_no_avx
},
2256 {"General Purpose Registers", g_gpr_registers
, k_num_gpr_registers
},
2257 {"Floating Point Registers", g_fpu_registers_no_avx
,
2258 k_num_fpu_registers_no_avx
},
2259 {"Exception State Registers", g_exc_registers
, k_num_exc_registers
}};
2261 const DNBRegisterSetInfo
DNBArchImplX86_64::g_reg_sets_avx
[] = {
2262 {"x86_64 Registers", NULL
, k_num_all_registers_avx
},
2263 {"General Purpose Registers", g_gpr_registers
, k_num_gpr_registers
},
2264 {"Floating Point Registers", g_fpu_registers_avx
, k_num_fpu_registers_avx
},
2265 {"Exception State Registers", g_exc_registers
, k_num_exc_registers
}};
2267 const DNBRegisterSetInfo
DNBArchImplX86_64::g_reg_sets_avx512f
[] = {
2268 {"x86_64 Registers", NULL
, k_num_all_registers_avx
},
2269 {"General Purpose Registers", g_gpr_registers
, k_num_gpr_registers
},
2270 {"Floating Point Registers", g_fpu_registers_avx512f
,
2271 k_num_fpu_registers_avx512f
},
2272 {"Exception State Registers", g_exc_registers
, k_num_exc_registers
}};
2274 // Total number of register sets for this architecture
2275 const size_t DNBArchImplX86_64::k_num_register_sets
=
2276 sizeof(g_reg_sets_avx
) / sizeof(DNBRegisterSetInfo
);
2278 DNBArchProtocol
*DNBArchImplX86_64::Create(MachThread
*thread
) {
2279 DNBArchImplX86_64
*obj
= new DNBArchImplX86_64(thread
);
2284 DNBArchImplX86_64::SoftwareBreakpointOpcode(nub_size_t byte_size
) {
2285 static const uint8_t g_breakpoint_opcode
[] = {0xCC};
2287 return g_breakpoint_opcode
;
2291 const DNBRegisterSetInfo
*
2292 DNBArchImplX86_64::GetRegisterSetInfo(nub_size_t
*num_reg_sets
) {
2293 *num_reg_sets
= k_num_register_sets
;
2295 if (CPUHasAVX512f() || FORCE_AVX_REGS
)
2296 return g_reg_sets_avx512f
;
2297 if (CPUHasAVX() || FORCE_AVX_REGS
)
2298 return g_reg_sets_avx
;
2300 return g_reg_sets_no_avx
;
2303 void DNBArchImplX86_64::Initialize() {
2304 DNBArchPluginInfo arch_plugin_info
= {
2305 CPU_TYPE_X86_64
, DNBArchImplX86_64::Create
,
2306 DNBArchImplX86_64::GetRegisterSetInfo
,
2307 DNBArchImplX86_64::SoftwareBreakpointOpcode
};
2309 // Register this arch plug-in with the main protocol class
2310 DNBArchProtocol::RegisterArchPlugin(arch_plugin_info
);
2313 bool DNBArchImplX86_64::GetRegisterValue(uint32_t set
, uint32_t reg
,
2314 DNBRegisterValue
*value
) {
2315 if (set
== REGISTER_SET_GENERIC
) {
2317 case GENERIC_REGNUM_PC
: // Program Counter
2322 case GENERIC_REGNUM_SP
: // Stack Pointer
2327 case GENERIC_REGNUM_FP
: // Frame Pointer
2332 case GENERIC_REGNUM_FLAGS
: // Processor flags register
2337 case GENERIC_REGNUM_RA
: // Return Address
2343 if (GetRegisterState(set
, false) != KERN_SUCCESS
)
2346 const DNBRegisterInfo
*regInfo
= m_thread
->GetRegisterInfo(set
, reg
);
2348 value
->info
= *regInfo
;
2351 if (reg
> gpr_gs
&& !m_state
.hasFullGPRState
)
2353 if (reg
< k_num_gpr_registers
) {
2354 value
->value
.uint64
= ((uint64_t *)(&m_state
.context
.gpr
))[reg
];
2360 if (reg
> fpu_xmm15
&& !(CPUHasAVX() || FORCE_AVX_REGS
))
2362 if (reg
> fpu_ymm15
&& !(CPUHasAVX512f() || FORCE_AVX_REGS
))
2367 value
->value
.uint16
=
2368 *((uint16_t *)(&m_state
.context
.fpu
.no_avx
.__fpu_fcw
));
2371 value
->value
.uint16
=
2372 *((uint16_t *)(&m_state
.context
.fpu
.no_avx
.__fpu_fsw
));
2375 memcpy (&value
->value
.uint16
, &m_state
.context
.fpu
.no_avx
.__fpu_ftw
, 2);
2378 value
->value
.uint16
= m_state
.context
.fpu
.no_avx
.__fpu_fop
;
2381 value
->value
.uint32
= m_state
.context
.fpu
.no_avx
.__fpu_ip
;
2384 value
->value
.uint16
= m_state
.context
.fpu
.no_avx
.__fpu_cs
;
2387 value
->value
.uint32
= m_state
.context
.fpu
.no_avx
.__fpu_dp
;
2390 value
->value
.uint16
= m_state
.context
.fpu
.no_avx
.__fpu_ds
;
2393 value
->value
.uint32
= m_state
.context
.fpu
.no_avx
.__fpu_mxcsr
;
2396 value
->value
.uint32
= m_state
.context
.fpu
.no_avx
.__fpu_mxcsrmask
;
2407 memcpy(&value
->value
.uint8
,
2408 &m_state
.context
.fpu
.no_avx
.__fpu_stmm0
+ (reg
- fpu_stmm0
), 10);
2427 memcpy(&value
->value
.uint8
,
2428 &m_state
.context
.fpu
.no_avx
.__fpu_xmm0
+ (reg
- fpu_xmm0
), 16);
2447 memcpy(&value
->value
.uint8
,
2448 &m_state
.context
.fpu
.avx
.__fpu_xmm0
+ (reg
- fpu_ymm0
), 16);
2449 memcpy((&value
->value
.uint8
) + 16,
2450 &m_state
.context
.fpu
.avx
.__fpu_ymmh0
+ (reg
- fpu_ymm0
), 16);
2460 memcpy((&value
->value
.uint8
),
2461 &m_state
.context
.fpu
.avx512f
.__fpu_k0
+ (reg
- fpu_k0
), 8);
2479 memcpy(&value
->value
.uint8
,
2480 &m_state
.context
.fpu
.avx512f
.__fpu_xmm0
+ (reg
- fpu_zmm0
), 16);
2481 memcpy((&value
->value
.uint8
) + 16,
2482 &m_state
.context
.fpu
.avx512f
.__fpu_ymmh0
+ (reg
- fpu_zmm0
), 16);
2483 memcpy((&value
->value
.uint8
) + 32,
2484 &m_state
.context
.fpu
.avx512f
.__fpu_zmmh0
+ (reg
- fpu_zmm0
), 32);
2502 memcpy(&value
->value
.uint8
,
2503 &m_state
.context
.fpu
.avx512f
.__fpu_zmm16
+ (reg
- fpu_zmm16
), 64);
2511 value
->value
.uint32
= m_state
.context
.exc
.__trapno
;
2514 value
->value
.uint32
= m_state
.context
.exc
.__err
;
2516 case exc_faultvaddr
:
2517 value
->value
.uint64
= m_state
.context
.exc
.__faultvaddr
;
2526 bool DNBArchImplX86_64::SetRegisterValue(uint32_t set
, uint32_t reg
,
2527 const DNBRegisterValue
*value
) {
2528 if (set
== REGISTER_SET_GENERIC
) {
2530 case GENERIC_REGNUM_PC
: // Program Counter
2535 case GENERIC_REGNUM_SP
: // Stack Pointer
2540 case GENERIC_REGNUM_FP
: // Frame Pointer
2545 case GENERIC_REGNUM_FLAGS
: // Processor flags register
2550 case GENERIC_REGNUM_RA
: // Return Address
2556 if (GetRegisterState(set
, false) != KERN_SUCCESS
)
2559 bool success
= false;
2560 const DNBRegisterInfo
*regInfo
= m_thread
->GetRegisterInfo(set
, reg
);
2564 if (reg
> gpr_gs
&& !m_state
.hasFullGPRState
)
2566 if (reg
< k_num_gpr_registers
) {
2567 ((uint64_t *)(&m_state
.context
.gpr
))[reg
] = value
->value
.uint64
;
2571 if (reg
> fpu_xmm15
&& !(CPUHasAVX() || FORCE_AVX_REGS
))
2573 if (reg
> fpu_ymm15
&& !(CPUHasAVX512f() || FORCE_AVX_REGS
))
2578 *((uint16_t *)(&m_state
.context
.fpu
.no_avx
.__fpu_fcw
)) =
2579 value
->value
.uint16
;
2583 *((uint16_t *)(&m_state
.context
.fpu
.no_avx
.__fpu_fsw
)) =
2584 value
->value
.uint16
;
2588 memcpy (&m_state
.context
.fpu
.no_avx
.__fpu_ftw
, &value
->value
.uint8
, 2);
2592 m_state
.context
.fpu
.no_avx
.__fpu_fop
= value
->value
.uint16
;
2596 m_state
.context
.fpu
.no_avx
.__fpu_ip
= value
->value
.uint32
;
2600 m_state
.context
.fpu
.no_avx
.__fpu_cs
= value
->value
.uint16
;
2604 m_state
.context
.fpu
.no_avx
.__fpu_dp
= value
->value
.uint32
;
2608 m_state
.context
.fpu
.no_avx
.__fpu_ds
= value
->value
.uint16
;
2612 m_state
.context
.fpu
.no_avx
.__fpu_mxcsr
= value
->value
.uint32
;
2616 m_state
.context
.fpu
.no_avx
.__fpu_mxcsrmask
= value
->value
.uint32
;
2628 memcpy(&m_state
.context
.fpu
.no_avx
.__fpu_stmm0
+ (reg
- fpu_stmm0
),
2629 &value
->value
.uint8
, 10);
2649 memcpy(&m_state
.context
.fpu
.no_avx
.__fpu_xmm0
+ (reg
- fpu_xmm0
),
2650 &value
->value
.uint8
, 16);
2670 memcpy(&m_state
.context
.fpu
.avx
.__fpu_xmm0
+ (reg
- fpu_ymm0
),
2671 &value
->value
.uint8
, 16);
2672 memcpy(&m_state
.context
.fpu
.avx
.__fpu_ymmh0
+ (reg
- fpu_ymm0
),
2673 (&value
->value
.uint8
) + 16, 16);
2684 memcpy(&m_state
.context
.fpu
.avx512f
.__fpu_k0
+ (reg
- fpu_k0
),
2685 &value
->value
.uint8
, 8);
2704 memcpy(&m_state
.context
.fpu
.avx512f
.__fpu_xmm0
+ (reg
- fpu_zmm0
),
2705 &value
->value
.uint8
, 16);
2706 memcpy(&m_state
.context
.fpu
.avx512f
.__fpu_ymmh0
+ (reg
- fpu_zmm0
),
2707 &value
->value
.uint8
+ 16, 16);
2708 memcpy(&m_state
.context
.fpu
.avx512f
.__fpu_zmmh0
+ (reg
- fpu_zmm0
),
2709 &value
->value
.uint8
+ 32, 32);
2728 memcpy(&m_state
.context
.fpu
.avx512f
.__fpu_zmm16
+ (reg
- fpu_zmm16
),
2729 &value
->value
.uint8
, 64);
2738 m_state
.context
.exc
.__trapno
= value
->value
.uint32
;
2742 m_state
.context
.exc
.__err
= value
->value
.uint32
;
2745 case exc_faultvaddr
:
2746 m_state
.context
.exc
.__faultvaddr
= value
->value
.uint64
;
2755 return SetRegisterState(set
) == KERN_SUCCESS
;
2759 uint32_t DNBArchImplX86_64::GetRegisterContextSize() {
2760 static uint32_t g_cached_size
= 0;
2761 if (g_cached_size
== 0) {
2762 if (CPUHasAVX512f() || FORCE_AVX_REGS
) {
2763 for (size_t i
= 0; i
< k_num_fpu_registers_avx512f
; ++i
) {
2764 if (g_fpu_registers_avx512f
[i
].value_regs
== NULL
)
2765 g_cached_size
+= g_fpu_registers_avx512f
[i
].size
;
2767 } else if (CPUHasAVX() || FORCE_AVX_REGS
) {
2768 for (size_t i
= 0; i
< k_num_fpu_registers_avx
; ++i
) {
2769 if (g_fpu_registers_avx
[i
].value_regs
== NULL
)
2770 g_cached_size
+= g_fpu_registers_avx
[i
].size
;
2773 for (size_t i
= 0; i
< k_num_fpu_registers_no_avx
; ++i
) {
2774 if (g_fpu_registers_no_avx
[i
].value_regs
== NULL
)
2775 g_cached_size
+= g_fpu_registers_no_avx
[i
].size
;
2778 DNBLogThreaded("DNBArchImplX86_64::GetRegisterContextSize() - GPR = %zu, "
2779 "FPU = %u, EXC = %zu",
2780 sizeof(GPR
), g_cached_size
, sizeof(EXC
));
2781 g_cached_size
+= sizeof(GPR
);
2782 g_cached_size
+= sizeof(EXC
);
2784 "DNBArchImplX86_64::GetRegisterContextSize() - GPR + FPU + EXC = %u",
2787 return g_cached_size
;
2790 nub_size_t
DNBArchImplX86_64::GetRegisterContext(void *buf
,
2791 nub_size_t buf_len
) {
2792 uint32_t size
= GetRegisterContextSize();
2794 if (buf
&& buf_len
) {
2798 if ((kret
= GetGPRState(force
)) != KERN_SUCCESS
) {
2799 DNBLogThreadedIf(LOG_THREAD
, "DNBArchImplX86_64::GetRegisterContext (buf "
2800 "= %p, len = %llu) error: GPR regs failed "
2802 buf
, (uint64_t)buf_len
, kret
);
2804 } else if ((kret
= GetFPUState(force
)) != KERN_SUCCESS
) {
2806 LOG_THREAD
, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = "
2807 "%llu) error: %s regs failed to read: %u",
2808 buf
, (uint64_t)buf_len
, CPUHasAVX() ? "AVX" : "FPU", kret
);
2810 } else if ((kret
= GetEXCState(force
)) != KERN_SUCCESS
) {
2811 DNBLogThreadedIf(LOG_THREAD
, "DNBArchImplX86_64::GetRegisterContext (buf "
2812 "= %p, len = %llu) error: EXC regs failed "
2814 buf
, (uint64_t)buf_len
, kret
);
2817 uint8_t *p
= (uint8_t *)buf
;
2818 // Copy the GPR registers
2819 memcpy(p
, &m_state
.context
.gpr
, sizeof(GPR
));
2822 // Walk around the gaps in the FPU regs
2823 memcpy(p
, &m_state
.context
.fpu
.no_avx
.__fpu_fcw
, 5);
2824 // We read 5 bytes, but we skip 6 to account for __fpu_rsrv1
2825 // to match the g_fpu_registers_* tables.
2827 memcpy(p
, &m_state
.context
.fpu
.no_avx
.__fpu_fop
, 8);
2829 memcpy(p
, &m_state
.context
.fpu
.no_avx
.__fpu_dp
, 6);
2831 memcpy(p
, &m_state
.context
.fpu
.no_avx
.__fpu_mxcsr
, 8);
2834 // Work around the padding between the stmm registers as they are 16
2835 // byte structs with 10 bytes of the value in each
2836 for (size_t i
= 0; i
< 8; ++i
) {
2837 memcpy(p
, &m_state
.context
.fpu
.no_avx
.__fpu_stmm0
+ i
, 10);
2841 if(CPUHasAVX512f() || FORCE_AVX_REGS
) {
2842 for (size_t i
= 0; i
< 8; ++i
) {
2843 memcpy(p
, &m_state
.context
.fpu
.avx512f
.__fpu_k0
+ i
, 8);
2848 if (CPUHasAVX() || FORCE_AVX_REGS
) {
2849 // Interleave the XMM and YMMH registers to make the YMM registers
2850 for (size_t i
= 0; i
< 16; ++i
) {
2851 memcpy(p
, &m_state
.context
.fpu
.avx
.__fpu_xmm0
+ i
, 16);
2853 memcpy(p
, &m_state
.context
.fpu
.avx
.__fpu_ymmh0
+ i
, 16);
2856 if(CPUHasAVX512f() || FORCE_AVX_REGS
) {
2857 for (size_t i
= 0; i
< 16; ++i
) {
2858 memcpy(p
, &m_state
.context
.fpu
.avx512f
.__fpu_zmmh0
+ i
, 32);
2861 for (size_t i
= 0; i
< 16; ++i
) {
2862 memcpy(p
, &m_state
.context
.fpu
.avx512f
.__fpu_zmm16
+ i
, 64);
2867 // Copy the XMM registers in a single block
2868 memcpy(p
, &m_state
.context
.fpu
.no_avx
.__fpu_xmm0
, 16 * 16);
2872 // Copy the exception registers
2873 memcpy(p
, &m_state
.context
.exc
, sizeof(EXC
));
2876 // make sure we end up with exactly what we think we should have
2877 size_t bytes_written
= p
- (uint8_t *)buf
;
2878 UNUSED_IF_ASSERT_DISABLED(bytes_written
);
2879 assert(bytes_written
== size
);
2885 "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %llu) => %u", buf
,
2886 (uint64_t)buf_len
, size
);
2887 // Return the size of the register context even if NULL was passed in
2891 nub_size_t
DNBArchImplX86_64::SetRegisterContext(const void *buf
,
2892 nub_size_t buf_len
) {
2893 uint32_t size
= GetRegisterContextSize();
2894 if (buf
== NULL
|| buf_len
== 0)
2899 size
= static_cast<uint32_t>(buf_len
);
2901 const uint8_t *p
= (const uint8_t *)buf
;
2902 // Copy the GPR registers
2903 memcpy(&m_state
.context
.gpr
, p
, sizeof(GPR
));
2906 // Copy fcw through mxcsrmask as there is no padding
2907 memcpy(&m_state
.context
.fpu
.no_avx
.__fpu_fcw
, p
, 5);
2908 // We wrote 5 bytes, but we skip 6 to account for __fpu_rsrv1
2909 // to match the g_fpu_registers_* tables.
2911 memcpy(&m_state
.context
.fpu
.no_avx
.__fpu_fop
, p
, 8);
2913 memcpy(&m_state
.context
.fpu
.no_avx
.__fpu_dp
, p
, 6);
2915 memcpy(&m_state
.context
.fpu
.no_avx
.__fpu_mxcsr
, p
, 8);
2918 // Work around the padding between the stmm registers as they are 16
2919 // byte structs with 10 bytes of the value in each
2920 for (size_t i
= 0; i
< 8; ++i
) {
2921 memcpy(&m_state
.context
.fpu
.no_avx
.__fpu_stmm0
+ i
, p
, 10);
2925 if(CPUHasAVX512f() || FORCE_AVX_REGS
) {
2926 for (size_t i
= 0; i
< 8; ++i
) {
2927 memcpy(&m_state
.context
.fpu
.avx512f
.__fpu_k0
+ i
, p
, 8);
2932 if (CPUHasAVX() || FORCE_AVX_REGS
) {
2933 // Interleave the XMM and YMMH registers to make the YMM registers
2934 for (size_t i
= 0; i
< 16; ++i
) {
2935 memcpy(&m_state
.context
.fpu
.avx
.__fpu_xmm0
+ i
, p
, 16);
2937 memcpy(&m_state
.context
.fpu
.avx
.__fpu_ymmh0
+ i
, p
, 16);
2940 if(CPUHasAVX512f() || FORCE_AVX_REGS
) {
2941 for (size_t i
= 0; i
< 16; ++i
) {
2942 memcpy(&m_state
.context
.fpu
.avx512f
.__fpu_zmmh0
+ i
, p
, 32);
2945 for (size_t i
= 0; i
< 16; ++i
) {
2946 memcpy(&m_state
.context
.fpu
.avx512f
.__fpu_zmm16
+ i
, p
, 64);
2951 // Copy the XMM registers in a single block
2952 memcpy(&m_state
.context
.fpu
.no_avx
.__fpu_xmm0
, p
, 16 * 16);
2956 // Copy the exception registers
2957 memcpy(&m_state
.context
.exc
, p
, sizeof(EXC
));
2960 // make sure we end up with exactly what we think we should have
2961 size_t bytes_written
= p
- (const uint8_t *)buf
;
2962 UNUSED_IF_ASSERT_DISABLED(bytes_written
);
2963 assert(bytes_written
== size
);
2966 if ((kret
= SetGPRState()) != KERN_SUCCESS
)
2967 DNBLogThreadedIf(LOG_THREAD
, "DNBArchImplX86_64::SetRegisterContext (buf "
2968 "= %p, len = %llu) error: GPR regs failed "
2970 buf
, (uint64_t)buf_len
, kret
);
2971 if ((kret
= SetFPUState()) != KERN_SUCCESS
)
2973 LOG_THREAD
, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = "
2974 "%llu) error: %s regs failed to write: %u",
2975 buf
, (uint64_t)buf_len
, CPUHasAVX() ? "AVX" : "FPU", kret
);
2976 if ((kret
= SetEXCState()) != KERN_SUCCESS
)
2977 DNBLogThreadedIf(LOG_THREAD
, "DNBArchImplX86_64::SetRegisterContext (buf "
2978 "= %p, len = %llu) error: EXP regs failed "
2980 buf
, (uint64_t)buf_len
, kret
);
2984 "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %llu) => %llu",
2985 buf
, (uint64_t)buf_len
, (uint64_t)size
);
2989 uint32_t DNBArchImplX86_64::SaveRegisterState() {
2990 kern_return_t kret
= ::thread_abort_safely(m_thread
->MachPortNumber());
2992 LOG_THREAD
, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
2993 "(SetGPRState() for stop_count = %u)",
2994 m_thread
->MachPortNumber(), kret
, m_thread
->Process()->StopCount());
2996 // Always re-read the registers because above we call thread_abort_safely();
2999 if ((kret
= GetGPRState(force
)) != KERN_SUCCESS
) {
3000 DNBLogThreadedIf(LOG_THREAD
, "DNBArchImplX86_64::SaveRegisterState () "
3001 "error: GPR regs failed to read: %u ",
3003 } else if ((kret
= GetFPUState(force
)) != KERN_SUCCESS
) {
3004 DNBLogThreadedIf(LOG_THREAD
, "DNBArchImplX86_64::SaveRegisterState () "
3005 "error: %s regs failed to read: %u",
3006 CPUHasAVX() ? "AVX" : "FPU", kret
);
3008 const uint32_t save_id
= GetNextRegisterStateSaveID();
3009 m_saved_register_states
[save_id
] = m_state
.context
;
3014 bool DNBArchImplX86_64::RestoreRegisterState(uint32_t save_id
) {
3015 SaveRegisterStates::iterator pos
= m_saved_register_states
.find(save_id
);
3016 if (pos
!= m_saved_register_states
.end()) {
3017 m_state
.context
.gpr
= pos
->second
.gpr
;
3018 m_state
.context
.fpu
= pos
->second
.fpu
;
3019 m_state
.SetError(e_regSetGPR
, Read
, 0);
3020 m_state
.SetError(e_regSetFPU
, Read
, 0);
3022 bool success
= true;
3023 if ((kret
= SetGPRState()) != KERN_SUCCESS
) {
3024 DNBLogThreadedIf(LOG_THREAD
, "DNBArchImplX86_64::RestoreRegisterState "
3025 "(save_id = %u) error: GPR regs failed to "
3029 } else if ((kret
= SetFPUState()) != KERN_SUCCESS
) {
3030 DNBLogThreadedIf(LOG_THREAD
, "DNBArchImplX86_64::RestoreRegisterState "
3031 "(save_id = %u) error: %s regs failed to "
3033 save_id
, CPUHasAVX() ? "AVX" : "FPU", kret
);
3036 m_saved_register_states
.erase(pos
);
3042 kern_return_t
DNBArchImplX86_64::GetRegisterState(int set
, bool force
) {
3045 return GetGPRState(force
) | GetFPUState(force
) | GetEXCState(force
);
3047 return GetGPRState(force
);
3049 return GetFPUState(force
);
3051 return GetEXCState(force
);
3055 return KERN_INVALID_ARGUMENT
;
3058 kern_return_t
DNBArchImplX86_64::SetRegisterState(int set
) {
3059 // Make sure we have a valid context to set.
3060 if (RegisterSetStateIsValid(set
)) {
3063 return SetGPRState() | SetFPUState() | SetEXCState();
3065 return SetGPRState();
3067 return SetFPUState();
3069 return SetEXCState();
3074 return KERN_INVALID_ARGUMENT
;
3077 bool DNBArchImplX86_64::RegisterSetStateIsValid(int set
) const {
3078 return m_state
.RegsAreValid(set
);
3081 #endif // #if defined (__i386__) || defined (__x86_64__)