1 //===-- DNBArchImplX86_64.cpp -----------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Created by Greg Clayton on 6/25/07.
11 //===----------------------------------------------------------------------===//
13 #if defined(__i386__) || defined(__x86_64__)
15 #include <sys/cdefs.h>
16 #include <sys/sysctl.h>
17 #include <sys/types.h>
19 #include "DNBLog.h"
20 #include "MacOSX/x86_64/DNBArchImplX86_64.h"
21 #include "MachProcess.h"
22 #include "MachThread.h"
23 #include <cstdlib>
24 #include <mach/mach.h>
26 #if defined(LLDB_DEBUGSERVER_RELEASE) || defined(LLDB_DEBUGSERVER_DEBUG)
27 enum debugState { debugStateUnknown, debugStateOff, debugStateOn };
29 static debugState sFPUDebugState = debugStateUnknown;
30 static debugState sAVXForceState = debugStateUnknown;
32 static bool DebugFPURegs() {
33 if (sFPUDebugState == debugStateUnknown) {
34 if (getenv("DNB_DEBUG_FPU_REGS"))
35 sFPUDebugState = debugStateOn;
36 else
37 sFPUDebugState = debugStateOff;
40 return (sFPUDebugState == debugStateOn);
43 static bool ForceAVXRegs() {
44 if (sAVXForceState == debugStateUnknown) {
45 if (getenv("DNB_DEBUG_X86_FORCE_AVX_REGS"))
46 sAVXForceState = debugStateOn;
47 else
48 sAVXForceState = debugStateOff;
51 return (sAVXForceState == debugStateOn);
54 #define DEBUG_FPU_REGS (DebugFPURegs())
55 #define FORCE_AVX_REGS (ForceAVXRegs())
56 #else
57 #define DEBUG_FPU_REGS (0)
58 #define FORCE_AVX_REGS (0)
59 #endif
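// Usage sketch (an illustration, not taken from the original source): these
// toggles only exist in LLDB_DEBUGSERVER_RELEASE / LLDB_DEBUGSERVER_DEBUG
// builds, and each one merely checks that its environment variable is set
// (getenv() returns non-NULL) -- the value itself is ignored. For example:
//   DNB_DEBUG_FPU_REGS=1 DNB_DEBUG_X86_FORCE_AVX_REGS=1 <debugserver invocation>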
61 bool DetectHardwareFeature(const char *feature) {
62 int answer = 0;
63 size_t answer_size = sizeof(answer);
64 int error = ::sysctlbyname(feature, &answer, &answer_size, NULL, 0);
65 return error == 0 && answer != 0;
68 enum AVXPresence { eAVXUnknown = -1, eAVXNotPresent = 0, eAVXPresent = 1 };
70 bool LogAVXAndReturn(AVXPresence has_avx, int err, const char * os_ver) {
71 DNBLogThreadedIf(LOG_THREAD,
72 "CPUHasAVX(): g_has_avx = %i (err = %i, os_ver = %s)",
73 has_avx, err, os_ver);
74 return (has_avx == eAVXPresent);
77 extern "C" bool CPUHasAVX() {
78 static AVXPresence g_has_avx = eAVXUnknown;
79 if (g_has_avx != eAVXUnknown)
80 return LogAVXAndReturn(g_has_avx, 0, "");
82 g_has_avx = eAVXNotPresent;
84 // OS X 10.7.3 and earlier have a bug in thread_get_state that truncated the
85 // size of the return. To work around this we have to disable AVX debugging
86 // on hosts prior to 10.7.3 (<rdar://problem/10122874>).
87 int mib[2];
88 char buffer[1024];
89 size_t length = sizeof(buffer);
90 mib[0] = CTL_KERN;
91 mib[1] = KERN_OSVERSION;
93 // KERN_OSVERSION returns the build number which is a number signifying the
94 // major version, a capital letter signifying the minor version, and numbers
95 // signifying the build (ex: on 10.12.3, the returned value is 16D32).
96 int err = ::sysctl(mib, 2, &buffer, &length, NULL, 0);
97 if (err != 0)
98 return LogAVXAndReturn(g_has_avx, err, "");
100 size_t first_letter = 0;
101 for (; first_letter < length; ++first_letter) {
102 // This is looking for the first uppercase letter
103 if (isupper(buffer[first_letter]))
104 break;
106 char letter = buffer[first_letter];
107 buffer[first_letter] = '\0';
108 auto major_ver = strtoull(buffer, NULL, 0);
109 buffer[first_letter] = letter;
111 // In this check we're looking to see that our major and minor version number
112 // was >= 11E, which is the 10.7.4 release.
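// Worked example of the check below (build strings are illustrative): for
// "16D32" -- the 10.12.3 value mentioned above -- major_ver parses as 16 and
// letter is 'D', so the test fails and we go on to probe hw.optional.avx1_0.
// For a hypothetical 10.7.3-era build such as "11D50", major_ver is 11 and
// 'D' < 'E', so we return early with AVX reported as not present.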
113 if (major_ver < 11 || (major_ver == 11 && letter < 'E'))
114 return LogAVXAndReturn(g_has_avx, err, buffer);
115 if (DetectHardwareFeature("hw.optional.avx1_0"))
116 g_has_avx = eAVXPresent;
118 return LogAVXAndReturn(g_has_avx, err, buffer);
121 extern "C" bool CPUHasAVX512f() {
122 static AVXPresence g_has_avx512f = eAVXUnknown;
123 if (g_has_avx512f != eAVXUnknown)
124 return g_has_avx512f == eAVXPresent;
126 g_has_avx512f = DetectHardwareFeature("hw.optional.avx512f") ? eAVXPresent
127 : eAVXNotPresent;
129 return (g_has_avx512f == eAVXPresent);
132 uint64_t DNBArchImplX86_64::GetPC(uint64_t failValue) {
133 // Get program counter
134 if (GetGPRState(false) == KERN_SUCCESS)
135 return m_state.context.gpr.__rip;
136 return failValue;
139 kern_return_t DNBArchImplX86_64::SetPC(uint64_t value) {
140 // Set the program counter
141 kern_return_t err = GetGPRState(false);
142 if (err == KERN_SUCCESS) {
143 m_state.context.gpr.__rip = value;
144 err = SetGPRState();
146 return err == KERN_SUCCESS;
149 uint64_t DNBArchImplX86_64::GetSP(uint64_t failValue) {
150 // Get stack pointer
151 if (GetGPRState(false) == KERN_SUCCESS)
152 return m_state.context.gpr.__rsp;
153 return failValue;
156 // Uncomment the value below to verify the values in the debugger.
157 //#define DEBUG_GPR_VALUES 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED
159 kern_return_t DNBArchImplX86_64::GetGPRState(bool force) {
160 if (force || m_state.GetError(e_regSetGPR, Read)) {
161 #if DEBUG_GPR_VALUES
162 m_state.context.gpr.__rax = ('a' << 8) + 'x';
163 m_state.context.gpr.__rbx = ('b' << 8) + 'x';
164 m_state.context.gpr.__rcx = ('c' << 8) + 'x';
165 m_state.context.gpr.__rdx = ('d' << 8) + 'x';
166 m_state.context.gpr.__rdi = ('d' << 8) + 'i';
167 m_state.context.gpr.__rsi = ('s' << 8) + 'i';
168 m_state.context.gpr.__rbp = ('b' << 8) + 'p';
169 m_state.context.gpr.__rsp = ('s' << 8) + 'p';
170 m_state.context.gpr.__r8 = ('r' << 8) + '8';
171 m_state.context.gpr.__r9 = ('r' << 8) + '9';
172 m_state.context.gpr.__r10 = ('r' << 8) + 'a';
173 m_state.context.gpr.__r11 = ('r' << 8) + 'b';
174 m_state.context.gpr.__r12 = ('r' << 8) + 'c';
175 m_state.context.gpr.__r13 = ('r' << 8) + 'd';
176 m_state.context.gpr.__r14 = ('r' << 8) + 'e';
177 m_state.context.gpr.__r15 = ('r' << 8) + 'f';
178 m_state.context.gpr.__rip = ('i' << 8) + 'p';
179 m_state.context.gpr.__rflags = ('f' << 8) + 'l';
180 m_state.context.gpr.__cs = ('c' << 8) + 's';
181 m_state.context.gpr.__fs = ('f' << 8) + 's';
182 m_state.context.gpr.__gs = ('g' << 8) + 's';
183 m_state.SetError(e_regSetGPR, Read, 0);
184 #else
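// Try the "full" GPR flavor first; it additionally carries ds, es, ss and
// gsbase. If the kernel rejects that request, the error path below falls back
// to the plain __x86_64_THREAD_STATE flavor and clears hasFullGPRState so
// later writes use the matching flavor.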
185 mach_msg_type_number_t count = e_regSetWordSizeGPRFull;
186 int flavor = __x86_64_THREAD_FULL_STATE;
187 m_state.SetError(
188 e_regSetGPR, Read,
189 ::thread_get_state(m_thread->MachPortNumber(), flavor,
190 (thread_state_t)&m_state.context.gpr, &count));
192 if (!m_state.GetError(e_regSetGPR, Read)) {
193 m_state.hasFullGPRState = true;
194 } else {
195 m_state.hasFullGPRState = false;
196 count = e_regSetWordSizeGPR;
197 flavor = __x86_64_THREAD_STATE;
198 m_state.SetError(
199 e_regSetGPR, Read,
200 ::thread_get_state(m_thread->MachPortNumber(), flavor,
201 (thread_state_t)&m_state.context.gpr, &count));
203 DNBLogThreadedIf(
204 LOG_THREAD,
205 "::thread_get_state (0x%4.4x, %u (%s), &gpr, %u) => 0x%8.8x"
206 "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
207 "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
208 "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
209 "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
210 "\n\trip = %16.16llx"
211 "\n\tflg = %16.16llx cs = %16.16llx fs = %16.16llx gs = %16.16llx"
212 "\n\t ds = %16.16llx es = %16.16llx ss = %16.16llx gsB = %16.16llx",
213 m_thread->MachPortNumber(), flavor,
214 m_state.hasFullGPRState ? "full" : "non-full",
215 m_state.hasFullGPRState ? e_regSetWordSizeGPRFull
216 : e_regSetWordSizeGPR,
217 m_state.GetError(e_regSetGPR, Read),
218 m_state.context.gpr.__rax, m_state.context.gpr.__rbx,
219 m_state.context.gpr.__rcx, m_state.context.gpr.__rdx,
220 m_state.context.gpr.__rdi, m_state.context.gpr.__rsi,
221 m_state.context.gpr.__rbp, m_state.context.gpr.__rsp,
222 m_state.context.gpr.__r8, m_state.context.gpr.__r9,
223 m_state.context.gpr.__r10, m_state.context.gpr.__r11,
224 m_state.context.gpr.__r12, m_state.context.gpr.__r13,
225 m_state.context.gpr.__r14, m_state.context.gpr.__r15,
226 m_state.context.gpr.__rip, m_state.context.gpr.__rflags,
227 m_state.context.gpr.__cs, m_state.context.gpr.__fs,
228 m_state.context.gpr.__gs, m_state.context.gpr.__ds,
229 m_state.context.gpr.__es, m_state.context.gpr.__ss,
230 m_state.context.gpr.__gsbase );
232 // DNBLogThreadedIf (LOG_THREAD, "thread_get_state(0x%4.4x, %u, &gpr, %u)
233 // => 0x%8.8x"
234 // "\n\trax = %16.16llx"
235 // "\n\trbx = %16.16llx"
236 // "\n\trcx = %16.16llx"
237 // "\n\trdx = %16.16llx"
238 // "\n\trdi = %16.16llx"
239 // "\n\trsi = %16.16llx"
240 // "\n\trbp = %16.16llx"
241 // "\n\trsp = %16.16llx"
242 // "\n\t r8 = %16.16llx"
243 // "\n\t r9 = %16.16llx"
244 // "\n\tr10 = %16.16llx"
245 // "\n\tr11 = %16.16llx"
246 // "\n\tr12 = %16.16llx"
247 // "\n\tr13 = %16.16llx"
248 // "\n\tr14 = %16.16llx"
249 // "\n\tr15 = %16.16llx"
250 // "\n\trip = %16.16llx"
251 // "\n\tflg = %16.16llx"
252 // "\n\t cs = %16.16llx"
253 // "\n\t fs = %16.16llx"
254 // "\n\t gs = %16.16llx",
255 // m_thread->MachPortNumber(),
256 // x86_THREAD_STATE64,
257 // x86_THREAD_STATE64_COUNT,
258 // m_state.GetError(e_regSetGPR, Read),
259 // m_state.context.gpr.__rax,
260 // m_state.context.gpr.__rbx,
261 // m_state.context.gpr.__rcx,
262 // m_state.context.gpr.__rdx,
263 // m_state.context.gpr.__rdi,
264 // m_state.context.gpr.__rsi,
265 // m_state.context.gpr.__rbp,
266 // m_state.context.gpr.__rsp,
267 // m_state.context.gpr.__r8,
268 // m_state.context.gpr.__r9,
269 // m_state.context.gpr.__r10,
270 // m_state.context.gpr.__r11,
271 // m_state.context.gpr.__r12,
272 // m_state.context.gpr.__r13,
273 // m_state.context.gpr.__r14,
274 // m_state.context.gpr.__r15,
275 // m_state.context.gpr.__rip,
276 // m_state.context.gpr.__rflags,
277 // m_state.context.gpr.__cs,
278 // m_state.context.gpr.__fs,
279 // m_state.context.gpr.__gs);
280 #endif
282 return m_state.GetError(e_regSetGPR, Read);
285 // Uncomment the value below to verify the values in the debugger.
286 //#define DEBUG_FPU_REGS 1 // DO NOT CHECK IN WITH THIS DEFINE ENABLED
288 kern_return_t DNBArchImplX86_64::GetFPUState(bool force) {
289 if (force || m_state.GetError(e_regSetFPU, Read)) {
290 if (DEBUG_FPU_REGS) {
291 m_state.context.fpu.no_avx.__fpu_reserved[0] = -1;
292 m_state.context.fpu.no_avx.__fpu_reserved[1] = -1;
293 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fcw) = 0x1234;
294 *(uint16_t *)&(m_state.context.fpu.no_avx.__fpu_fsw) = 0x5678;
295 m_state.context.fpu.no_avx.__fpu_ftw = 1;
296 m_state.context.fpu.no_avx.__fpu_rsrv1 = UINT8_MAX;
297 m_state.context.fpu.no_avx.__fpu_fop = 2;
298 m_state.context.fpu.no_avx.__fpu_ip = 3;
299 m_state.context.fpu.no_avx.__fpu_cs = 4;
300 m_state.context.fpu.no_avx.__fpu_rsrv2 = 5;
301 m_state.context.fpu.no_avx.__fpu_dp = 6;
302 m_state.context.fpu.no_avx.__fpu_ds = 7;
303 m_state.context.fpu.no_avx.__fpu_rsrv3 = UINT16_MAX;
304 m_state.context.fpu.no_avx.__fpu_mxcsr = 8;
305 m_state.context.fpu.no_avx.__fpu_mxcsrmask = 9;
306 for (int i = 0; i < 16; ++i) {
307 if (i < 10) {
308 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = 'a';
309 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = 'b';
310 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = 'c';
311 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = 'd';
312 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = 'e';
313 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = 'f';
314 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = 'g';
315 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = 'h';
316 } else {
317 m_state.context.fpu.no_avx.__fpu_stmm0.__mmst_reg[i] = INT8_MIN;
318 m_state.context.fpu.no_avx.__fpu_stmm1.__mmst_reg[i] = INT8_MIN;
319 m_state.context.fpu.no_avx.__fpu_stmm2.__mmst_reg[i] = INT8_MIN;
320 m_state.context.fpu.no_avx.__fpu_stmm3.__mmst_reg[i] = INT8_MIN;
321 m_state.context.fpu.no_avx.__fpu_stmm4.__mmst_reg[i] = INT8_MIN;
322 m_state.context.fpu.no_avx.__fpu_stmm5.__mmst_reg[i] = INT8_MIN;
323 m_state.context.fpu.no_avx.__fpu_stmm6.__mmst_reg[i] = INT8_MIN;
324 m_state.context.fpu.no_avx.__fpu_stmm7.__mmst_reg[i] = INT8_MIN;
327 m_state.context.fpu.no_avx.__fpu_xmm0.__xmm_reg[i] = '0';
328 m_state.context.fpu.no_avx.__fpu_xmm1.__xmm_reg[i] = '1';
329 m_state.context.fpu.no_avx.__fpu_xmm2.__xmm_reg[i] = '2';
330 m_state.context.fpu.no_avx.__fpu_xmm3.__xmm_reg[i] = '3';
331 m_state.context.fpu.no_avx.__fpu_xmm4.__xmm_reg[i] = '4';
332 m_state.context.fpu.no_avx.__fpu_xmm5.__xmm_reg[i] = '5';
333 m_state.context.fpu.no_avx.__fpu_xmm6.__xmm_reg[i] = '6';
334 m_state.context.fpu.no_avx.__fpu_xmm7.__xmm_reg[i] = '7';
335 m_state.context.fpu.no_avx.__fpu_xmm8.__xmm_reg[i] = '8';
336 m_state.context.fpu.no_avx.__fpu_xmm9.__xmm_reg[i] = '9';
337 m_state.context.fpu.no_avx.__fpu_xmm10.__xmm_reg[i] = 'A';
338 m_state.context.fpu.no_avx.__fpu_xmm11.__xmm_reg[i] = 'B';
339 m_state.context.fpu.no_avx.__fpu_xmm12.__xmm_reg[i] = 'C';
340 m_state.context.fpu.no_avx.__fpu_xmm13.__xmm_reg[i] = 'D';
341 m_state.context.fpu.no_avx.__fpu_xmm14.__xmm_reg[i] = 'E';
342 m_state.context.fpu.no_avx.__fpu_xmm15.__xmm_reg[i] = 'F';
344 for (int i = 0; i < sizeof(m_state.context.fpu.no_avx.__fpu_rsrv4); ++i)
345 m_state.context.fpu.no_avx.__fpu_rsrv4[i] = INT8_MIN;
346 m_state.context.fpu.no_avx.__fpu_reserved1 = -1;
348 if (CPUHasAVX() || FORCE_AVX_REGS) {
349 for (int i = 0; i < 16; ++i) {
350 m_state.context.fpu.avx.__fpu_ymmh0.__xmm_reg[i] = '0' + i;
351 m_state.context.fpu.avx.__fpu_ymmh1.__xmm_reg[i] = '1' + i;
352 m_state.context.fpu.avx.__fpu_ymmh2.__xmm_reg[i] = '2' + i;
353 m_state.context.fpu.avx.__fpu_ymmh3.__xmm_reg[i] = '3' + i;
354 m_state.context.fpu.avx.__fpu_ymmh4.__xmm_reg[i] = '4' + i;
355 m_state.context.fpu.avx.__fpu_ymmh5.__xmm_reg[i] = '5' + i;
356 m_state.context.fpu.avx.__fpu_ymmh6.__xmm_reg[i] = '6' + i;
357 m_state.context.fpu.avx.__fpu_ymmh7.__xmm_reg[i] = '7' + i;
358 m_state.context.fpu.avx.__fpu_ymmh8.__xmm_reg[i] = '8' + i;
359 m_state.context.fpu.avx.__fpu_ymmh9.__xmm_reg[i] = '9' + i;
360 m_state.context.fpu.avx.__fpu_ymmh10.__xmm_reg[i] = 'A' + i;
361 m_state.context.fpu.avx.__fpu_ymmh11.__xmm_reg[i] = 'B' + i;
362 m_state.context.fpu.avx.__fpu_ymmh12.__xmm_reg[i] = 'C' + i;
363 m_state.context.fpu.avx.__fpu_ymmh13.__xmm_reg[i] = 'D' + i;
364 m_state.context.fpu.avx.__fpu_ymmh14.__xmm_reg[i] = 'E' + i;
365 m_state.context.fpu.avx.__fpu_ymmh15.__xmm_reg[i] = 'F' + i;
367 for (int i = 0; i < sizeof(m_state.context.fpu.avx.__avx_reserved1); ++i)
368 m_state.context.fpu.avx.__avx_reserved1[i] = INT8_MIN;
370 if (CPUHasAVX512f() || FORCE_AVX_REGS) {
371 for (int i = 0; i < 8; ++i) {
372 m_state.context.fpu.avx512f.__fpu_k0.__opmask_reg[i] = '0';
373 m_state.context.fpu.avx512f.__fpu_k1.__opmask_reg[i] = '1';
374 m_state.context.fpu.avx512f.__fpu_k2.__opmask_reg[i] = '2';
375 m_state.context.fpu.avx512f.__fpu_k3.__opmask_reg[i] = '3';
376 m_state.context.fpu.avx512f.__fpu_k4.__opmask_reg[i] = '4';
377 m_state.context.fpu.avx512f.__fpu_k5.__opmask_reg[i] = '5';
378 m_state.context.fpu.avx512f.__fpu_k6.__opmask_reg[i] = '6';
379 m_state.context.fpu.avx512f.__fpu_k7.__opmask_reg[i] = '7';
382 for (int i = 0; i < 32; ++i) {
383 m_state.context.fpu.avx512f.__fpu_zmmh0.__ymm_reg[i] = '0';
384 m_state.context.fpu.avx512f.__fpu_zmmh1.__ymm_reg[i] = '1';
385 m_state.context.fpu.avx512f.__fpu_zmmh2.__ymm_reg[i] = '2';
386 m_state.context.fpu.avx512f.__fpu_zmmh3.__ymm_reg[i] = '3';
387 m_state.context.fpu.avx512f.__fpu_zmmh4.__ymm_reg[i] = '4';
388 m_state.context.fpu.avx512f.__fpu_zmmh5.__ymm_reg[i] = '5';
389 m_state.context.fpu.avx512f.__fpu_zmmh6.__ymm_reg[i] = '6';
390 m_state.context.fpu.avx512f.__fpu_zmmh7.__ymm_reg[i] = '7';
391 m_state.context.fpu.avx512f.__fpu_zmmh8.__ymm_reg[i] = '8';
392 m_state.context.fpu.avx512f.__fpu_zmmh9.__ymm_reg[i] = '9';
393 m_state.context.fpu.avx512f.__fpu_zmmh10.__ymm_reg[i] = 'A';
394 m_state.context.fpu.avx512f.__fpu_zmmh11.__ymm_reg[i] = 'B';
395 m_state.context.fpu.avx512f.__fpu_zmmh12.__ymm_reg[i] = 'C';
396 m_state.context.fpu.avx512f.__fpu_zmmh13.__ymm_reg[i] = 'D';
397 m_state.context.fpu.avx512f.__fpu_zmmh14.__ymm_reg[i] = 'E';
398 m_state.context.fpu.avx512f.__fpu_zmmh15.__ymm_reg[i] = 'F';
400 for (int i = 0; i < 64; ++i) {
401 m_state.context.fpu.avx512f.__fpu_zmm16.__zmm_reg[i] = 'G';
402 m_state.context.fpu.avx512f.__fpu_zmm17.__zmm_reg[i] = 'H';
403 m_state.context.fpu.avx512f.__fpu_zmm18.__zmm_reg[i] = 'I';
404 m_state.context.fpu.avx512f.__fpu_zmm19.__zmm_reg[i] = 'J';
405 m_state.context.fpu.avx512f.__fpu_zmm20.__zmm_reg[i] = 'K';
406 m_state.context.fpu.avx512f.__fpu_zmm21.__zmm_reg[i] = 'L';
407 m_state.context.fpu.avx512f.__fpu_zmm22.__zmm_reg[i] = 'M';
408 m_state.context.fpu.avx512f.__fpu_zmm23.__zmm_reg[i] = 'N';
409 m_state.context.fpu.avx512f.__fpu_zmm24.__zmm_reg[i] = 'O';
410 m_state.context.fpu.avx512f.__fpu_zmm25.__zmm_reg[i] = 'P';
411 m_state.context.fpu.avx512f.__fpu_zmm26.__zmm_reg[i] = 'Q';
412 m_state.context.fpu.avx512f.__fpu_zmm27.__zmm_reg[i] = 'R';
413 m_state.context.fpu.avx512f.__fpu_zmm28.__zmm_reg[i] = 'S';
414 m_state.context.fpu.avx512f.__fpu_zmm29.__zmm_reg[i] = 'T';
415 m_state.context.fpu.avx512f.__fpu_zmm30.__zmm_reg[i] = 'U';
416 m_state.context.fpu.avx512f.__fpu_zmm31.__zmm_reg[i] = 'V';
419 m_state.SetError(e_regSetFPU, Read, 0);
420 } else {
421 mach_msg_type_number_t count = e_regSetWordSizeFPU;
422 int flavor = __x86_64_FLOAT_STATE;
423 // On a machine with the AVX512 register set, a process only gets a
424 // full AVX512 register context after it uses the AVX512 registers;
425 // if the process has not yet triggered this change, trying to fetch
426 // the AVX512 registers will fail. Fall through to fetching the AVX
427 // registers.
428 if (CPUHasAVX512f() || FORCE_AVX_REGS) {
429 count = e_regSetWordSizeAVX512f;
430 flavor = __x86_64_AVX512F_STATE;
431 m_state.SetError(e_regSetFPU, Read,
432 ::thread_get_state(m_thread->MachPortNumber(), flavor,
433 (thread_state_t)&m_state.context.fpu,
434 &count));
435 DNBLogThreadedIf(LOG_THREAD,
436 "::thread_get_state (0x%4.4x, %u, &fpu, %u => 0x%8.8x",
437 m_thread->MachPortNumber(), flavor, (uint32_t)count,
438 m_state.GetError(e_regSetFPU, Read));
440 if (m_state.GetError(e_regSetFPU, Read) == KERN_SUCCESS)
441 return m_state.GetError(e_regSetFPU, Read);
442 else
443 DNBLogThreadedIf(LOG_THREAD,
444 "::thread_get_state attempted fetch of avx512 fpu regctx failed, will try fetching avx");
446 if (CPUHasAVX() || FORCE_AVX_REGS) {
447 count = e_regSetWordSizeAVX;
448 flavor = __x86_64_AVX_STATE;
450 m_state.SetError(e_regSetFPU, Read,
451 ::thread_get_state(m_thread->MachPortNumber(), flavor,
452 (thread_state_t)&m_state.context.fpu,
453 &count));
454 DNBLogThreadedIf(LOG_THREAD,
455 "::thread_get_state (0x%4.4x, %u, &fpu, %u => 0x%8.8x",
456 m_thread->MachPortNumber(), flavor, (uint32_t)count,
457 m_state.GetError(e_regSetFPU, Read));
460 return m_state.GetError(e_regSetFPU, Read);
463 kern_return_t DNBArchImplX86_64::GetEXCState(bool force) {
464 if (force || m_state.GetError(e_regSetEXC, Read)) {
465 mach_msg_type_number_t count = e_regSetWordSizeEXC;
466 m_state.SetError(
467 e_regSetEXC, Read,
468 ::thread_get_state(m_thread->MachPortNumber(), __x86_64_EXCEPTION_STATE,
469 (thread_state_t)&m_state.context.exc, &count));
471 return m_state.GetError(e_regSetEXC, Read);
474 kern_return_t DNBArchImplX86_64::SetGPRState() {
475 kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
476 DNBLogThreadedIf(
477 LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
478 "(SetGPRState() for stop_count = %u)",
479 m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());
481 mach_msg_type_number_t count =
482 m_state.hasFullGPRState ? e_regSetWordSizeGPRFull : e_regSetWordSizeGPR;
483 int flavor = m_state.hasFullGPRState ? __x86_64_THREAD_FULL_STATE
484 : __x86_64_THREAD_STATE;
485 m_state.SetError(e_regSetGPR, Write,
486 ::thread_set_state(m_thread->MachPortNumber(), flavor,
487 (thread_state_t)&m_state.context.gpr,
488 count));
489 DNBLogThreadedIf(
490 LOG_THREAD,
491 "::thread_set_state (0x%4.4x, %u (%s), &gpr, %u) => 0x%8.8x"
492 "\n\trax = %16.16llx rbx = %16.16llx rcx = %16.16llx rdx = %16.16llx"
493 "\n\trdi = %16.16llx rsi = %16.16llx rbp = %16.16llx rsp = %16.16llx"
494 "\n\t r8 = %16.16llx r9 = %16.16llx r10 = %16.16llx r11 = %16.16llx"
495 "\n\tr12 = %16.16llx r13 = %16.16llx r14 = %16.16llx r15 = %16.16llx"
496 "\n\trip = %16.16llx"
497 "\n\tflg = %16.16llx cs = %16.16llx fs = %16.16llx gs = %16.16llx"
498 "\n\t ds = %16.16llx es = %16.16llx ss = %16.16llx gsB = %16.16llx",
499 m_thread->MachPortNumber(), flavor,
500 m_state.hasFullGPRState ? "full" : "non-full", count,
501 m_state.GetError(e_regSetGPR, Write), m_state.context.gpr.__rax,
502 m_state.context.gpr.__rbx, m_state.context.gpr.__rcx,
503 m_state.context.gpr.__rdx, m_state.context.gpr.__rdi,
504 m_state.context.gpr.__rsi, m_state.context.gpr.__rbp,
505 m_state.context.gpr.__rsp, m_state.context.gpr.__r8,
506 m_state.context.gpr.__r9, m_state.context.gpr.__r10,
507 m_state.context.gpr.__r11, m_state.context.gpr.__r12,
508 m_state.context.gpr.__r13, m_state.context.gpr.__r14,
509 m_state.context.gpr.__r15, m_state.context.gpr.__rip,
510 m_state.context.gpr.__rflags, m_state.context.gpr.__cs,
511 m_state.context.gpr.__fs, m_state.context.gpr.__gs,
512 m_state.context.gpr.__ds, m_state.context.gpr.__es,
513 m_state.context.gpr.__ss, m_state.context.gpr.__gsbase);
514 return m_state.GetError(e_regSetGPR, Write);
517 kern_return_t DNBArchImplX86_64::SetFPUState() {
518 if (DEBUG_FPU_REGS) {
519 m_state.SetError(e_regSetFPU, Write, 0);
520 return m_state.GetError(e_regSetFPU, Write);
521 } else {
522 int flavor = __x86_64_FLOAT_STATE;
523 mach_msg_type_number_t count = e_regSetWordSizeFPU;
524 if (CPUHasAVX512f() || FORCE_AVX_REGS) {
525 count = e_regSetWordSizeAVX512f;
526 flavor = __x86_64_AVX512F_STATE;
527 m_state.SetError(
528 e_regSetFPU, Write,
529 ::thread_set_state(m_thread->MachPortNumber(), flavor,
530 (thread_state_t)&m_state.context.fpu, count));
531 if (m_state.GetError(e_regSetFPU, Write) == KERN_SUCCESS)
532 return m_state.GetError(e_regSetFPU, Write);
533 else
534 DNBLogThreadedIf(LOG_THREAD,
535 "::thread_get_state attempted save of avx512 fpu regctx failed, will try saving avx regctx");
538 if (CPUHasAVX() || FORCE_AVX_REGS) {
539 flavor = __x86_64_AVX_STATE;
540 count = e_regSetWordSizeAVX;
542 m_state.SetError(
543 e_regSetFPU, Write,
544 ::thread_set_state(m_thread->MachPortNumber(), flavor,
545 (thread_state_t)&m_state.context.fpu, count));
546 return m_state.GetError(e_regSetFPU, Write);
550 kern_return_t DNBArchImplX86_64::SetEXCState() {
551 m_state.SetError(e_regSetEXC, Write,
552 ::thread_set_state(m_thread->MachPortNumber(),
553 __x86_64_EXCEPTION_STATE,
554 (thread_state_t)&m_state.context.exc,
555 e_regSetWordSizeEXC));
556 return m_state.GetError(e_regSetEXC, Write);
559 kern_return_t DNBArchImplX86_64::GetDBGState(bool force) {
560 if (force || m_state.GetError(e_regSetDBG, Read)) {
561 mach_msg_type_number_t count = e_regSetWordSizeDBG;
562 m_state.SetError(
563 e_regSetDBG, Read,
564 ::thread_get_state(m_thread->MachPortNumber(), __x86_64_DEBUG_STATE,
565 (thread_state_t)&m_state.context.dbg, &count));
567 return m_state.GetError(e_regSetDBG, Read);
570 kern_return_t DNBArchImplX86_64::SetDBGState(bool also_set_on_task) {
571 m_state.SetError(e_regSetDBG, Write,
572 ::thread_set_state(m_thread->MachPortNumber(),
573 __x86_64_DEBUG_STATE,
574 (thread_state_t)&m_state.context.dbg,
575 e_regSetWordSizeDBG));
576 if (also_set_on_task) {
577 kern_return_t kret = ::task_set_state(
578 m_thread->Process()->Task().TaskPort(), __x86_64_DEBUG_STATE,
579 (thread_state_t)&m_state.context.dbg, e_regSetWordSizeDBG);
580 if (kret != KERN_SUCCESS)
581 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::SetDBGState failed "
582 "to set debug control register state: "
583 "0x%8.8x.",
584 kret);
586 return m_state.GetError(e_regSetDBG, Write);
589 void DNBArchImplX86_64::ThreadWillResume() {
590 // Do we need to step this thread? If so, let the mach thread tell us so.
591 if (m_thread->IsStepping()) {
592 // This is the primary thread, let the arch do anything it needs
593 EnableHardwareSingleStep(true);
596 // Reset the debug status register, if necessary, before we resume.
597 kern_return_t kret = GetDBGState(false);
598 DNBLogThreadedIf(
599 LOG_WATCHPOINTS,
600 "DNBArchImplX86_64::ThreadWillResume() GetDBGState() => 0x%8.8x.", kret);
601 if (kret != KERN_SUCCESS)
602 return;
604 DBG &debug_state = m_state.context.dbg;
605 bool need_reset = false;
606 uint32_t i, num = NumSupportedHardwareWatchpoints();
607 for (i = 0; i < num; ++i)
608 if (IsWatchpointHit(debug_state, i))
609 need_reset = true;
611 if (need_reset) {
612 ClearWatchpointHits(debug_state);
613 kret = SetDBGState(false);
614 DNBLogThreadedIf(
615 LOG_WATCHPOINTS,
616 "DNBArchImplX86_64::ThreadWillResume() SetDBGState() => 0x%8.8x.",
617 kret);
621 bool DNBArchImplX86_64::ThreadDidStop() {
622 bool success = true;
624 m_state.InvalidateAllRegisterStates();
626 // Are we stepping a single instruction?
627 if (GetGPRState(true) == KERN_SUCCESS) {
628 // We are single stepping, was this the primary thread?
629 if (m_thread->IsStepping()) {
630 // This was the primary thread, we need to clear the trace
631 // bit if so.
632 success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
633 } else {
634 // The MachThread will automatically restore the suspend count
635 // in ThreadDidStop(), so we don't need to do anything here if
636 // we weren't the primary thread the last time
639 return success;
642 bool DNBArchImplX86_64::NotifyException(MachException::Data &exc) {
643 switch (exc.exc_type) {
644 case EXC_BAD_ACCESS:
645 break;
646 case EXC_BAD_INSTRUCTION:
647 break;
648 case EXC_ARITHMETIC:
649 break;
650 case EXC_EMULATION:
651 break;
652 case EXC_SOFTWARE:
653 break;
654 case EXC_BREAKPOINT:
655 if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 2) {
656 // exc_code = EXC_I386_BPT
658 nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
659 if (pc != INVALID_NUB_ADDRESS && pc > 0) {
660 pc -= 1;
661 // Check for a breakpoint at one byte prior to the current PC value
662 // since the PC will be just past the trap.
664 DNBBreakpoint *bp =
665 m_thread->Process()->Breakpoints().FindByAddress(pc);
666 if (bp) {
667 // Backup the PC for i386 since the trap was taken and the PC
668 // is at the address following the single byte trap instruction.
669 if (m_state.context.gpr.__rip > 0) {
670 m_state.context.gpr.__rip = pc;
671 // Write the new PC back out
672 SetGPRState();
675 return true;
677 } else if (exc.exc_data.size() >= 2 && exc.exc_data[0] == 1) {
678 // exc_code = EXC_I386_SGL
680 // Check whether this corresponds to a watchpoint hit event.
681 // If yes, set the exc_sub_code to the data break address.
682 nub_addr_t addr = 0;
683 uint32_t hw_index = GetHardwareWatchpointHit(addr);
684 if (hw_index != INVALID_NUB_HW_INDEX) {
685 exc.exc_data[1] = addr;
686 // Piggyback the hw_index in the exc.data.
687 exc.exc_data.push_back(hw_index);
690 return true;
692 break;
693 case EXC_SYSCALL:
694 break;
695 case EXC_MACH_SYSCALL:
696 break;
697 case EXC_RPC_ALERT:
698 break;
700 return false;
703 uint32_t DNBArchImplX86_64::NumSupportedHardwareWatchpoints() {
704 // Available debug address registers: dr0, dr1, dr2, dr3.
705 return 4;
708 uint32_t DNBArchImplX86_64::NumSupportedHardwareBreakpoints() {
709 DNBLogThreadedIf(LOG_BREAKPOINTS,
710 "DNBArchImplX86_64::NumSupportedHardwareBreakpoints");
711 return 4;
714 static uint32_t size_and_rw_bits(nub_size_t size, bool read, bool write) {
715 uint32_t rw;
716 if (read) {
717 rw = 0x3; // READ or READ/WRITE
718 } else if (write) {
719 rw = 0x1; // WRITE
720 } else {
721 assert(0 && "read and write cannot both be false");
724 switch (size) {
725 case 1:
726 return rw;
727 case 2:
728 return (0x1 << 2) | rw;
729 case 4:
730 return (0x3 << 2) | rw;
731 case 8:
732 return (0x2 << 2) | rw;
734 assert(0 && "invalid size, must be one of 1, 2, 4, or 8");
735 return 0;
737 void DNBArchImplX86_64::SetWatchpoint(DBG &debug_state, uint32_t hw_index,
738 nub_addr_t addr, nub_size_t size,
739 bool read, bool write) {
740 // Set both dr7 (debug control register) and dri (debug address register).
742 // dr7{7-0} encodes the local/global enable bits:
743 // global enable --. .-- local enable
744 // | |
745 // v v
746 // dr0 -> bits{1-0}
747 // dr1 -> bits{3-2}
748 // dr2 -> bits{5-4}
749 // dr3 -> bits{7-6}
751 // dr7{31-16} encodes the rw/len bits:
752 // b_x+3, b_x+2, b_x+1, b_x
753 // where bits{x+1, x} => rw
754 // 0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io
755 // read-or-write (unused)
756 // and bits{x+3, x+2} => len
757 // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
759 // dr0 -> bits{19-16}
760 // dr1 -> bits{23-20}
761 // dr2 -> bits{27-24}
762 // dr3 -> bits{31-28}
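// Worked example (values illustrative, derived from the layout above): a
// 4-byte write watchpoint in slot 1 has
//   size_and_rw_bits(4, /*read*/ false, /*write*/ true) == (0x3 << 2) | 0x1 == 0xD
// so the statement below ORs in (1 << 2) | (0xD << 20): the local enable bit
// for dr1 plus the rw/len nibble 0b1101 in dr7 bits 23-20.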
763 debug_state.__dr7 |=
764 (1 << (2 * hw_index) |
765 size_and_rw_bits(size, read, write) << (16 + 4 * hw_index));
766 switch (hw_index) {
767 case 0:
768 debug_state.__dr0 = addr;
769 break;
770 case 1:
771 debug_state.__dr1 = addr;
772 break;
773 case 2:
774 debug_state.__dr2 = addr;
775 break;
776 case 3:
777 debug_state.__dr3 = addr;
778 break;
779 default:
780 assert(0 &&
781 "invalid hardware register index, must be one of 0, 1, 2, or 3");
783 return;
786 void DNBArchImplX86_64::ClearWatchpoint(DBG &debug_state, uint32_t hw_index) {
787 debug_state.__dr7 &= ~(3 << (2 * hw_index));
788 switch (hw_index) {
789 case 0:
790 debug_state.__dr0 = 0;
791 break;
792 case 1:
793 debug_state.__dr1 = 0;
794 break;
795 case 2:
796 debug_state.__dr2 = 0;
797 break;
798 case 3:
799 debug_state.__dr3 = 0;
800 break;
801 default:
802 assert(0 &&
803 "invalid hardware register index, must be one of 0, 1, 2, or 3");
805 return;
808 bool DNBArchImplX86_64::IsWatchpointVacant(const DBG &debug_state,
809 uint32_t hw_index) {
810 // Check dr7 (debug control register) for local/global enable bits:
811 // global enable --. .-- local enable
812 // | |
813 // v v
814 // dr0 -> bits{1-0}
815 // dr1 -> bits{3-2}
816 // dr2 -> bits{5-4}
817 // dr3 -> bits{7-6}
818 return (debug_state.__dr7 & (3 << (2 * hw_index))) == 0;
821 // Resets local copy of debug status register to wait for the next debug
822 // exception.
823 void DNBArchImplX86_64::ClearWatchpointHits(DBG &debug_state) {
824 // See also IsWatchpointHit().
825 debug_state.__dr6 = 0;
826 return;
829 bool DNBArchImplX86_64::IsWatchpointHit(const DBG &debug_state,
830 uint32_t hw_index) {
831 // Check dr6 (debug status register) whether a watchpoint hits:
832 // is watchpoint hit?
833 // |
834 // v
835 // dr0 -> bits{0}
836 // dr1 -> bits{1}
837 // dr2 -> bits{2}
838 // dr3 -> bits{3}
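// e.g. __dr6 == 0x4 means only bit 2 is set, i.e. the watchpoint in dr2 hit.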
839 return (debug_state.__dr6 & (1 << hw_index));
842 nub_addr_t DNBArchImplX86_64::GetWatchAddress(const DBG &debug_state,
843 uint32_t hw_index) {
844 switch (hw_index) {
845 case 0:
846 return debug_state.__dr0;
847 case 1:
848 return debug_state.__dr1;
849 case 2:
850 return debug_state.__dr2;
851 case 3:
852 return debug_state.__dr3;
854 assert(0 && "invalid hardware register index, must be one of 0, 1, 2, or 3");
855 return 0;
858 bool DNBArchImplX86_64::StartTransForHWP() {
859 if (m_2pc_trans_state != Trans_Done && m_2pc_trans_state != Trans_Rolled_Back)
860 DNBLogError("%s inconsistent state detected, expected %d or %d, got: %d",
861 __FUNCTION__, Trans_Done, Trans_Rolled_Back, m_2pc_trans_state);
862 m_2pc_dbg_checkpoint = m_state.context.dbg;
863 m_2pc_trans_state = Trans_Pending;
864 return true;
866 bool DNBArchImplX86_64::RollbackTransForHWP() {
867 m_state.context.dbg = m_2pc_dbg_checkpoint;
868 if (m_2pc_trans_state != Trans_Pending)
869 DNBLogError("%s inconsistent state detected, expected %d, got: %d",
870 __FUNCTION__, Trans_Pending, m_2pc_trans_state);
871 m_2pc_trans_state = Trans_Rolled_Back;
872 kern_return_t kret = SetDBGState(false);
873 DNBLogThreadedIf(
874 LOG_WATCHPOINTS,
875 "DNBArchImplX86_64::RollbackTransForHWP() SetDBGState() => 0x%8.8x.",
876 kret);
878 return kret == KERN_SUCCESS;
880 bool DNBArchImplX86_64::FinishTransForHWP() {
881 m_2pc_trans_state = Trans_Done;
882 return true;
884 DNBArchImplX86_64::DBG DNBArchImplX86_64::GetDBGCheckpoint() {
885 return m_2pc_dbg_checkpoint;
888 void DNBArchImplX86_64::SetHardwareBreakpoint(DBG &debug_state,
889 uint32_t hw_index,
890 nub_addr_t addr,
891 nub_size_t size) {
892 // Set both dr7 (debug control register) and dri (debug address register).
894 // dr7{7-0} encodes the local/global enable bits:
895 // global enable --. .-- local enable
896 // | |
897 // v v
898 // dr0 -> bits{1-0}
899 // dr1 -> bits{3-2}
900 // dr2 -> bits{5-4}
901 // dr3 -> bits{7-6}
903 // dr7{31-16} encodes the rw/len bits:
904 // b_x+3, b_x+2, b_x+1, b_x
905 // where bits{x+1, x} => rw
906 // 0b00: execute, 0b01: write, 0b11: read-or-write, 0b10: io
907 // read-or-write (unused)
908 // and bits{x+3, x+2} => len
909 // 0b00: 1-byte, 0b01: 2-byte, 0b11: 4-byte, 0b10: 8-byte
911 // dr0 -> bits{19-16}
912 // dr1 -> bits{23-20}
913 // dr2 -> bits{27-24}
914 // dr3 -> bits{31-28}
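// Worked example: an execute (hardware breakpoint) entry uses rw == 0b00 and
// len == 0b00, so the statement below ORs a zero nibble into bits
// (16 + 4 * hw_index); e.g. for slot 0 only the local enable bit (1 << 0)
// actually changes dr7.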
915 debug_state.__dr7 |= (1 << (2 * hw_index) | 0 << (16 + 4 * hw_index));
917 switch (hw_index) {
918 case 0:
919 debug_state.__dr0 = addr;
920 break;
921 case 1:
922 debug_state.__dr1 = addr;
923 break;
924 case 2:
925 debug_state.__dr2 = addr;
926 break;
927 case 3:
928 debug_state.__dr3 = addr;
929 break;
930 default:
931 assert(0 &&
932 "invalid hardware register index, must be one of 0, 1, 2, or 3");
934 return;
937 uint32_t DNBArchImplX86_64::EnableHardwareBreakpoint(nub_addr_t addr,
938 nub_size_t size,
939 bool also_set_on_task) {
940 DNBLogThreadedIf(LOG_BREAKPOINTS,
941 "DNBArchImplX86_64::EnableHardwareBreakpoint( addr = "
942 "0x%8.8llx, size = %llu )",
943 (uint64_t)addr, (uint64_t)size);
945 const uint32_t num_hw_breakpoints = NumSupportedHardwareBreakpoints();
946 // Read the debug state
947 kern_return_t kret = GetDBGState(false);
949 if (kret != KERN_SUCCESS) {
950 return INVALID_NUB_HW_INDEX;
953 // Check to make sure we have the needed hardware support
954 uint32_t i = 0;
956 DBG &debug_state = m_state.context.dbg;
957 for (i = 0; i < num_hw_breakpoints; ++i) {
958 if (IsWatchpointVacant(debug_state, i)) {
959 break;
963 // See if we found an available hw breakpoint slot above
964 if (i < num_hw_breakpoints) {
965 DNBLogThreadedIf(
966 LOG_BREAKPOINTS,
967 "DNBArchImplX86_64::EnableHardwareBreakpoint( free slot = %u )", i);
969 StartTransForHWP();
971 // Modify our local copy of the debug state, first.
972 SetHardwareBreakpoint(debug_state, i, addr, size);
973 // Now set the watch point in the inferior.
974 kret = SetDBGState(also_set_on_task);
976 DNBLogThreadedIf(LOG_BREAKPOINTS,
977 "DNBArchImplX86_64::"
978 "EnableHardwareBreakpoint() "
979 "SetDBGState() => 0x%8.8x.",
980 kret);
982 if (kret == KERN_SUCCESS) {
983 DNBLogThreadedIf(
984 LOG_BREAKPOINTS,
985 "DNBArchImplX86_64::EnableHardwareBreakpoint( enabled at slot = %u)",
987 return i;
989 // Revert to the previous debug state voluntarily. The transaction
990 // coordinator knows that we have failed.
991 else {
992 m_state.context.dbg = GetDBGCheckpoint();
994 } else {
995 DNBLogThreadedIf(LOG_BREAKPOINTS,
996 "DNBArchImplX86_64::EnableHardwareBreakpoint(addr = "
997 "0x%8.8llx, size = %llu) => all hardware breakpoint "
998 "resources are being used.",
999 (uint64_t)addr, (uint64_t)size);
1002 return INVALID_NUB_HW_INDEX;
1005 bool DNBArchImplX86_64::DisableHardwareBreakpoint(uint32_t hw_index,
1006 bool also_set_on_task) {
1007 kern_return_t kret = GetDBGState(false);
1009 const uint32_t num_hw_points = NumSupportedHardwareBreakpoints();
1010 if (kret == KERN_SUCCESS) {
1011 DBG &debug_state = m_state.context.dbg;
1012 if (hw_index < num_hw_points &&
1013 !IsWatchpointVacant(debug_state, hw_index)) {
1015 StartTransForHWP();
1017 // Modify our local copy of the debug state, first.
1018 ClearWatchpoint(debug_state, hw_index);
1019 // Now disable the watch point in the inferior.
1020 kret = SetDBGState(true);
1021 DNBLogThreadedIf(LOG_WATCHPOINTS,
1022 "DNBArchImplX86_64::DisableHardwareBreakpoint( %u )",
1023 hw_index);
1025 if (kret == KERN_SUCCESS)
1026 return true;
1027 else // Revert to the previous debug state voluntarily. The transaction
1028 // coordinator knows that we have failed.
1029 m_state.context.dbg = GetDBGCheckpoint();
1032 return false;
1035 uint32_t DNBArchImplX86_64::EnableHardwareWatchpoint(nub_addr_t addr,
1036 nub_size_t size, bool read,
1037 bool write,
1038 bool also_set_on_task) {
1039 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::"
1040 "EnableHardwareWatchpoint(addr = 0x%llx, "
1041 "size = %llu, read = %u, write = %u)",
1042 (uint64_t)addr, (uint64_t)size, read, write);
1044 const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();
1046 // Can only watch 1, 2, 4, or 8 bytes.
1047 if (!(size == 1 || size == 2 || size == 4 || size == 8))
1048 return INVALID_NUB_HW_INDEX;
1050 // We must watch for either read or write
1051 if (!read && !write)
1052 return INVALID_NUB_HW_INDEX;
1054 // Read the debug state
1055 kern_return_t kret = GetDBGState(false);
1057 if (kret == KERN_SUCCESS) {
1058 // Check to make sure we have the needed hardware support
1059 uint32_t i = 0;
1061 DBG &debug_state = m_state.context.dbg;
1062 for (i = 0; i < num_hw_watchpoints; ++i) {
1063 if (IsWatchpointVacant(debug_state, i))
1064 break;
1067 // See if we found an available hw breakpoint slot above
1068 if (i < num_hw_watchpoints) {
1069 StartTransForHWP();
1071 // Modify our local copy of the debug state, first.
1072 SetWatchpoint(debug_state, i, addr, size, read, write);
1073 // Now set the watch point in the inferior.
1074 kret = SetDBGState(also_set_on_task);
1075 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::"
1076 "EnableHardwareWatchpoint() "
1077 "SetDBGState() => 0x%8.8x.",
1078 kret);
1080 if (kret == KERN_SUCCESS)
1081 return i;
1082 else // Revert to the previous debug state voluntarily. The transaction
1083 // coordinator knows that we have failed.
1084 m_state.context.dbg = GetDBGCheckpoint();
1085 } else {
1086 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::"
1087 "EnableHardwareWatchpoint(): All "
1088 "hardware resources (%u) are in use.",
1089 num_hw_watchpoints);
1092 return INVALID_NUB_HW_INDEX;
1095 bool DNBArchImplX86_64::DisableHardwareWatchpoint(uint32_t hw_index,
1096 bool also_set_on_task) {
1097 kern_return_t kret = GetDBGState(false);
1099 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
1100 if (kret == KERN_SUCCESS) {
1101 DBG &debug_state = m_state.context.dbg;
1102 if (hw_index < num_hw_points &&
1103 !IsWatchpointVacant(debug_state, hw_index)) {
1104 StartTransForHWP();
1106 // Modify our local copy of the debug state, first.
1107 ClearWatchpoint(debug_state, hw_index);
1108 // Now disable the watch point in the inferior.
1109 kret = SetDBGState(also_set_on_task);
1110 DNBLogThreadedIf(LOG_WATCHPOINTS,
1111 "DNBArchImplX86_64::DisableHardwareWatchpoint( %u )",
1112 hw_index);
1114 if (kret == KERN_SUCCESS)
1115 return true;
1116 else // Revert to the previous debug state voluntarily. The transaction
1117 // coordinator knows that we have failed.
1118 m_state.context.dbg = GetDBGCheckpoint();
1121 return false;
1124 // Iterate through the debug status register; return the index of the first hit.
1125 uint32_t DNBArchImplX86_64::GetHardwareWatchpointHit(nub_addr_t &addr) {
1126 // Read the debug state
1127 kern_return_t kret = GetDBGState(true);
1128 DNBLogThreadedIf(
1129 LOG_WATCHPOINTS,
1130 "DNBArchImplX86_64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.",
1131 kret);
1132 if (kret == KERN_SUCCESS) {
1133 DBG &debug_state = m_state.context.dbg;
1134 uint32_t i, num = NumSupportedHardwareWatchpoints();
1135 for (i = 0; i < num; ++i) {
1136 if (IsWatchpointHit(debug_state, i)) {
1137 addr = GetWatchAddress(debug_state, i);
1138 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::"
1139 "GetHardwareWatchpointHit() found => "
1140 "%u (addr = 0x%llx).",
1141 i, (uint64_t)addr);
1142 return i;
1146 return INVALID_NUB_HW_INDEX;
1149 // Set the single step bit in the processor status register.
1150 kern_return_t DNBArchImplX86_64::EnableHardwareSingleStep(bool enable) {
1151 if (GetGPRState(false) == KERN_SUCCESS) {
1152 const uint32_t trace_bit = 0x100u;
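// 0x100 is the Trap Flag (TF, bit 8) of RFLAGS; while it is set the CPU
// raises a single-step debug exception after each instruction.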
1153 if (enable)
1154 m_state.context.gpr.__rflags |= trace_bit;
1155 else
1156 m_state.context.gpr.__rflags &= ~trace_bit;
1157 return SetGPRState();
1159 return m_state.GetError(e_regSetGPR, Read);
1162 // Register information definitions
1164 enum {
1165 gpr_rax = 0,
1166 gpr_rbx,
1167 gpr_rcx,
1168 gpr_rdx,
1169 gpr_rdi,
1170 gpr_rsi,
1171 gpr_rbp,
1172 gpr_rsp,
1173 gpr_r8,
1174 gpr_r9,
1175 gpr_r10,
1176 gpr_r11,
1177 gpr_r12,
1178 gpr_r13,
1179 gpr_r14,
1180 gpr_r15,
1181 gpr_rip,
1182 gpr_rflags,
1183 gpr_cs,
1184 gpr_fs,
1185 gpr_gs,
1186 gpr_ds,
1187 gpr_es,
1188 gpr_ss,
1189 gpr_gsbase,
1190 gpr_eax,
1191 gpr_ebx,
1192 gpr_ecx,
1193 gpr_edx,
1194 gpr_edi,
1195 gpr_esi,
1196 gpr_ebp,
1197 gpr_esp,
1198 gpr_r8d, // Low 32 bits of r8
1199 gpr_r9d, // Low 32 bits of r9
1200 gpr_r10d, // Low 32 bits of r10
1201 gpr_r11d, // Low 32 bits of r11
1202 gpr_r12d, // Low 32 bits of r12
1203 gpr_r13d, // Low 32 bits of r13
1204 gpr_r14d, // Low 32 bits of r14
1205 gpr_r15d, // Low 32 bits of r15
1206 gpr_ax,
1207 gpr_bx,
1208 gpr_cx,
1209 gpr_dx,
1210 gpr_di,
1211 gpr_si,
1212 gpr_bp,
1213 gpr_sp,
1214 gpr_r8w, // Low 16 bits of r8
1215 gpr_r9w, // Low 16 bits of r9
1216 gpr_r10w, // Low 16 bits of r10
1217 gpr_r11w, // Low 16 bits of r11
1218 gpr_r12w, // Low 16 bits of r12
1219 gpr_r13w, // Low 16 bits of r13
1220 gpr_r14w, // Low 16 bits of r14
1221 gpr_r15w, // Low 16 bits of r15
1222 gpr_ah,
1223 gpr_bh,
1224 gpr_ch,
1225 gpr_dh,
1226 gpr_al,
1227 gpr_bl,
1228 gpr_cl,
1229 gpr_dl,
1230 gpr_dil,
1231 gpr_sil,
1232 gpr_bpl,
1233 gpr_spl,
1234 gpr_r8l, // Low 8 bits of r8
1235 gpr_r9l, // Low 8 bits of r9
1236 gpr_r10l, // Low 8 bits of r10
1237 gpr_r11l, // Low 8 bits of r11
1238 gpr_r12l, // Low 8 bits of r12
1239 gpr_r13l, // Low 8 bits of r13
1240 gpr_r14l, // Low 8 bits of r14
1241 gpr_r15l, // Low 8 bits of r15
1242 k_num_gpr_regs
1245 enum {
1246 fpu_fcw,
1247 fpu_fsw,
1248 fpu_ftw,
1249 fpu_fop,
1250 fpu_ip,
1251 fpu_cs,
1252 fpu_dp,
1253 fpu_ds,
1254 fpu_mxcsr,
1255 fpu_mxcsrmask,
1256 fpu_stmm0,
1257 fpu_stmm1,
1258 fpu_stmm2,
1259 fpu_stmm3,
1260 fpu_stmm4,
1261 fpu_stmm5,
1262 fpu_stmm6,
1263 fpu_stmm7,
1264 fpu_xmm0,
1265 fpu_xmm1,
1266 fpu_xmm2,
1267 fpu_xmm3,
1268 fpu_xmm4,
1269 fpu_xmm5,
1270 fpu_xmm6,
1271 fpu_xmm7,
1272 fpu_xmm8,
1273 fpu_xmm9,
1274 fpu_xmm10,
1275 fpu_xmm11,
1276 fpu_xmm12,
1277 fpu_xmm13,
1278 fpu_xmm14,
1279 fpu_xmm15,
1280 fpu_ymm0,
1281 fpu_ymm1,
1282 fpu_ymm2,
1283 fpu_ymm3,
1284 fpu_ymm4,
1285 fpu_ymm5,
1286 fpu_ymm6,
1287 fpu_ymm7,
1288 fpu_ymm8,
1289 fpu_ymm9,
1290 fpu_ymm10,
1291 fpu_ymm11,
1292 fpu_ymm12,
1293 fpu_ymm13,
1294 fpu_ymm14,
1295 fpu_ymm15,
1296 fpu_k0,
1297 fpu_k1,
1298 fpu_k2,
1299 fpu_k3,
1300 fpu_k4,
1301 fpu_k5,
1302 fpu_k6,
1303 fpu_k7,
1304 fpu_zmm0,
1305 fpu_zmm1,
1306 fpu_zmm2,
1307 fpu_zmm3,
1308 fpu_zmm4,
1309 fpu_zmm5,
1310 fpu_zmm6,
1311 fpu_zmm7,
1312 fpu_zmm8,
1313 fpu_zmm9,
1314 fpu_zmm10,
1315 fpu_zmm11,
1316 fpu_zmm12,
1317 fpu_zmm13,
1318 fpu_zmm14,
1319 fpu_zmm15,
1320 fpu_zmm16,
1321 fpu_zmm17,
1322 fpu_zmm18,
1323 fpu_zmm19,
1324 fpu_zmm20,
1325 fpu_zmm21,
1326 fpu_zmm22,
1327 fpu_zmm23,
1328 fpu_zmm24,
1329 fpu_zmm25,
1330 fpu_zmm26,
1331 fpu_zmm27,
1332 fpu_zmm28,
1333 fpu_zmm29,
1334 fpu_zmm30,
1335 fpu_zmm31,
1336 k_num_fpu_regs,
1338 // Aliases
1339 fpu_fctrl = fpu_fcw,
1340 fpu_fstat = fpu_fsw,
1341 fpu_ftag = fpu_ftw,
1342 fpu_fiseg = fpu_cs,
1343 fpu_fioff = fpu_ip,
1344 fpu_foseg = fpu_ds,
1345 fpu_fooff = fpu_dp
1348 enum {
1349 exc_trapno,
1350 exc_err,
1351 exc_faultvaddr,
1352 k_num_exc_regs,
1355 enum ehframe_dwarf_regnums {
1356 ehframe_dwarf_rax = 0,
1357 ehframe_dwarf_rdx = 1,
1358 ehframe_dwarf_rcx = 2,
1359 ehframe_dwarf_rbx = 3,
1360 ehframe_dwarf_rsi = 4,
1361 ehframe_dwarf_rdi = 5,
1362 ehframe_dwarf_rbp = 6,
1363 ehframe_dwarf_rsp = 7,
1364 ehframe_dwarf_r8,
1365 ehframe_dwarf_r9,
1366 ehframe_dwarf_r10,
1367 ehframe_dwarf_r11,
1368 ehframe_dwarf_r12,
1369 ehframe_dwarf_r13,
1370 ehframe_dwarf_r14,
1371 ehframe_dwarf_r15,
1372 ehframe_dwarf_rip,
1373 ehframe_dwarf_xmm0,
1374 ehframe_dwarf_xmm1,
1375 ehframe_dwarf_xmm2,
1376 ehframe_dwarf_xmm3,
1377 ehframe_dwarf_xmm4,
1378 ehframe_dwarf_xmm5,
1379 ehframe_dwarf_xmm6,
1380 ehframe_dwarf_xmm7,
1381 ehframe_dwarf_xmm8,
1382 ehframe_dwarf_xmm9,
1383 ehframe_dwarf_xmm10,
1384 ehframe_dwarf_xmm11,
1385 ehframe_dwarf_xmm12,
1386 ehframe_dwarf_xmm13,
1387 ehframe_dwarf_xmm14,
1388 ehframe_dwarf_xmm15,
1389 ehframe_dwarf_stmm0,
1390 ehframe_dwarf_stmm1,
1391 ehframe_dwarf_stmm2,
1392 ehframe_dwarf_stmm3,
1393 ehframe_dwarf_stmm4,
1394 ehframe_dwarf_stmm5,
1395 ehframe_dwarf_stmm6,
1396 ehframe_dwarf_stmm7,
1397 ehframe_dwarf_ymm0 = ehframe_dwarf_xmm0,
1398 ehframe_dwarf_ymm1 = ehframe_dwarf_xmm1,
1399 ehframe_dwarf_ymm2 = ehframe_dwarf_xmm2,
1400 ehframe_dwarf_ymm3 = ehframe_dwarf_xmm3,
1401 ehframe_dwarf_ymm4 = ehframe_dwarf_xmm4,
1402 ehframe_dwarf_ymm5 = ehframe_dwarf_xmm5,
1403 ehframe_dwarf_ymm6 = ehframe_dwarf_xmm6,
1404 ehframe_dwarf_ymm7 = ehframe_dwarf_xmm7,
1405 ehframe_dwarf_ymm8 = ehframe_dwarf_xmm8,
1406 ehframe_dwarf_ymm9 = ehframe_dwarf_xmm9,
1407 ehframe_dwarf_ymm10 = ehframe_dwarf_xmm10,
1408 ehframe_dwarf_ymm11 = ehframe_dwarf_xmm11,
1409 ehframe_dwarf_ymm12 = ehframe_dwarf_xmm12,
1410 ehframe_dwarf_ymm13 = ehframe_dwarf_xmm13,
1411 ehframe_dwarf_ymm14 = ehframe_dwarf_xmm14,
1412 ehframe_dwarf_ymm15 = ehframe_dwarf_xmm15,
1413 ehframe_dwarf_zmm0 = ehframe_dwarf_xmm0,
1414 ehframe_dwarf_zmm1 = ehframe_dwarf_xmm1,
1415 ehframe_dwarf_zmm2 = ehframe_dwarf_xmm2,
1416 ehframe_dwarf_zmm3 = ehframe_dwarf_xmm3,
1417 ehframe_dwarf_zmm4 = ehframe_dwarf_xmm4,
1418 ehframe_dwarf_zmm5 = ehframe_dwarf_xmm5,
1419 ehframe_dwarf_zmm6 = ehframe_dwarf_xmm6,
1420 ehframe_dwarf_zmm7 = ehframe_dwarf_xmm7,
1421 ehframe_dwarf_zmm8 = ehframe_dwarf_xmm8,
1422 ehframe_dwarf_zmm9 = ehframe_dwarf_xmm9,
1423 ehframe_dwarf_zmm10 = ehframe_dwarf_xmm10,
1424 ehframe_dwarf_zmm11 = ehframe_dwarf_xmm11,
1425 ehframe_dwarf_zmm12 = ehframe_dwarf_xmm12,
1426 ehframe_dwarf_zmm13 = ehframe_dwarf_xmm13,
1427 ehframe_dwarf_zmm14 = ehframe_dwarf_xmm14,
1428 ehframe_dwarf_zmm15 = ehframe_dwarf_xmm15,
1429 ehframe_dwarf_zmm16 = 67,
1430 ehframe_dwarf_zmm17,
1431 ehframe_dwarf_zmm18,
1432 ehframe_dwarf_zmm19,
1433 ehframe_dwarf_zmm20,
1434 ehframe_dwarf_zmm21,
1435 ehframe_dwarf_zmm22,
1436 ehframe_dwarf_zmm23,
1437 ehframe_dwarf_zmm24,
1438 ehframe_dwarf_zmm25,
1439 ehframe_dwarf_zmm26,
1440 ehframe_dwarf_zmm27,
1441 ehframe_dwarf_zmm28,
1442 ehframe_dwarf_zmm29,
1443 ehframe_dwarf_zmm30,
1444 ehframe_dwarf_zmm31,
1445 ehframe_dwarf_k0 = 118,
1446 ehframe_dwarf_k1,
1447 ehframe_dwarf_k2,
1448 ehframe_dwarf_k3,
1449 ehframe_dwarf_k4,
1450 ehframe_dwarf_k5,
1451 ehframe_dwarf_k6,
1452 ehframe_dwarf_k7,
1455 enum debugserver_regnums {
1456 debugserver_rax = 0,
1457 debugserver_rbx = 1,
1458 debugserver_rcx = 2,
1459 debugserver_rdx = 3,
1460 debugserver_rsi = 4,
1461 debugserver_rdi = 5,
1462 debugserver_rbp = 6,
1463 debugserver_rsp = 7,
1464 debugserver_r8 = 8,
1465 debugserver_r9 = 9,
1466 debugserver_r10 = 10,
1467 debugserver_r11 = 11,
1468 debugserver_r12 = 12,
1469 debugserver_r13 = 13,
1470 debugserver_r14 = 14,
1471 debugserver_r15 = 15,
1472 debugserver_rip = 16,
1473 debugserver_rflags = 17,
1474 debugserver_cs = 18,
1475 debugserver_ss = 19,
1476 debugserver_ds = 20,
1477 debugserver_es = 21,
1478 debugserver_fs = 22,
1479 debugserver_gs = 23,
1480 debugserver_stmm0 = 24,
1481 debugserver_stmm1 = 25,
1482 debugserver_stmm2 = 26,
1483 debugserver_stmm3 = 27,
1484 debugserver_stmm4 = 28,
1485 debugserver_stmm5 = 29,
1486 debugserver_stmm6 = 30,
1487 debugserver_stmm7 = 31,
1488 debugserver_fctrl = 32,
1489 debugserver_fcw = debugserver_fctrl,
1490 debugserver_fstat = 33,
1491 debugserver_fsw = debugserver_fstat,
1492 debugserver_ftag = 34,
1493 debugserver_ftw = debugserver_ftag,
1494 debugserver_fiseg = 35,
1495 debugserver_fpu_cs = debugserver_fiseg,
1496 debugserver_fioff = 36,
1497 debugserver_ip = debugserver_fioff,
1498 debugserver_foseg = 37,
1499 debugserver_fpu_ds = debugserver_foseg,
1500 debugserver_fooff = 38,
1501 debugserver_dp = debugserver_fooff,
1502 debugserver_fop = 39,
1503 debugserver_xmm0 = 40,
1504 debugserver_xmm1 = 41,
1505 debugserver_xmm2 = 42,
1506 debugserver_xmm3 = 43,
1507 debugserver_xmm4 = 44,
1508 debugserver_xmm5 = 45,
1509 debugserver_xmm6 = 46,
1510 debugserver_xmm7 = 47,
1511 debugserver_xmm8 = 48,
1512 debugserver_xmm9 = 49,
1513 debugserver_xmm10 = 50,
1514 debugserver_xmm11 = 51,
1515 debugserver_xmm12 = 52,
1516 debugserver_xmm13 = 53,
1517 debugserver_xmm14 = 54,
1518 debugserver_xmm15 = 55,
1519 debugserver_mxcsr = 56,
1520 debugserver_ymm0 = debugserver_xmm0,
1521 debugserver_ymm1 = debugserver_xmm1,
1522 debugserver_ymm2 = debugserver_xmm2,
1523 debugserver_ymm3 = debugserver_xmm3,
1524 debugserver_ymm4 = debugserver_xmm4,
1525 debugserver_ymm5 = debugserver_xmm5,
1526 debugserver_ymm6 = debugserver_xmm6,
1527 debugserver_ymm7 = debugserver_xmm7,
1528 debugserver_ymm8 = debugserver_xmm8,
1529 debugserver_ymm9 = debugserver_xmm9,
1530 debugserver_ymm10 = debugserver_xmm10,
1531 debugserver_ymm11 = debugserver_xmm11,
1532 debugserver_ymm12 = debugserver_xmm12,
1533 debugserver_ymm13 = debugserver_xmm13,
1534 debugserver_ymm14 = debugserver_xmm14,
1535 debugserver_ymm15 = debugserver_xmm15,
1536 debugserver_zmm0 = debugserver_xmm0,
1537 debugserver_zmm1 = debugserver_xmm1,
1538 debugserver_zmm2 = debugserver_xmm2,
1539 debugserver_zmm3 = debugserver_xmm3,
1540 debugserver_zmm4 = debugserver_xmm4,
1541 debugserver_zmm5 = debugserver_xmm5,
1542 debugserver_zmm6 = debugserver_xmm6,
1543 debugserver_zmm7 = debugserver_xmm7,
1544 debugserver_zmm8 = debugserver_xmm8,
1545 debugserver_zmm9 = debugserver_xmm9,
1546 debugserver_zmm10 = debugserver_xmm10,
1547 debugserver_zmm11 = debugserver_xmm11,
1548 debugserver_zmm12 = debugserver_xmm12,
1549 debugserver_zmm13 = debugserver_xmm13,
1550 debugserver_zmm14 = debugserver_xmm14,
1551 debugserver_zmm15 = debugserver_xmm15,
1552 debugserver_zmm16 = 67,
1553 debugserver_zmm17 = 68,
1554 debugserver_zmm18 = 69,
1555 debugserver_zmm19 = 70,
1556 debugserver_zmm20 = 71,
1557 debugserver_zmm21 = 72,
1558 debugserver_zmm22 = 73,
1559 debugserver_zmm23 = 74,
1560 debugserver_zmm24 = 75,
1561 debugserver_zmm25 = 76,
1562 debugserver_zmm26 = 77,
1563 debugserver_zmm27 = 78,
1564 debugserver_zmm28 = 79,
1565 debugserver_zmm29 = 80,
1566 debugserver_zmm30 = 81,
1567 debugserver_zmm31 = 82,
1568 debugserver_k0 = 118,
1569 debugserver_k1 = 119,
1570 debugserver_k2 = 120,
1571 debugserver_k3 = 121,
1572 debugserver_k4 = 122,
1573 debugserver_k5 = 123,
1574 debugserver_k6 = 124,
1575 debugserver_k7 = 125,
1576 debugserver_gsbase = 126,
1579 #define GPR_OFFSET(reg) (offsetof(DNBArchImplX86_64::GPR, __##reg))
1580 #define FPU_OFFSET(reg) \
1581 (offsetof(DNBArchImplX86_64::FPU, __fpu_##reg) + \
1582 offsetof(DNBArchImplX86_64::Context, fpu.no_avx))
1583 #define AVX_OFFSET(reg) \
1584 (offsetof(DNBArchImplX86_64::AVX, __fpu_##reg) + \
1585 offsetof(DNBArchImplX86_64::Context, fpu.avx))
1586 #define AVX512F_OFFSET(reg) \
1587 (offsetof(DNBArchImplX86_64::AVX512F, __fpu_##reg) + \
1588 offsetof(DNBArchImplX86_64::Context, fpu.avx512f))
1589 #define EXC_OFFSET(reg) \
1590 (offsetof(DNBArchImplX86_64::EXC, __##reg) + \
1591 offsetof(DNBArchImplX86_64::Context, exc))
1592 #define AVX_OFFSET_YMM(n) (AVX_OFFSET(ymmh0) + (32 * n))
1593 #define AVX512F_OFFSET_ZMM(n) (AVX512F_OFFSET(zmmh0) + (64 * n))
1595 #define GPR_SIZE(reg) (sizeof(((DNBArchImplX86_64::GPR *)NULL)->__##reg))
1596 #define FPU_SIZE_UINT(reg) \
1597 (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg))
1598 #define FPU_SIZE_MMST(reg) \
1599 (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__mmst_reg))
1600 #define FPU_SIZE_XMM(reg) \
1601 (sizeof(((DNBArchImplX86_64::FPU *)NULL)->__fpu_##reg.__xmm_reg))
1602 #define FPU_SIZE_YMM(reg) (32)
1603 #define FPU_SIZE_ZMM(reg) (64)
1604 #define EXC_SIZE(reg) (sizeof(((DNBArchImplX86_64::EXC *)NULL)->__##reg))
1606 // These macros will auto define the register name, alt name, register size,
1607 // register offset, encoding, format and native register. This ensures that
1608 // the register state structures are defined correctly and have the correct
1609 // sizes and offsets.
1610 #define DEFINE_GPR(reg) \
1612 e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, GPR_SIZE(reg), \
1613 GPR_OFFSET(reg), ehframe_dwarf_##reg, ehframe_dwarf_##reg, \
1614 INVALID_NUB_REGNUM, debugserver_##reg, NULL, g_invalidate_##reg \
1616 #define DEFINE_GPR_ALT(reg, alt, gen) \
1618 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), \
1619 GPR_OFFSET(reg), ehframe_dwarf_##reg, ehframe_dwarf_##reg, gen, \
1620 debugserver_##reg, NULL, g_invalidate_##reg \
1622 #define DEFINE_GPR_ALT2(reg, alt) \
1624 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), \
1625 GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
1626 INVALID_NUB_REGNUM, debugserver_##reg, NULL, NULL \
1628 #define DEFINE_GPR_ALT3(reg, alt, gen) \
1630 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), \
1631 GPR_OFFSET(reg), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, gen, \
1632 debugserver_##reg, NULL, NULL \
1634 #define DEFINE_GPR_ALT4(reg, alt, gen) \
1636 e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, GPR_SIZE(reg), \
1637 GPR_OFFSET(reg), ehframe_dwarf_##reg, ehframe_dwarf_##reg, gen, \
1638 debugserver_##reg, NULL, NULL \
1641 #define DEFINE_GPR_PSEUDO_32(reg32, reg64) \
1643 e_regSetGPR, gpr_##reg32, #reg32, NULL, Uint, Hex, 4, 0, \
1644 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
1645 INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 \
1647 #define DEFINE_GPR_PSEUDO_16(reg16, reg64) \
1649 e_regSetGPR, gpr_##reg16, #reg16, NULL, Uint, Hex, 2, 0, \
1650 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
1651 INVALID_NUB_REGNUM, g_contained_##reg64, g_invalidate_##reg64 \
1653 #define DEFINE_GPR_PSEUDO_8H(reg8, reg64) \
1655 e_regSetGPR, gpr_##reg8, #reg8, NULL, Uint, Hex, 1, 1, INVALID_NUB_REGNUM, \
1656 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
1657 g_contained_##reg64, g_invalidate_##reg64 \
1659 #define DEFINE_GPR_PSEUDO_8L(reg8, reg64) \
1661 e_regSetGPR, gpr_##reg8, #reg8, NULL, Uint, Hex, 1, 0, INVALID_NUB_REGNUM, \
1662 INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
1663 g_contained_##reg64, g_invalidate_##reg64 \
1666 // General purpose registers for 64 bit
1668 const char *g_contained_rax[] = {"rax", NULL};
1669 const char *g_contained_rbx[] = {"rbx", NULL};
1670 const char *g_contained_rcx[] = {"rcx", NULL};
1671 const char *g_contained_rdx[] = {"rdx", NULL};
1672 const char *g_contained_rdi[] = {"rdi", NULL};
1673 const char *g_contained_rsi[] = {"rsi", NULL};
1674 const char *g_contained_rbp[] = {"rbp", NULL};
1675 const char *g_contained_rsp[] = {"rsp", NULL};
1676 const char *g_contained_r8[] = {"r8", NULL};
1677 const char *g_contained_r9[] = {"r9", NULL};
1678 const char *g_contained_r10[] = {"r10", NULL};
1679 const char *g_contained_r11[] = {"r11", NULL};
1680 const char *g_contained_r12[] = {"r12", NULL};
1681 const char *g_contained_r13[] = {"r13", NULL};
1682 const char *g_contained_r14[] = {"r14", NULL};
1683 const char *g_contained_r15[] = {"r15", NULL};
1685 const char *g_invalidate_rax[] = {"rax", "eax", "ax", "ah", "al", NULL};
1686 const char *g_invalidate_rbx[] = {"rbx", "ebx", "bx", "bh", "bl", NULL};
1687 const char *g_invalidate_rcx[] = {"rcx", "ecx", "cx", "ch", "cl", NULL};
1688 const char *g_invalidate_rdx[] = {"rdx", "edx", "dx", "dh", "dl", NULL};
1689 const char *g_invalidate_rdi[] = {"rdi", "edi", "di", "dil", NULL};
1690 const char *g_invalidate_rsi[] = {"rsi", "esi", "si", "sil", NULL};
1691 const char *g_invalidate_rbp[] = {"rbp", "ebp", "bp", "bpl", NULL};
1692 const char *g_invalidate_rsp[] = {"rsp", "esp", "sp", "spl", NULL};
1693 const char *g_invalidate_r8[] = {"r8", "r8d", "r8w", "r8l", NULL};
1694 const char *g_invalidate_r9[] = {"r9", "r9d", "r9w", "r9l", NULL};
1695 const char *g_invalidate_r10[] = {"r10", "r10d", "r10w", "r10l", NULL};
1696 const char *g_invalidate_r11[] = {"r11", "r11d", "r11w", "r11l", NULL};
1697 const char *g_invalidate_r12[] = {"r12", "r12d", "r12w", "r12l", NULL};
1698 const char *g_invalidate_r13[] = {"r13", "r13d", "r13w", "r13l", NULL};
1699 const char *g_invalidate_r14[] = {"r14", "r14d", "r14w", "r14l", NULL};
1700 const char *g_invalidate_r15[] = {"r15", "r15d", "r15w", "r15l", NULL};
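// GPR register info table. The entries that back real context fields come
// first, in the order the accessors assume matches the GPR context (they are
// read and written by indexing the context with the register number); the
// 32/16/8-bit pseudo registers that alias the 64-bit registers follow. For
// example, DEFINE_GPR_PSEUDO_32(eax, rax) describes "eax" as a 4-byte value
// at offset 0 within "rax" (value_regs = g_contained_rax, invalidating
// g_invalidate_rax), while DEFINE_GPR_PSEUDO_8H(ah, rax) places "ah" at byte
// offset 1.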
1702 const DNBRegisterInfo DNBArchImplX86_64::g_gpr_registers[] = {
1703 DEFINE_GPR(rax),
1704 DEFINE_GPR(rbx),
1705 DEFINE_GPR_ALT(rcx, "arg4", GENERIC_REGNUM_ARG4),
1706 DEFINE_GPR_ALT(rdx, "arg3", GENERIC_REGNUM_ARG3),
1707 DEFINE_GPR_ALT(rdi, "arg1", GENERIC_REGNUM_ARG1),
1708 DEFINE_GPR_ALT(rsi, "arg2", GENERIC_REGNUM_ARG2),
1709 DEFINE_GPR_ALT(rbp, "fp", GENERIC_REGNUM_FP),
1710 DEFINE_GPR_ALT(rsp, "sp", GENERIC_REGNUM_SP),
1711 DEFINE_GPR_ALT(r8, "arg5", GENERIC_REGNUM_ARG5),
1712 DEFINE_GPR_ALT(r9, "arg6", GENERIC_REGNUM_ARG6),
1713 DEFINE_GPR(r10),
1714 DEFINE_GPR(r11),
1715 DEFINE_GPR(r12),
1716 DEFINE_GPR(r13),
1717 DEFINE_GPR(r14),
1718 DEFINE_GPR(r15),
1719 DEFINE_GPR_ALT4(rip, "pc", GENERIC_REGNUM_PC),
1720 DEFINE_GPR_ALT3(rflags, "flags", GENERIC_REGNUM_FLAGS),
1721 DEFINE_GPR_ALT2(cs, NULL),
1722 DEFINE_GPR_ALT2(fs, NULL),
1723 DEFINE_GPR_ALT2(gs, NULL),
1724 DEFINE_GPR_ALT2(ds, NULL),
1725 DEFINE_GPR_ALT2(es, NULL),
1726 DEFINE_GPR_ALT2(ss, NULL),
1727 DEFINE_GPR_ALT2(gsbase, NULL),
1728 DEFINE_GPR_PSEUDO_32(eax, rax),
1729 DEFINE_GPR_PSEUDO_32(ebx, rbx),
1730 DEFINE_GPR_PSEUDO_32(ecx, rcx),
1731 DEFINE_GPR_PSEUDO_32(edx, rdx),
1732 DEFINE_GPR_PSEUDO_32(edi, rdi),
1733 DEFINE_GPR_PSEUDO_32(esi, rsi),
1734 DEFINE_GPR_PSEUDO_32(ebp, rbp),
1735 DEFINE_GPR_PSEUDO_32(esp, rsp),
1736 DEFINE_GPR_PSEUDO_32(r8d, r8),
1737 DEFINE_GPR_PSEUDO_32(r9d, r9),
1738 DEFINE_GPR_PSEUDO_32(r10d, r10),
1739 DEFINE_GPR_PSEUDO_32(r11d, r11),
1740 DEFINE_GPR_PSEUDO_32(r12d, r12),
1741 DEFINE_GPR_PSEUDO_32(r13d, r13),
1742 DEFINE_GPR_PSEUDO_32(r14d, r14),
1743 DEFINE_GPR_PSEUDO_32(r15d, r15),
1744 DEFINE_GPR_PSEUDO_16(ax, rax),
1745 DEFINE_GPR_PSEUDO_16(bx, rbx),
1746 DEFINE_GPR_PSEUDO_16(cx, rcx),
1747 DEFINE_GPR_PSEUDO_16(dx, rdx),
1748 DEFINE_GPR_PSEUDO_16(di, rdi),
1749 DEFINE_GPR_PSEUDO_16(si, rsi),
1750 DEFINE_GPR_PSEUDO_16(bp, rbp),
1751 DEFINE_GPR_PSEUDO_16(sp, rsp),
1752 DEFINE_GPR_PSEUDO_16(r8w, r8),
1753 DEFINE_GPR_PSEUDO_16(r9w, r9),
1754 DEFINE_GPR_PSEUDO_16(r10w, r10),
1755 DEFINE_GPR_PSEUDO_16(r11w, r11),
1756 DEFINE_GPR_PSEUDO_16(r12w, r12),
1757 DEFINE_GPR_PSEUDO_16(r13w, r13),
1758 DEFINE_GPR_PSEUDO_16(r14w, r14),
1759 DEFINE_GPR_PSEUDO_16(r15w, r15),
1760 DEFINE_GPR_PSEUDO_8H(ah, rax),
1761 DEFINE_GPR_PSEUDO_8H(bh, rbx),
1762 DEFINE_GPR_PSEUDO_8H(ch, rcx),
1763 DEFINE_GPR_PSEUDO_8H(dh, rdx),
1764 DEFINE_GPR_PSEUDO_8L(al, rax),
1765 DEFINE_GPR_PSEUDO_8L(bl, rbx),
1766 DEFINE_GPR_PSEUDO_8L(cl, rcx),
1767 DEFINE_GPR_PSEUDO_8L(dl, rdx),
1768 DEFINE_GPR_PSEUDO_8L(dil, rdi),
1769 DEFINE_GPR_PSEUDO_8L(sil, rsi),
1770 DEFINE_GPR_PSEUDO_8L(bpl, rbp),
1771 DEFINE_GPR_PSEUDO_8L(spl, rsp),
1772 DEFINE_GPR_PSEUDO_8L(r8l, r8),
1773 DEFINE_GPR_PSEUDO_8L(r9l, r9),
1774 DEFINE_GPR_PSEUDO_8L(r10l, r10),
1775 DEFINE_GPR_PSEUDO_8L(r11l, r11),
1776 DEFINE_GPR_PSEUDO_8L(r12l, r12),
1777 DEFINE_GPR_PSEUDO_8L(r13l, r13),
1778 DEFINE_GPR_PSEUDO_8L(r14l, r14),
1779 DEFINE_GPR_PSEUDO_8L(r15l, r15)};
1781 // Floating point registers 64 bit
1782 const DNBRegisterInfo DNBArchImplX86_64::g_fpu_registers_no_avx[] = {
1783 {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw),
1784 FPU_OFFSET(fcw), -1U, -1U, -1U, -1U, NULL, NULL},
1785 {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw),
1786 FPU_OFFSET(fsw), -1U, -1U, -1U, -1U, NULL, NULL},
1787 {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex, 2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */,
1788 FPU_OFFSET(ftw), -1U, -1U, -1U, -1U, NULL, NULL},
1789 {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop),
1790 FPU_OFFSET(fop), -1U, -1U, -1U, -1U, NULL, NULL},
1791 {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip),
1792 FPU_OFFSET(ip), -1U, -1U, -1U, -1U, NULL, NULL},
1793 {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs),
1794 FPU_OFFSET(cs), -1U, -1U, -1U, -1U, NULL, NULL},
1795 {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp),
1796 FPU_OFFSET(dp), -1U, -1U, -1U, -1U, NULL, NULL},
1797 {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds),
1798 FPU_OFFSET(ds), -1U, -1U, -1U, -1U, NULL, NULL},
1799 {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr),
1800 FPU_OFFSET(mxcsr), -1U, -1U, -1U, -1U, NULL, NULL},
1801 {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex,
1802 FPU_SIZE_UINT(mxcsrmask), FPU_OFFSET(mxcsrmask), -1U, -1U, -1U, -1U, NULL,
1803 NULL},
1805 {e_regSetFPU, fpu_stmm0, "stmm0", "st0", Vector, VectorOfUInt8,
1806 FPU_SIZE_MMST(stmm0), FPU_OFFSET(stmm0), ehframe_dwarf_stmm0,
1807 ehframe_dwarf_stmm0, -1U, debugserver_stmm0, NULL, NULL},
1808 {e_regSetFPU, fpu_stmm1, "stmm1", "st1", Vector, VectorOfUInt8,
1809 FPU_SIZE_MMST(stmm1), FPU_OFFSET(stmm1), ehframe_dwarf_stmm1,
1810 ehframe_dwarf_stmm1, -1U, debugserver_stmm1, NULL, NULL},
1811 {e_regSetFPU, fpu_stmm2, "stmm2", "st2", Vector, VectorOfUInt8,
1812 FPU_SIZE_MMST(stmm2), FPU_OFFSET(stmm2), ehframe_dwarf_stmm2,
1813 ehframe_dwarf_stmm2, -1U, debugserver_stmm2, NULL, NULL},
1814 {e_regSetFPU, fpu_stmm3, "stmm3", "st3", Vector, VectorOfUInt8,
1815 FPU_SIZE_MMST(stmm3), FPU_OFFSET(stmm3), ehframe_dwarf_stmm3,
1816 ehframe_dwarf_stmm3, -1U, debugserver_stmm3, NULL, NULL},
1817 {e_regSetFPU, fpu_stmm4, "stmm4", "st4", Vector, VectorOfUInt8,
1818 FPU_SIZE_MMST(stmm4), FPU_OFFSET(stmm4), ehframe_dwarf_stmm4,
1819 ehframe_dwarf_stmm4, -1U, debugserver_stmm4, NULL, NULL},
1820 {e_regSetFPU, fpu_stmm5, "stmm5", "st5", Vector, VectorOfUInt8,
1821 FPU_SIZE_MMST(stmm5), FPU_OFFSET(stmm5), ehframe_dwarf_stmm5,
1822 ehframe_dwarf_stmm5, -1U, debugserver_stmm5, NULL, NULL},
1823 {e_regSetFPU, fpu_stmm6, "stmm6", "st6", Vector, VectorOfUInt8,
1824 FPU_SIZE_MMST(stmm6), FPU_OFFSET(stmm6), ehframe_dwarf_stmm6,
1825 ehframe_dwarf_stmm6, -1U, debugserver_stmm6, NULL, NULL},
1826 {e_regSetFPU, fpu_stmm7, "stmm7", "st7", Vector, VectorOfUInt8,
1827 FPU_SIZE_MMST(stmm7), FPU_OFFSET(stmm7), ehframe_dwarf_stmm7,
1828 ehframe_dwarf_stmm7, -1U, debugserver_stmm7, NULL, NULL},
1830 {e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8,
1831 FPU_SIZE_XMM(xmm0), FPU_OFFSET(xmm0), ehframe_dwarf_xmm0,
1832 ehframe_dwarf_xmm0, -1U, debugserver_xmm0, NULL, NULL},
1833 {e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8,
1834 FPU_SIZE_XMM(xmm1), FPU_OFFSET(xmm1), ehframe_dwarf_xmm1,
1835 ehframe_dwarf_xmm1, -1U, debugserver_xmm1, NULL, NULL},
1836 {e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8,
1837 FPU_SIZE_XMM(xmm2), FPU_OFFSET(xmm2), ehframe_dwarf_xmm2,
1838 ehframe_dwarf_xmm2, -1U, debugserver_xmm2, NULL, NULL},
1839 {e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8,
1840 FPU_SIZE_XMM(xmm3), FPU_OFFSET(xmm3), ehframe_dwarf_xmm3,
1841 ehframe_dwarf_xmm3, -1U, debugserver_xmm3, NULL, NULL},
1842 {e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8,
1843 FPU_SIZE_XMM(xmm4), FPU_OFFSET(xmm4), ehframe_dwarf_xmm4,
1844 ehframe_dwarf_xmm4, -1U, debugserver_xmm4, NULL, NULL},
1845 {e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8,
1846 FPU_SIZE_XMM(xmm5), FPU_OFFSET(xmm5), ehframe_dwarf_xmm5,
1847 ehframe_dwarf_xmm5, -1U, debugserver_xmm5, NULL, NULL},
1848 {e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8,
1849 FPU_SIZE_XMM(xmm6), FPU_OFFSET(xmm6), ehframe_dwarf_xmm6,
1850 ehframe_dwarf_xmm6, -1U, debugserver_xmm6, NULL, NULL},
1851 {e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8,
1852 FPU_SIZE_XMM(xmm7), FPU_OFFSET(xmm7), ehframe_dwarf_xmm7,
1853 ehframe_dwarf_xmm7, -1U, debugserver_xmm7, NULL, NULL},
1854 {e_regSetFPU, fpu_xmm8, "xmm8", NULL, Vector, VectorOfUInt8,
1855 FPU_SIZE_XMM(xmm8), FPU_OFFSET(xmm8), ehframe_dwarf_xmm8,
1856 ehframe_dwarf_xmm8, -1U, debugserver_xmm8, NULL, NULL},
1857 {e_regSetFPU, fpu_xmm9, "xmm9", NULL, Vector, VectorOfUInt8,
1858 FPU_SIZE_XMM(xmm9), FPU_OFFSET(xmm9), ehframe_dwarf_xmm9,
1859 ehframe_dwarf_xmm9, -1U, debugserver_xmm9, NULL, NULL},
1860 {e_regSetFPU, fpu_xmm10, "xmm10", NULL, Vector, VectorOfUInt8,
1861 FPU_SIZE_XMM(xmm10), FPU_OFFSET(xmm10), ehframe_dwarf_xmm10,
1862 ehframe_dwarf_xmm10, -1U, debugserver_xmm10, NULL, NULL},
1863 {e_regSetFPU, fpu_xmm11, "xmm11", NULL, Vector, VectorOfUInt8,
1864 FPU_SIZE_XMM(xmm11), FPU_OFFSET(xmm11), ehframe_dwarf_xmm11,
1865 ehframe_dwarf_xmm11, -1U, debugserver_xmm11, NULL, NULL},
1866 {e_regSetFPU, fpu_xmm12, "xmm12", NULL, Vector, VectorOfUInt8,
1867 FPU_SIZE_XMM(xmm12), FPU_OFFSET(xmm12), ehframe_dwarf_xmm12,
1868 ehframe_dwarf_xmm12, -1U, debugserver_xmm12, NULL, NULL},
1869 {e_regSetFPU, fpu_xmm13, "xmm13", NULL, Vector, VectorOfUInt8,
1870 FPU_SIZE_XMM(xmm13), FPU_OFFSET(xmm13), ehframe_dwarf_xmm13,
1871 ehframe_dwarf_xmm13, -1U, debugserver_xmm13, NULL, NULL},
1872 {e_regSetFPU, fpu_xmm14, "xmm14", NULL, Vector, VectorOfUInt8,
1873 FPU_SIZE_XMM(xmm14), FPU_OFFSET(xmm14), ehframe_dwarf_xmm14,
1874 ehframe_dwarf_xmm14, -1U, debugserver_xmm14, NULL, NULL},
1875 {e_regSetFPU, fpu_xmm15, "xmm15", NULL, Vector, VectorOfUInt8,
1876 FPU_SIZE_XMM(xmm15), FPU_OFFSET(xmm15), ehframe_dwarf_xmm15,
1877 ehframe_dwarf_xmm15, -1U, debugserver_xmm15, NULL, NULL},
1880 static const char *g_contained_ymm0[] = {"ymm0", NULL};
1881 static const char *g_contained_ymm1[] = {"ymm1", NULL};
1882 static const char *g_contained_ymm2[] = {"ymm2", NULL};
1883 static const char *g_contained_ymm3[] = {"ymm3", NULL};
1884 static const char *g_contained_ymm4[] = {"ymm4", NULL};
1885 static const char *g_contained_ymm5[] = {"ymm5", NULL};
1886 static const char *g_contained_ymm6[] = {"ymm6", NULL};
1887 static const char *g_contained_ymm7[] = {"ymm7", NULL};
1888 static const char *g_contained_ymm8[] = {"ymm8", NULL};
1889 static const char *g_contained_ymm9[] = {"ymm9", NULL};
1890 static const char *g_contained_ymm10[] = {"ymm10", NULL};
1891 static const char *g_contained_ymm11[] = {"ymm11", NULL};
1892 static const char *g_contained_ymm12[] = {"ymm12", NULL};
1893 static const char *g_contained_ymm13[] = {"ymm13", NULL};
1894 static const char *g_contained_ymm14[] = {"ymm14", NULL};
1895 static const char *g_contained_ymm15[] = {"ymm15", NULL};
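// AVX variant of the FPU register table. Each xmmN is described at offset 0
// as the low 128 bits of the corresponding ymmN (its value_regs list is
// g_contained_ymmN), so only the ymm registers contribute storage of their
// own to the register context.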
1897 const DNBRegisterInfo DNBArchImplX86_64::g_fpu_registers_avx[] = {
1898 {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw),
1899 AVX_OFFSET(fcw), -1U, -1U, -1U, -1U, NULL, NULL},
1900 {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw),
1901 AVX_OFFSET(fsw), -1U, -1U, -1U, -1U, NULL, NULL},
1902 {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex, 2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */,
1903 AVX_OFFSET(ftw), -1U, -1U, -1U, -1U, NULL, NULL},
1904 {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop),
1905 AVX_OFFSET(fop), -1U, -1U, -1U, -1U, NULL, NULL},
1906 {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip),
1907 AVX_OFFSET(ip), -1U, -1U, -1U, -1U, NULL, NULL},
1908 {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs),
1909 AVX_OFFSET(cs), -1U, -1U, -1U, -1U, NULL, NULL},
1910 {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp),
1911 AVX_OFFSET(dp), -1U, -1U, -1U, -1U, NULL, NULL},
1912 {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds),
1913 AVX_OFFSET(ds), -1U, -1U, -1U, -1U, NULL, NULL},
1914 {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr),
1915 AVX_OFFSET(mxcsr), -1U, -1U, -1U, -1U, NULL, NULL},
1916 {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex,
1917 FPU_SIZE_UINT(mxcsrmask), AVX_OFFSET(mxcsrmask), -1U, -1U, -1U, -1U, NULL,
1918 NULL},
1920 {e_regSetFPU, fpu_stmm0, "stmm0", "st0", Vector, VectorOfUInt8,
1921 FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), ehframe_dwarf_stmm0,
1922 ehframe_dwarf_stmm0, -1U, debugserver_stmm0, NULL, NULL},
1923 {e_regSetFPU, fpu_stmm1, "stmm1", "st1", Vector, VectorOfUInt8,
1924 FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), ehframe_dwarf_stmm1,
1925 ehframe_dwarf_stmm1, -1U, debugserver_stmm1, NULL, NULL},
1926 {e_regSetFPU, fpu_stmm2, "stmm2", "st2", Vector, VectorOfUInt8,
1927 FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), ehframe_dwarf_stmm2,
1928 ehframe_dwarf_stmm2, -1U, debugserver_stmm2, NULL, NULL},
1929 {e_regSetFPU, fpu_stmm3, "stmm3", "st3", Vector, VectorOfUInt8,
1930 FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), ehframe_dwarf_stmm3,
1931 ehframe_dwarf_stmm3, -1U, debugserver_stmm3, NULL, NULL},
1932 {e_regSetFPU, fpu_stmm4, "stmm4", "st4", Vector, VectorOfUInt8,
1933 FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), ehframe_dwarf_stmm4,
1934 ehframe_dwarf_stmm4, -1U, debugserver_stmm4, NULL, NULL},
1935 {e_regSetFPU, fpu_stmm5, "stmm5", "st5", Vector, VectorOfUInt8,
1936 FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), ehframe_dwarf_stmm5,
1937 ehframe_dwarf_stmm5, -1U, debugserver_stmm5, NULL, NULL},
1938 {e_regSetFPU, fpu_stmm6, "stmm6", "st6", Vector, VectorOfUInt8,
1939 FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), ehframe_dwarf_stmm6,
1940 ehframe_dwarf_stmm6, -1U, debugserver_stmm6, NULL, NULL},
1941 {e_regSetFPU, fpu_stmm7, "stmm7", "st7", Vector, VectorOfUInt8,
1942 FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), ehframe_dwarf_stmm7,
1943 ehframe_dwarf_stmm7, -1U, debugserver_stmm7, NULL, NULL},
1945 {e_regSetFPU, fpu_ymm0, "ymm0", NULL, Vector, VectorOfUInt8,
1946 FPU_SIZE_YMM(ymm0), AVX_OFFSET_YMM(0), ehframe_dwarf_ymm0,
1947 ehframe_dwarf_ymm0, -1U, debugserver_ymm0, NULL, NULL},
1948 {e_regSetFPU, fpu_ymm1, "ymm1", NULL, Vector, VectorOfUInt8,
1949 FPU_SIZE_YMM(ymm1), AVX_OFFSET_YMM(1), ehframe_dwarf_ymm1,
1950 ehframe_dwarf_ymm1, -1U, debugserver_ymm1, NULL, NULL},
1951 {e_regSetFPU, fpu_ymm2, "ymm2", NULL, Vector, VectorOfUInt8,
1952 FPU_SIZE_YMM(ymm2), AVX_OFFSET_YMM(2), ehframe_dwarf_ymm2,
1953 ehframe_dwarf_ymm2, -1U, debugserver_ymm2, NULL, NULL},
1954 {e_regSetFPU, fpu_ymm3, "ymm3", NULL, Vector, VectorOfUInt8,
1955 FPU_SIZE_YMM(ymm3), AVX_OFFSET_YMM(3), ehframe_dwarf_ymm3,
1956 ehframe_dwarf_ymm3, -1U, debugserver_ymm3, NULL, NULL},
1957 {e_regSetFPU, fpu_ymm4, "ymm4", NULL, Vector, VectorOfUInt8,
1958 FPU_SIZE_YMM(ymm4), AVX_OFFSET_YMM(4), ehframe_dwarf_ymm4,
1959 ehframe_dwarf_ymm4, -1U, debugserver_ymm4, NULL, NULL},
1960 {e_regSetFPU, fpu_ymm5, "ymm5", NULL, Vector, VectorOfUInt8,
1961 FPU_SIZE_YMM(ymm5), AVX_OFFSET_YMM(5), ehframe_dwarf_ymm5,
1962 ehframe_dwarf_ymm5, -1U, debugserver_ymm5, NULL, NULL},
1963 {e_regSetFPU, fpu_ymm6, "ymm6", NULL, Vector, VectorOfUInt8,
1964 FPU_SIZE_YMM(ymm6), AVX_OFFSET_YMM(6), ehframe_dwarf_ymm6,
1965 ehframe_dwarf_ymm6, -1U, debugserver_ymm6, NULL, NULL},
1966 {e_regSetFPU, fpu_ymm7, "ymm7", NULL, Vector, VectorOfUInt8,
1967 FPU_SIZE_YMM(ymm7), AVX_OFFSET_YMM(7), ehframe_dwarf_ymm7,
1968 ehframe_dwarf_ymm7, -1U, debugserver_ymm7, NULL, NULL},
1969 {e_regSetFPU, fpu_ymm8, "ymm8", NULL, Vector, VectorOfUInt8,
1970 FPU_SIZE_YMM(ymm8), AVX_OFFSET_YMM(8), ehframe_dwarf_ymm8,
1971 ehframe_dwarf_ymm8, -1U, debugserver_ymm8, NULL, NULL},
1972 {e_regSetFPU, fpu_ymm9, "ymm9", NULL, Vector, VectorOfUInt8,
1973 FPU_SIZE_YMM(ymm9), AVX_OFFSET_YMM(9), ehframe_dwarf_ymm9,
1974 ehframe_dwarf_ymm9, -1U, debugserver_ymm9, NULL, NULL},
1975 {e_regSetFPU, fpu_ymm10, "ymm10", NULL, Vector, VectorOfUInt8,
1976 FPU_SIZE_YMM(ymm10), AVX_OFFSET_YMM(10), ehframe_dwarf_ymm10,
1977 ehframe_dwarf_ymm10, -1U, debugserver_ymm10, NULL, NULL},
1978 {e_regSetFPU, fpu_ymm11, "ymm11", NULL, Vector, VectorOfUInt8,
1979 FPU_SIZE_YMM(ymm11), AVX_OFFSET_YMM(11), ehframe_dwarf_ymm11,
1980 ehframe_dwarf_ymm11, -1U, debugserver_ymm11, NULL, NULL},
1981 {e_regSetFPU, fpu_ymm12, "ymm12", NULL, Vector, VectorOfUInt8,
1982 FPU_SIZE_YMM(ymm12), AVX_OFFSET_YMM(12), ehframe_dwarf_ymm12,
1983 ehframe_dwarf_ymm12, -1U, debugserver_ymm12, NULL, NULL},
1984 {e_regSetFPU, fpu_ymm13, "ymm13", NULL, Vector, VectorOfUInt8,
1985 FPU_SIZE_YMM(ymm13), AVX_OFFSET_YMM(13), ehframe_dwarf_ymm13,
1986 ehframe_dwarf_ymm13, -1U, debugserver_ymm13, NULL, NULL},
1987 {e_regSetFPU, fpu_ymm14, "ymm14", NULL, Vector, VectorOfUInt8,
1988 FPU_SIZE_YMM(ymm14), AVX_OFFSET_YMM(14), ehframe_dwarf_ymm14,
1989 ehframe_dwarf_ymm14, -1U, debugserver_ymm14, NULL, NULL},
1990 {e_regSetFPU, fpu_ymm15, "ymm15", NULL, Vector, VectorOfUInt8,
1991 FPU_SIZE_YMM(ymm15), AVX_OFFSET_YMM(15), ehframe_dwarf_ymm15,
1992 ehframe_dwarf_ymm15, -1U, debugserver_ymm15, NULL, NULL},
1994 {e_regSetFPU, fpu_xmm0, "xmm0", NULL, Vector, VectorOfUInt8,
1995 FPU_SIZE_XMM(xmm0), 0, ehframe_dwarf_xmm0, ehframe_dwarf_xmm0, -1U,
1996 debugserver_xmm0, g_contained_ymm0, NULL},
1997 {e_regSetFPU, fpu_xmm1, "xmm1", NULL, Vector, VectorOfUInt8,
1998 FPU_SIZE_XMM(xmm1), 0, ehframe_dwarf_xmm1, ehframe_dwarf_xmm1, -1U,
1999 debugserver_xmm1, g_contained_ymm1, NULL},
2000 {e_regSetFPU, fpu_xmm2, "xmm2", NULL, Vector, VectorOfUInt8,
2001 FPU_SIZE_XMM(xmm2), 0, ehframe_dwarf_xmm2, ehframe_dwarf_xmm2, -1U,
2002 debugserver_xmm2, g_contained_ymm2, NULL},
2003 {e_regSetFPU, fpu_xmm3, "xmm3", NULL, Vector, VectorOfUInt8,
2004 FPU_SIZE_XMM(xmm3), 0, ehframe_dwarf_xmm3, ehframe_dwarf_xmm3, -1U,
2005 debugserver_xmm3, g_contained_ymm3, NULL},
2006 {e_regSetFPU, fpu_xmm4, "xmm4", NULL, Vector, VectorOfUInt8,
2007 FPU_SIZE_XMM(xmm4), 0, ehframe_dwarf_xmm4, ehframe_dwarf_xmm4, -1U,
2008 debugserver_xmm4, g_contained_ymm4, NULL},
2009 {e_regSetFPU, fpu_xmm5, "xmm5", NULL, Vector, VectorOfUInt8,
2010 FPU_SIZE_XMM(xmm5), 0, ehframe_dwarf_xmm5, ehframe_dwarf_xmm5, -1U,
2011 debugserver_xmm5, g_contained_ymm5, NULL},
2012 {e_regSetFPU, fpu_xmm6, "xmm6", NULL, Vector, VectorOfUInt8,
2013 FPU_SIZE_XMM(xmm6), 0, ehframe_dwarf_xmm6, ehframe_dwarf_xmm6, -1U,
2014 debugserver_xmm6, g_contained_ymm6, NULL},
2015 {e_regSetFPU, fpu_xmm7, "xmm7", NULL, Vector, VectorOfUInt8,
2016 FPU_SIZE_XMM(xmm7), 0, ehframe_dwarf_xmm7, ehframe_dwarf_xmm7, -1U,
2017 debugserver_xmm7, g_contained_ymm7, NULL},
2018 {e_regSetFPU, fpu_xmm8, "xmm8", NULL, Vector, VectorOfUInt8,
2019 FPU_SIZE_XMM(xmm8), 0, ehframe_dwarf_xmm8, ehframe_dwarf_xmm8, -1U,
2020 debugserver_xmm8, g_contained_ymm8, NULL},
2021 {e_regSetFPU, fpu_xmm9, "xmm9", NULL, Vector, VectorOfUInt8,
2022 FPU_SIZE_XMM(xmm9), 0, ehframe_dwarf_xmm9, ehframe_dwarf_xmm9, -1U,
2023 debugserver_xmm9, g_contained_ymm9, NULL},
2024 {e_regSetFPU, fpu_xmm10, "xmm10", NULL, Vector, VectorOfUInt8,
2025 FPU_SIZE_XMM(xmm10), 0, ehframe_dwarf_xmm10, ehframe_dwarf_xmm10, -1U,
2026 debugserver_xmm10, g_contained_ymm10, NULL},
2027 {e_regSetFPU, fpu_xmm11, "xmm11", NULL, Vector, VectorOfUInt8,
2028 FPU_SIZE_XMM(xmm11), 0, ehframe_dwarf_xmm11, ehframe_dwarf_xmm11, -1U,
2029 debugserver_xmm11, g_contained_ymm11, NULL},
2030 {e_regSetFPU, fpu_xmm12, "xmm12", NULL, Vector, VectorOfUInt8,
2031 FPU_SIZE_XMM(xmm12), 0, ehframe_dwarf_xmm12, ehframe_dwarf_xmm12, -1U,
2032 debugserver_xmm12, g_contained_ymm12, NULL},
2033 {e_regSetFPU, fpu_xmm13, "xmm13", NULL, Vector, VectorOfUInt8,
2034 FPU_SIZE_XMM(xmm13), 0, ehframe_dwarf_xmm13, ehframe_dwarf_xmm13, -1U,
2035 debugserver_xmm13, g_contained_ymm13, NULL},
2036 {e_regSetFPU, fpu_xmm14, "xmm14", NULL, Vector, VectorOfUInt8,
2037 FPU_SIZE_XMM(xmm14), 0, ehframe_dwarf_xmm14, ehframe_dwarf_xmm14, -1U,
2038 debugserver_xmm14, g_contained_ymm14, NULL},
2039 {e_regSetFPU, fpu_xmm15, "xmm15", NULL, Vector, VectorOfUInt8,
2040 FPU_SIZE_XMM(xmm15), 0, ehframe_dwarf_xmm15, ehframe_dwarf_xmm15, -1U,
2041 debugserver_xmm15, g_contained_ymm15, NULL}
2045 static const char *g_contained_zmm0[] = {"zmm0", NULL};
2046 static const char *g_contained_zmm1[] = {"zmm1", NULL};
2047 static const char *g_contained_zmm2[] = {"zmm2", NULL};
2048 static const char *g_contained_zmm3[] = {"zmm3", NULL};
2049 static const char *g_contained_zmm4[] = {"zmm4", NULL};
2050 static const char *g_contained_zmm5[] = {"zmm5", NULL};
2051 static const char *g_contained_zmm6[] = {"zmm6", NULL};
2052 static const char *g_contained_zmm7[] = {"zmm7", NULL};
2053 static const char *g_contained_zmm8[] = {"zmm8", NULL};
2054 static const char *g_contained_zmm9[] = {"zmm9", NULL};
2055 static const char *g_contained_zmm10[] = {"zmm10", NULL};
2056 static const char *g_contained_zmm11[] = {"zmm11", NULL};
2057 static const char *g_contained_zmm12[] = {"zmm12", NULL};
2058 static const char *g_contained_zmm13[] = {"zmm13", NULL};
2059 static const char *g_contained_zmm14[] = {"zmm14", NULL};
2060 static const char *g_contained_zmm15[] = {"zmm15", NULL};
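// The AVX512F register table is generated from the macros below: zmm0-31 are
// the backing registers, k0-k7 are the 8-byte opmask registers, and ymmN and
// xmmN are exposed as aliases contained in zmmN at offset 0.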
2062 #define STR(s) #s
2064 #define ZMM_REG_DEF(reg) \
2066 e_regSetFPU, fpu_zmm##reg, STR(zmm##reg), NULL, Vector, VectorOfUInt8, \
2067 FPU_SIZE_ZMM(zmm##reg), AVX512F_OFFSET_ZMM(reg), \
2068 ehframe_dwarf_zmm##reg, ehframe_dwarf_zmm##reg, -1U, \
2069 debugserver_zmm##reg, NULL, NULL \
2072 #define YMM_REG_ALIAS(reg) \
2074 e_regSetFPU, fpu_ymm##reg, STR(ymm##reg), NULL, Vector, VectorOfUInt8, \
2075 FPU_SIZE_YMM(ymm##reg), 0, ehframe_dwarf_ymm##reg, \
2076 ehframe_dwarf_ymm##reg, -1U, debugserver_ymm##reg, \
2077 g_contained_zmm##reg, NULL \
2080 #define XMM_REG_ALIAS(reg) \
2082 e_regSetFPU, fpu_xmm##reg, STR(xmm##reg), NULL, Vector, VectorOfUInt8, \
2083 FPU_SIZE_XMM(xmm##reg), 0, ehframe_dwarf_xmm##reg, \
2084 ehframe_dwarf_xmm##reg, -1U, debugserver_xmm##reg, \
2085 g_contained_zmm##reg, NULL \
2088 #define AVX512_K_REG_DEF(reg) \
2090 e_regSetFPU, fpu_k##reg, STR(k##reg), NULL, Vector, VectorOfUInt8, 8, \
2091 AVX512F_OFFSET(k##reg), ehframe_dwarf_k##reg, ehframe_dwarf_k##reg, \
2092 -1U, debugserver_k##reg, NULL, NULL \
2095 const DNBRegisterInfo DNBArchImplX86_64::g_fpu_registers_avx512f[] = {
2096 {e_regSetFPU, fpu_fcw, "fctrl", NULL, Uint, Hex, FPU_SIZE_UINT(fcw),
2097 AVX_OFFSET(fcw), -1U, -1U, -1U, -1U, NULL, NULL},
2098 {e_regSetFPU, fpu_fsw, "fstat", NULL, Uint, Hex, FPU_SIZE_UINT(fsw),
2099 AVX_OFFSET(fsw), -1U, -1U, -1U, -1U, NULL, NULL},
2100 {e_regSetFPU, fpu_ftw, "ftag", NULL, Uint, Hex, 2 /* sizeof __fpu_ftw + sizeof __fpu_rsrv1 */,
2101 AVX_OFFSET(ftw), -1U, -1U, -1U, -1U, NULL, NULL},
2102 {e_regSetFPU, fpu_fop, "fop", NULL, Uint, Hex, FPU_SIZE_UINT(fop),
2103 AVX_OFFSET(fop), -1U, -1U, -1U, -1U, NULL, NULL},
2104 {e_regSetFPU, fpu_ip, "fioff", NULL, Uint, Hex, FPU_SIZE_UINT(ip),
2105 AVX_OFFSET(ip), -1U, -1U, -1U, -1U, NULL, NULL},
2106 {e_regSetFPU, fpu_cs, "fiseg", NULL, Uint, Hex, FPU_SIZE_UINT(cs),
2107 AVX_OFFSET(cs), -1U, -1U, -1U, -1U, NULL, NULL},
2108 {e_regSetFPU, fpu_dp, "fooff", NULL, Uint, Hex, FPU_SIZE_UINT(dp),
2109 AVX_OFFSET(dp), -1U, -1U, -1U, -1U, NULL, NULL},
2110 {e_regSetFPU, fpu_ds, "foseg", NULL, Uint, Hex, FPU_SIZE_UINT(ds),
2111 AVX_OFFSET(ds), -1U, -1U, -1U, -1U, NULL, NULL},
2112 {e_regSetFPU, fpu_mxcsr, "mxcsr", NULL, Uint, Hex, FPU_SIZE_UINT(mxcsr),
2113 AVX_OFFSET(mxcsr), -1U, -1U, -1U, -1U, NULL, NULL},
2114 {e_regSetFPU, fpu_mxcsrmask, "mxcsrmask", NULL, Uint, Hex,
2115 FPU_SIZE_UINT(mxcsrmask), AVX_OFFSET(mxcsrmask), -1U, -1U, -1U, -1U, NULL,
2116 NULL},
2118 {e_regSetFPU, fpu_stmm0, "stmm0", "st0", Vector, VectorOfUInt8,
2119 FPU_SIZE_MMST(stmm0), AVX_OFFSET(stmm0), ehframe_dwarf_stmm0,
2120 ehframe_dwarf_stmm0, -1U, debugserver_stmm0, NULL, NULL},
2121 {e_regSetFPU, fpu_stmm1, "stmm1", "st1", Vector, VectorOfUInt8,
2122 FPU_SIZE_MMST(stmm1), AVX_OFFSET(stmm1), ehframe_dwarf_stmm1,
2123 ehframe_dwarf_stmm1, -1U, debugserver_stmm1, NULL, NULL},
2124 {e_regSetFPU, fpu_stmm2, "stmm2", "st2", Vector, VectorOfUInt8,
2125 FPU_SIZE_MMST(stmm2), AVX_OFFSET(stmm2), ehframe_dwarf_stmm2,
2126 ehframe_dwarf_stmm2, -1U, debugserver_stmm2, NULL, NULL},
2127 {e_regSetFPU, fpu_stmm3, "stmm3", "st3", Vector, VectorOfUInt8,
2128 FPU_SIZE_MMST(stmm3), AVX_OFFSET(stmm3), ehframe_dwarf_stmm3,
2129 ehframe_dwarf_stmm3, -1U, debugserver_stmm3, NULL, NULL},
2130 {e_regSetFPU, fpu_stmm4, "stmm4", "st4", Vector, VectorOfUInt8,
2131 FPU_SIZE_MMST(stmm4), AVX_OFFSET(stmm4), ehframe_dwarf_stmm4,
2132 ehframe_dwarf_stmm4, -1U, debugserver_stmm4, NULL, NULL},
2133 {e_regSetFPU, fpu_stmm5, "stmm5", "st5", Vector, VectorOfUInt8,
2134 FPU_SIZE_MMST(stmm5), AVX_OFFSET(stmm5), ehframe_dwarf_stmm5,
2135 ehframe_dwarf_stmm5, -1U, debugserver_stmm5, NULL, NULL},
2136 {e_regSetFPU, fpu_stmm6, "stmm6", "st6", Vector, VectorOfUInt8,
2137 FPU_SIZE_MMST(stmm6), AVX_OFFSET(stmm6), ehframe_dwarf_stmm6,
2138 ehframe_dwarf_stmm6, -1U, debugserver_stmm6, NULL, NULL},
2139 {e_regSetFPU, fpu_stmm7, "stmm7", "st7", Vector, VectorOfUInt8,
2140 FPU_SIZE_MMST(stmm7), AVX_OFFSET(stmm7), ehframe_dwarf_stmm7,
2141 ehframe_dwarf_stmm7, -1U, debugserver_stmm7, NULL, NULL},
2143 AVX512_K_REG_DEF(0),
2144 AVX512_K_REG_DEF(1),
2145 AVX512_K_REG_DEF(2),
2146 AVX512_K_REG_DEF(3),
2147 AVX512_K_REG_DEF(4),
2148 AVX512_K_REG_DEF(5),
2149 AVX512_K_REG_DEF(6),
2150 AVX512_K_REG_DEF(7),
2152 ZMM_REG_DEF(0),
2153 ZMM_REG_DEF(1),
2154 ZMM_REG_DEF(2),
2155 ZMM_REG_DEF(3),
2156 ZMM_REG_DEF(4),
2157 ZMM_REG_DEF(5),
2158 ZMM_REG_DEF(6),
2159 ZMM_REG_DEF(7),
2160 ZMM_REG_DEF(8),
2161 ZMM_REG_DEF(9),
2162 ZMM_REG_DEF(10),
2163 ZMM_REG_DEF(11),
2164 ZMM_REG_DEF(12),
2165 ZMM_REG_DEF(13),
2166 ZMM_REG_DEF(14),
2167 ZMM_REG_DEF(15),
2168 ZMM_REG_DEF(16),
2169 ZMM_REG_DEF(17),
2170 ZMM_REG_DEF(18),
2171 ZMM_REG_DEF(19),
2172 ZMM_REG_DEF(20),
2173 ZMM_REG_DEF(21),
2174 ZMM_REG_DEF(22),
2175 ZMM_REG_DEF(23),
2176 ZMM_REG_DEF(24),
2177 ZMM_REG_DEF(25),
2178 ZMM_REG_DEF(26),
2179 ZMM_REG_DEF(27),
2180 ZMM_REG_DEF(28),
2181 ZMM_REG_DEF(29),
2182 ZMM_REG_DEF(30),
2183 ZMM_REG_DEF(31),
2185 YMM_REG_ALIAS(0),
2186 YMM_REG_ALIAS(1),
2187 YMM_REG_ALIAS(2),
2188 YMM_REG_ALIAS(3),
2189 YMM_REG_ALIAS(4),
2190 YMM_REG_ALIAS(5),
2191 YMM_REG_ALIAS(6),
2192 YMM_REG_ALIAS(7),
2193 YMM_REG_ALIAS(8),
2194 YMM_REG_ALIAS(9),
2195 YMM_REG_ALIAS(10),
2196 YMM_REG_ALIAS(11),
2197 YMM_REG_ALIAS(12),
2198 YMM_REG_ALIAS(13),
2199 YMM_REG_ALIAS(14),
2200 YMM_REG_ALIAS(15),
2202 XMM_REG_ALIAS(0),
2203 XMM_REG_ALIAS(1),
2204 XMM_REG_ALIAS(2),
2205 XMM_REG_ALIAS(3),
2206 XMM_REG_ALIAS(4),
2207 XMM_REG_ALIAS(5),
2208 XMM_REG_ALIAS(6),
2209 XMM_REG_ALIAS(7),
2210 XMM_REG_ALIAS(8),
2211 XMM_REG_ALIAS(9),
2212 XMM_REG_ALIAS(10),
2213 XMM_REG_ALIAS(11),
2214 XMM_REG_ALIAS(12),
2215 XMM_REG_ALIAS(13),
2216 XMM_REG_ALIAS(14),
2217 XMM_REG_ALIAS(15),
2222 // Exception registers
2224 const DNBRegisterInfo DNBArchImplX86_64::g_exc_registers[] = {
2225 {e_regSetEXC, exc_trapno, "trapno", NULL, Uint, Hex, EXC_SIZE(trapno),
2226 EXC_OFFSET(trapno), -1U, -1U, -1U, -1U, NULL, NULL},
2227 {e_regSetEXC, exc_err, "err", NULL, Uint, Hex, EXC_SIZE(err),
2228 EXC_OFFSET(err), -1U, -1U, -1U, -1U, NULL, NULL},
2229 {e_regSetEXC, exc_faultvaddr, "faultvaddr", NULL, Uint, Hex,
2230 EXC_SIZE(faultvaddr), EXC_OFFSET(faultvaddr), -1U, -1U, -1U, -1U, NULL,
2231 NULL}};
2233 // Number of registers in each register set
2234 const size_t DNBArchImplX86_64::k_num_gpr_registers =
2235 sizeof(g_gpr_registers) / sizeof(DNBRegisterInfo);
2236 const size_t DNBArchImplX86_64::k_num_fpu_registers_no_avx =
2237 sizeof(g_fpu_registers_no_avx) / sizeof(DNBRegisterInfo);
2238 const size_t DNBArchImplX86_64::k_num_fpu_registers_avx =
2239 sizeof(g_fpu_registers_avx) / sizeof(DNBRegisterInfo);
2240 const size_t DNBArchImplX86_64::k_num_exc_registers =
2241 sizeof(g_exc_registers) / sizeof(DNBRegisterInfo);
2242 const size_t DNBArchImplX86_64::k_num_all_registers_no_avx =
2243 k_num_gpr_registers + k_num_fpu_registers_no_avx + k_num_exc_registers;
2244 const size_t DNBArchImplX86_64::k_num_all_registers_avx =
2245 k_num_gpr_registers + k_num_fpu_registers_avx + k_num_exc_registers;
2246 const size_t DNBArchImplX86_64::k_num_fpu_registers_avx512f =
2247 sizeof(g_fpu_registers_avx512f) / sizeof(DNBRegisterInfo);
2248 const size_t DNBArchImplX86_64::k_num_all_registers_avx512f =
2249 k_num_gpr_registers + k_num_fpu_registers_avx512f + k_num_exc_registers;
// Register set definitions. The first definition, at register set index
// zero, is for all registers; the other register sets follow. The register
// information for the "all registers" set need not be filled in.
2254 const DNBRegisterSetInfo DNBArchImplX86_64::g_reg_sets_no_avx[] = {
2255 {"x86_64 Registers", NULL, k_num_all_registers_no_avx},
2256 {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
2257 {"Floating Point Registers", g_fpu_registers_no_avx,
2258 k_num_fpu_registers_no_avx},
2259 {"Exception State Registers", g_exc_registers, k_num_exc_registers}};
2261 const DNBRegisterSetInfo DNBArchImplX86_64::g_reg_sets_avx[] = {
2262 {"x86_64 Registers", NULL, k_num_all_registers_avx},
2263 {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
2264 {"Floating Point Registers", g_fpu_registers_avx, k_num_fpu_registers_avx},
2265 {"Exception State Registers", g_exc_registers, k_num_exc_registers}};
2267 const DNBRegisterSetInfo DNBArchImplX86_64::g_reg_sets_avx512f[] = {
    {"x86_64 Registers", NULL, k_num_all_registers_avx512f},
2269 {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
2270 {"Floating Point Registers", g_fpu_registers_avx512f,
2271 k_num_fpu_registers_avx512f},
2272 {"Exception State Registers", g_exc_registers, k_num_exc_registers}};
2274 // Total number of register sets for this architecture
2275 const size_t DNBArchImplX86_64::k_num_register_sets =
2276 sizeof(g_reg_sets_avx) / sizeof(DNBRegisterSetInfo);
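// Factory callback registered with DNBArchProtocol (see Initialize below):
// creates the x86_64 architecture implementation for the given thread.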
2278 DNBArchProtocol *DNBArchImplX86_64::Create(MachThread *thread) {
2279 DNBArchImplX86_64 *obj = new DNBArchImplX86_64(thread);
  return obj;
}
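// Return the software breakpoint opcode for the requested size. On x86_64
// the only supported breakpoint is the single-byte INT3 instruction (0xCC).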
2283 const uint8_t *
2284 DNBArchImplX86_64::SoftwareBreakpointOpcode(nub_size_t byte_size) {
2285 static const uint8_t g_breakpoint_opcode[] = {0xCC};
2286 if (byte_size == 1)
2287 return g_breakpoint_opcode;
  return NULL;
}
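// Return the register set tables (and their count), choosing the AVX512F,
// AVX, or plain FPU variant based on what the host CPU (or the force-AVX
// debug override) supports.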
2291 const DNBRegisterSetInfo *
2292 DNBArchImplX86_64::GetRegisterSetInfo(nub_size_t *num_reg_sets) {
2293 *num_reg_sets = k_num_register_sets;
2295 if (CPUHasAVX512f() || FORCE_AVX_REGS)
2296 return g_reg_sets_avx512f;
2297 if (CPUHasAVX() || FORCE_AVX_REGS)
2298 return g_reg_sets_avx;
  return g_reg_sets_no_avx;
}
2303 void DNBArchImplX86_64::Initialize() {
2304 DNBArchPluginInfo arch_plugin_info = {
2305 CPU_TYPE_X86_64, DNBArchImplX86_64::Create,
2306 DNBArchImplX86_64::GetRegisterSetInfo,
2307 DNBArchImplX86_64::SoftwareBreakpointOpcode};
2309 // Register this arch plug-in with the main protocol class
2310 DNBArchProtocol::RegisterArchPlugin(arch_plugin_info);
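// Read one register. Generic register numbers (pc, sp, fp, flags) are first
// mapped to the corresponding GPRs, the cached state for the set is fetched,
// and the value is copied out of the thread context.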
2313 bool DNBArchImplX86_64::GetRegisterValue(uint32_t set, uint32_t reg,
2314 DNBRegisterValue *value) {
2315 if (set == REGISTER_SET_GENERIC) {
2316 switch (reg) {
2317 case GENERIC_REGNUM_PC: // Program Counter
2318 set = e_regSetGPR;
2319 reg = gpr_rip;
2320 break;
2322 case GENERIC_REGNUM_SP: // Stack Pointer
2323 set = e_regSetGPR;
2324 reg = gpr_rsp;
2325 break;
2327 case GENERIC_REGNUM_FP: // Frame Pointer
2328 set = e_regSetGPR;
2329 reg = gpr_rbp;
2330 break;
2332 case GENERIC_REGNUM_FLAGS: // Processor flags register
2333 set = e_regSetGPR;
2334 reg = gpr_rflags;
2335 break;
2337 case GENERIC_REGNUM_RA: // Return Address
2338 default:
2339 return false;
2343 if (GetRegisterState(set, false) != KERN_SUCCESS)
2344 return false;
2346 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
2347 if (regInfo) {
2348 value->info = *regInfo;
2349 switch (set) {
2350 case e_regSetGPR:
2351 if (reg > gpr_gs && !m_state.hasFullGPRState)
2352 return false;
2353 if (reg < k_num_gpr_registers) {
2354 value->value.uint64 = ((uint64_t *)(&m_state.context.gpr))[reg];
2355 return true;
2357 break;
2359 case e_regSetFPU:
2360 if (reg > fpu_xmm15 && !(CPUHasAVX() || FORCE_AVX_REGS))
2361 return false;
2362 if (reg > fpu_ymm15 && !(CPUHasAVX512f() || FORCE_AVX_REGS))
2363 return false;
2364 switch (reg) {
2366 case fpu_fcw:
2367 value->value.uint16 =
2368 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw));
2369 return true;
2370 case fpu_fsw:
2371 value->value.uint16 =
2372 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw));
2373 return true;
2374 case fpu_ftw:
2375 memcpy (&value->value.uint16, &m_state.context.fpu.no_avx.__fpu_ftw, 2);
2376 return true;
2377 case fpu_fop:
2378 value->value.uint16 = m_state.context.fpu.no_avx.__fpu_fop;
2379 return true;
2380 case fpu_ip:
2381 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_ip;
2382 return true;
2383 case fpu_cs:
2384 value->value.uint16 = m_state.context.fpu.no_avx.__fpu_cs;
2385 return true;
2386 case fpu_dp:
2387 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_dp;
2388 return true;
2389 case fpu_ds:
2390 value->value.uint16 = m_state.context.fpu.no_avx.__fpu_ds;
2391 return true;
2392 case fpu_mxcsr:
2393 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsr;
2394 return true;
2395 case fpu_mxcsrmask:
2396 value->value.uint32 = m_state.context.fpu.no_avx.__fpu_mxcsrmask;
2397 return true;
2399 case fpu_stmm0:
2400 case fpu_stmm1:
2401 case fpu_stmm2:
2402 case fpu_stmm3:
2403 case fpu_stmm4:
2404 case fpu_stmm5:
2405 case fpu_stmm6:
2406 case fpu_stmm7:
2407 memcpy(&value->value.uint8,
2408 &m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0), 10);
2409 return true;
2411 case fpu_xmm0:
2412 case fpu_xmm1:
2413 case fpu_xmm2:
2414 case fpu_xmm3:
2415 case fpu_xmm4:
2416 case fpu_xmm5:
2417 case fpu_xmm6:
2418 case fpu_xmm7:
2419 case fpu_xmm8:
2420 case fpu_xmm9:
2421 case fpu_xmm10:
2422 case fpu_xmm11:
2423 case fpu_xmm12:
2424 case fpu_xmm13:
2425 case fpu_xmm14:
2426 case fpu_xmm15:
2427 memcpy(&value->value.uint8,
2428 &m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0), 16);
2429 return true;
2431 case fpu_ymm0:
2432 case fpu_ymm1:
2433 case fpu_ymm2:
2434 case fpu_ymm3:
2435 case fpu_ymm4:
2436 case fpu_ymm5:
2437 case fpu_ymm6:
2438 case fpu_ymm7:
2439 case fpu_ymm8:
2440 case fpu_ymm9:
2441 case fpu_ymm10:
2442 case fpu_ymm11:
2443 case fpu_ymm12:
2444 case fpu_ymm13:
2445 case fpu_ymm14:
2446 case fpu_ymm15:
2447 memcpy(&value->value.uint8,
2448 &m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0), 16);
2449 memcpy((&value->value.uint8) + 16,
2450 &m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0), 16);
2451 return true;
2452 case fpu_k0:
2453 case fpu_k1:
2454 case fpu_k2:
2455 case fpu_k3:
2456 case fpu_k4:
2457 case fpu_k5:
2458 case fpu_k6:
2459 case fpu_k7:
2460 memcpy((&value->value.uint8),
2461 &m_state.context.fpu.avx512f.__fpu_k0 + (reg - fpu_k0), 8);
2462 return true;
2463 case fpu_zmm0:
2464 case fpu_zmm1:
2465 case fpu_zmm2:
2466 case fpu_zmm3:
2467 case fpu_zmm4:
2468 case fpu_zmm5:
2469 case fpu_zmm6:
2470 case fpu_zmm7:
2471 case fpu_zmm8:
2472 case fpu_zmm9:
2473 case fpu_zmm10:
2474 case fpu_zmm11:
2475 case fpu_zmm12:
2476 case fpu_zmm13:
2477 case fpu_zmm14:
2478 case fpu_zmm15:
2479 memcpy(&value->value.uint8,
2480 &m_state.context.fpu.avx512f.__fpu_xmm0 + (reg - fpu_zmm0), 16);
2481 memcpy((&value->value.uint8) + 16,
2482 &m_state.context.fpu.avx512f.__fpu_ymmh0 + (reg - fpu_zmm0), 16);
2483 memcpy((&value->value.uint8) + 32,
2484 &m_state.context.fpu.avx512f.__fpu_zmmh0 + (reg - fpu_zmm0), 32);
2485 return true;
2486 case fpu_zmm16:
2487 case fpu_zmm17:
2488 case fpu_zmm18:
2489 case fpu_zmm19:
2490 case fpu_zmm20:
2491 case fpu_zmm21:
2492 case fpu_zmm22:
2493 case fpu_zmm23:
2494 case fpu_zmm24:
2495 case fpu_zmm25:
2496 case fpu_zmm26:
2497 case fpu_zmm27:
2498 case fpu_zmm28:
2499 case fpu_zmm29:
2500 case fpu_zmm30:
2501 case fpu_zmm31:
2502 memcpy(&value->value.uint8,
2503 &m_state.context.fpu.avx512f.__fpu_zmm16 + (reg - fpu_zmm16), 64);
2504 return true;
2506 break;
2508 case e_regSetEXC:
2509 switch (reg) {
2510 case exc_trapno:
2511 value->value.uint32 = m_state.context.exc.__trapno;
2512 return true;
2513 case exc_err:
2514 value->value.uint32 = m_state.context.exc.__err;
2515 return true;
2516 case exc_faultvaddr:
2517 value->value.uint64 = m_state.context.exc.__faultvaddr;
2518 return true;
2520 break;
2523 return false;
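// Write one register into the cached thread context and, on success, flush
// the affected register set back to the thread with SetRegisterState().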
2526 bool DNBArchImplX86_64::SetRegisterValue(uint32_t set, uint32_t reg,
2527 const DNBRegisterValue *value) {
2528 if (set == REGISTER_SET_GENERIC) {
2529 switch (reg) {
2530 case GENERIC_REGNUM_PC: // Program Counter
2531 set = e_regSetGPR;
2532 reg = gpr_rip;
2533 break;
2535 case GENERIC_REGNUM_SP: // Stack Pointer
2536 set = e_regSetGPR;
2537 reg = gpr_rsp;
2538 break;
2540 case GENERIC_REGNUM_FP: // Frame Pointer
2541 set = e_regSetGPR;
2542 reg = gpr_rbp;
2543 break;
2545 case GENERIC_REGNUM_FLAGS: // Processor flags register
2546 set = e_regSetGPR;
2547 reg = gpr_rflags;
2548 break;
2550 case GENERIC_REGNUM_RA: // Return Address
2551 default:
2552 return false;
2556 if (GetRegisterState(set, false) != KERN_SUCCESS)
2557 return false;
2559 bool success = false;
2560 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
2561 if (regInfo) {
2562 switch (set) {
2563 case e_regSetGPR:
2564 if (reg > gpr_gs && !m_state.hasFullGPRState)
2565 return false;
      if (reg < k_num_gpr_registers) {
        ((uint64_t *)(&m_state.context.gpr))[reg] = value->value.uint64;
        success = true;
      }
      break;

    case e_regSetFPU:
      // Reject AVX/AVX512 registers on hosts that lack them, mirroring the
      // checks in GetRegisterValue().
      if (reg > fpu_xmm15 && !(CPUHasAVX() || FORCE_AVX_REGS))
        return false;
      if (reg > fpu_ymm15 && !(CPUHasAVX512f() || FORCE_AVX_REGS))
        return false;
      switch (reg) {
2577 case fpu_fcw:
2578 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fcw)) =
2579 value->value.uint16;
2580 success = true;
2581 break;
2582 case fpu_fsw:
2583 *((uint16_t *)(&m_state.context.fpu.no_avx.__fpu_fsw)) =
2584 value->value.uint16;
2585 success = true;
2586 break;
2587 case fpu_ftw:
2588 memcpy (&m_state.context.fpu.no_avx.__fpu_ftw, &value->value.uint8, 2);
2589 success = true;
2590 break;
2591 case fpu_fop:
2592 m_state.context.fpu.no_avx.__fpu_fop = value->value.uint16;
2593 success = true;
2594 break;
2595 case fpu_ip:
2596 m_state.context.fpu.no_avx.__fpu_ip = value->value.uint32;
2597 success = true;
2598 break;
2599 case fpu_cs:
2600 m_state.context.fpu.no_avx.__fpu_cs = value->value.uint16;
2601 success = true;
2602 break;
2603 case fpu_dp:
2604 m_state.context.fpu.no_avx.__fpu_dp = value->value.uint32;
2605 success = true;
2606 break;
2607 case fpu_ds:
2608 m_state.context.fpu.no_avx.__fpu_ds = value->value.uint16;
2609 success = true;
2610 break;
2611 case fpu_mxcsr:
2612 m_state.context.fpu.no_avx.__fpu_mxcsr = value->value.uint32;
2613 success = true;
2614 break;
2615 case fpu_mxcsrmask:
2616 m_state.context.fpu.no_avx.__fpu_mxcsrmask = value->value.uint32;
2617 success = true;
2618 break;
2620 case fpu_stmm0:
2621 case fpu_stmm1:
2622 case fpu_stmm2:
2623 case fpu_stmm3:
2624 case fpu_stmm4:
2625 case fpu_stmm5:
2626 case fpu_stmm6:
2627 case fpu_stmm7:
2628 memcpy(&m_state.context.fpu.no_avx.__fpu_stmm0 + (reg - fpu_stmm0),
2629 &value->value.uint8, 10);
2630 success = true;
2631 break;
2633 case fpu_xmm0:
2634 case fpu_xmm1:
2635 case fpu_xmm2:
2636 case fpu_xmm3:
2637 case fpu_xmm4:
2638 case fpu_xmm5:
2639 case fpu_xmm6:
2640 case fpu_xmm7:
2641 case fpu_xmm8:
2642 case fpu_xmm9:
2643 case fpu_xmm10:
2644 case fpu_xmm11:
2645 case fpu_xmm12:
2646 case fpu_xmm13:
2647 case fpu_xmm14:
2648 case fpu_xmm15:
2649 memcpy(&m_state.context.fpu.no_avx.__fpu_xmm0 + (reg - fpu_xmm0),
2650 &value->value.uint8, 16);
2651 success = true;
2652 break;
2654 case fpu_ymm0:
2655 case fpu_ymm1:
2656 case fpu_ymm2:
2657 case fpu_ymm3:
2658 case fpu_ymm4:
2659 case fpu_ymm5:
2660 case fpu_ymm6:
2661 case fpu_ymm7:
2662 case fpu_ymm8:
2663 case fpu_ymm9:
2664 case fpu_ymm10:
2665 case fpu_ymm11:
2666 case fpu_ymm12:
2667 case fpu_ymm13:
2668 case fpu_ymm14:
2669 case fpu_ymm15:
2670 memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + (reg - fpu_ymm0),
2671 &value->value.uint8, 16);
2672 memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + (reg - fpu_ymm0),
2673 (&value->value.uint8) + 16, 16);
2674 success = true;
2675 break;
2676 case fpu_k0:
2677 case fpu_k1:
2678 case fpu_k2:
2679 case fpu_k3:
2680 case fpu_k4:
2681 case fpu_k5:
2682 case fpu_k6:
2683 case fpu_k7:
2684 memcpy(&m_state.context.fpu.avx512f.__fpu_k0 + (reg - fpu_k0),
2685 &value->value.uint8, 8);
2686 success = true;
2687 break;
2688 case fpu_zmm0:
2689 case fpu_zmm1:
2690 case fpu_zmm2:
2691 case fpu_zmm3:
2692 case fpu_zmm4:
2693 case fpu_zmm5:
2694 case fpu_zmm6:
2695 case fpu_zmm7:
2696 case fpu_zmm8:
2697 case fpu_zmm9:
2698 case fpu_zmm10:
2699 case fpu_zmm11:
2700 case fpu_zmm12:
2701 case fpu_zmm13:
2702 case fpu_zmm14:
2703 case fpu_zmm15:
2704 memcpy(&m_state.context.fpu.avx512f.__fpu_xmm0 + (reg - fpu_zmm0),
2705 &value->value.uint8, 16);
2706 memcpy(&m_state.context.fpu.avx512f.__fpu_ymmh0 + (reg - fpu_zmm0),
2707 &value->value.uint8 + 16, 16);
2708 memcpy(&m_state.context.fpu.avx512f.__fpu_zmmh0 + (reg - fpu_zmm0),
2709 &value->value.uint8 + 32, 32);
2710 success = true;
2711 break;
2712 case fpu_zmm16:
2713 case fpu_zmm17:
2714 case fpu_zmm18:
2715 case fpu_zmm19:
2716 case fpu_zmm20:
2717 case fpu_zmm21:
2718 case fpu_zmm22:
2719 case fpu_zmm23:
2720 case fpu_zmm24:
2721 case fpu_zmm25:
2722 case fpu_zmm26:
2723 case fpu_zmm27:
2724 case fpu_zmm28:
2725 case fpu_zmm29:
2726 case fpu_zmm30:
2727 case fpu_zmm31:
2728 memcpy(&m_state.context.fpu.avx512f.__fpu_zmm16 + (reg - fpu_zmm16),
2729 &value->value.uint8, 64);
2730 success = true;
2731 break;
2733 break;
2735 case e_regSetEXC:
2736 switch (reg) {
2737 case exc_trapno:
2738 m_state.context.exc.__trapno = value->value.uint32;
2739 success = true;
2740 break;
2741 case exc_err:
2742 m_state.context.exc.__err = value->value.uint32;
2743 success = true;
2744 break;
2745 case exc_faultvaddr:
2746 m_state.context.exc.__faultvaddr = value->value.uint64;
2747 success = true;
2748 break;
2750 break;
2754 if (success)
2755 return SetRegisterState(set) == KERN_SUCCESS;
2756 return false;
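// Compute (and cache) the size of the buffer used by Get/SetRegisterContext:
// sizeof(GPR) + sizeof(EXC) plus the sizes of the FPU-table entries that
// carry their own storage (value_regs == NULL) for the active register set.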
2759 uint32_t DNBArchImplX86_64::GetRegisterContextSize() {
2760 static uint32_t g_cached_size = 0;
2761 if (g_cached_size == 0) {
2762 if (CPUHasAVX512f() || FORCE_AVX_REGS) {
2763 for (size_t i = 0; i < k_num_fpu_registers_avx512f; ++i) {
2764 if (g_fpu_registers_avx512f[i].value_regs == NULL)
2765 g_cached_size += g_fpu_registers_avx512f[i].size;
2767 } else if (CPUHasAVX() || FORCE_AVX_REGS) {
2768 for (size_t i = 0; i < k_num_fpu_registers_avx; ++i) {
2769 if (g_fpu_registers_avx[i].value_regs == NULL)
2770 g_cached_size += g_fpu_registers_avx[i].size;
2772 } else {
2773 for (size_t i = 0; i < k_num_fpu_registers_no_avx; ++i) {
2774 if (g_fpu_registers_no_avx[i].value_regs == NULL)
2775 g_cached_size += g_fpu_registers_no_avx[i].size;
2778 DNBLogThreaded("DNBArchImplX86_64::GetRegisterContextSize() - GPR = %zu, "
2779 "FPU = %u, EXC = %zu",
2780 sizeof(GPR), g_cached_size, sizeof(EXC));
2781 g_cached_size += sizeof(GPR);
2782 g_cached_size += sizeof(EXC);
2783 DNBLogThreaded(
2784 "DNBArchImplX86_64::GetRegisterContextSize() - GPR + FPU + EXC = %u",
2785 g_cached_size);
2787 return g_cached_size;
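// Pack the GPR, FPU, and EXC state into a caller-supplied buffer. The FPU
// data is copied field by field, skipping reserved and padding bytes so the
// result matches the layout described by the g_fpu_registers_* tables.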
2790 nub_size_t DNBArchImplX86_64::GetRegisterContext(void *buf,
2791 nub_size_t buf_len) {
2792 uint32_t size = GetRegisterContextSize();
2794 if (buf && buf_len) {
2795 bool force = false;
2796 kern_return_t kret;
2798 if ((kret = GetGPRState(force)) != KERN_SUCCESS) {
2799 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf "
2800 "= %p, len = %llu) error: GPR regs failed "
2801 "to read: %u ",
2802 buf, (uint64_t)buf_len, kret);
2803 size = 0;
2804 } else if ((kret = GetFPUState(force)) != KERN_SUCCESS) {
2805 DNBLogThreadedIf(
2806 LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = "
2807 "%llu) error: %s regs failed to read: %u",
2808 buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
2809 size = 0;
2810 } else if ((kret = GetEXCState(force)) != KERN_SUCCESS) {
2811 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::GetRegisterContext (buf "
2812 "= %p, len = %llu) error: EXC regs failed "
2813 "to read: %u",
2814 buf, (uint64_t)buf_len, kret);
2815 size = 0;
2816 } else {
2817 uint8_t *p = (uint8_t *)buf;
2818 // Copy the GPR registers
2819 memcpy(p, &m_state.context.gpr, sizeof(GPR));
2820 p += sizeof(GPR);
2822 // Walk around the gaps in the FPU regs
2823 memcpy(p, &m_state.context.fpu.no_avx.__fpu_fcw, 5);
2824 // We read 5 bytes, but we skip 6 to account for __fpu_rsrv1
2825 // to match the g_fpu_registers_* tables.
2826 p += 6;
2827 memcpy(p, &m_state.context.fpu.no_avx.__fpu_fop, 8);
2828 p += 8;
2829 memcpy(p, &m_state.context.fpu.no_avx.__fpu_dp, 6);
2830 p += 6;
2831 memcpy(p, &m_state.context.fpu.no_avx.__fpu_mxcsr, 8);
2832 p += 8;
2834 // Work around the padding between the stmm registers as they are 16
2835 // byte structs with 10 bytes of the value in each
2836 for (size_t i = 0; i < 8; ++i) {
2837 memcpy(p, &m_state.context.fpu.no_avx.__fpu_stmm0 + i, 10);
2838 p += 10;
2841 if(CPUHasAVX512f() || FORCE_AVX_REGS) {
2842 for (size_t i = 0; i < 8; ++i) {
2843 memcpy(p, &m_state.context.fpu.avx512f.__fpu_k0 + i, 8);
2844 p += 8;
2848 if (CPUHasAVX() || FORCE_AVX_REGS) {
2849 // Interleave the XMM and YMMH registers to make the YMM registers
2850 for (size_t i = 0; i < 16; ++i) {
2851 memcpy(p, &m_state.context.fpu.avx.__fpu_xmm0 + i, 16);
2852 p += 16;
2853 memcpy(p, &m_state.context.fpu.avx.__fpu_ymmh0 + i, 16);
2854 p += 16;
2856 if(CPUHasAVX512f() || FORCE_AVX_REGS) {
2857 for (size_t i = 0; i < 16; ++i) {
2858 memcpy(p, &m_state.context.fpu.avx512f.__fpu_zmmh0 + i, 32);
2859 p += 32;
2861 for (size_t i = 0; i < 16; ++i) {
2862 memcpy(p, &m_state.context.fpu.avx512f.__fpu_zmm16 + i, 64);
2863 p += 64;
2866 } else {
2867 // Copy the XMM registers in a single block
2868 memcpy(p, &m_state.context.fpu.no_avx.__fpu_xmm0, 16 * 16);
2869 p += 16 * 16;
2872 // Copy the exception registers
2873 memcpy(p, &m_state.context.exc, sizeof(EXC));
2874 p += sizeof(EXC);
2876 // make sure we end up with exactly what we think we should have
2877 size_t bytes_written = p - (uint8_t *)buf;
2878 UNUSED_IF_ASSERT_DISABLED(bytes_written);
2879 assert(bytes_written == size);
2883 DNBLogThreadedIf(
2884 LOG_THREAD,
2885 "DNBArchImplX86_64::GetRegisterContext (buf = %p, len = %llu) => %u", buf,
2886 (uint64_t)buf_len, size);
2887 // Return the size of the register context even if NULL was passed in
2888 return size;
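// Inverse of GetRegisterContext(): unpack a buffer with the same layout back
// into the cached thread context and write each register set to the thread.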
2891 nub_size_t DNBArchImplX86_64::SetRegisterContext(const void *buf,
2892 nub_size_t buf_len) {
2893 uint32_t size = GetRegisterContextSize();
2894 if (buf == NULL || buf_len == 0)
2895 size = 0;
2897 if (size) {
2898 if (size > buf_len)
2899 size = static_cast<uint32_t>(buf_len);
2901 const uint8_t *p = (const uint8_t *)buf;
2902 // Copy the GPR registers
2903 memcpy(&m_state.context.gpr, p, sizeof(GPR));
2904 p += sizeof(GPR);
    // Unpack the FPU registers, skipping the reserved and padding bytes so
    // the layout matches the g_fpu_registers_* tables.
2907 memcpy(&m_state.context.fpu.no_avx.__fpu_fcw, p, 5);
2908 // We wrote 5 bytes, but we skip 6 to account for __fpu_rsrv1
2909 // to match the g_fpu_registers_* tables.
2910 p += 6;
2911 memcpy(&m_state.context.fpu.no_avx.__fpu_fop, p, 8);
2912 p += 8;
2913 memcpy(&m_state.context.fpu.no_avx.__fpu_dp, p, 6);
2914 p += 6;
2915 memcpy(&m_state.context.fpu.no_avx.__fpu_mxcsr, p, 8);
2916 p += 8;
2918 // Work around the padding between the stmm registers as they are 16
2919 // byte structs with 10 bytes of the value in each
2920 for (size_t i = 0; i < 8; ++i) {
2921 memcpy(&m_state.context.fpu.no_avx.__fpu_stmm0 + i, p, 10);
2922 p += 10;
2925 if(CPUHasAVX512f() || FORCE_AVX_REGS) {
2926 for (size_t i = 0; i < 8; ++i) {
2927 memcpy(&m_state.context.fpu.avx512f.__fpu_k0 + i, p, 8);
2928 p += 8;
2932 if (CPUHasAVX() || FORCE_AVX_REGS) {
2933 // Interleave the XMM and YMMH registers to make the YMM registers
2934 for (size_t i = 0; i < 16; ++i) {
2935 memcpy(&m_state.context.fpu.avx.__fpu_xmm0 + i, p, 16);
2936 p += 16;
2937 memcpy(&m_state.context.fpu.avx.__fpu_ymmh0 + i, p, 16);
2938 p += 16;
2940 if(CPUHasAVX512f() || FORCE_AVX_REGS) {
2941 for (size_t i = 0; i < 16; ++i) {
2942 memcpy(&m_state.context.fpu.avx512f.__fpu_zmmh0 + i, p, 32);
2943 p += 32;
2945 for (size_t i = 0; i < 16; ++i) {
2946 memcpy(&m_state.context.fpu.avx512f.__fpu_zmm16 + i, p, 64);
2947 p += 64;
2950 } else {
2951 // Copy the XMM registers in a single block
2952 memcpy(&m_state.context.fpu.no_avx.__fpu_xmm0, p, 16 * 16);
2953 p += 16 * 16;
2956 // Copy the exception registers
2957 memcpy(&m_state.context.exc, p, sizeof(EXC));
2958 p += sizeof(EXC);
2960 // make sure we end up with exactly what we think we should have
2961 size_t bytes_written = p - (const uint8_t *)buf;
2962 UNUSED_IF_ASSERT_DISABLED(bytes_written);
2963 assert(bytes_written == size);
2965 kern_return_t kret;
2966 if ((kret = SetGPRState()) != KERN_SUCCESS)
2967 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf "
2968 "= %p, len = %llu) error: GPR regs failed "
2969 "to write: %u",
2970 buf, (uint64_t)buf_len, kret);
2971 if ((kret = SetFPUState()) != KERN_SUCCESS)
2972 DNBLogThreadedIf(
2973 LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = "
2974 "%llu) error: %s regs failed to write: %u",
2975 buf, (uint64_t)buf_len, CPUHasAVX() ? "AVX" : "FPU", kret);
2976 if ((kret = SetEXCState()) != KERN_SUCCESS)
2977 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::SetRegisterContext (buf "
                                   "= %p, len = %llu) error: EXC regs failed "
2979 "to write: %u",
2980 buf, (uint64_t)buf_len, kret);
2982 DNBLogThreadedIf(
2983 LOG_THREAD,
2984 "DNBArchImplX86_64::SetRegisterContext (buf = %p, len = %llu) => %llu",
2985 buf, (uint64_t)buf_len, (uint64_t)size);
2986 return size;
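// Snapshot the current GPR and FPU state under a new save ID that can later
// be handed to RestoreRegisterState(). The thread is interrupted with
// thread_abort_safely() first, so the registers are always re-read.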
2989 uint32_t DNBArchImplX86_64::SaveRegisterState() {
2990 kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
2991 DNBLogThreadedIf(
2992 LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
2993 "(SetGPRState() for stop_count = %u)",
2994 m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());
2996 // Always re-read the registers because above we call thread_abort_safely();
2997 bool force = true;
2999 if ((kret = GetGPRState(force)) != KERN_SUCCESS) {
3000 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::SaveRegisterState () "
3001 "error: GPR regs failed to read: %u ",
3002 kret);
3003 } else if ((kret = GetFPUState(force)) != KERN_SUCCESS) {
3004 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::SaveRegisterState () "
3005 "error: %s regs failed to read: %u",
3006 CPUHasAVX() ? "AVX" : "FPU", kret);
3007 } else {
3008 const uint32_t save_id = GetNextRegisterStateSaveID();
3009 m_saved_register_states[save_id] = m_state.context;
3010 return save_id;
3012 return 0;
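// Restore the GPR and FPU state recorded by SaveRegisterState() and write it
// back to the thread; the EXC state is not part of the snapshot.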
3014 bool DNBArchImplX86_64::RestoreRegisterState(uint32_t save_id) {
3015 SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id);
3016 if (pos != m_saved_register_states.end()) {
3017 m_state.context.gpr = pos->second.gpr;
3018 m_state.context.fpu = pos->second.fpu;
3019 m_state.SetError(e_regSetGPR, Read, 0);
3020 m_state.SetError(e_regSetFPU, Read, 0);
3021 kern_return_t kret;
3022 bool success = true;
3023 if ((kret = SetGPRState()) != KERN_SUCCESS) {
3024 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::RestoreRegisterState "
3025 "(save_id = %u) error: GPR regs failed to "
3026 "write: %u",
3027 save_id, kret);
3028 success = false;
3029 } else if ((kret = SetFPUState()) != KERN_SUCCESS) {
3030 DNBLogThreadedIf(LOG_THREAD, "DNBArchImplX86_64::RestoreRegisterState "
3031 "(save_id = %u) error: %s regs failed to "
3032 "write: %u",
3033 save_id, CPUHasAVX() ? "AVX" : "FPU", kret);
3034 success = false;
3036 m_saved_register_states.erase(pos);
3037 return success;
3039 return false;
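// Read the requested register set(s) from the thread; "force" bypasses any
// cached copy of the state.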
3042 kern_return_t DNBArchImplX86_64::GetRegisterState(int set, bool force) {
3043 switch (set) {
3044 case e_regSetALL:
3045 return GetGPRState(force) | GetFPUState(force) | GetEXCState(force);
3046 case e_regSetGPR:
3047 return GetGPRState(force);
3048 case e_regSetFPU:
3049 return GetFPUState(force);
3050 case e_regSetEXC:
3051 return GetEXCState(force);
3052 default:
3053 break;
3055 return KERN_INVALID_ARGUMENT;
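// Write the requested register set(s) back to the thread, but only when the
// cached state for that set is valid.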
3058 kern_return_t DNBArchImplX86_64::SetRegisterState(int set) {
3059 // Make sure we have a valid context to set.
3060 if (RegisterSetStateIsValid(set)) {
3061 switch (set) {
3062 case e_regSetALL:
3063 return SetGPRState() | SetFPUState() | SetEXCState();
3064 case e_regSetGPR:
3065 return SetGPRState();
3066 case e_regSetFPU:
3067 return SetFPUState();
3068 case e_regSetEXC:
3069 return SetEXCState();
3070 default:
3071 break;
3074 return KERN_INVALID_ARGUMENT;
3077 bool DNBArchImplX86_64::RegisterSetStateIsValid(int set) const {
3078 return m_state.RegsAreValid(set);
3081 #endif // #if defined (__i386__) || defined (__x86_64__)