/* Copyright (c) 2007, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ---
 * Author: Joi Sigurdsson
 * Author: Scott Francis
 *
 * Implementation of PreamblePatcher
 */

#include "preamble_patcher.h"

#include "mini_disassembler.h"

// compatibility shims
#include "base/logging.h"

// Definitions of assembly statements we need
#define ASM_JMP32REL 0xE9
#define ASM_INT3 0xCC
#define ASM_JMP32ABS_0 0xFF
#define ASM_JMP32ABS_1 0x25
#define ASM_JMP8REL 0xEB
#define ASM_JCC32REL_0 0x0F
#define ASM_JCC32REL_1_MASK 0x80
#define ASM_NOP 0x90
// X64 opcodes
#define ASM_REXW 0x48
#define ASM_MOVRAX_IMM 0xB8
#define ASM_JMP 0xFF
#define ASM_JMP_RAX 0xE0
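
// For orientation (an illustrative note added here, not part of the original
// comments): on 32-bit x86 the patch written over a target is a single
// relative jump,
//
//    E9 xx xx xx xx       jmp rel32      ; rel32 = dest - (addr_of_jmp + 5)
//
// while the 64-bit trampoline assembled from the opcodes above is
//
//    90                   nop                     (ASM_NOP)
//    48 B8 <imm64>        mov rax, <8-byte addr>  (ASM_REXW, ASM_MOVRAX_IMM)
//    FF E0                jmp rax                 (ASM_JMP, ASM_JMP_RAX)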

namespace sidestep {

PreamblePatcher::PreamblePage* PreamblePatcher::preamble_pages_ = NULL;
long PreamblePatcher::granularity_ = 0;
long PreamblePatcher::pagesize_ = 0;
bool PreamblePatcher::initialized_ = false;

static const unsigned int kPreamblePageMagic = 0x4347414D;  // "MAGC"

// Handle a special case that we see with functions that point into an
// IAT table (including functions linked statically into the
// application): these functions already start with ASM_JMP32*.  For
// instance, malloc() might be implemented as a JMP to __malloc().
// This function follows the initial JMPs for us, until we get to the
// place where the actual code is defined.  If we get to STOP_BEFORE,
// we return the address before stop_before.  The stop_before_trampoline
// flag is used in 64-bit mode.  If true, we will return the address
// before a trampoline is detected.  Trampolines are defined as:
//
//    nop
//    mov rax, <replacement_function>
//    jmp rax
//
// See PreamblePatcher::RawPatchWithStub for more information.
void* PreamblePatcher::ResolveTargetImpl(unsigned char* target,
                                         unsigned char* stop_before,
                                         bool stop_before_trampoline) {
  if (target == NULL)
    return NULL;
  while (1) {
    unsigned char* new_target;
    if (target[0] == ASM_JMP32REL) {
      // target[1-4] holds the place the jmp goes to, but it's
      // relative to the next instruction.
      int relative_offset;  // Windows guarantees int is 4 bytes
      SIDESTEP_ASSERT(sizeof(relative_offset) == 4);
      memcpy(reinterpret_cast<void*>(&relative_offset),
             reinterpret_cast<void*>(target + 1), 4);
      new_target = target + 5 + relative_offset;
    } else if (target[0] == ASM_JMP8REL) {
      // Visual Studio 7.1 implements new[] as an 8 bit jump to new
      signed char relative_offset;
      memcpy(reinterpret_cast<void*>(&relative_offset),
             reinterpret_cast<void*>(target + 1), 1);
      new_target = target + 2 + relative_offset;
    } else if (target[0] == ASM_JMP32ABS_0 &&
               target[1] == ASM_JMP32ABS_1) {
      // Visual Studio seems to sometimes do it this way instead of the
      // previous way.  Not sure what the rules are, but it was happening
      // with operator new in some binaries.
      void** new_target_v;
      if (kIs64BitBinary) {
        // In 64-bit mode JMPs are RIP-relative, not absolute
        int target_offset;
        memcpy(reinterpret_cast<void*>(&target_offset),
               reinterpret_cast<void*>(target + 2), 4);
        new_target_v = reinterpret_cast<void**>(target + target_offset + 6);
      } else {
        SIDESTEP_ASSERT(sizeof(new_target) == 4);
        memcpy(&new_target_v, reinterpret_cast<void*>(target + 2), 4);
      }
      new_target = reinterpret_cast<unsigned char*>(*new_target_v);
    } else {
      break;
    }
    if (new_target == stop_before)
      break;
    if (stop_before_trampoline && *new_target == ASM_NOP
        && new_target[1] == ASM_REXW
        && new_target[2] == ASM_MOVRAX_IMM)
      break;
    target = new_target;
  }
  return target;
}
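
// An illustrative example (not from the original source): if malloc() is
// reached through an import thunk,
//
//    malloc:    FF 25 xx xx xx xx    jmp [__imp_malloc]
//
// ResolveTargetImpl(malloc, ...) dereferences the IAT slot and keeps
// following jumps until it lands on the real implementation, which is the
// address the patcher actually needs to modify.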

// Special case scoped_ptr to avoid dependency on scoped_ptr below.
class DeleteUnsignedCharArray {
 public:
  DeleteUnsignedCharArray(unsigned char* array) : array_(array) {
  }

  ~DeleteUnsignedCharArray() {
    if (array_) {
      PreamblePatcher::FreePreambleBlock(array_);
    }
  }

  unsigned char* Release() {
    unsigned char* temp = array_;
    array_ = NULL;
    return temp;
  }

 private:
  unsigned char* array_;
};

SideStepError PreamblePatcher::RawPatchWithStubAndProtections(
    void* target_function, void* replacement_function,
    unsigned char* preamble_stub, unsigned long stub_size,
    unsigned long* bytes_needed) {
  // We need to be able to write to a process-local copy of the first
  // MAX_PREAMBLE_STUB_SIZE bytes of target_function
  DWORD old_target_function_protect = 0;
  BOOL succeeded = ::VirtualProtect(reinterpret_cast<void*>(target_function),
                                    MAX_PREAMBLE_STUB_SIZE,
                                    PAGE_EXECUTE_READWRITE,
                                    &old_target_function_protect);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to make page containing target function "
                    "copy-on-write.");
    return SIDESTEP_ACCESS_DENIED;
  }

  SideStepError error_code = RawPatchWithStub(target_function,
                                              replacement_function,
                                              preamble_stub,
                                              stub_size,
                                              bytes_needed);

  // Restore the protection of the first MAX_PREAMBLE_STUB_SIZE bytes of
  // pTargetFunction to what they were before we started goofing around.
  // We do this regardless of whether the patch succeeded or not.
  succeeded = ::VirtualProtect(reinterpret_cast<void*>(target_function),
                               MAX_PREAMBLE_STUB_SIZE,
                               old_target_function_protect,
                               &old_target_function_protect);
  if (!succeeded) {
    SIDESTEP_ASSERT(false &&
                    "Failed to restore protection to target function.");
    // We must not return an error here because the function has
    // likely actually been patched, and returning an error might
    // cause our client code not to unpatch it.  So we just keep
    // going.
  }

  if (SIDESTEP_SUCCESS != error_code) {  // Testing RawPatchWithStub, above
    SIDESTEP_ASSERT(false);
    return error_code;
  }

  // Flush the instruction cache to make sure the processor doesn't execute the
  // old version of the instructions (before our patch).
  //
  // FlushInstructionCache is actually a no-op at least on
  // single-processor XP machines.  I'm not sure why this is so, but
  // it is, yet I want to keep the call to the API here for
  // correctness in case there is a difference in some variants of
  // Windows/hardware.
  succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
                                      target_function,
                                      MAX_PREAMBLE_STUB_SIZE);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to flush instruction cache.");
    // We must not return an error here because the function has actually
    // been patched, and returning an error would likely cause our client
    // code not to unpatch it.  So we just keep going.
  }

  return SIDESTEP_SUCCESS;
}

SideStepError PreamblePatcher::RawPatch(void* target_function,
                                        void* replacement_function,
                                        void** original_function_stub) {
  if (!target_function || !replacement_function || !original_function_stub ||
      (*original_function_stub) || target_function == replacement_function) {
    SIDESTEP_ASSERT(false && "Preconditions not met");
    return SIDESTEP_INVALID_PARAMETER;
  }

  BOOL succeeded = FALSE;

  // First, deal with a special case that we see with functions that
  // point into an IAT table (including functions linked statically
  // into the application): these functions already start with
  // ASM_JMP32REL.  For instance, malloc() might be implemented as a
  // JMP to __malloc().  In that case, we replace the destination of
  // the JMP (__malloc), rather than the JMP itself (malloc).  This
  // way we get the correct behavior no matter how malloc gets called.
  void* new_target = ResolveTarget(target_function);
  if (new_target != target_function) {
    target_function = new_target;
  }

  // In 64-bit mode, preamble_stub must be within 2GB of target function
  // so that if target contains a jump, we can translate it.
  unsigned char* preamble_stub = AllocPreambleBlockNear(target_function);
  if (!preamble_stub) {
    SIDESTEP_ASSERT(false && "Unable to allocate preamble-stub.");
    return SIDESTEP_INSUFFICIENT_BUFFER;
  }

  // Frees the array at end of scope.
  DeleteUnsignedCharArray guard_preamble_stub(preamble_stub);

  SideStepError error_code = RawPatchWithStubAndProtections(
      target_function, replacement_function, preamble_stub,
      MAX_PREAMBLE_STUB_SIZE, NULL);

  if (SIDESTEP_SUCCESS != error_code) {
    SIDESTEP_ASSERT(false);
    return error_code;
  }

  // Flush the instruction cache to make sure the processor doesn't execute the
  // old version of the instructions (before our patch).
  //
  // FlushInstructionCache is actually a no-op at least on
  // single-processor XP machines.  I'm not sure why this is so, but
  // it is, yet I want to keep the call to the API here for
  // correctness in case there is a difference in some variants of
  // Windows/hardware.
  succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
                                      target_function,
                                      MAX_PREAMBLE_STUB_SIZE);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to flush instruction cache.");
    // We must not return an error here because the function has actually
    // been patched, and returning an error would likely cause our client
    // code not to unpatch it.  So we just keep going.
  }

  SIDESTEP_LOG("PreamblePatcher::RawPatch successfully patched.");

  // detach the scoped pointer so the memory is not freed
  *original_function_stub =
      reinterpret_cast<void*>(guard_preamble_stub.Release());
  return SIDESTEP_SUCCESS;
}
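
// Illustrative call pattern (hypothetical names, not part of this file):
//
//   static void* g_original_malloc = NULL;
//
//   void* MyMalloc(size_t size);  // replacement with a matching signature
//
//   // After this call, calls to malloc() land in MyMalloc(), and
//   // g_original_malloc points at a stub that runs the saved preamble
//   // and then jumps back into the original malloc().
//   PreamblePatcher::RawPatch(reinterpret_cast<void*>(&malloc),
//                             reinterpret_cast<void*>(&MyMalloc),
//                             &g_original_malloc);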

SideStepError PreamblePatcher::Unpatch(void* target_function,
                                       void* replacement_function,
                                       void* original_function_stub) {
  SIDESTEP_ASSERT(target_function && replacement_function &&
                  original_function_stub);
  if (!target_function || !replacement_function ||
      !original_function_stub) {
    return SIDESTEP_INVALID_PARAMETER;
  }

  // Before unpatching, target_function should be a JMP to
  // replacement_function.  If it's not, then either it's an error, or
  // we're falling into the case where the original instruction was a
  // JMP, and we patched the jumped_to address rather than the JMP
  // itself.  (For instance, if malloc() is just a JMP to __malloc(),
  // we patched __malloc() and not malloc().)
  unsigned char* target = reinterpret_cast<unsigned char*>(target_function);
  target = reinterpret_cast<unsigned char*>(
      ResolveTargetImpl(
          target, reinterpret_cast<unsigned char*>(replacement_function),
          true));
  // We should end at the function we patched.  When we patch, we insert
  // an ASM_JMP32REL instruction, so look for that as a sanity check.
  if (target[0] != ASM_JMP32REL) {
    SIDESTEP_ASSERT(false &&
                    "target_function does not look like it was patched.");
    return SIDESTEP_INVALID_PARAMETER;
  }

  const unsigned int kRequiredTargetPatchBytes = 5;

  // We need to be able to write to a process-local copy of the first
  // kRequiredTargetPatchBytes bytes of target_function
  DWORD old_target_function_protect = 0;
  BOOL succeeded = ::VirtualProtect(reinterpret_cast<void*>(target),
                                    kRequiredTargetPatchBytes,
                                    PAGE_EXECUTE_READWRITE,
                                    &old_target_function_protect);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to make page containing target function "
                    "copy-on-write.");
    return SIDESTEP_ACCESS_DENIED;
  }

  unsigned char* preamble_stub = reinterpret_cast<unsigned char*>(
      original_function_stub);

  // Disassemble the preamble of stub and copy the bytes back to target.
  // If we've done any conditional jumps in the preamble we need to convert
  // them back to the original REL8 jumps in the target.
  MiniDisassembler disassembler;
  unsigned int preamble_bytes = 0;
  unsigned int target_bytes = 0;
  while (target_bytes < kRequiredTargetPatchBytes) {
    unsigned int cur_bytes = 0;
    InstructionType instruction_type =
        disassembler.Disassemble(preamble_stub + preamble_bytes, cur_bytes);
    if (IT_JUMP == instruction_type) {
      unsigned int jump_bytes = 0;
      SideStepError jump_ret = SIDESTEP_JUMP_INSTRUCTION;
      if (IsNearConditionalJump(preamble_stub + preamble_bytes, cur_bytes) ||
          IsNearRelativeJump(preamble_stub + preamble_bytes, cur_bytes) ||
          IsNearAbsoluteCall(preamble_stub + preamble_bytes, cur_bytes) ||
          IsNearRelativeCall(preamble_stub + preamble_bytes, cur_bytes)) {
        jump_ret = PatchNearJumpOrCall(preamble_stub + preamble_bytes,
                                       cur_bytes, target + target_bytes,
                                       &jump_bytes, MAX_PREAMBLE_STUB_SIZE);
      }
      if (jump_ret == SIDESTEP_JUMP_INSTRUCTION) {
        SIDESTEP_ASSERT(false &&
                        "Found unsupported jump instruction in stub!!");
        return SIDESTEP_UNSUPPORTED_INSTRUCTION;
      }
      target_bytes += jump_bytes;
    } else if (IT_GENERIC == instruction_type) {
      if (IsMovWithDisplacement(preamble_stub + preamble_bytes, cur_bytes)) {
        unsigned int mov_bytes = 0;
        if (PatchMovWithDisplacement(preamble_stub + preamble_bytes, cur_bytes,
                                     target + target_bytes, &mov_bytes,
                                     MAX_PREAMBLE_STUB_SIZE)
            != SIDESTEP_SUCCESS) {
          SIDESTEP_ASSERT(false &&
                          "Found unsupported generic instruction in stub!!");
          return SIDESTEP_UNSUPPORTED_INSTRUCTION;
        }
        target_bytes += mov_bytes;
      } else {
        memcpy(reinterpret_cast<void*>(target + target_bytes),
               reinterpret_cast<void*>(reinterpret_cast<unsigned char*>(
                   original_function_stub) + preamble_bytes), cur_bytes);
        target_bytes += cur_bytes;
      }
    } else {
      SIDESTEP_ASSERT(false &&
                      "Found unsupported instruction in stub!!");
      return SIDESTEP_UNSUPPORTED_INSTRUCTION;
    }
    preamble_bytes += cur_bytes;
  }

  FreePreambleBlock(reinterpret_cast<unsigned char*>(original_function_stub));

  // Restore the protection of the first kRequiredTargetPatchBytes bytes of
  // target to what they were before we started goofing around.
  succeeded = ::VirtualProtect(reinterpret_cast<void*>(target),
                               kRequiredTargetPatchBytes,
                               old_target_function_protect,
                               &old_target_function_protect);

  // Flush the instruction cache to make sure the processor doesn't execute the
  // old version of the instructions (before our patch).
  //
  // See comment on FlushInstructionCache elsewhere in this file.
  succeeded = ::FlushInstructionCache(::GetCurrentProcess(),
                                      target,
                                      MAX_PREAMBLE_STUB_SIZE);
  if (!succeeded) {
    SIDESTEP_ASSERT(false && "Failed to flush instruction cache.");
    return SIDESTEP_UNEXPECTED;
  }

  SIDESTEP_LOG("PreamblePatcher::Unpatch successfully unpatched.");
  return SIDESTEP_SUCCESS;
}

void PreamblePatcher::Initialize() {
  if (!initialized_) {
    SYSTEM_INFO si = { 0 };
    ::GetSystemInfo(&si);
    granularity_ = si.dwAllocationGranularity;
    pagesize_ = si.dwPageSize;
    initialized_ = true;
  }
}

unsigned char* PreamblePatcher::AllocPreambleBlockNear(void* target) {
  PreamblePage* preamble_page = preamble_pages_;
  while (preamble_page != NULL) {
    if (preamble_page->free_ != NULL) {
      __int64 val = reinterpret_cast<__int64>(preamble_page) -
          reinterpret_cast<__int64>(target);
      if ((val > 0 && val + pagesize_ <= INT_MAX) ||
          (val < 0 && val >= INT_MIN)) {
        break;
      }
    }
    preamble_page = preamble_page->next_;
  }

  // The free_ member of the page is used to store the next available block
  // of memory to use or NULL if there are no chunks available, in which case
  // we'll allocate a new page.
  if (preamble_page == NULL || preamble_page->free_ == NULL) {
    // Create a new preamble page and initialize the free list
    preamble_page = reinterpret_cast<PreamblePage*>(AllocPageNear(target));
    SIDESTEP_ASSERT(preamble_page != NULL && "Could not allocate page!");
    void** pp = &preamble_page->free_;
    unsigned char* ptr = reinterpret_cast<unsigned char*>(preamble_page) +
        MAX_PREAMBLE_STUB_SIZE;
    unsigned char* limit = reinterpret_cast<unsigned char*>(preamble_page) +
        pagesize_;
    while (ptr < limit) {
      *pp = ptr;
      pp = reinterpret_cast<void**>(ptr);
      ptr += MAX_PREAMBLE_STUB_SIZE;
    }
    *pp = NULL;
    // Insert the new page into the list
    preamble_page->magic_ = kPreamblePageMagic;
    preamble_page->next_ = preamble_pages_;
    preamble_pages_ = preamble_page;
  }
  unsigned char* ret = reinterpret_cast<unsigned char*>(preamble_page->free_);
  preamble_page->free_ = *(reinterpret_cast<void**>(preamble_page->free_));
  return ret;
}
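
// Layout of a preamble page, for illustration (a sketch inferred from the
// code above, not an authoritative diagram):
//
//   +------------------+------------+------------+-----+
//   | PreamblePage hdr | stub block | stub block | ... |
//   +------------------+------------+------------+-----+
//    <---- each chunk is MAX_PREAMBLE_STUB_SIZE bytes ->
//
// Free chunks form an intrusive singly-linked list: each free chunk stores
// the address of the next free chunk in its first sizeof(void*) bytes, and
// preamble_page->free_ points at the head.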

void PreamblePatcher::FreePreambleBlock(unsigned char* block) {
  SIDESTEP_ASSERT(block != NULL);
  SIDESTEP_ASSERT(granularity_ != 0);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(block);
  ptr -= ptr & (granularity_ - 1);
  PreamblePage* preamble_page = reinterpret_cast<PreamblePage*>(ptr);
  SIDESTEP_ASSERT(preamble_page->magic_ == kPreamblePageMagic);
  *(reinterpret_cast<void**>(block)) = preamble_page->free_;
  preamble_page->free_ = block;
}

void* PreamblePatcher::AllocPageNear(void* target) {
  MEMORY_BASIC_INFORMATION mbi = { 0 };
  if (!::VirtualQuery(target, &mbi, sizeof(mbi))) {
    SIDESTEP_ASSERT(false && "VirtualQuery failed on target address");
    return 0;
  }

  if (initialized_ == false) {
    PreamblePatcher::Initialize();
    SIDESTEP_ASSERT(initialized_);
  }

  void* pv = NULL;
  unsigned char* allocation_base = reinterpret_cast<unsigned char*>(
      mbi.AllocationBase);
  __int64 i = 1;

  bool high_target = reinterpret_cast<__int64>(target) > UINT_MAX;
  while (pv == NULL) {
    __int64 val = reinterpret_cast<__int64>(allocation_base) -
        (i * granularity_);
    if (high_target &&
        reinterpret_cast<__int64>(target) - val > INT_MAX) {
      // We're further than 2GB from the target
      break;
    } else if (val <= 0) {
      // Less than 0
      break;
    }
    pv = ::VirtualAlloc(reinterpret_cast<void*>(allocation_base -
                            (i++ * granularity_)),
                        pagesize_, MEM_COMMIT | MEM_RESERVE,
                        PAGE_EXECUTE_READWRITE);
  }

  // We couldn't allocate low, try to allocate high
  if (pv == NULL) {
    i = 1;
    // Round up to the next multiple of page granularity
    allocation_base = reinterpret_cast<unsigned char*>(
        (reinterpret_cast<__int64>(target) &
        (~(granularity_ - 1))) + granularity_);
    while (pv == NULL) {
      __int64 val = reinterpret_cast<__int64>(allocation_base) +
          (i * granularity_) - reinterpret_cast<__int64>(target);
      if (val > INT_MAX || val < 0) {
        // We're too far or we overflowed
        break;
      }
      pv = ::VirtualAlloc(reinterpret_cast<void*>(allocation_base +
                              (i++ * granularity_)),
                          pagesize_, MEM_COMMIT | MEM_RESERVE,
                          PAGE_EXECUTE_READWRITE);
    }
  }
  return pv;
}
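
// A brief note on the search above (an illustrative summary, not from the
// original comments): starting at the target's allocation base, the loop
// probes downward one allocation-granularity step (typically 64KB) at a
// time until VirtualAlloc succeeds, then falls back to probing upward,
// giving up once the candidate page would be more than INT_MAX (2GB) away
// and therefore unreachable by a rel32 jump.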

bool PreamblePatcher::IsShortConditionalJump(
    unsigned char* target,
    unsigned int instruction_size) {
  return (*(target) & 0x70) == 0x70 && instruction_size == 2;
}

bool PreamblePatcher::IsNearConditionalJump(
    unsigned char* target,
    unsigned int instruction_size) {
  return *(target) == 0xf && (*(target + 1) & 0x80) == 0x80 &&
      instruction_size == 6;
}

bool PreamblePatcher::IsNearRelativeJump(
    unsigned char* target,
    unsigned int instruction_size) {
  return *(target) == 0xe9 && instruction_size == 5;
}

bool PreamblePatcher::IsNearAbsoluteCall(
    unsigned char* target,
    unsigned int instruction_size) {
  return *(target) == 0xff && (*(target + 1) & 0x10) == 0x10 &&
      instruction_size == 6;
}

bool PreamblePatcher::IsNearRelativeCall(
    unsigned char* target,
    unsigned int instruction_size) {
  return *(target) == 0xe8 && instruction_size == 5;
}
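
// Encodings recognized by the predicates above (standard x86/x64 forms,
// listed here for orientation):
//
//   7x rel8          short conditional jump, 2 bytes   (Jcc rel8)
//   0F 8x rel32      near conditional jump, 6 bytes    (Jcc rel32)
//   E9 rel32         near relative jump, 5 bytes       (JMP rel32)
//   FF 15 disp32     near absolute indirect call, 6 bytes (CALL [mem], /2)
//   E8 rel32         near relative call, 5 bytes       (CALL rel32)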

bool PreamblePatcher::IsMovWithDisplacement(
    unsigned char* target,
    unsigned int instruction_size) {
  // In this case, the ModRM byte's mod field will be 0 and r/m will be 101b (5)
  return instruction_size == 7 && *target == 0x48 && *(target + 1) == 0x8b &&
      (*(target + 2) >> 6) == 0 && (*(target + 2) & 0x7) == 5;
}
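
// Example of an instruction matched above (illustrative):
//
//   48 8B 0D xx xx xx xx    mov rcx, [rip + disp32]
//
// i.e. a REX.W-prefixed MOV (8B) whose ModRM byte has mod == 00 and
// r/m == 101b, which on x64 means the operand is RIP-relative and must be
// re-based when the instruction is copied into the stub.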

SideStepError PreamblePatcher::PatchShortConditionalJump(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  // The rel8 displacement is signed, so sign-extend it before the pointer
  // arithmetic; otherwise backward jumps are miscomputed.
  unsigned char* original_jump_dest =
      (source + 2) + static_cast<signed char>(source[1]);
  unsigned char* stub_jump_from = target + 6;
  __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
  if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
                    "Unable to fix up short jump because target"
                    " is too far away.");
    return SIDESTEP_JUMP_INSTRUCTION;
  }

  *target_bytes = 6;
  if (target_size > *target_bytes) {
    // Convert the short jump to a near jump.
    //
    // 0f 8x xx xx xx xx = Jcc rel32off
    unsigned short jmpcode = ((0x80 | (source[0] & 0xf)) << 8) | 0x0f;
    memcpy(reinterpret_cast<void*>(target),
           reinterpret_cast<void*>(&jmpcode), 2);
    memcpy(reinterpret_cast<void*>(target + 2),
           reinterpret_cast<void*>(&fixup_jump_offset), 4);
  }

  return SIDESTEP_SUCCESS;
}
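
// Worked example for the conversion above (values are illustrative): a
// short jump in the original preamble such as
//
//   7C F0               jl -16     ; source[0] = 0x7C, rel8 = -16
//
// becomes, in the stub,
//
//   0F 8C xx xx xx xx   jl rel32   ; second opcode byte 0x80 | 0xC, rel32
//                                  ; recomputed as dest - (target + 6)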

SideStepError PreamblePatcher::PatchNearJumpOrCall(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  SIDESTEP_ASSERT(instruction_size == 5 || instruction_size == 6);
  unsigned int jmp_offset_in_instruction = instruction_size == 5 ? 1 : 2;
  unsigned char* original_jump_dest = reinterpret_cast<unsigned char*>(
      reinterpret_cast<__int64>(source + instruction_size) +
      *(reinterpret_cast<int*>(source + jmp_offset_in_instruction)));
  unsigned char* stub_jump_from = target + instruction_size;
  __int64 fixup_jump_offset = original_jump_dest - stub_jump_from;
  if (fixup_jump_offset > INT_MAX || fixup_jump_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
                    "Unable to fix up near jump because target"
                    " is too far away.");
    return SIDESTEP_JUMP_INSTRUCTION;
  }

  if ((fixup_jump_offset < SCHAR_MAX && fixup_jump_offset > SCHAR_MIN)) {
    *target_bytes = 2;
    if (target_size > *target_bytes) {
      // If the new offset is in range, use a short jump instead of a near jump.
      if (source[0] == ASM_JCC32REL_0 &&
          (source[1] & ASM_JCC32REL_1_MASK) == ASM_JCC32REL_1_MASK) {
        unsigned short jmpcode = (static_cast<unsigned char>(
            fixup_jump_offset) << 8) | (0x70 | (source[1] & 0xf));
        memcpy(reinterpret_cast<void*>(target),
               reinterpret_cast<void*>(&jmpcode),
               2);
      } else {
        target[0] = ASM_JMP8REL;
        target[1] = static_cast<unsigned char>(fixup_jump_offset);
      }
    }
  } else {
    *target_bytes = instruction_size;
    if (target_size > *target_bytes) {
      memcpy(reinterpret_cast<void*>(target),
             reinterpret_cast<void*>(source),
             jmp_offset_in_instruction);
      memcpy(reinterpret_cast<void*>(target + jmp_offset_in_instruction),
             reinterpret_cast<void*>(&fixup_jump_offset),
             4);
    }
  }

  return SIDESTEP_SUCCESS;
}
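
// The offset fix-up above is plain relocation arithmetic (numbers below
// are illustrative): if the stub copy of a 5-byte JMP sits at 0x1000 and
// the original destination is 0x2000, the rewritten displacement is
//
//   rel32 = original_dest - (target + instruction_size)
//         = 0x2000 - (0x1000 + 5) = 0xFFB
//
// so executing the relocated jump from the stub still lands on 0x2000.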

SideStepError PreamblePatcher::PatchMovWithDisplacement(
    unsigned char* source,
    unsigned int instruction_size,
    unsigned char* target,
    unsigned int* target_bytes,
    unsigned int target_size) {
  SIDESTEP_ASSERT(instruction_size == 7);
  const int mov_offset_in_instruction = 3;  // 0x48 0x8b 0x0d <offset>
  unsigned char* original_mov_dest = reinterpret_cast<unsigned char*>(
      reinterpret_cast<__int64>(source + instruction_size) +
      *(reinterpret_cast<int*>(source + mov_offset_in_instruction)));
  unsigned char* stub_mov_from = target + instruction_size;
  __int64 fixup_mov_offset = original_mov_dest - stub_mov_from;
  if (fixup_mov_offset > INT_MAX || fixup_mov_offset < INT_MIN) {
    SIDESTEP_ASSERT(false &&
                    "Unable to fix up near MOV because target is too far away.");
    return SIDESTEP_UNEXPECTED;
  }

  *target_bytes = instruction_size;
  if (target_size > *target_bytes) {
    memcpy(reinterpret_cast<void*>(target),
           reinterpret_cast<void*>(source),
           mov_offset_in_instruction);
    memcpy(reinterpret_cast<void*>(target + mov_offset_in_instruction),
           reinterpret_cast<void*>(&fixup_mov_offset),
           4);
  }

  return SIDESTEP_SUCCESS;
}

}  // namespace sidestep