/* -*- mode: C; c-basic-offset: 3; -*- */
/*--------------------------------------------------------------------*/
/*--- MemCheck: some non-generic asm implementations of mc_main.c  ---*/
/*--- functions                                                    ---*/
/*---                                                mc_main_asm.c ---*/
/*--------------------------------------------------------------------*/
/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2018 Julian Seward
   jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
/* Having these in mc_main.c gives undefined references at link time,
   when compiling with lto. Having them in a separate file solves this.
   Also, for some toolchains, we might need to disable lto. */
// A bunch of includes only needed for mc_include.h
#include "pub_tool_basics.h"
#include "pub_tool_poolalloc.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_tooliface.h"

#include "mc_include.h"
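
/* The assembly below implements the LOADV fast-path helpers that are
   declared in mc_include.h.  For orientation only -- the authoritative
   prototypes live in mc_include.h, and MC_(x) expands to vgMemCheck_##x,
   which gives the label names used below -- they have this shape:

      VG_REGPARM(1) ULong MC_(helperc_LOADV64le) ( Addr a );
      VG_REGPARM(1) UWord MC_(helperc_LOADV32le) ( Addr a );
*/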

// Non-generic assembly for arm32-linux
#if ENABLE_ASSEMBLY_HELPERS && defined(PERF_FAST_LOADV) \
    && defined(VGP_arm_linux)
__asm__( /* Derived from the 32 bit assembly helper */
".text                                  \n"
".align 2                               \n"
".global vgMemCheck_helperc_LOADV64le   \n"
".type   vgMemCheck_helperc_LOADV64le, %function \n"
"vgMemCheck_helperc_LOADV64le:          \n"
"      tst    r0, #7                    \n"
"      movw   r3, #:lower16:primary_map \n"
"      bne    .LLV64LEc4                \n" // if misaligned
"      lsr    r2, r0, #16               \n"
"      movt   r3, #:upper16:primary_map \n"
"      ldr    r2, [r3, r2, lsl #2]      \n"
"      uxth   r1, r0                    \n" // r1 is 0-(16)-0 X-(13)-X 000
"      movw   r3, #0xAAAA               \n"
"      lsr    r1, r1, #2                \n" // r1 is 0-(16)-0 00 X-(13)-X 0
"      ldrh   r1, [r2, r1]              \n"
"      cmp    r1, r3                    \n" // 0xAAAA == VA_BITS16_DEFINED
"      bne    .LLV64LEc0                \n" // if !all_defined
"      mov    r1, #0x0                  \n" // 0x0 == V_BITS32_DEFINED
"      mov    r0, #0x0                  \n" // 0x0 == V_BITS32_DEFINED
"      bx     lr                        \n"
".LLV64LEc0:                            \n"
"      movw   r3, #0x5555               \n"
"      cmp    r1, r3                    \n" // 0x5555 == VA_BITS16_UNDEFINED
"      bne    .LLV64LEc4                \n" // if !all_undefined
"      mov    r1, #0xFFFFFFFF           \n" // 0xFFFFFFFF == V_BITS32_UNDEFINED
"      mov    r0, #0xFFFFFFFF           \n" // 0xFFFFFFFF == V_BITS32_UNDEFINED
"      bx     lr                        \n"
".LLV64LEc4:                            \n"
"      push   {r4, lr}                  \n"
"      mov    r2, #0                    \n"
"      mov    r1, #64                   \n"
"      bl     mc_LOADVn_slow            \n"
"      pop    {r4, pc}                  \n"
".size vgMemCheck_helperc_LOADV64le, .-vgMemCheck_helperc_LOADV64le \n"
".previous\n"
);

#elif ENABLE_ASSEMBLY_HELPERS && defined(PERF_FAST_LOADV) \
      && (defined(VGP_x86_linux) || defined(VGP_x86_solaris))
92 ".global vgMemCheck_helperc_LOADV64le\n"
93 ".type vgMemCheck_helperc_LOADV64le, @function\n"
94 "vgMemCheck_helperc_LOADV64le:\n"
96 " jne .LLV64LE2\n" /* jump if not aligned */
100 " mov primary_map(,%ecx,4), %ecx\n"
102 " movzwl (%ecx,%edx,2), %edx\n"
103 " cmp $0xaaaa, %edx\n"
104 " jne .LLV64LE1\n" /* jump if not all defined */
105 " xor %eax, %eax\n" /* return 0 in edx:eax */
109 " cmp $0x5555, %edx\n"
110 " jne .LLV64LE2\n" /* jump if not all undefined */
111 " or $0xffffffff, %eax\n" /* else return all bits set in edx:eax */
112 " or $0xffffffff, %edx\n"
115 " xor %ecx, %ecx\n" /* tail call to mc_LOADVn_slow(a, 64, 0) */
117 " jmp mc_LOADVn_slow\n"
118 ".size vgMemCheck_helperc_LOADV64le, .-vgMemCheck_helperc_LOADV64le\n"

#else
// Generic for all platforms except {arm32,x86}-linux and x86-solaris
// is in mc_main.c
#endif
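
/* Both assembly variants above implement the same fast path.  The
   following is a hedged C-level sketch of it, kept out of the build
   with "#if 0".  The function name is illustrative; the other names
   follow the generic mc_LOADV64 in mc_main.c, whose address-range
   check is omitted here for brevity. */
#if 0
static ULong LOADV64le_sketch ( Addr a )
{
   SecMap* sm;
   UWord   vabits16;

   if (a & 7)                         /* misaligned: take the slow path */
      return mc_LOADVn_slow( a, 64, /*isBigEndian*/False );

   sm = primary_map[a >> 16];         /* top 16 address bits pick a SecMap */
   /* 16 VA bits describe 8 bytes; for 8-aligned a, that halfword sits at
      byte offset (a & 0xFFFF) >> 2 inside sm->vabits8[]. */
   vabits16 = *(UShort*)&sm->vabits8[ (a & 0xFFFF) >> 2 ];

   if (vabits16 == VA_BITS16_DEFINED)   return V_BITS64_DEFINED;
   if (vabits16 == VA_BITS16_UNDEFINED) return V_BITS64_UNDEFINED;
   return mc_LOADVn_slow( a, 64, False ); /* partially defined: slow path */
}
#endif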

// Non-generic assembly for arm32-linux
#if ENABLE_ASSEMBLY_HELPERS && defined(PERF_FAST_LOADV) \
    && defined(VGP_arm_linux)
__asm__( /* Derived from NCode template */
".text                                  \n"
".align 2                               \n"
".global vgMemCheck_helperc_LOADV32le   \n"
".type   vgMemCheck_helperc_LOADV32le, %function \n"
"vgMemCheck_helperc_LOADV32le:          \n"
"      tst    r0, #3                    \n" // 1
"      movw   r3, #:lower16:primary_map \n" // 1
"      bne    .LLV32LEc4                \n" // 2  if misaligned
"      lsr    r2, r0, #16               \n" // 3
"      movt   r3, #:upper16:primary_map \n" // 3
"      ldr    r2, [r3, r2, lsl #2]      \n" // 4
"      uxth   r1, r0                    \n" // 4
"      ldrb   r1, [r2, r1, lsr #2]      \n" // 5
"      cmp    r1, #0xAA                 \n" // 6  0xAA == VA_BITS8_DEFINED
"      bne    .LLV32LEc0                \n" // 7  if !all_defined
"      mov    r0, #0x0                  \n" // 8  0x0 == V_BITS32_DEFINED
"      bx     lr                        \n" // 9
".LLV32LEc0:                            \n"
"      cmp    r1, #0x55                 \n" // 0x55 == VA_BITS8_UNDEFINED
"      bne    .LLV32LEc4                \n" // if !all_undefined
"      mov    r0, #0xFFFFFFFF           \n" // 0xFFFFFFFF == V_BITS32_UNDEFINED
"      bx     lr                        \n"
".LLV32LEc4:                            \n"
"      push   {r4, lr}                  \n"
"      mov    r2, #0                    \n"
"      mov    r1, #32                   \n"
"      bl     mc_LOADVn_slow            \n"
"      pop    {r4, pc}                  \n"
".size vgMemCheck_helperc_LOADV32le, .-vgMemCheck_helperc_LOADV32le \n"
".previous\n"
);

#elif ENABLE_ASSEMBLY_HELPERS && defined(PERF_FAST_LOADV) \
      && (defined(VGP_x86_linux) || defined(VGP_x86_solaris))
169 ".global vgMemCheck_helperc_LOADV32le\n"
170 ".type vgMemCheck_helperc_LOADV32le, @function\n"
171 "vgMemCheck_helperc_LOADV32le:\n"
173 " jnz .LLV32LE2\n" /* jump if misaligned */
176 " mov primary_map(,%edx,4), %ecx\n"
177 " movzwl %ax, %edx\n"
179 " movzbl (%ecx,%edx,1), %edx\n"
180 " cmp $0xaa, %edx\n" /* compare to VA_BITS8_DEFINED */
181 " jne .LLV32LE1\n" /* jump if not completely defined */
182 " xor %eax, %eax\n" /* else return V_BITS32_DEFINED */
185 " cmp $0x55, %edx\n" /* compare to VA_BITS8_UNDEFINED */
186 " jne .LLV32LE2\n" /* jump if not completely undefined */
187 " or $0xffffffff, %eax\n" /* else return V_BITS32_UNDEFINED */
190 " xor %ecx, %ecx\n" /* tail call mc_LOADVn_slow(a, 32, 0) */
192 " jmp mc_LOADVn_slow\n"
193 ".size vgMemCheck_helperc_LOADV32le, .-vgMemCheck_helperc_LOADV32le\n"

#else
// Generic for all platforms except {arm32,x86}-linux and x86-solaris
// is in mc_main.c
#endif
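
/* And the corresponding hedged sketch for the 32-bit helper, again kept
   out of the build with "#if 0".  The function name is illustrative; the
   other names follow the generic mc_LOADV32 in mc_main.c.  Here a single
   VA-bits byte covers the 4 bytes being loaded, so a byte compare
   suffices (this matches the 32-bit platforms targeted by the asm). */
#if 0
static UWord LOADV32le_sketch ( Addr a )
{
   SecMap* sm;
   UWord   vabits8;

   if (a & 3)                         /* misaligned: take the slow path */
      return (UWord)mc_LOADVn_slow( a, 32, /*isBigEndian*/False );

   sm      = primary_map[a >> 16];    /* top 16 address bits pick a SecMap */
   vabits8 = sm->vabits8[ (a & 0xFFFF) >> 2 ];  /* 8 VA bits per 4 bytes */

   if (vabits8 == VA_BITS8_DEFINED)   return V_BITS32_DEFINED;
   if (vabits8 == VA_BITS8_UNDEFINED) return V_BITS32_UNDEFINED;
   return (UWord)mc_LOADVn_slow( a, 32, False );
}
#endif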

/*--------------------------------------------------------------------*/
/*--- end                                            mc_main_asm.c ---*/
/*--------------------------------------------------------------------*/