; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown -mattr=+lvi-load-hardening -mattr=+lvi-cfi -x86-experimental-lvi-inline-asm-hardening < %s -o %t.out 2> %t.err
; RUN: FileCheck %s --check-prefix=X86 < %t.out
; RUN: FileCheck %s --check-prefix=WARN < %t.err
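
; This test checks that LVI inline asm hardening inserts an lfence after
; every inline asm instruction that loads from memory, rewrites returns so
; that the loaded return address is fenced, and emits a warning for
; instructions that it cannot harden automatically.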

; Test module-level assembly
module asm "pop %rbx"
module asm "ret"
; X86: popq %rbx
; X86-NEXT: lfence
; X86-NEXT: shlq $0, (%rsp)
; X86-NEXT: lfence
; X86-NEXT: retq

; Function Attrs: noinline nounwind optnone uwtable
define dso_local void @test_inline_asm() {
entry:
; X86-LABEL: test_inline_asm:
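; Loads from RIP-relative and register-addressed memory should each be
; followed by an lfence.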
  call void asm sideeffect "mov 0x3fed(%rip),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: movq 16365(%rip), %rax
; X86-NEXT: lfence
  call void asm sideeffect "movdqa 0x0(%rip),%xmm0", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: movdqa (%rip), %xmm0
; X86-NEXT: lfence
  call void asm sideeffect "movslq 0x3e5d(%rip),%rbx", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: movslq 15965(%rip), %rbx
; X86-NEXT: lfence
  call void asm sideeffect "mov (%r12,%rax,8),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: movq (%r12,%rax,8), %rax
; X86-NEXT: lfence
  call void asm sideeffect "movq (24)(%rsi), %r11", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: movq 24(%rsi), %r11
; X86-NEXT: lfence
  call void asm sideeffect "cmove %r12,%rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: cmoveq %r12, %rax
; X86-NOT: lfence
  call void asm sideeffect "cmove (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: cmoveq (%r12), %rax
; X86-NEXT: lfence
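; Pops, exchanges, and compare-exchanges also load from memory and should be
; fenced.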
  call void asm sideeffect "pop %rbx", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: popq %rbx
; X86-NEXT: lfence
  call void asm sideeffect "popq %rbx", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: popq %rbx
; X86-NEXT: lfence
  call void asm sideeffect "xchg (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: xchgq %rax, (%r12)
; X86-NEXT: lfence
  call void asm sideeffect "cmpxchg %r12,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: cmpxchgq %r12, (%rax)
; X86-NEXT: lfence
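; Vector loads, including masked loads, are hardened the same way.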
  call void asm sideeffect "vpxor (%rcx,%rdx,1),%ymm1,%ymm0", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: vpxor (%rcx,%rdx), %ymm1, %ymm0
; X86-NEXT: lfence
  call void asm sideeffect "vpmuludq 0x20(%rsi),%ymm0,%ymm12", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: vpmuludq 32(%rsi), %ymm0, %ymm12
; X86-NEXT: lfence
  call void asm sideeffect "vpexpandq 0x40(%rdi),%zmm8{%k2}{z}", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: vpexpandq 64(%rdi), %zmm8 {%k2} {z}
; X86-NEXT: lfence
  call void asm sideeffect "addq (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: addq (%r12), %rax
; X86-NEXT: lfence
  call void asm sideeffect "subq Lpoly+0(%rip), %rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: subq Lpoly+0(%rip), %rax
; X86-NEXT: lfence
  call void asm sideeffect "adcq %r12,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: adcq %r12, (%rax)
; X86-NEXT: lfence
  call void asm sideeffect "negq (%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: negq (%rax)
; X86-NEXT: lfence
  call void asm sideeffect "incq %rax", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: incq %rax
; X86-NOT: lfence
  call void asm sideeffect "mulq (%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: mulq (%rax)
; X86-NEXT: lfence
  call void asm sideeffect "imulq (%rax),%rdx", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: imulq (%rax), %rdx
; X86-NEXT: lfence
  call void asm sideeffect "shlq $$1,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: shlq $1, (%rax)
; X86-NEXT: lfence
  call void asm sideeffect "shrq $$1,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: shrq $1, (%rax)
; X86-NEXT: lfence
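; REP-prefixed string instructions are not automatically hardened; the pass
; only emits a warning for them.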
  call void asm sideeffect "repz cmpsb %es:(%rdi),%ds:(%rsi)", "~{dirflag},~{fpsr},~{flags}"() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: repz cmpsb %es:(%rdi),%ds:(%rsi)
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
; X86: rep cmpsb %es:(%rdi), %ds:(%rsi)
; X86-NOT: lfence
  call void asm sideeffect "repnz scasb", "~{dirflag},~{fpsr},~{flags}"() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: repnz scasb
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
; X86: repne scasb %es:(%rdi), %al
; X86-NOT: lfence
  call void asm sideeffect "repnz", ""() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: repnz
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
  call void asm sideeffect "pinsrw $$0x6,(%eax),%xmm0", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: pinsrw $6, (%eax), %xmm0
; X86-NEXT: lfence
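; A ret loads its return target from the stack, so it is rewritten into a
; sequence that fences the return address before the actual return.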
  call void asm sideeffect "ret", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: shlq $0, (%rsp)
; X86-NEXT: lfence
; X86-NEXT: retq
; X86-NOT: lfence
  call void asm sideeffect "ret $$8", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: shlq $0, (%rsp)
; X86-NEXT: lfence
; X86-NEXT: retq $8
; X86-NOT: lfence
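; For memory-indirect jumps and calls the load cannot be separated from the
; branch, so no lfence is inserted; the pass only warns.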
  call void asm sideeffect "jmpq *(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: jmpq *(%rdx)
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
; X86: jmpq *(%rdx)
; X86-NOT: lfence
  call void asm sideeffect "jmpq *0x100(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: jmpq *0x100(%rdx)
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
; X86: jmpq *256(%rdx)
; X86-NOT: lfence
  call void asm sideeffect "callq *200(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
; WARN: warning: Instruction may be vulnerable to LVI
; WARN-NEXT: callq *200(%rdx)
; WARN-NEXT: ^
; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
; X86: callq *200(%rdx)
; X86-NOT: lfence
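; x87 loads from memory are fenced; x87 register-to-register operations are
; not.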
  call void asm sideeffect "fldt 0x8(%rbp)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: fldt 8(%rbp)
; X86-NEXT: lfence
  call void asm sideeffect "fld %st(0)", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: fld %st(0)
; X86-NOT: lfence
; Test assembler macros
  call void asm sideeffect ".macro mplus1 x\0Aincq (\5Cx)\0A.endm\0Amplus1 %rcx", "~{dirflag},~{fpsr},~{flags}"() #1
; X86: incq (%rcx)
; X86-NEXT: lfence
  ret void
}

attributes #1 = { nounwind }