; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX
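
; EXTRQ with immediate operands: extract a 3-bit field starting at bit offset 2
; of the low quadword of %xmm0.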
define <2 x i64> @test_extrqi(<2 x i64> %x) nounwind uwtable ssp {
; CHECK-LABEL: test_extrqi:
; CHECK:       # %bb.0:
; CHECK-NEXT:    extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 3, i8 2)
  ret <2 x i64> %1
}
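
; The value loaded from memory should stay in the integer domain, i.e. be moved
; with movdqa/vmovdqa as the checks below expect.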
define <2 x i64> @test_extrqi_domain(<2 x i64> *%p) nounwind uwtable ssp {
; X86-SSE-LABEL: test_extrqi_domain:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movdqa (%eax), %xmm0 # encoding: [0x66,0x0f,0x6f,0x00]
; X86-SSE-NEXT:    extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX-LABEL: test_extrqi_domain:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT:    vmovdqa (%eax), %xmm0 # encoding: [0xc5,0xf9,0x6f,0x00]
; X86-AVX-NEXT:    extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
; X86-AVX-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_extrqi_domain:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movdqa (%rdi), %xmm0 # encoding: [0x66,0x0f,0x6f,0x07]
; X64-SSE-NEXT:    extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX-LABEL: test_extrqi_domain:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm0 # encoding: [0xc5,0xf9,0x6f,0x07]
; X64-AVX-NEXT:    extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
; X64-AVX-NEXT:    retq # encoding: [0xc3]
  %1 = load <2 x i64>, <2 x i64> *%p
  %2 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %1, i8 3, i8 2)
  ret <2 x i64> %2
}

declare <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64>, i8, i8) nounwind
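
; EXTRQ register form: the field length and bit index are supplied in %xmm1
; rather than as immediates.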
define <2 x i64> @test_extrq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
; CHECK-LABEL: test_extrq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    extrq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x79,0xc1]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %1 = bitcast <2 x i64> %y to <16 x i8>
  %2 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> %1) nounwind
  ret <2 x i64> %2
}
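
; Both the loaded operand and the result copy should use integer-domain moves
; (movdqa/vmovdqa), as the checks below expect.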
define <2 x i64> @test_extrq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
; X86-SSE-LABEL: test_extrq_domain:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movdqa (%eax), %xmm1 # encoding: [0x66,0x0f,0x6f,0x08]
; X86-SSE-NEXT:    extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
; X86-SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX-LABEL: test_extrq_domain:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT:    vmovdqa (%eax), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x08]
; X86-AVX-NEXT:    extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
; X86-AVX-NEXT:    vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
; X86-AVX-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_extrq_domain:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movdqa (%rdi), %xmm1 # encoding: [0x66,0x0f,0x6f,0x0f]
; X64-SSE-NEXT:    extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
; X64-SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX-LABEL: test_extrq_domain:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x0f]
; X64-AVX-NEXT:    extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
; X64-AVX-NEXT:    vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
; X64-AVX-NEXT:    retq # encoding: [0xc3]
  %1 = load <2 x i64>, <2 x i64> *%p
  %2 = bitcast <2 x i64> %y to <16 x i8>
  %3 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %1, <16 x i8> %2) nounwind
  ret <2 x i64> %3
}

declare <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64>, <16 x i8>) nounwind
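
; INSERTQ with immediate operands: insert a 5-bit field from %xmm1 into %xmm0
; at bit offset 6.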
define <2 x i64> @test_insertqi(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
; CHECK-LABEL: test_insertqi:
; CHECK:       # %bb.0:
; CHECK-NEXT:    insertq $6, $5, %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x78,0xc1,0x05,0x06]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 5, i8 6)
  ret <2 x i64> %1
}
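
; The loaded operand and the result copy should both stay in the integer domain
; (movdqa/vmovdqa), as the checks below expect.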
define <2 x i64> @test_insertqi_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
; X86-SSE-LABEL: test_insertqi_domain:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movdqa (%eax), %xmm1 # encoding: [0x66,0x0f,0x6f,0x08]
; X86-SSE-NEXT:    insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
; X86-SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX-LABEL: test_insertqi_domain:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT:    vmovdqa (%eax), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x08]
; X86-AVX-NEXT:    insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
; X86-AVX-NEXT:    vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
; X86-AVX-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_insertqi_domain:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movdqa (%rdi), %xmm1 # encoding: [0x66,0x0f,0x6f,0x0f]
; X64-SSE-NEXT:    insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
; X64-SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX-LABEL: test_insertqi_domain:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x0f]
; X64-AVX-NEXT:    insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
; X64-AVX-NEXT:    vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
; X64-AVX-NEXT:    retq # encoding: [0xc3]
  %1 = load <2 x i64>, <2 x i64> *%p
  %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %y, i8 5, i8 6)
  ret <2 x i64> %2
}

declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8) nounwind
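
; INSERTQ register form: the field length and bit index are supplied in %xmm1
; rather than as immediates.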
define <2 x i64> @test_insertq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
; CHECK-LABEL: test_insertq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    insertq %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x79,0xc1]
; CHECK-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> %y) nounwind
  ret <2 x i64> %1
}
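
; Again, the loaded operand and the result copy should use integer-domain moves
; (movdqa/vmovdqa).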
define <2 x i64> @test_insertq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
; X86-SSE-LABEL: test_insertq_domain:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movdqa (%eax), %xmm1 # encoding: [0x66,0x0f,0x6f,0x08]
; X86-SSE-NEXT:    insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
; X86-SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
; X86-SSE-NEXT:    retl # encoding: [0xc3]
;
; X86-AVX-LABEL: test_insertq_domain:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT:    vmovdqa (%eax), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x08]
; X86-AVX-NEXT:    insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
; X86-AVX-NEXT:    vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
; X86-AVX-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_insertq_domain:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movdqa (%rdi), %xmm1 # encoding: [0x66,0x0f,0x6f,0x0f]
; X64-SSE-NEXT:    insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
; X64-SSE-NEXT:    movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX-LABEL: test_insertq_domain:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x0f]
; X64-AVX-NEXT:    insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
; X64-AVX-NEXT:    vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
; X64-AVX-NEXT:    retq # encoding: [0xc3]
  %1 = load <2 x i64>, <2 x i64> *%p
  %2 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %1, <2 x i64> %y) nounwind
  ret <2 x i64> %2
}

declare <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64>, <2 x i64>) nounwind