; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr,+avx | FileCheck %s --check-prefix=AVX
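
; The EGPR feature (Intel APX) adds the REX2 prefix, which can only encode
; instructions in legacy opcode maps 0 and 1. The checks below verify the
; register-class consequences at ISel: map 0/1 instructions keep their plain
; GR classes (and so may use extended GPRs R16-R31), while map 2/3 and
; VEX-encoded instructions have their GPR operands constrained to the
; *_norex2 register classes.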
define i32 @map0(ptr nocapture noundef readonly %a, i64 noundef %b) {
; SSE-LABEL: name: map0
; SSE: bb.0.entry:
; SSE-NEXT: liveins: $rdi, $rsi
; SSE-NEXT: {{  $}}
; SSE-NEXT: [[COPY:%[0-9]+]]:gr64_nosp = COPY $rsi
; SSE-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; SSE-NEXT: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s32) from %ir.add.ptr)
; SSE-NEXT: $eax = COPY [[MOV32rm]]
; SSE-NEXT: RET 0, $eax
; AVX-LABEL: name: map0
; AVX: bb.0.entry:
; AVX-NEXT: liveins: $rdi, $rsi
; AVX-NEXT: {{  $}}
; AVX-NEXT: [[COPY:%[0-9]+]]:gr64_nosp = COPY $rsi
; AVX-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; AVX-NEXT: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s32) from %ir.add.ptr)
; AVX-NEXT: $eax = COPY [[MOV32rm]]
; AVX-NEXT: RET 0, $eax
entry:
  %add.ptr = getelementptr inbounds i32, ptr %a, i64 %b
  %0 = load i32, ptr %add.ptr
  ret i32 %0
}
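
; CVTSD2SI is in legacy map 1, so with +egpr the SSE form keeps the plain
; gr32 class and may use an extended GPR. The VEX-encoded VCVTSD2SI cannot
; take a REX2 prefix, so its GPR destination is constrained to gr32_norex2.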
define i32 @map1_or_vex(<2 x double> noundef %a) {
; SSE-LABEL: name: map1_or_vex
; SSE: bb.0.entry:
; SSE-NEXT: liveins: $xmm0
; SSE-NEXT: {{  $}}
; SSE-NEXT: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; SSE-NEXT: [[CVTSD2SIrr_Int:%[0-9]+]]:gr32 = nofpexcept CVTSD2SIrr_Int [[COPY]], implicit $mxcsr
; SSE-NEXT: $eax = COPY [[CVTSD2SIrr_Int]]
; SSE-NEXT: RET 0, $eax
; AVX-LABEL: name: map1_or_vex
; AVX: bb.0.entry:
; AVX-NEXT: liveins: $xmm0
; AVX-NEXT: {{  $}}
; AVX-NEXT: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; AVX-NEXT: [[VCVTSD2SIrr_Int:%[0-9]+]]:gr32_norex2 = nofpexcept VCVTSD2SIrr_Int [[COPY]], implicit $mxcsr
; AVX-NEXT: $eax = COPY [[VCVTSD2SIrr_Int]]
; AVX-NEXT: RET 0, $eax
entry:
  %0 = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a)
  ret i32 %0
}
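
; PABSB is an SSSE3 instruction in opcode map 2 (0F38), which REX2 cannot
; encode, so the base/index registers of its memory operand are constrained
; to gr64_norex2 in both the legacy-SSE and the VEX form.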
define <2 x i64> @map2_or_vex(ptr nocapture noundef readonly %b, i64 noundef %c) {
; SSE-LABEL: name: map2_or_vex
; SSE: bb.0.entry:
; SSE-NEXT: liveins: $rdi, $rsi
; SSE-NEXT: {{  $}}
; SSE-NEXT: [[COPY:%[0-9]+]]:gr64_norex2_nosp = COPY $rsi
; SSE-NEXT: [[COPY1:%[0-9]+]]:gr64_norex2 = COPY $rdi
; SSE-NEXT: [[PABSBrm:%[0-9]+]]:vr128 = PABSBrm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s128) from %ir.add.ptr)
; SSE-NEXT: $xmm0 = COPY [[PABSBrm]]
; SSE-NEXT: RET 0, $xmm0
; AVX-LABEL: name: map2_or_vex
; AVX: bb.0.entry:
; AVX-NEXT: liveins: $rdi, $rsi
; AVX-NEXT: {{  $}}
; AVX-NEXT: [[COPY:%[0-9]+]]:gr64_norex2_nosp = COPY $rsi
; AVX-NEXT: [[COPY1:%[0-9]+]]:gr64_norex2 = COPY $rdi
; AVX-NEXT: [[VPABSBrm:%[0-9]+]]:vr128 = VPABSBrm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s128) from %ir.add.ptr)
; AVX-NEXT: $xmm0 = COPY [[VPABSBrm]]
; AVX-NEXT: RET 0, $xmm0
entry:
  %add.ptr = getelementptr inbounds i32, ptr %b, i64 %c
  %a = load <2 x i64>, ptr %add.ptr
  %0 = bitcast <2 x i64> %a to <16 x i8>
  %elt.abs.i = tail call <16 x i8> @llvm.abs.v16i8(<16 x i8> %0, i1 false)
  %1 = bitcast <16 x i8> %elt.abs.i to <2 x i64>
  ret <2 x i64> %1
}

declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>)
declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1 immarg)