# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
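#
# This test checks GlobalISel instruction selection for scalar G_FADD on
# x86-64 across four subtarget configurations: plain SSE selects
# ADDSSrr/ADDSDrr, AVX selects the VEX-encoded VADDSSrr/VADDSDrr, and
# AVX512F/AVX512VL select the EVEX-encoded VADDSSZrr/VADDSDZrr together
# with the extended register classes (fr32x/fr64x, vr128x).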
--- |
  define float @test_fadd_float(float %arg1, float %arg2) {
    %ret = fadd float %arg1, %arg2
    ret float %ret
  }

  define double @test_fadd_double(double %arg1, double %arg2) {
    %ret = fadd double %arg1, %arg2
    ret double %ret
  }
...
---
name:            test_fadd_float
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr, preferred-register: '' }
  - { id: 1, class: vecr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: vecr, preferred-register: '' }
  - { id: 4, class: vecr, preferred-register: '' }
  - { id: 5, class: vecr, preferred-register: '' }
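# The vecr class above names a register bank rather than a register class:
# regbankselect has already placed every virtual register on the vector
# bank, and instruction selection is expected to constrain each vreg to a
# concrete class such as fr32/vr128 (or fr32x/vr128x under AVX512).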
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; SSE-LABEL: name: test_fadd_float
    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
    ; SSE: [[ADDSSrr:%[0-9]+]]:fr32 = ADDSSrr [[COPY1]], [[COPY3]]
    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[ADDSSrr]]
    ; SSE: $xmm0 = COPY [[COPY4]]
    ; SSE: RET 0, implicit $xmm0
    ; AVX-LABEL: name: test_fadd_float
    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
    ; AVX: [[VADDSSrr:%[0-9]+]]:fr32 = VADDSSrr [[COPY1]], [[COPY3]]
    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VADDSSrr]]
    ; AVX: $xmm0 = COPY [[COPY4]]
    ; AVX: RET 0, implicit $xmm0
    ; AVX512F-LABEL: name: test_fadd_float
    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
    ; AVX512F: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY1]], [[COPY3]]
    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSSZrr]]
    ; AVX512F: $xmm0 = COPY [[COPY4]]
    ; AVX512F: RET 0, implicit $xmm0
    ; AVX512VL-LABEL: name: test_fadd_float
    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
    ; AVX512VL: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY1]], [[COPY3]]
    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSSZrr]]
    ; AVX512VL: $xmm0 = COPY [[COPY4]]
    ; AVX512VL: RET 0, implicit $xmm0
    %2:vecr(s128) = COPY $xmm0
    %0:vecr(s32) = G_TRUNC %2(s128)
    %3:vecr(s128) = COPY $xmm1
    %1:vecr(s32) = G_TRUNC %3(s128)
    %4:vecr(s32) = G_FADD %0, %1
    %5:vecr(s128) = G_ANYEXT %4(s32)
    $xmm0 = COPY %5(s128)
    RET 0, implicit $xmm0

...
---
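# The double-precision test below mirrors the float case: the same COPY and
# G_TRUNC/G_ANYEXT pattern around G_FADD, selected to ADDSDrr, VADDSDrr, or
# the EVEX VADDSDZrr with the fr64/fr64x register classes.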
name:            test_fadd_double
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr, preferred-register: '' }
  - { id: 1, class: vecr, preferred-register: '' }
  - { id: 2, class: vecr, preferred-register: '' }
  - { id: 3, class: vecr, preferred-register: '' }
  - { id: 4, class: vecr, preferred-register: '' }
  - { id: 5, class: vecr, preferred-register: '' }
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; SSE-LABEL: name: test_fadd_double
    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
    ; SSE: [[ADDSDrr:%[0-9]+]]:fr64 = ADDSDrr [[COPY1]], [[COPY3]]
    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[ADDSDrr]]
    ; SSE: $xmm0 = COPY [[COPY4]]
    ; SSE: RET 0, implicit $xmm0
    ; AVX-LABEL: name: test_fadd_double
    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
    ; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
    ; AVX: [[VADDSDrr:%[0-9]+]]:fr64 = VADDSDrr [[COPY1]], [[COPY3]]
    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VADDSDrr]]
    ; AVX: $xmm0 = COPY [[COPY4]]
    ; AVX: RET 0, implicit $xmm0
    ; AVX512F-LABEL: name: test_fadd_double
    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
    ; AVX512F: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY1]], [[COPY3]]
    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSDZrr]]
    ; AVX512F: $xmm0 = COPY [[COPY4]]
    ; AVX512F: RET 0, implicit $xmm0
    ; AVX512VL-LABEL: name: test_fadd_double
    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
    ; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
    ; AVX512VL: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY1]], [[COPY3]]
    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSDZrr]]
    ; AVX512VL: $xmm0 = COPY [[COPY4]]
    ; AVX512VL: RET 0, implicit $xmm0
    %2:vecr(s128) = COPY $xmm0
    %0:vecr(s64) = G_TRUNC %2(s128)
    %3:vecr(s128) = COPY $xmm1
    %1:vecr(s64) = G_TRUNC %3(s128)
    %4:vecr(s64) = G_FADD %0, %1
    %5:vecr(s128) = G_ANYEXT %4(s64)
    $xmm0 = COPY %5(s128)
    RET 0, implicit $xmm0

...