1 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2 ; RUN: llc -global-isel -march=amdgcn -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s
; Scalar f32 constrained fadd with fpexcept.strict: translates to a bare
; G_STRICT_FADD with no nofpexcept flag, so FP-exception ordering is kept.
; NOTE(review): "constained" in the function names throughout this file is a
; typo for "constrained"; left as-is to avoid churning the autogenerated
; check labels (the assertions are regenerated by update_mir_test_checks.py).
4 define float @v_constained_fadd_f32_fpexcept_strict(float %x, float %y) #0 {
5 ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_strict
6 ; CHECK: bb.1 (%ir-block.0):
7 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
9 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
10 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
11 ; CHECK-NEXT: [[STRICT_FADD:%[0-9]+]]:_(s32) = G_STRICT_FADD [[COPY]], [[COPY1]]
12 ; CHECK-NEXT: $vgpr0 = COPY [[STRICT_FADD]](s32)
13 ; CHECK-NEXT: SI_RETURN implicit $vgpr0
14 %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
; f32 constrained fadd, fpexcept.strict, with the nsz fast-math flag on the
; call: the nsz flag must be carried through onto the G_STRICT_FADD.
18 define float @v_constained_fadd_f32_fpexcept_strict_flags(float %x, float %y) #0 {
19 ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_strict_flags
20 ; CHECK: bb.1 (%ir-block.0):
21 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
23 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
24 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
25 ; CHECK-NEXT: [[STRICT_FADD:%[0-9]+]]:_(s32) = nsz G_STRICT_FADD [[COPY]], [[COPY1]]
26 ; CHECK-NEXT: $vgpr0 = COPY [[STRICT_FADD]](s32)
27 ; CHECK-NEXT: SI_RETURN implicit $vgpr0
28 %val = call nsz float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
; f32 constrained fadd, fpexcept.ignore: exceptions may be ignored, which the
; translator records as a nofpexcept flag on the G_STRICT_FADD.
32 define float @v_constained_fadd_f32_fpexcept_ignore(float %x, float %y) #0 {
33 ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_ignore
34 ; CHECK: bb.1 (%ir-block.0):
35 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
37 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
38 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
39 ; CHECK-NEXT: %2:_(s32) = nofpexcept G_STRICT_FADD [[COPY]], [[COPY1]]
40 ; CHECK-NEXT: $vgpr0 = COPY %2(s32)
41 ; CHECK-NEXT: SI_RETURN implicit $vgpr0
42 %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
; f32 constrained fadd, fpexcept.ignore plus nsz on the call: both the nsz and
; nofpexcept flags must appear on the translated G_STRICT_FADD.
46 define float @v_constained_fadd_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
47 ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_ignore_flags
48 ; CHECK: bb.1 (%ir-block.0):
49 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
51 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
52 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
53 ; CHECK-NEXT: %2:_(s32) = nsz nofpexcept G_STRICT_FADD [[COPY]], [[COPY1]]
54 ; CHECK-NEXT: $vgpr0 = COPY %2(s32)
55 ; CHECK-NEXT: SI_RETURN implicit $vgpr0
56 %val = call nsz float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
; f32 constrained fadd, fpexcept.maytrap: no nofpexcept flag is set, so the
; resulting MIR matches the strict case above.
60 define float @v_constained_fadd_f32_fpexcept_maytrap(float %x, float %y) #0 {
61 ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_maytrap
62 ; CHECK: bb.1 (%ir-block.0):
63 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
65 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
66 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
67 ; CHECK-NEXT: [[STRICT_FADD:%[0-9]+]]:_(s32) = G_STRICT_FADD [[COPY]], [[COPY1]]
68 ; CHECK-NEXT: $vgpr0 = COPY [[STRICT_FADD]](s32)
69 ; CHECK-NEXT: SI_RETURN implicit $vgpr0
70 %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
; <2 x float> constrained fadd, strict: the two vector operands are assembled
; from vgpr pairs with G_BUILD_VECTOR, added as one vector G_STRICT_FADD, and
; the result is split back into scalar vgprs with G_UNMERGE_VALUES.
74 define <2 x float> @v_constained_fadd_v2f32_fpexcept_strict(<2 x float> %x, <2 x float> %y) #0 {
75 ; CHECK-LABEL: name: v_constained_fadd_v2f32_fpexcept_strict
76 ; CHECK: bb.1 (%ir-block.0):
77 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
79 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
80 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
81 ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
82 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
83 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
84 ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
85 ; CHECK-NEXT: [[STRICT_FADD:%[0-9]+]]:_(<2 x s32>) = G_STRICT_FADD [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
86 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[STRICT_FADD]](<2 x s32>)
87 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
88 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
89 ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
90 %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
; <2 x float> constrained fadd, fpexcept.ignore: same vector build/unmerge
; shape as the strict case, but the G_STRICT_FADD carries nofpexcept.
94 define <2 x float> @v_constained_fadd_v2f32_fpexcept_ignore(<2 x float> %x, <2 x float> %y) #0 {
95 ; CHECK-LABEL: name: v_constained_fadd_v2f32_fpexcept_ignore
96 ; CHECK: bb.1 (%ir-block.0):
97 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
99 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
100 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
101 ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
102 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
103 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
104 ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
105 ; CHECK-NEXT: %6:_(<2 x s32>) = nofpexcept G_STRICT_FADD [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
106 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES %6(<2 x s32>)
107 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
108 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
109 ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
110 %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
; <2 x float> constrained fadd, fpexcept.maytrap: like maytrap in the scalar
; case, no nofpexcept flag — MIR is identical in shape to the strict variant.
114 define <2 x float> @v_constained_fadd_v2f32_fpexcept_maytrap(<2 x float> %x, <2 x float> %y) #0 {
115 ; CHECK-LABEL: name: v_constained_fadd_v2f32_fpexcept_maytrap
116 ; CHECK: bb.1 (%ir-block.0):
117 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
119 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
120 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
121 ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
122 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
123 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
124 ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
125 ; CHECK-NEXT: [[STRICT_FADD:%[0-9]+]]:_(<2 x s32>) = G_STRICT_FADD [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
126 ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[STRICT_FADD]](<2 x s32>)
127 ; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
128 ; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
129 ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
130 %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
; Constrained fsub, ignore + nsz: translates to G_STRICT_FSUB with both the
; nsz and nofpexcept flags.
134 define float @v_constained_fsub_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
135 ; CHECK-LABEL: name: v_constained_fsub_f32_fpexcept_ignore_flags
136 ; CHECK: bb.1 (%ir-block.0):
137 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
139 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
140 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
141 ; CHECK-NEXT: %2:_(s32) = nsz nofpexcept G_STRICT_FSUB [[COPY]], [[COPY1]]
142 ; CHECK-NEXT: $vgpr0 = COPY %2(s32)
143 ; CHECK-NEXT: SI_RETURN implicit $vgpr0
144 %val = call nsz float @llvm.experimental.constrained.fsub.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
; Constrained fmul, ignore + nsz: translates to G_STRICT_FMUL with both the
; nsz and nofpexcept flags.
148 define float @v_constained_fmul_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
149 ; CHECK-LABEL: name: v_constained_fmul_f32_fpexcept_ignore_flags
150 ; CHECK: bb.1 (%ir-block.0):
151 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
153 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
154 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
155 ; CHECK-NEXT: %2:_(s32) = nsz nofpexcept G_STRICT_FMUL [[COPY]], [[COPY1]]
156 ; CHECK-NEXT: $vgpr0 = COPY %2(s32)
157 ; CHECK-NEXT: SI_RETURN implicit $vgpr0
158 %val = call nsz float @llvm.experimental.constrained.fmul.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
; Constrained fdiv, ignore + nsz: translates to G_STRICT_FDIV with both the
; nsz and nofpexcept flags.
162 define float @v_constained_fdiv_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
163 ; CHECK-LABEL: name: v_constained_fdiv_f32_fpexcept_ignore_flags
164 ; CHECK: bb.1 (%ir-block.0):
165 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
167 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
168 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
169 ; CHECK-NEXT: %2:_(s32) = nsz nofpexcept G_STRICT_FDIV [[COPY]], [[COPY1]]
170 ; CHECK-NEXT: $vgpr0 = COPY %2(s32)
171 ; CHECK-NEXT: SI_RETURN implicit $vgpr0
172 %val = call nsz float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
; Constrained frem, ignore + nsz: translates to G_STRICT_FREM with both the
; nsz and nofpexcept flags.
176 define float @v_constained_frem_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
177 ; CHECK-LABEL: name: v_constained_frem_f32_fpexcept_ignore_flags
178 ; CHECK: bb.1 (%ir-block.0):
179 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
181 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
182 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
183 ; CHECK-NEXT: %2:_(s32) = nsz nofpexcept G_STRICT_FREM [[COPY]], [[COPY1]]
184 ; CHECK-NEXT: $vgpr0 = COPY %2(s32)
185 ; CHECK-NEXT: SI_RETURN implicit $vgpr0
186 %val = call nsz float @llvm.experimental.constrained.frem.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
; Constrained fma (three operands), ignore + nsz: translates to G_STRICT_FMA
; with both the nsz and nofpexcept flags.
190 define float @v_constained_fma_f32_fpexcept_ignore_flags(float %x, float %y, float %z) #0 {
191 ; CHECK-LABEL: name: v_constained_fma_f32_fpexcept_ignore_flags
192 ; CHECK: bb.1 (%ir-block.0):
193 ; CHECK-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2
195 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
196 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
197 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
198 ; CHECK-NEXT: %3:_(s32) = nsz nofpexcept G_STRICT_FMA [[COPY]], [[COPY1]], [[COPY2]]
199 ; CHECK-NEXT: $vgpr0 = COPY %3(s32)
200 ; CHECK-NEXT: SI_RETURN implicit $vgpr0
201 %val = call nsz float @llvm.experimental.constrained.fma.f32(float %x, float %y, float %z, metadata !"round.tonearest", metadata !"fpexcept.ignore")
; Unary case: constrained sqrt, strict — translates to a bare G_STRICT_FSQRT
; with a single operand and no nofpexcept flag.
205 define float @v_constained_sqrt_f32_fpexcept_strict(float %x) #0 {
206 ; CHECK-LABEL: name: v_constained_sqrt_f32_fpexcept_strict
207 ; CHECK: bb.1 (%ir-block.0):
208 ; CHECK-NEXT: liveins: $vgpr0
210 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
211 ; CHECK-NEXT: [[STRICT_FSQRT:%[0-9]+]]:_(s32) = G_STRICT_FSQRT [[COPY]]
212 ; CHECK-NEXT: $vgpr0 = COPY [[STRICT_FSQRT]](s32)
213 ; CHECK-NEXT: SI_RETURN implicit $vgpr0
214 %val = call float @llvm.experimental.constrained.sqrt.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
; Declarations of the constrained FP intrinsics exercised above. Attribute
; group #0 (on the test functions) marks them strictfp; #1 (on the intrinsics)
; marks them inaccessiblememonly nounwind willreturn.
; NOTE(review): the v3f32 fadd variant has no visible caller in this portion
; of the file — presumably used elsewhere; verify before removing.
218 declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata) #1
219 declare <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float>, <2 x float>, metadata, metadata) #1
220 declare <3 x float> @llvm.experimental.constrained.fadd.v3f32(<3 x float>, <3 x float>, metadata, metadata) #1
221 declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata) #1
222 declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata) #1
223 declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata) #1
224 declare float @llvm.experimental.constrained.frem.f32(float, float, metadata, metadata) #1
225 declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata) #1
226 declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata) #1
228 attributes #0 = { strictfp }
229 attributes #1 = { inaccessiblememonly nounwind willreturn }