; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC --check-prefix=GCN
; RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN
; RUN: llc < %s -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN

declare i32 @llvm.r600.read.tidig.x() nounwind readnone
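
; These tests check that a mul feeding an add is combined into a single
; 24-bit multiply-add (MULADD_UINT24 on Evergreen, v_mad_u32_u24 on GCN)
; when both multiply operands are provably no wider than 24 bits, e.g.
; because a shl/lshr pair or a mask has cleared their top byte.
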
; FUNC-LABEL: {{^}}u32_mad24:
; EG: MULADD_UINT24
; SI: v_mad_u32_u24
; VI: v_mad_u32_u24

define amdgpu_kernel void @u32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
  %0 = shl i32 %a, 8
  %a_24 = lshr i32 %0, 8
  %1 = shl i32 %b, 8
  %b_24 = lshr i32 %1, 8
  %2 = mul i32 %a_24, %b_24
  %3 = add i32 %2, %c
  store i32 %3, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}i16_mad24:
; The order of A and B does not matter.
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended.
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 16
; FIXME: Should be using scalar instructions here.
; GCN: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 v{{[0-9]}}, [[MAD]], 0, 16
define amdgpu_kernel void @i16_mad24(i32 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
entry:
  %0 = mul i16 %a, %b
  %1 = add i16 %0, %c
  %2 = sext i16 %1 to i32
  store i32 %2, i32 addrspace(1)* %out
  ret void
}
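
; For @i16_mad24 and @i8_mad24 an unsigned 24-bit mad is still correct for
; signed narrow types: only the low 16 (resp. 8) bits of the result are
; kept and then sign-extended, so the high bits of the product do not
; matter.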

; FIXME: Need to handle non-uniform case for function below (load without gep).
; FUNC-LABEL: {{^}}i8_mad24:
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended.
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 8
; GCN: v_mad_u32_u24 [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
define amdgpu_kernel void @i8_mad24(i32 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
entry:
  %0 = mul i8 %a, %b
  %1 = add i8 %0, %c
  %2 = sext i8 %1 to i32
  store i32 %2, i32 addrspace(1)* %out
  ret void
}

; This tests for a bug where the mad_u24 pattern matcher would call
; SimplifyDemandedBits on the first operand of the mul instruction
; assuming that the pattern would be matched to a 24-bit mad. This
; led to some instructions being incorrectly erased when the entire
; 24-bit mad pattern wasn't being matched.
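; If SimplifyDemandedBits runs before the whole pattern is matched, the
; select feeding the mul can be simplified away even though the 24-bit
; mad is never formed.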

; Check that the select instruction is not deleted.
; FUNC-LABEL: {{^}}i24_i32_i32_mad:
; EG: CNDE_INT
; SI: v_cndmask
define amdgpu_kernel void @i24_i32_i32_mad(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
entry:
  %0 = ashr i32 %a, 8
  %1 = icmp ne i32 %c, 0
  %2 = select i1 %1, i32 %0, i32 34
  %3 = mul i32 %2, %c
  %4 = add i32 %3, %d
  store i32 %4, i32 addrspace(1)* %out
  ret void
}
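
; The and-with-16777215 (0xffffff) masks below only reassert that the phi
; values already fit in 24 bits; they should be folded into the 24-bit
; mads rather than emitted as separate v_and instructions.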
; FUNC-LABEL: {{^}}extra_and:
; SI-NOT: v_and
; SI: v_mad_u32_u24
; SI: v_mad_u32_u24
define amdgpu_kernel void @extra_and(i32 addrspace(1)* %arg, i32 %arg2, i32 %arg3) {
bb:
  br label %bb4

bb4:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp5 = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp6 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp7 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp8 = and i32 %tmp7, 16777215
  %tmp9 = and i32 %tmp6, 16777215
  %tmp10 = and i32 %tmp5, 16777215
  %tmp11 = and i32 %tmp, 16777215
  %tmp12 = mul i32 %tmp8, %tmp11
  %tmp13 = add i32 %arg2, %tmp12
  %tmp14 = mul i32 %tmp9, %tmp11
  %tmp15 = add i32 %arg3, %tmp14
  %tmp16 = add nuw nsw i32 %tmp13, %tmp15
  %tmp17 = icmp eq i32 %tmp16, 8
  br i1 %tmp17, label %bb18, label %bb4

bb18:                                             ; preds = %bb4
  store i32 %tmp16, i32 addrspace(1)* %arg
  ret void
}
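
; Same loop as @extra_and, but the operands are narrowed with lshr instead
; of a mask. The shift still changes the value, so it must survive even
; once the 24-bit mads are formed.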
; FUNC-LABEL: {{^}}dont_remove_shift:
; SI: v_lshr
; SI: v_mad_u32_u24
; SI: v_mad_u32_u24
define amdgpu_kernel void @dont_remove_shift(i32 addrspace(1)* %arg, i32 %arg2, i32 %arg3) {
bb:
  br label %bb4

bb4:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp5 = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp6 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp7 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp8 = lshr i32 %tmp7, 8
  %tmp9 = lshr i32 %tmp6, 8
  %tmp10 = lshr i32 %tmp5, 8
  %tmp11 = lshr i32 %tmp, 8
  %tmp12 = mul i32 %tmp8, %tmp11
  %tmp13 = add i32 %arg2, %tmp12
  %tmp14 = mul i32 %tmp9, %tmp11
  %tmp15 = add i32 %arg3, %tmp14
  %tmp16 = add nuw nsw i32 %tmp13, %tmp15
  %tmp17 = icmp eq i32 %tmp16, 8
  br i1 %tmp17, label %bb18, label %bb4

bb18:                                             ; preds = %bb4
  store i32 %tmp16, i32 addrspace(1)* %arg
  ret void
}
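
; A signed saturating i8 mad: the i16 result is clamped to [-128, 127]
; with an icmp/select pair on each side, which the backend selects as a
; single v_med3_i32 on the sign-extended value.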
; FUNC-LABEL: {{^}}i8_mad_sat_16:
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended.
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 16
; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
; GCN: v_med3_i32 v{{[0-9]}}, [[EXT]],
define amdgpu_kernel void @i8_mad_sat_16(i8 addrspace(1)* %out, i8 addrspace(1)* %in0, i8 addrspace(1)* %in1, i8 addrspace(1)* %in2, i64 addrspace(5)* %idx) {
entry:
  %retval.0.i = load i64, i64 addrspace(5)* %idx
  %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 %retval.0.i
  %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 %retval.0.i
  %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %in2, i64 %retval.0.i
  %l1 = load i8, i8 addrspace(1)* %arrayidx, align 1
  %l2 = load i8, i8 addrspace(1)* %arrayidx2, align 1
  %l3 = load i8, i8 addrspace(1)* %arrayidx4, align 1
  %conv1.i = sext i8 %l1 to i16
  %conv3.i = sext i8 %l2 to i16
  %conv5.i = sext i8 %l3 to i16
  %mul.i.i.i = mul nsw i16 %conv3.i, %conv1.i
  %add.i.i = add i16 %mul.i.i.i, %conv5.i
  %c4 = icmp sgt i16 %add.i.i, -128
  %cond.i.i = select i1 %c4, i16 %add.i.i, i16 -128
  %c5 = icmp slt i16 %cond.i.i, 127
  %cond13.i.i = select i1 %c5, i16 %cond.i.i, i16 127
  %conv8.i = trunc i16 %cond13.i.i to i8
  %arrayidx7 = getelementptr inbounds i8, i8 addrspace(1)* %out, i64 %retval.0.i
  store i8 %conv8.i, i8 addrspace(1)* %arrayidx7, align 1
  ret void
}
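
; The i8 operands are sign-extended to i16, multiplied and accumulated in
; i16, and only the final result is widened to i32, so a 16-bit (VI) or
; 24-bit (SI) mad plus one sign-extension is enough.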
; FUNC-LABEL: {{^}}i8_mad_32:
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended.
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 16
; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
define amdgpu_kernel void @i8_mad_32(i32 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b, i8 addrspace(1)* %c, i64 addrspace(5)* %idx) {
entry:
  %retval.0.i = load i64, i64 addrspace(5)* %idx
  %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 %retval.0.i
  %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %b, i64 %retval.0.i
  %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %c, i64 %retval.0.i
  %la = load i8, i8 addrspace(1)* %arrayidx, align 1
  %lb = load i8, i8 addrspace(1)* %arrayidx2, align 1
  %lc = load i8, i8 addrspace(1)* %arrayidx4, align 1
  %exta = sext i8 %la to i16
  %extb = sext i8 %lb to i16
  %extc = sext i8 %lc to i16
  %mul = mul i16 %exta, %extb
  %mad = add i16 %mul, %extc
  %mad_ext = sext i16 %mad to i32
  store i32 %mad_ext, i32 addrspace(1)* %out
  ret void
}
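
; Same as @i8_mad_32, but the i16 mad result is widened all the way to i64.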
; FUNC-LABEL: {{^}}i8_mad_64:
; EG: MULADD_UINT24 {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended.
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
; EG: 16
; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
define amdgpu_kernel void @i8_mad_64(i64 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b, i8 addrspace(1)* %c, i64 addrspace(5)* %idx) {
entry:
  %retval.0.i = load i64, i64 addrspace(5)* %idx
  %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 %retval.0.i
  %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %b, i64 %retval.0.i
  %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %c, i64 %retval.0.i
  %la = load i8, i8 addrspace(1)* %arrayidx, align 1
  %lb = load i8, i8 addrspace(1)* %arrayidx2, align 1
  %lc = load i8, i8 addrspace(1)* %arrayidx4, align 1
  %exta = sext i8 %la to i16
  %extb = sext i8 %lb to i16
  %extc = sext i8 %lc to i16
  %mul = mul i16 %exta, %extb
  %mad = add i16 %mul, %extc
  %mad_ext = sext i16 %mad to i64
  store i64 %mad_ext, i64 addrspace(1)* %out
  ret void
}