; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}

; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
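
; llvm.vector.reduce.and is expanded by the SPIR-V backend rather than mapped
; to a single instruction. The CHECK patterns below cover i8/i16/i32/i64
; element types: 2-element vectors are reduced with an OpVectorShuffle, a
; vector OpBitwiseAnd, and an OpCompositeExtract of lane 0, while 3-element
; vectors are reduced by extracting every lane and chaining scalar
; OpBitwiseAnd operations.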

; CHECK-DAG: %[[Char:.*]] = OpTypeInt 8 0
; CHECK-DAG: %[[CharVec2:.*]] = OpTypeVector %[[Char]] 2
; CHECK-DAG: %[[CharVec3:.*]] = OpTypeVector %[[Char]] 3

; CHECK-DAG: %[[Short:.*]] = OpTypeInt 16 0
; CHECK-DAG: %[[ShortVec2:.*]] = OpTypeVector %[[Short]] 2
; CHECK-DAG: %[[ShortVec3:.*]] = OpTypeVector %[[Short]] 3

; CHECK-DAG: %[[Int:.*]] = OpTypeInt 32 0
; CHECK-DAG: %[[IntVec2:.*]] = OpTypeVector %[[Int]] 2
; CHECK-DAG: %[[IntVec3:.*]] = OpTypeVector %[[Int]] 3

; CHECK-DAG: %[[Long:.*]] = OpTypeInt 64 0
; CHECK-DAG: %[[LongVec2:.*]] = OpTypeVector %[[Long]] 2
; CHECK-DAG: %[[LongVec3:.*]] = OpTypeVector %[[Long]] 3
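
; <2 x i8> reduction: shuffle + vector OpBitwiseAnd, result taken from lane 0.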
; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[CharVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Added1:.*]] = OpBitwiseAnd %[[CharVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2CharR:.*]] = OpCompositeExtract %[[Char]] %[[Added1]] 0
; CHECK: OpReturnValue %[[Vec2CharR]]
; CHECK: OpFunctionEnd
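
; <3 x i8> reduction: per-lane extracts folded with a chain of scalar OpBitwiseAnd.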
; CHECK: %[[ParamVec3Char:.*]] = OpFunctionParameter %[[CharVec3]]
; CHECK: %[[Vec3CharItem0:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 0
; CHECK: %[[Vec3CharItem1:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 1
; CHECK: %[[Vec3CharItem2:.*]] = OpCompositeExtract %[[Char]] %[[ParamVec3Char]] 2
; CHECK: %[[Vec3CharR1:.*]] = OpBitwiseAnd %[[Char]] %[[Vec3CharItem0]] %[[Vec3CharItem1]]
; CHECK: %[[Vec3CharR2:.*]] = OpBitwiseAnd %[[Char]] %[[Vec3CharR1]] %[[Vec3CharItem2]]
; CHECK: OpReturnValue %[[Vec3CharR2]]
; CHECK: OpFunctionEnd
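
; <2 x i16> reduction: shuffle + vector OpBitwiseAnd, result taken from lane 0.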
; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[ShortVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Added1:.*]] = OpBitwiseAnd %[[ShortVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2ShortR:.*]] = OpCompositeExtract %[[Short]] %[[Added1]] 0
; CHECK: OpReturnValue %[[Vec2ShortR]]
; CHECK: OpFunctionEnd
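
; <3 x i16> reduction: per-lane extracts folded with scalar OpBitwiseAnd.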
; CHECK: %[[ParamVec3Short:.*]] = OpFunctionParameter %[[ShortVec3]]
; CHECK: %[[Vec3ShortItem0:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 0
; CHECK: %[[Vec3ShortItem1:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 1
; CHECK: %[[Vec3ShortItem2:.*]] = OpCompositeExtract %[[Short]] %[[ParamVec3Short]] 2
; CHECK: %[[Vec3ShortR1:.*]] = OpBitwiseAnd %[[Short]] %[[Vec3ShortItem0]] %[[Vec3ShortItem1]]
; CHECK: %[[Vec3ShortR2:.*]] = OpBitwiseAnd %[[Short]] %[[Vec3ShortR1]] %[[Vec3ShortItem2]]
; CHECK: OpReturnValue %[[Vec3ShortR2]]
; CHECK: OpFunctionEnd
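
; <2 x i32> reduction: shuffle + vector OpBitwiseAnd, result taken from lane 0.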
; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[IntVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Added1:.*]] = OpBitwiseAnd %[[IntVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2IntR:.*]] = OpCompositeExtract %[[Int]] %[[Added1]] 0
; CHECK: OpReturnValue %[[Vec2IntR]]
; CHECK: OpFunctionEnd
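
; <3 x i32> reduction: per-lane extracts folded with scalar OpBitwiseAnd.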
; CHECK: %[[ParamVec3Int:.*]] = OpFunctionParameter %[[IntVec3]]
; CHECK: %[[Vec3IntItem0:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 0
; CHECK: %[[Vec3IntItem1:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 1
; CHECK: %[[Vec3IntItem2:.*]] = OpCompositeExtract %[[Int]] %[[ParamVec3Int]] 2
; CHECK: %[[Vec3IntR1:.*]] = OpBitwiseAnd %[[Int]] %[[Vec3IntItem0]] %[[Vec3IntItem1]]
; CHECK: %[[Vec3IntR2:.*]] = OpBitwiseAnd %[[Int]] %[[Vec3IntR1]] %[[Vec3IntItem2]]
; CHECK: OpReturnValue %[[Vec3IntR2]]
; CHECK: OpFunctionEnd
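
; <2 x i64> reduction: shuffle + vector OpBitwiseAnd, result taken from lane 0.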
; CHECK: %[[Shuffle1:.*]] = OpVectorShuffle %[[LongVec2]] %[[#]] %[[#]] 1 -1
; CHECK: %[[Added1:.*]] = OpBitwiseAnd %[[LongVec2]] %[[#]] %[[#]]
; CHECK: %[[Vec2LongR:.*]] = OpCompositeExtract %[[Long]] %[[Added1]] 0
; CHECK: OpReturnValue %[[Vec2LongR]]
; CHECK: OpFunctionEnd
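
; <3 x i64> reduction: per-lane extracts folded with scalar OpBitwiseAnd.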
; CHECK: %[[ParamVec3Long:.*]] = OpFunctionParameter %[[LongVec3]]
; CHECK: %[[Vec3LongItem0:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 0
; CHECK: %[[Vec3LongItem1:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 1
; CHECK: %[[Vec3LongItem2:.*]] = OpCompositeExtract %[[Long]] %[[ParamVec3Long]] 2
; CHECK: %[[Vec3LongR1:.*]] = OpBitwiseAnd %[[Long]] %[[Vec3LongItem0]] %[[Vec3LongItem1]]
; CHECK: %[[Vec3LongR2:.*]] = OpBitwiseAnd %[[Long]] %[[Vec3LongR1]] %[[Vec3LongItem2]]
; CHECK: OpReturnValue %[[Vec3LongR2]]
; CHECK: OpFunctionEnd

define spir_func i8 @test_vector_reduce_and_v2i8(<2 x i8> %v) {
  %res = call i8 @llvm.vector.reduce.and.v2i8(<2 x i8> %v)
  ret i8 %res
}

define spir_func i8 @test_vector_reduce_and_v3i8(<3 x i8> %v) {
  %res = call i8 @llvm.vector.reduce.and.v3i8(<3 x i8> %v)
  ret i8 %res
}

define spir_func i8 @test_vector_reduce_and_v4i8(<4 x i8> %v) {
  %res = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> %v)
  ret i8 %res
}

define spir_func i8 @test_vector_reduce_and_v8i8(<8 x i8> %v) {
  %res = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %v)
  ret i8 %res
}

define spir_func i8 @test_vector_reduce_and_v16i8(<16 x i8> %v) {
  %res = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %v)
  ret i8 %res
}

define spir_func i16 @test_vector_reduce_and_v2i16(<2 x i16> %v) {
  %res = call i16 @llvm.vector.reduce.and.v2i16(<2 x i16> %v)
  ret i16 %res
}

define spir_func i16 @test_vector_reduce_and_v3i16(<3 x i16> %v) {
  %res = call i16 @llvm.vector.reduce.and.v3i16(<3 x i16> %v)
  ret i16 %res
}

define spir_func i16 @test_vector_reduce_and_v4i16(<4 x i16> %v) {
  %res = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %v)
  ret i16 %res
}

define spir_func i16 @test_vector_reduce_and_v8i16(<8 x i16> %v) {
  %res = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %v)
  ret i16 %res
}

define spir_func i16 @test_vector_reduce_and_v16i16(<16 x i16> %v) {
  %res = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %v)
  ret i16 %res
}

define spir_func i32 @test_vector_reduce_and_v2i32(<2 x i32> %v) {
  %res = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %v)
  ret i32 %res
}

define spir_func i32 @test_vector_reduce_and_v3i32(<3 x i32> %v) {
  %res = call i32 @llvm.vector.reduce.and.v3i32(<3 x i32> %v)
  ret i32 %res
}

define spir_func i32 @test_vector_reduce_and_v4i32(<4 x i32> %v) {
  %res = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %v)
  ret i32 %res
}

define spir_func i32 @test_vector_reduce_and_v8i32(<8 x i32> %v) {
  %res = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %v)
  ret i32 %res
}

define spir_func i32 @test_vector_reduce_and_v16i32(<16 x i32> %v) {
  %res = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %v)
  ret i32 %res
}

define spir_func i64 @test_vector_reduce_and_v2i64(<2 x i64> %v) {
  %res = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %v)
  ret i64 %res
}

define spir_func i64 @test_vector_reduce_and_v3i64(<3 x i64> %v) {
  %res = call i64 @llvm.vector.reduce.and.v3i64(<3 x i64> %v)
  ret i64 %res
}

define spir_func i64 @test_vector_reduce_and_v4i64(<4 x i64> %v) {
  %res = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %v)
  ret i64 %res
}

define spir_func i64 @test_vector_reduce_and_v8i64(<8 x i64> %v) {
  %res = call i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %v)
  ret i64 %res
}

define spir_func i64 @test_vector_reduce_and_v16i64(<16 x i64> %v) {
  %res = call i64 @llvm.vector.reduce.and.v16i64(<16 x i64> %v)
  ret i64 %res
}

declare i8 @llvm.vector.reduce.and.v2i8(<2 x i8>)
declare i8 @llvm.vector.reduce.and.v3i8(<3 x i8>)
declare i8 @llvm.vector.reduce.and.v4i8(<4 x i8>)
declare i8 @llvm.vector.reduce.and.v8i8(<8 x i8>)
declare i8 @llvm.vector.reduce.and.v16i8(<16 x i8>)

declare i16 @llvm.vector.reduce.and.v2i16(<2 x i16>)
declare i16 @llvm.vector.reduce.and.v3i16(<3 x i16>)
declare i16 @llvm.vector.reduce.and.v4i16(<4 x i16>)
declare i16 @llvm.vector.reduce.and.v8i16(<8 x i16>)
declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>)

declare i32 @llvm.vector.reduce.and.v2i32(<2 x i32>)
declare i32 @llvm.vector.reduce.and.v3i32(<3 x i32>)
declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.and.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.and.v16i32(<16 x i32>)

declare i64 @llvm.vector.reduce.and.v2i64(<2 x i64>)
declare i64 @llvm.vector.reduce.and.v3i64(<3 x i64>)
declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>)
declare i64 @llvm.vector.reduce.and.v8i64(<8 x i64>)
declare i64 @llvm.vector.reduce.and.v16i64(<16 x i64>)