; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+avx512vl,+avx512bw | FileCheck %s --check-prefix=AVX512
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+avx512vl,+avx512bw,+avxvnni | FileCheck %s --check-prefix=AVX
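
; The tests below check that a pmaddwd intrinsic followed by an i32 vector add
; of the accumulator is folded into a single dot-product instruction: the
; VEX-encoded "{vex} vpdpwssd" when only AVX-VNNI is available, and the plain
; vpdpwssd form on AVX512-VNNI targets.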

define <4 x i32> @test_pmaddwd_v8i16_add_v4i32(<4 x i32> %a0, <8 x i16> %a1, <8 x i16> %a2) {
; AVX-LABEL: test_pmaddwd_v8i16_add_v4i32:
; AVX:       # %bb.0:
; AVX-NEXT:    {vex} vpdpwssd %xmm2, %xmm1, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_pmaddwd_v8i16_add_v4i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpdpwssd %xmm2, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a1, <8 x i16> %a2)
  %2 = add <4 x i32> %1, %a0
  ret <4 x i32> %2
}

define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_commute(<4 x i32> %a0, <8 x i16> %a1, <8 x i16> %a2) {
; AVX-LABEL: test_pmaddwd_v8i16_add_v4i32_commute:
; AVX:       # %bb.0:
; AVX-NEXT:    {vex} vpdpwssd %xmm2, %xmm1, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_pmaddwd_v8i16_add_v4i32_commute:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpdpwssd %xmm2, %xmm1, %xmm0
; AVX512-NEXT:    retq
  %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a1, <8 x i16> %a2)
  %2 = add <4 x i32> %a0, %1
  ret <4 x i32> %2
}
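
; The following variants load one of the pmaddwd operands from memory, so the
; memory operand should be folded directly into vpdpwssd.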

define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_load1(<4 x i32> %a0, ptr %p1, <8 x i16> %a2) {
; AVX-LABEL: test_pmaddwd_v8i16_add_v4i32_load1:
; AVX:       # %bb.0:
; AVX-NEXT:    {vex} vpdpwssd (%rdi), %xmm1, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_pmaddwd_v8i16_add_v4i32_load1:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpdpwssd (%rdi), %xmm1, %xmm0
; AVX512-NEXT:    retq
  %a1 = load <8 x i16>, ptr %p1
  %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a1, <8 x i16> %a2)
  %2 = add <4 x i32> %1, %a0
  ret <4 x i32> %2
}

define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_load2(<4 x i32> %a0, <8 x i16> %a1, ptr %p2) {
; AVX-LABEL: test_pmaddwd_v8i16_add_v4i32_load2:
; AVX:       # %bb.0:
; AVX-NEXT:    {vex} vpdpwssd (%rdi), %xmm1, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_pmaddwd_v8i16_add_v4i32_load2:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpdpwssd (%rdi), %xmm1, %xmm0
; AVX512-NEXT:    retq
  %a2 = load <8 x i16>, ptr %p2
  %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a1, <8 x i16> %a2)
  %2 = add <4 x i32> %1, %a0
  ret <4 x i32> %2
}

define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_commute_load1(<4 x i32> %a0, ptr %p1, <8 x i16> %a2) {
; AVX-LABEL: test_pmaddwd_v8i16_add_v4i32_commute_load1:
; AVX:       # %bb.0:
; AVX-NEXT:    {vex} vpdpwssd (%rdi), %xmm1, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_pmaddwd_v8i16_add_v4i32_commute_load1:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpdpwssd (%rdi), %xmm1, %xmm0
; AVX512-NEXT:    retq
  %a1 = load <8 x i16>, ptr %p1
  %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a1, <8 x i16> %a2)
  %2 = add <4 x i32> %a0, %1
  ret <4 x i32> %2
}

define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_commute_load2(<4 x i32> %a0, <8 x i16> %a1, ptr %p2) {
; AVX-LABEL: test_pmaddwd_v8i16_add_v4i32_commute_load2:
; AVX:       # %bb.0:
; AVX-NEXT:    {vex} vpdpwssd (%rdi), %xmm1, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_pmaddwd_v8i16_add_v4i32_commute_load2:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpdpwssd (%rdi), %xmm1, %xmm0
; AVX512-NEXT:    retq
  %a2 = load <8 x i16>, ptr %p2
  %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a1, <8 x i16> %a2)
  %2 = add <4 x i32> %a0, %1
  ret <4 x i32> %2
}
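
; 256-bit variants of the same patterns, using the AVX2 pmaddwd intrinsic and
; the ymm forms of vpdpwssd.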

define <8 x i32> @test_pmaddwd_v16i16_add_v8i32(<8 x i32> %a0, <16 x i16> %a1, <16 x i16> %a2) {
; AVX-LABEL: test_pmaddwd_v16i16_add_v8i32:
; AVX:       # %bb.0:
; AVX-NEXT:    {vex} vpdpwssd %ymm2, %ymm1, %ymm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_pmaddwd_v16i16_add_v8i32:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpdpwssd %ymm2, %ymm1, %ymm0
; AVX512-NEXT:    retq
  %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a1, <16 x i16> %a2)
  %2 = add <8 x i32> %1, %a0
  ret <8 x i32> %2
}

define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_commute(<8 x i32> %a0, <16 x i16> %a1, <16 x i16> %a2) {
; AVX-LABEL: test_pmaddwd_v16i16_add_v8i32_commute:
; AVX:       # %bb.0:
; AVX-NEXT:    {vex} vpdpwssd %ymm2, %ymm1, %ymm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_pmaddwd_v16i16_add_v8i32_commute:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpdpwssd %ymm2, %ymm1, %ymm0
; AVX512-NEXT:    retq
  %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a1, <16 x i16> %a2)
  %2 = add <8 x i32> %a0, %1
  ret <8 x i32> %2
}

define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_load1(<8 x i32> %a0, ptr %p1, <16 x i16> %a2) {
; AVX-LABEL: test_pmaddwd_v16i16_add_v8i32_load1:
; AVX:       # %bb.0:
; AVX-NEXT:    {vex} vpdpwssd (%rdi), %ymm1, %ymm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_pmaddwd_v16i16_add_v8i32_load1:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpdpwssd (%rdi), %ymm1, %ymm0
; AVX512-NEXT:    retq
  %a1 = load <16 x i16>, ptr %p1
  %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a1, <16 x i16> %a2)
  %2 = add <8 x i32> %1, %a0
  ret <8 x i32> %2
}

define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_load2(<8 x i32> %a0, <16 x i16> %a1, ptr %p2) {
; AVX-LABEL: test_pmaddwd_v16i16_add_v8i32_load2:
; AVX:       # %bb.0:
; AVX-NEXT:    {vex} vpdpwssd (%rdi), %ymm1, %ymm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_pmaddwd_v16i16_add_v8i32_load2:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpdpwssd (%rdi), %ymm1, %ymm0
; AVX512-NEXT:    retq
  %a2 = load <16 x i16>, ptr %p2
  %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a1, <16 x i16> %a2)
  %2 = add <8 x i32> %1, %a0
  ret <8 x i32> %2
}

define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_commute_load1(<8 x i32> %a0, ptr %p1, <16 x i16> %a2) {
; AVX-LABEL: test_pmaddwd_v16i16_add_v8i32_commute_load1:
; AVX:       # %bb.0:
; AVX-NEXT:    {vex} vpdpwssd (%rdi), %ymm1, %ymm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_pmaddwd_v16i16_add_v8i32_commute_load1:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpdpwssd (%rdi), %ymm1, %ymm0
; AVX512-NEXT:    retq
  %a1 = load <16 x i16>, ptr %p1
  %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a1, <16 x i16> %a2)
  %2 = add <8 x i32> %a0, %1
  ret <8 x i32> %2
}

define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_commute_load2(<8 x i32> %a0, <16 x i16> %a1, ptr %p2) {
; AVX-LABEL: test_pmaddwd_v16i16_add_v8i32_commute_load2:
; AVX:       # %bb.0:
; AVX-NEXT:    {vex} vpdpwssd (%rdi), %ymm1, %ymm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: test_pmaddwd_v16i16_add_v8i32_commute_load2:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpdpwssd (%rdi), %ymm1, %ymm0
; AVX512-NEXT:    retq
  %a2 = load <16 x i16>, ptr %p2
  %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a1, <16 x i16> %a2)
  %2 = add <8 x i32> %a0, %1
  ret <8 x i32> %2
}

declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>)
declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>)