; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s

declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
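
; vpdpbusd computes an unsigned-byte x signed-byte dot product with i32
; accumulation, so a zext(i8) * sext(i8) multiply-reduce maps onto it
; directly: the loaded operand feeds the unsigned side and %b the signed
; side, leaving only a horizontal add of the dword accumulator.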
define i32 @mul_i8i8(ptr %a, <16 x i8> %b, i32 %c) {
; CHECK-LABEL: mul_i8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovdqa (%rdi), %xmm1
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT:    vpdpbusd %xmm0, %xmm1, %xmm2
; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vmovd %xmm0, %eax
; CHECK-NEXT:    addl %esi, %eax
; CHECK-NEXT:    retq
entry:
  %0 = load <16 x i8>, ptr %a, align 16
  %1 = zext <16 x i8> %0 to <16 x i32>
  %2 = sext <16 x i8> %b to <16 x i32>
  %3 = mul nsw <16 x i32> %1, %2
  %4 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %3)
  %op.extra = add nsw i32 %4, %c
  ret i32 %op.extra
}
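
; The unsigned i4 operand only needs its low nibble isolated (vpandd with
; a broadcast mask) to become a valid unsigned-byte input, so the
; zext(i4) * sext(i8) reduction still lowers to a single vpdpbusd.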
define i32 @mul_i4i8(<16 x i4> %a, <16 x i8> %b, i32 %c) {
; CHECK-LABEL: mul_i4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT:    vpdpbusd %xmm1, %xmm0, %xmm2
; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vmovd %xmm0, %eax
; CHECK-NEXT:    addl %edi, %eax
; CHECK-NEXT:    retq
entry:
  %0 = zext <16 x i4> %a to <16 x i32>
  %1 = sext <16 x i8> %b to <16 x i32>
  %2 = mul nsw <16 x i32> %0, %1
  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
  %op.extra = add nsw i32 %3, %c
  ret i32 %op.extra
}
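
; zext(i4) * sext(i4): the unsigned side is masked to the low nibble,
; while the signed side is widened to i8 in-register with the
; ((x & 15) ^ 8) - 8 sign-extension idiom (vpternlogd plus vpsubb)
; before feeding vpdpbusd.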
define i32 @mul_i4i4(<16 x i4> %a, <16 x i4> %b, i32 %c) {
; CHECK-LABEL: mul_i4i4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpsllw $4, %xmm1, %xmm1
; CHECK-NEXT:    vpsrlw $4, %xmm1, %xmm1
; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; CHECK-NEXT:    vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm2, %xmm1
; CHECK-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT:    vpdpbusd %xmm1, %xmm0, %xmm2
; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vmovd %xmm0, %eax
; CHECK-NEXT:    addl %edi, %eax
; CHECK-NEXT:    retq
entry:
  %0 = zext <16 x i4> %a to <16 x i32>
  %1 = sext <16 x i4> %b to <16 x i32>
  %2 = mul nsw <16 x i32> %0, %1
  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
  %op.extra = add nsw i32 %3, %c
  ret i32 %op.extra
}
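
; sext(i4) * sext(i4): both operands are signed, which does not fit
; vpdpbusd's unsigned-times-signed form, so lowering falls back to
; sign extension in 16-bit lanes and vpmaddwd.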
define i32 @mul_sext_i4i4(<16 x i4> %a, <16 x i4> %b, i32 %c) {
; CHECK-LABEL: mul_sext_i4i4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; CHECK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; CHECK-NEXT:    vpsllw $12, %ymm1, %ymm1
; CHECK-NEXT:    vpsraw $12, %ymm1, %ymm1
; CHECK-NEXT:    vpsllw $12, %ymm0, %ymm0
; CHECK-NEXT:    vpsraw $12, %ymm0, %ymm0
; CHECK-NEXT:    vpmaddwd %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    vextracti128 $1, %ymm0, %xmm1
; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vmovd %xmm0, %eax
; CHECK-NEXT:    addl %edi, %eax
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %0 = sext <16 x i4> %a to <16 x i32>
  %1 = sext <16 x i4> %b to <16 x i32>
  %2 = mul nsw <16 x i32> %0, %1
  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
  %op.extra = add nsw i32 %3, %c
  ret i32 %op.extra
}
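
; zext(i4) * zext(i4): after masking with 15, every byte is in [0, 15],
; so the value is unchanged under vpdpbusd's signed interpretation as
; well and the unsigned-times-unsigned product is still exact.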
define i32 @mul_zext_i4i4(<16 x i4> %a, <16 x i4> %b, i32 %c) {
; CHECK-LABEL: mul_zext_i4i4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; CHECK-NEXT:    vpand %xmm2, %xmm1, %xmm1
; CHECK-NEXT:    vpand %xmm2, %xmm0, %xmm0
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT:    vpdpbusd %xmm1, %xmm0, %xmm2
; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vmovd %xmm0, %eax
; CHECK-NEXT:    addl %edi, %eax
; CHECK-NEXT:    retq
entry:
  %0 = zext <16 x i4> %a to <16 x i32>
  %1 = zext <16 x i4> %b to <16 x i32>
  %2 = mul nsw <16 x i32> %0, %1
  %3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
  %op.extra = add nsw i32 %3, %c
  ret i32 %op.extra
}