; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN: -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN: FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
; RUN: -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN: FileCheck %s

; This test case aims to test the vector multiply instructions on Power10.
; This includes the low order and high order versions of vector multiply.
; The low order version operates on doublewords, whereas the high order version
; operates on signed and unsigned words and doublewords.
; This file also includes 128-bit vector multiply instructions.
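; The high order multiplies are expressed as a widening extend of each source,
; a multiply in the wider type, a logical shift right by the element width, and
; a truncate; the checks below verify that this pattern is selected to the
; corresponding vmulh instruction.
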
define <2 x i64> @test_vmulld(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vmulld:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulld v2, v3, v2
; CHECK-NEXT: blr
entry:
  %mul = mul <2 x i64> %b, %a
  ret <2 x i64> %mul
}

define <2 x i64> @test_vmulhsd(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vmulhsd:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulhsd v2, v3, v2
; CHECK-NEXT: blr
entry:
  %0 = sext <2 x i64> %a to <2 x i128>
  %1 = sext <2 x i64> %b to <2 x i128>
  %mul = mul <2 x i128> %1, %0
  %shr = lshr <2 x i128> %mul, <i128 64, i128 64>
  %tr = trunc <2 x i128> %shr to <2 x i64>
  ret <2 x i64> %tr
}

define <2 x i64> @test_vmulhud(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vmulhud:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulhud v2, v3, v2
; CHECK-NEXT: blr
entry:
  %0 = zext <2 x i64> %a to <2 x i128>
  %1 = zext <2 x i64> %b to <2 x i128>
  %mul = mul <2 x i128> %1, %0
  %shr = lshr <2 x i128> %mul, <i128 64, i128 64>
  %tr = trunc <2 x i128> %shr to <2 x i64>
  ret <2 x i64> %tr
}

define <4 x i32> @test_vmulhsw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vmulhsw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulhsw v2, v3, v2
; CHECK-NEXT: blr
entry:
  %0 = sext <4 x i32> %a to <4 x i64>
  %1 = sext <4 x i32> %b to <4 x i64>
  %mul = mul <4 x i64> %1, %0
  %shr = lshr <4 x i64> %mul, <i64 32, i64 32, i64 32, i64 32>
  %tr = trunc <4 x i64> %shr to <4 x i32>
  ret <4 x i32> %tr
}

define <4 x i32> @test_vmulhuw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vmulhuw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulhuw v2, v3, v2
; CHECK-NEXT: blr
entry:
  %0 = zext <4 x i32> %a to <4 x i64>
  %1 = zext <4 x i32> %b to <4 x i64>
  %mul = mul <4 x i64> %1, %0
  %shr = lshr <4 x i64> %mul, <i64 32, i64 32, i64 32, i64 32>
  %tr = trunc <4 x i64> %shr to <4 x i32>
  ret <4 x i32> %tr
}

; Test the vector multiply high intrinsics.
declare <4 x i32> @llvm.ppc.altivec.vmulhsw(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.ppc.altivec.vmulhuw(<4 x i32>, <4 x i32>)
declare <2 x i64> @llvm.ppc.altivec.vmulhsd(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.ppc.altivec.vmulhud(<2 x i64>, <2 x i64>)

define <4 x i32> @test_vmulhsw_intrinsic(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vmulhsw_intrinsic:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulhsw v2, v2, v3
; CHECK-NEXT: blr
entry:
  %mulh = tail call <4 x i32> @llvm.ppc.altivec.vmulhsw(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %mulh
}

define <4 x i32> @test_vmulhuw_intrinsic(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vmulhuw_intrinsic:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulhuw v2, v2, v3
; CHECK-NEXT: blr
entry:
  %mulh = tail call <4 x i32> @llvm.ppc.altivec.vmulhuw(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %mulh
}

define <2 x i64> @test_vmulhsd_intrinsic(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vmulhsd_intrinsic:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulhsd v2, v2, v3
; CHECK-NEXT: blr
entry:
  %mulh = tail call <2 x i64> @llvm.ppc.altivec.vmulhsd(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %mulh
}

define <2 x i64> @test_vmulhud_intrinsic(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vmulhud_intrinsic:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmulhud v2, v2, v3
; CHECK-NEXT: blr
entry:
  %mulh = tail call <2 x i64> @llvm.ppc.altivec.vmulhud(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %mulh
}

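; Test the 128-bit vector multiply intrinsics: the even/odd signed and unsigned
; doubleword multiplies and the multiply-sum with carry-out.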
declare <1 x i128> @llvm.ppc.altivec.vmuleud(<2 x i64>, <2 x i64>) nounwind readnone
declare <1 x i128> @llvm.ppc.altivec.vmuloud(<2 x i64>, <2 x i64>) nounwind readnone
declare <1 x i128> @llvm.ppc.altivec.vmulesd(<2 x i64>, <2 x i64>) nounwind readnone
declare <1 x i128> @llvm.ppc.altivec.vmulosd(<2 x i64>, <2 x i64>) nounwind readnone
declare <1 x i128> @llvm.ppc.altivec.vmsumcud(<2 x i64>, <2 x i64>, <1 x i128>) nounwind readnone

define <1 x i128> @test_vmuleud(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
; CHECK-LABEL: test_vmuleud:
; CHECK: # %bb.0:
; CHECK-NEXT: vmuleud v2, v2, v3
; CHECK-NEXT: blr
  %tmp = tail call <1 x i128> @llvm.ppc.altivec.vmuleud(<2 x i64> %x, <2 x i64> %y)
  ret <1 x i128> %tmp
}

define <1 x i128> @test_vmuloud(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
; CHECK-LABEL: test_vmuloud:
; CHECK: # %bb.0:
; CHECK-NEXT: vmuloud v2, v2, v3
; CHECK-NEXT: blr
  %tmp = tail call <1 x i128> @llvm.ppc.altivec.vmuloud(<2 x i64> %x, <2 x i64> %y)
  ret <1 x i128> %tmp
}

define <1 x i128> @test_vmulesd(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
; CHECK-LABEL: test_vmulesd:
; CHECK: # %bb.0:
; CHECK-NEXT: vmulesd v2, v2, v3
; CHECK-NEXT: blr
  %tmp = tail call <1 x i128> @llvm.ppc.altivec.vmulesd(<2 x i64> %x, <2 x i64> %y)
  ret <1 x i128> %tmp
}

define <1 x i128> @test_vmulosd(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
; CHECK-LABEL: test_vmulosd:
; CHECK: # %bb.0:
; CHECK-NEXT: vmulosd v2, v2, v3
; CHECK-NEXT: blr
  %tmp = tail call <1 x i128> @llvm.ppc.altivec.vmulosd(<2 x i64> %x, <2 x i64> %y)
  ret <1 x i128> %tmp
}

define <1 x i128> @test_vmsumcud(<2 x i64> %x, <2 x i64> %y, <1 x i128> %z) nounwind readnone {
; CHECK-LABEL: test_vmsumcud:
; CHECK: # %bb.0:
; CHECK-NEXT: vmsumcud v2, v2, v3, v4
; CHECK-NEXT: blr
  %tmp = tail call <1 x i128> @llvm.ppc.altivec.vmsumcud(<2 x i64> %x, <2 x i64> %y, <1 x i128> %z)
  ret <1 x i128> %tmp
}