; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN:   FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN:   FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-ibm-aix-xcoff \
; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN:   FileCheck %s

; This test case aims to test the vector divide instructions on Power10.
; This includes the low-order and extended versions of vector divide,
; which operate on signed and unsigned words and doublewords.
; It also includes the 128-bit vector divide instructions.
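; The low-order forms match the semantics of the IR udiv/sdiv instructions,
; so they are exercised through plain IR division below, while the extended
; forms have no direct IR equivalent and are exercised through the
; @llvm.ppc.altivec.* intrinsics.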

define <2 x i64> @test_vdivud(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vdivud:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivud v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = udiv <2 x i64> %a, %b
  ret <2 x i64> %div
}

define <2 x i64> @test_vdivsd(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vdivsd:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivsd v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = sdiv <2 x i64> %a, %b
  ret <2 x i64> %div
}

define <4 x i32> @test_vdivuw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vdivuw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivuw v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = udiv <4 x i32> %a, %b
  ret <4 x i32> %div
}

define <4 x i32> @test_vdivsw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vdivsw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivsw v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = sdiv <4 x i32> %a, %b
  ret <4 x i32> %div
}

; Test the vector divide extended intrinsics.
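; Per the Power ISA, a divide extended instruction conceptually appends
; element-width zero bits to each element of the first operand, divides the
; resulting doubled-width value by the corresponding element of the second
; operand, and returns the low-order half of the quotient (the result is
; undefined if the quotient does not fit in the element width).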
declare <4 x i32> @llvm.ppc.altivec.vdivesw(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.ppc.altivec.vdiveuw(<4 x i32>, <4 x i32>)
declare <2 x i64> @llvm.ppc.altivec.vdivesd(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.ppc.altivec.vdiveud(<2 x i64>, <2 x i64>)

define <4 x i32> @test_vdivesw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vdivesw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivesw v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = tail call <4 x i32> @llvm.ppc.altivec.vdivesw(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %div
}

define <4 x i32> @test_vdiveuw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vdiveuw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdiveuw v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = tail call <4 x i32> @llvm.ppc.altivec.vdiveuw(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %div
}

define <1 x i128> @test_vdivsq(<1 x i128> %x, <1 x i128> %y) nounwind readnone {
; CHECK-LABEL: test_vdivsq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivsq v2, v2, v3
; CHECK-NEXT:    blr
  %tmp = sdiv <1 x i128> %x, %y
  ret <1 x i128> %tmp
}

define <1 x i128> @test_vdivuq(<1 x i128> %x, <1 x i128> %y) nounwind readnone {
; CHECK-LABEL: test_vdivuq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivuq v2, v2, v3
; CHECK-NEXT:    blr
  %tmp = udiv <1 x i128> %x, %y
  ret <1 x i128> %tmp
}

define <2 x i64> @test_vdivesd(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vdivesd:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivesd v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = tail call <2 x i64> @llvm.ppc.altivec.vdivesd(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %div
}

define <2 x i64> @test_vdiveud(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vdiveud:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdiveud v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = tail call <2 x i64> @llvm.ppc.altivec.vdiveud(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %div
}

declare <1 x i128> @llvm.ppc.altivec.vdivesq(<1 x i128>, <1 x i128>) nounwind readnone
declare <1 x i128> @llvm.ppc.altivec.vdiveuq(<1 x i128>, <1 x i128>) nounwind readnone

define <1 x i128> @test_vdivesq(<1 x i128> %x, <1 x i128> %y) nounwind readnone {
; CHECK-LABEL: test_vdivesq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivesq v2, v2, v3
; CHECK-NEXT:    blr
  %tmp = tail call <1 x i128> @llvm.ppc.altivec.vdivesq(<1 x i128> %x, <1 x i128> %y)
  ret <1 x i128> %tmp
}

define <1 x i128> @test_vdiveuq(<1 x i128> %x, <1 x i128> %y) nounwind readnone {
; CHECK-LABEL: test_vdiveuq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdiveuq v2, v2, v3
; CHECK-NEXT:    blr
  %tmp = call <1 x i128> @llvm.ppc.altivec.vdiveuq(<1 x i128> %x, <1 x i128> %y)
  ret <1 x i128> %tmp
}