; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN:   FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
; RUN:   FileCheck %s
; This test case aims to test the vector divide instructions on Power10.
; This includes the low order and extended versions of vector divide,
; that operate on signed and unsigned words and doublewords.
; This also includes 128 bit vector divide instructions.
; Low-order unsigned doubleword divide: udiv on <2 x i64> selects vdivud.
define <2 x i64> @test_vdivud(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vdivud:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivud v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = udiv <2 x i64> %a, %b
  ret <2 x i64> %div
}
; Low-order signed doubleword divide: sdiv on <2 x i64> selects vdivsd.
define <2 x i64> @test_vdivsd(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vdivsd:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivsd v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = sdiv <2 x i64> %a, %b
  ret <2 x i64> %div
}
; Low-order unsigned word divide: udiv on <4 x i32> selects vdivuw.
define <4 x i32> @test_vdivuw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vdivuw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivuw v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = udiv <4 x i32> %a, %b
  ret <4 x i32> %div
}
; Low-order signed word divide: sdiv on <4 x i32> selects vdivsw.
define <4 x i32> @test_vdivsw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vdivsw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivsw v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = sdiv <4 x i32> %a, %b
  ret <4 x i32> %div
}
; Test the vector divide extended intrinsics.
declare <4 x i32> @llvm.ppc.altivec.vdivesw(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.ppc.altivec.vdiveuw(<4 x i32>, <4 x i32>)
declare <2 x i64> @llvm.ppc.altivec.vdivesd(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.ppc.altivec.vdiveud(<2 x i64>, <2 x i64>)
; Extended signed word divide via intrinsic: selects vdivesw.
define <4 x i32> @test_vdivesw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vdivesw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivesw v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = tail call <4 x i32> @llvm.ppc.altivec.vdivesw(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %div
}
; Extended unsigned word divide via intrinsic: selects vdiveuw.
define <4 x i32> @test_vdiveuw(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_vdiveuw:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdiveuw v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = tail call <4 x i32> @llvm.ppc.altivec.vdiveuw(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %div
}
; 128-bit signed divide: sdiv on <1 x i128> selects vdivsq.
define <1 x i128> @test_vdivsq(<1 x i128> %x, <1 x i128> %y) nounwind readnone {
; CHECK-LABEL: test_vdivsq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivsq v2, v2, v3
; CHECK-NEXT:    blr
  %tmp = sdiv <1 x i128> %x, %y
  ret <1 x i128> %tmp
}
; 128-bit unsigned divide: udiv on <1 x i128> selects vdivuq.
define <1 x i128> @test_vdivuq(<1 x i128> %x, <1 x i128> %y) nounwind readnone {
; CHECK-LABEL: test_vdivuq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivuq v2, v2, v3
; CHECK-NEXT:    blr
  %tmp = udiv <1 x i128> %x, %y
  ret <1 x i128> %tmp
}
; Extended signed doubleword divide via intrinsic: selects vdivesd.
define <2 x i64> @test_vdivesd(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vdivesd:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdivesd v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = tail call <2 x i64> @llvm.ppc.altivec.vdivesd(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %div
}
; Extended unsigned doubleword divide via intrinsic: selects vdiveud.
define <2 x i64> @test_vdiveud(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vdiveud:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vdiveud v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %div = tail call <2 x i64> @llvm.ppc.altivec.vdiveud(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %div
}
declare <1 x i128> @llvm.ppc.altivec.vdivesq(<1 x i128>, <1 x i128>) nounwind readnone
declare <1 x i128> @llvm.ppc.altivec.vdiveuq(<1 x i128>, <1 x i128>) nounwind readnone
; Extended 128-bit signed divide via intrinsic: selects vdivesq.
define <1 x i128> @test_vdivesq(<1 x i128> %x, <1 x i128> %y) nounwind readnone {
; CHECK-LABEL: test_vdivesq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivesq v2, v2, v3
; CHECK-NEXT:    blr
  %tmp = tail call <1 x i128> @llvm.ppc.altivec.vdivesq(<1 x i128> %x, <1 x i128> %y)
  ret <1 x i128> %tmp
}
; Extended 128-bit unsigned divide via intrinsic: selects vdiveuq.
define <1 x i128> @test_vdiveuq(<1 x i128> %x, <1 x i128> %y) nounwind readnone {
; CHECK-LABEL: test_vdiveuq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdiveuq v2, v2, v3
; CHECK-NEXT:    blr
  %tmp = call <1 x i128> @llvm.ppc.altivec.vdiveuq(<1 x i128> %x, <1 x i128> %y)
  ret <1 x i128> %tmp
}