; RUN: llc -verify-machineinstrs -O0 -mcpu=pwr7 <%s | FileCheck %s

; Test optimizations of build_vector for 6-bit immediates.
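; Note (based on the CHECK patterns below): vspltisb/vspltish/vspltisw take a
; 5-bit signed immediate, so only splats of values in [-16, 15] can be
; materialized directly.  The even-valued cases expect a constant 2N to be
; built as splat(N) added to itself, e.g. 18 via "vspltisw 9" plus "vadduwm".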
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"

%v4i32 = type <4 x i32>
%v8i16 = type <8 x i16>
%v16i8 = type <16 x i8>

define void @test_v4i32_pos_even(%v4i32* %P, %v4i32* %S) {
  %p = load %v4i32, %v4i32* %P
  %r = add %v4i32 %p, < i32 18, i32 18, i32 18, i32 18 >
  store %v4i32 %r, %v4i32* %S
  ret void
}

; CHECK-LABEL: test_v4i32_pos_even:
; CHECK: vspltisw [[REG1:[0-9]+]], 9
; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG1]]

define void @test_v4i32_neg_even(%v4i32* %P, %v4i32* %S) {
  %p = load %v4i32, %v4i32* %P
  %r = add %v4i32 %p, < i32 -28, i32 -28, i32 -28, i32 -28 >
  store %v4i32 %r, %v4i32* %S
  ret void
}

; CHECK-LABEL: test_v4i32_neg_even:
; CHECK: vspltisw [[REG1:[0-9]+]], -14
; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG1]]

define void @test_v8i16_pos_even(%v8i16* %P, %v8i16* %S) {
  %p = load %v8i16, %v8i16* %P
  %r = add %v8i16 %p, < i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30 >
  store %v8i16 %r, %v8i16* %S
  ret void
}

; CHECK-LABEL: test_v8i16_pos_even:
; CHECK: vspltish [[REG1:[0-9]+]], 15
; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG1]]

define void @test_v8i16_neg_even(%v8i16* %P, %v8i16* %S) {
  %p = load %v8i16, %v8i16* %P
  %r = add %v8i16 %p, < i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32 >
  store %v8i16 %r, %v8i16* %S
  ret void
}

; CHECK-LABEL: test_v8i16_neg_even:
; CHECK: vspltish [[REG1:[0-9]+]], -16
; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG1]]

define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
  %p = load %v16i8, %v16i8* %P
  %r = add %v16i8 %p, < i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16 >
  store %v16i8 %r, %v16i8* %S
  ret void
}

; CHECK-LABEL: test_v16i8_pos_even:
; CHECK: vspltisb [[REG1:[0-9]+]], 8
; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG1]]

define void @test_v16i8_neg_even(%v16i8* %P, %v16i8* %S) {
  %p = load %v16i8, %v16i8* %P
  %r = add %v16i8 %p, < i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18 >
  store %v16i8 %r, %v16i8* %S
  ret void
}

; CHECK-LABEL: test_v16i8_neg_even:
; CHECK: vspltisb [[REG1:[0-9]+]], -9
; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG1]]

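; For the odd-valued cases below, the expected expansion combines splat(-16)
; with a second splat: a positive odd constant V is built as splat(V-16) minus
; splat(-16) (e.g. 27 via "vspltisw 11" and "vsubuwm"), and a negative odd
; constant V as splat(V+16) plus splat(-16) (e.g. -27 via "vspltisw -11" and
; "vadduwm").
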
define void @test_v4i32_pos_odd(%v4i32* %P, %v4i32* %S) {
  %p = load %v4i32, %v4i32* %P
  %r = add %v4i32 %p, < i32 27, i32 27, i32 27, i32 27 >
  store %v4i32 %r, %v4i32* %S
  ret void
}

; CHECK-LABEL: test_v4i32_pos_odd:
; CHECK: vspltisw [[REG2:[0-9]+]], -16
; CHECK: vspltisw [[REG1:[0-9]+]], 11
; CHECK: vsubuwm {{[0-9]+}}, [[REG1]], [[REG2]]

define void @test_v4i32_neg_odd(%v4i32* %P, %v4i32* %S) {
  %p = load %v4i32, %v4i32* %P
  %r = add %v4i32 %p, < i32 -27, i32 -27, i32 -27, i32 -27 >
  store %v4i32 %r, %v4i32* %S
  ret void
}

; CHECK-LABEL: test_v4i32_neg_odd:
; CHECK: vspltisw [[REG2:[0-9]+]], -16
; CHECK: vspltisw [[REG1:[0-9]+]], -11
; CHECK: vadduwm {{[0-9]+}}, [[REG1]], [[REG2]]

define void @test_v8i16_pos_odd(%v8i16* %P, %v8i16* %S) {
  %p = load %v8i16, %v8i16* %P
  %r = add %v8i16 %p, < i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31 >
  store %v8i16 %r, %v8i16* %S
  ret void
}

; CHECK-LABEL: test_v8i16_pos_odd:
; CHECK: vspltish [[REG2:[0-9]+]], -16
; CHECK: vspltish [[REG1:[0-9]+]], 15
; CHECK: vsubuhm {{[0-9]+}}, [[REG1]], [[REG2]]

define void @test_v8i16_neg_odd(%v8i16* %P, %v8i16* %S) {
  %p = load %v8i16, %v8i16* %P
  %r = add %v8i16 %p, < i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31, i16 -31 >
  store %v8i16 %r, %v8i16* %S
  ret void
}

; CHECK-LABEL: test_v8i16_neg_odd:
; CHECK: vspltish [[REG2:[0-9]+]], -16
; CHECK: vspltish [[REG1:[0-9]+]], -15
; CHECK: vadduhm {{[0-9]+}}, [[REG1]], [[REG2]]

define void @test_v16i8_pos_odd(%v16i8* %P, %v16i8* %S) {
  %p = load %v16i8, %v16i8* %P
  %r = add %v16i8 %p, < i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17 >
  store %v16i8 %r, %v16i8* %S
  ret void
}

; CHECK-LABEL: test_v16i8_pos_odd:
; CHECK: vspltisb [[REG2:[0-9]+]], -16
; CHECK: vspltisb [[REG1:[0-9]+]], 1
; CHECK: vsububm {{[0-9]+}}, [[REG1]], [[REG2]]

define void @test_v16i8_neg_odd(%v16i8* %P, %v16i8* %S) {
  %p = load %v16i8, %v16i8* %P
  %r = add %v16i8 %p, < i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17, i8 -17 >
  store %v16i8 %r, %v16i8* %S
  ret void
}

; CHECK-LABEL: test_v16i8_neg_odd:
; CHECK: vspltisb [[REG2:[0-9]+]], -16
; CHECK: vspltisb [[REG1:[0-9]+]], -1
; CHECK: vaddubm {{[0-9]+}}, [[REG1]], [[REG2]]