1 ; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr8 \
2 ; RUN: -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
4 ; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr8 -disable-ppc-vsx-swap-removal \
5 ; RUN: -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck \
6 ; RUN: -check-prefix=NOOPTSWAP %s
8 ; RUN: llc -O3 -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \
9 ; RUN: -verify-machineinstrs -ppc-vsr-nums-as-vr < %s | FileCheck \
10 ; RUN: -check-prefix=CHECK-P9 --implicit-check-not xxswapd %s
12 ; RUN: llc -O3 -mcpu=pwr9 -disable-ppc-vsx-swap-removal -mattr=-power9-vector \
13 ; RUN: -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu < %s \
14 ; RUN: | FileCheck -check-prefix=NOOPTSWAP %s
; Updated the align attribute from 16 to 8 to keep the swap-instruction tests.
18 ; Changes have been made on little-endian to use lvx and stvx
19 ; instructions instead of lxvd2x/xxswapd and xxswapd/stxvd2x for
20 ; aligned vectors with elements up to 4 bytes
22 ; This test was generated from the following source:
25 ; int ca[N] __attribute__((aligned(16)));
26 ; int cb[N] __attribute__((aligned(16)));
27 ; int cc[N] __attribute__((aligned(16)));
28 ; int cd[N] __attribute__((aligned(16)));
33 ; for (i = 0; i < N; i++) {
34 ; ca[i] = (cb[i] + cc[i]) * cd[i];
; Four 16 KiB i32 arrays. Alignment is deliberately 8 (not the natural 16 for
; <4 x i32> accesses) so the backend must use lxvd2x/xxswapd + xxswapd/stxvd2x
; sequences on little-endian, which is what the swap-removal checks exercise.
@cb = common global [4096 x i32] zeroinitializer, align 8
@cc = common global [4096 x i32] zeroinitializer, align 8
@cd = common global [4096 x i32] zeroinitializer, align 8
@ca = common global [4096 x i32] zeroinitializer, align 8
  ; Vectorized loop body, unrolled 4x. Each unrolled iteration computes
  ; ca[i..i+3] = (cb[i..i+3] + cc[i..i+3]) * cd[i..i+3] with <4 x i32> ops.
  ; All loads/stores are align 8 (under-aligned for v4i32) to force the
  ; lxvd2x/xxswapd pattern this test checks.
  ; %index advances by 16 per trip (4 unrolled steps of 4 lanes).
  %index = phi i64 [ 0, %entry ], [ %index.next.3, %vector.body ]
  ; --- unrolled step 0: elements [index, index+3] ---
  %0 = getelementptr inbounds [4096 x i32], ptr @cb, i64 0, i64 %index
  %wide.load = load <4 x i32>, ptr %0, align 8
  %1 = getelementptr inbounds [4096 x i32], ptr @cc, i64 0, i64 %index
  %wide.load13 = load <4 x i32>, ptr %1, align 8
  %2 = add nsw <4 x i32> %wide.load13, %wide.load
  %3 = getelementptr inbounds [4096 x i32], ptr @cd, i64 0, i64 %index
  %wide.load14 = load <4 x i32>, ptr %3, align 8
  %4 = mul nsw <4 x i32> %2, %wide.load14
  %5 = getelementptr inbounds [4096 x i32], ptr @ca, i64 0, i64 %index
  store <4 x i32> %4, ptr %5, align 8
  %index.next = add nuw nsw i64 %index, 4
  ; --- unrolled step 1: elements [index+4, index+7] ---
  %6 = getelementptr inbounds [4096 x i32], ptr @cb, i64 0, i64 %index.next
  %wide.load.1 = load <4 x i32>, ptr %6, align 8
  %7 = getelementptr inbounds [4096 x i32], ptr @cc, i64 0, i64 %index.next
  %wide.load13.1 = load <4 x i32>, ptr %7, align 8
  %8 = add nsw <4 x i32> %wide.load13.1, %wide.load.1
  %9 = getelementptr inbounds [4096 x i32], ptr @cd, i64 0, i64 %index.next
  %wide.load14.1 = load <4 x i32>, ptr %9, align 8
  %10 = mul nsw <4 x i32> %8, %wide.load14.1
  %11 = getelementptr inbounds [4096 x i32], ptr @ca, i64 0, i64 %index.next
  store <4 x i32> %10, ptr %11, align 8
  %index.next.1 = add nuw nsw i64 %index.next, 4
  ; --- unrolled step 2: elements [index+8, index+11] ---
  %12 = getelementptr inbounds [4096 x i32], ptr @cb, i64 0, i64 %index.next.1
  %wide.load.2 = load <4 x i32>, ptr %12, align 8
  %13 = getelementptr inbounds [4096 x i32], ptr @cc, i64 0, i64 %index.next.1
  %wide.load13.2 = load <4 x i32>, ptr %13, align 8
  %14 = add nsw <4 x i32> %wide.load13.2, %wide.load.2
  %15 = getelementptr inbounds [4096 x i32], ptr @cd, i64 0, i64 %index.next.1
  %wide.load14.2 = load <4 x i32>, ptr %15, align 8
  %16 = mul nsw <4 x i32> %14, %wide.load14.2
  %17 = getelementptr inbounds [4096 x i32], ptr @ca, i64 0, i64 %index.next.1
  store <4 x i32> %16, ptr %17, align 8
  %index.next.2 = add nuw nsw i64 %index.next.1, 4
  ; --- unrolled step 3: elements [index+12, index+15] ---
  %18 = getelementptr inbounds [4096 x i32], ptr @cb, i64 0, i64 %index.next.2
  %wide.load.3 = load <4 x i32>, ptr %18, align 8
  %19 = getelementptr inbounds [4096 x i32], ptr @cc, i64 0, i64 %index.next.2
  %wide.load13.3 = load <4 x i32>, ptr %19, align 8
  %20 = add nsw <4 x i32> %wide.load13.3, %wide.load.3
  %21 = getelementptr inbounds [4096 x i32], ptr @cd, i64 0, i64 %index.next.2
  %wide.load14.3 = load <4 x i32>, ptr %21, align 8
  %22 = mul nsw <4 x i32> %20, %wide.load14.3
  %23 = getelementptr inbounds [4096 x i32], ptr @ca, i64 0, i64 %index.next.2
  store <4 x i32> %22, ptr %23, align 8
  ; Exit after processing all 4096 elements (256 trips of 16).
  %index.next.3 = add nuw nsw i64 %index.next.2, 4
  %24 = icmp eq i64 %index.next.3, 4096
  br i1 %24, label %for.end, label %vector.body
101 ; CHECK-NOT: xxpermdi
103 ; CHECK-P9-NOT: xxpermdi
133 ; NOOPTSWAP-LABEL: @foo
136 ; NOOPTSWAP-DAG: lxvd2x
137 ; NOOPTSWAP-DAG: lxvd2x
138 ; NOOPTSWAP-DAG: xxswapd
139 ; NOOPTSWAP-DAG: xxswapd
140 ; NOOPTSWAP-DAG: xxswapd
141 ; NOOPTSWAP-DAG: vadduwm
144 ; NOOPTSWAP-DAG: xxswapd
145 ; NOOPTSWAP-DAG: xxswapd
146 ; NOOPTSWAP-DAG: stxvd2x
147 ; NOOPTSWAP-DAG: stxvd2x
150 ; CHECK-P9-LABEL: @foo
163 ; CHECK-P9-DAG: vadduwm
164 ; CHECK-P9-DAG: vadduwm
165 ; CHECK-P9-DAG: vadduwm
166 ; CHECK-P9-DAG: vadduwm
167 ; CHECK-P9-DAG: vmuluwm
168 ; CHECK-P9-DAG: vmuluwm
169 ; CHECK-P9-DAG: vmuluwm
170 ; CHECK-P9-DAG: vmuluwm