; Verify that we can create unaligned loads and stores from VSX intrinsics.

; RUN: opt < %s -passes=instcombine -S | FileCheck %s

target triple = "powerpc64-unknown-linux-gnu"

; The globals are deliberately under-aligned (align 1) so the resulting
; plain loads/stores must be emitted as unaligned accesses.
@vf = common global <4 x float> zeroinitializer, align 1
@res_vf = common global <4 x float> zeroinitializer, align 1
@vd = common global <2 x double> zeroinitializer, align 1
@res_vd = common global <2 x double> zeroinitializer, align 1
; Round-trip each vector through the VSX load/store intrinsics; instcombine
; should replace the intrinsic calls with ordinary align-1 vector load/store
; instructions (checked below).
define void @test1() {
entry:
  %t1 = alloca ptr, align 8
  %t2 = alloca ptr, align 8
  store ptr @vf, ptr %t1, align 8
  %0 = load ptr, ptr %t1, align 8
  %1 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr %0)
  store ptr @res_vf, ptr %t1, align 8
  %2 = load ptr, ptr %t1, align 8
  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %1, ptr %2)
  store ptr @vd, ptr %t2, align 8
  %3 = load ptr, ptr %t2, align 8
  %4 = call <2 x double> @llvm.ppc.vsx.lxvd2x(ptr %3)
  store ptr @res_vd, ptr %t2, align 8
  %5 = load ptr, ptr %t2, align 8
  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %4, ptr %5)
  ret void
}

; CHECK-LABEL: @test1
; CHECK: %0 = load <4 x i32>, ptr @vf, align 1
; CHECK: store <4 x i32> %0, ptr @res_vf, align 1
; CHECK: %1 = load <2 x double>, ptr @vd, align 1
; CHECK: store <2 x double> %1, ptr @res_vd, align 1
; VSX load/store intrinsics exercised above; instcombine canonicalizes them
; into plain unaligned vector memory operations.
declare <4 x i32> @llvm.ppc.vsx.lxvw4x(ptr)
declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, ptr)
declare <2 x double> @llvm.ppc.vsx.lxvd2x(ptr)
declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, ptr)