; Verify that we can create unaligned loads and stores from VSX intrinsics.

; RUN: opt < %s -instcombine -S | FileCheck %s

target triple = "powerpc64-unknown-linux-gnu"

@vf = common global <4 x float> zeroinitializer, align 1
@res_vf = common global <4 x float> zeroinitializer, align 1
@vd = common global <2 x double> zeroinitializer, align 1
@res_vd = common global <2 x double> zeroinitializer, align 1
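
; The globals above are only 1-byte aligned, so InstCombine must turn the
; lxvw4x/stxvw4x and lxvd2x/stxvd2x intrinsic calls in @test1 into plain
; vector loads and stores that keep align 1, as the CHECK lines below require.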
define void @test1() {
entry:
  %t1 = alloca <4 x float>*, align 8
  %t2 = alloca <2 x double>*, align 8
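
  ; <4 x float> half: the addresses of @vf and @res_vf are spilled to %t1,
  ; and the data is copied with lxvw4x/stxvw4x.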
  store <4 x float>* @vf, <4 x float>** %t1, align 8
  %0 = load <4 x float>*, <4 x float>** %t1, align 8
  %1 = bitcast <4 x float>* %0 to i8*
  %2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %1)
  store <4 x float>* @res_vf, <4 x float>** %t1, align 8
  %3 = load <4 x float>*, <4 x float>** %t1, align 8
  %4 = bitcast <4 x float>* %3 to i8*
  call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %2, i8* %4)
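
  ; <2 x double> half: the same pattern through %t2, copying @vd to @res_vd
  ; with lxvd2x/stxvd2x.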
  store <2 x double>* @vd, <2 x double>** %t2, align 8
  %5 = load <2 x double>*, <2 x double>** %t2, align 8
  %6 = bitcast <2 x double>* %5 to i8*
  %7 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %6)
  store <2 x double>* @res_vd, <2 x double>** %t2, align 8
  %8 = load <2 x double>*, <2 x double>** %t2, align 8
  %9 = bitcast <2 x double>* %8 to i8*
  call void @llvm.ppc.vsx.stxvd2x(<2 x double> %7, i8* %9)
  ret void
}

; CHECK-LABEL: @test1
; CHECK: %0 = load <4 x i32>, <4 x i32>* bitcast (<4 x float>* @vf to <4 x i32>*), align 1
; CHECK: store <4 x i32> %0, <4 x i32>* bitcast (<4 x float>* @res_vf to <4 x i32>*), align 1
; CHECK: %1 = load <2 x double>, <2 x double>* @vd, align 1
; CHECK: store <2 x double> %1, <2 x double>* @res_vd, align 1

declare <4 x i32> @llvm.ppc.vsx.lxvw4x(i8*)
declare void @llvm.ppc.vsx.stxvw4x(<4 x i32>, i8*)
declare <2 x double> @llvm.ppc.vsx.lxvd2x(i8*)
declare void @llvm.ppc.vsx.stxvd2x(<2 x double>, i8*)