; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt < %s -passes=infer-alignment -S | FileCheck %s

target datalayout = "p1:64:64:64:32"

; ------------------------------------------------------------------------------
; load instructions
; ------------------------------------------------------------------------------
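
; The masks -2, -4, and -8 clear the low pointer bits, so infer-alignment should
; raise the alignment of loads through the ptrmask results to 2, 4, and 8.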
define void @load(ptr align 1 %ptr) {
; CHECK-LABEL: define void @load
; CHECK-SAME: (ptr align 1 [[PTR:%.*]]) {
; CHECK-NEXT:    [[ALIGNED_0:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -2)
; CHECK-NEXT:    [[ALIGNED_1:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -4)
; CHECK-NEXT:    [[ALIGNED_2:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -8)
; CHECK-NEXT:    [[LOAD_0:%.*]] = load <16 x i8>, ptr [[ALIGNED_0]], align 2
; CHECK-NEXT:    [[LOAD_1:%.*]] = load <16 x i8>, ptr [[ALIGNED_1]], align 4
; CHECK-NEXT:    [[LOAD_2:%.*]] = load <16 x i8>, ptr [[ALIGNED_2]], align 8
; CHECK-NEXT:    ret void
;
  %aligned.0 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -2)
  %aligned.1 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -4)
  %aligned.2 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -8)

  %load.0 = load <16 x i8>, ptr %aligned.0, align 1
  %load.1 = load <16 x i8>, ptr %aligned.1, align 1
  %load.2 = load <16 x i8>, ptr %aligned.2, align 1

  ret void
}

; ------------------------------------------------------------------------------
; store instructions
; ------------------------------------------------------------------------------
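
; Same as @load, but for stores through the ptrmask results.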
define void @store(ptr align 1 %ptr) {
; CHECK-LABEL: define void @store
; CHECK-SAME: (ptr align 1 [[PTR:%.*]]) {
; CHECK-NEXT:    [[ALIGNED_0:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -2)
; CHECK-NEXT:    [[ALIGNED_1:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -4)
; CHECK-NEXT:    [[ALIGNED_2:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -8)
; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[ALIGNED_0]], align 2
; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[ALIGNED_1]], align 4
; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[ALIGNED_2]], align 8
; CHECK-NEXT:    ret void
;
  %aligned.0 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -2)
  %aligned.1 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -4)
  %aligned.2 = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -8)

  store <16 x i8> zeroinitializer, ptr %aligned.0, align 1
  store <16 x i8> zeroinitializer, ptr %aligned.1, align 1
  store <16 x i8> zeroinitializer, ptr %aligned.2, align 1

  ret void
}

; ------------------------------------------------------------------------------
; Overaligned underlying object
; ------------------------------------------------------------------------------

; Underlying alignment (16) is greater than the alignment implied by the
; ptrmask (8), so the larger underlying alignment should be inferred.
define void @ptrmask_overaligned(ptr align 16 %ptr) {
; CHECK-LABEL: define void @ptrmask_overaligned
; CHECK-SAME: (ptr align 16 [[PTR:%.*]]) {
; CHECK-NEXT:    [[ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR]], i64 -8)
; CHECK-NEXT:    [[LOAD:%.*]] = load <16 x i8>, ptr [[ALIGNED]], align 16
; CHECK-NEXT:    store <16 x i8> zeroinitializer, ptr [[ALIGNED]], align 16
; CHECK-NEXT:    ret void
;
  %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 -8)

  %load = load <16 x i8>, ptr %aligned, align 1
  store <16 x i8> zeroinitializer, ptr %aligned, align 1

  ret void
}
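
; Address space 1 has a 32-bit index type per the datalayout, so the ptrmask
; mask operand is i32; clearing its low two bits still implies an alignment of 4.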
define i8 @smaller_index_type(ptr addrspace(1) %ptr) {
; CHECK-LABEL: define i8 @smaller_index_type
; CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) {
; CHECK-NEXT:    [[PTR2:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) [[PTR]], i32 -4)
; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(1) [[PTR2]], align 4
; CHECK-NEXT:    ret i8 [[LOAD]]
;
  %ptr2 = call ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1) %ptr, i32 -4)
  %load = load i8, ptr addrspace(1) %ptr2, align 1
  ret i8 %load
}

declare ptr @llvm.ptrmask.p0.i64(ptr, i64)
declare ptr addrspace(1) @llvm.ptrmask.p1.i32(ptr addrspace(1), i32)