; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=separate-const-offset-from-gep,slsr,gvn -S | FileCheck %s
; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s --check-prefix=PTX

target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
target triple = "nvptx64-unknown-unknown"
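
; The loads below index arr[i + 5], arr[2*i + 5], arr[3*i + 5] and arr[4*i + 5].
; separate-const-offset-from-gep splits out the constant +5, and SLSR then
; rewrites each remaining GEP as the previous one plus i*4 bytes, so the PTX
; needs only one mul.wide.s32 followed by a chain of add.s64 instructions.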
define void @slsr_after_reassociate_geps(ptr %arr, i32 %i) {
; CHECK-LABEL: define void @slsr_after_reassociate_geps(
; CHECK-SAME: ptr [[ARR:%.*]], i32 [[I:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[I]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr float, ptr [[ARR]], i64 [[TMP1]]
; CHECK-NEXT:    [[P12:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 20
; CHECK-NEXT:    [[V1:%.*]] = load float, ptr [[P12]], align 4
; CHECK-NEXT:    call void @foo(float [[V1]])
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP1]], 2
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP3]]
; CHECK-NEXT:    [[P24:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 20
; CHECK-NEXT:    [[V2:%.*]] = load float, ptr [[P24]], align 4
; CHECK-NEXT:    call void @foo(float [[V2]])
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]]
; CHECK-NEXT:    [[P36:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i64 20
; CHECK-NEXT:    [[V3:%.*]] = load float, ptr [[P36]], align 4
; CHECK-NEXT:    call void @foo(float [[V3]])
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i64 [[TMP3]]
; CHECK-NEXT:    [[P48:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 20
; CHECK-NEXT:    [[V4:%.*]] = load float, ptr [[P48]], align 4
; CHECK-NEXT:    call void @foo(float [[V4]])
; CHECK-NEXT:    ret void
;
; PTX-LABEL: .visible .func slsr_after_reassociate_geps(
; PTX: ld.param.u64 [[arr:%rd[0-9]+]], [slsr_after_reassociate_geps_param_0];
; PTX: ld.param.u32 [[i:%r[0-9]+]], [slsr_after_reassociate_geps_param_1];
  %i2 = shl nsw i32 %i, 1
  %i3 = mul nsw i32 %i, 3
  %i4 = shl nsw i32 %i, 2
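
; p1 = &arr[i + 5]: the PTX computes base1 = arr + i*4 once and folds the +5
; elements into the 20-byte immediate offset of the load.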
  %j1 = add nsw i32 %i, 5
  %p1 = getelementptr inbounds float, ptr %arr, i32 %j1
; PTX: mul.wide.s32 [[i4:%rd[0-9]+]], [[i]], 4;
; PTX: add.s64 [[base1:%rd[0-9]+]], [[arr]], [[i4]];
  %v1 = load float, ptr %p1, align 4
; PTX: ld.f32 {{%f[0-9]+}}, [[[base1]]+20];
  call void @foo(float %v1)
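
; p2 = &arr[2*i + 5]: SLSR rewrites this GEP as p1 plus another i*4 bytes, so
; the PTX only needs base2 = base1 + i4.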
  %j2 = add nsw i32 %i2, 5
  %p2 = getelementptr inbounds float, ptr %arr, i32 %j2
; PTX: add.s64 [[base2:%rd[0-9]+]], [[base1]], [[i4]];
  %v2 = load float, ptr %p2, align 4
; PTX: ld.f32 {{%f[0-9]+}}, [[[base2]]+20];
  call void @foo(float %v2)
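
; p3 = &arr[3*i + 5]: strength-reduced to base3 = base2 + i4.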
  %j3 = add nsw i32 %i3, 5
  %p3 = getelementptr inbounds float, ptr %arr, i32 %j3
; PTX: add.s64 [[base3:%rd[0-9]+]], [[base2]], [[i4]];
  %v3 = load float, ptr %p3, align 4
; PTX: ld.f32 {{%f[0-9]+}}, [[[base3]]+20];
  call void @foo(float %v3)
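
; p4 = &arr[4*i + 5]: strength-reduced to base4 = base3 + i4.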
  %j4 = add nsw i32 %i4, 5
  %p4 = getelementptr inbounds float, ptr %arr, i32 %j4
; PTX: add.s64 [[base4:%rd[0-9]+]], [[base3]], [[i4]];
  %v4 = load float, ptr %p4, align 4
; PTX: ld.f32 {{%f[0-9]+}}, [[[base4]]+20];
  call void @foo(float %v4)

  ret void
}

declare void @foo(float)