; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix PTX
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix PTX
; RUN: opt -mtriple=nvptx-- < %s -S -passes=infer-address-spaces | FileCheck %s --check-prefix IR
; RUN: opt -mtriple=nvptx64-- < %s -S -passes=infer-address-spaces | FileCheck %s --check-prefix IR
; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -mcpu=sm_20 | %ptxas-verify %}
; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 | %ptxas-verify %}

@array = internal addrspace(3) global [10 x float] zeroinitializer, align 4
@scalar = internal addrspace(3) global float 0.000000e+00, align 4
; Verifies that infer-address-spaces correctly optimizes generic address space
; usage into non-generic (shared) address space usage for the patterns we claim
; to handle:
; 1. load cast
; 2. store cast
; 3. load gep cast
; 4. store gep cast
; gep and cast can be an instruction or a constant expression. This function
; tries all possible combinations.
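;
; As a sketch of the expected rewrite (the IR and PTX checks below verify it),
; a generic access through a cast of a shared global such as
;   %1 = load float, ptr addrspacecast (ptr addrspace(3) @scalar to ptr)
; should collapse into a direct shared-space access
;   %1 = load float, ptr addrspace(3) @scalar
; so the backend can emit ld.shared/st.shared instead of generic loads and stores.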
define void @ld_st_shared_f32(i32 %i, float %v) {
; IR-LABEL: @ld_st_shared_f32
; IR-NOT: addrspacecast
; PTX-LABEL: ld_st_shared_f32(
  ; load through a constant-expression cast
  %1 = load float, ptr addrspacecast (ptr addrspace(3) @scalar to ptr), align 4
  call void @use(float %1)
; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
  ; store through a constant-expression cast
  store float %v, ptr addrspacecast (ptr addrspace(3) @scalar to ptr), align 4
; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
  ; use syncthreads to disable optimizations across the separate patterns
  call void @llvm.nvvm.barrier0()

  ; cast as an instruction, then load/store
  %2 = addrspacecast ptr addrspace(3) @scalar to ptr
  %3 = load float, ptr %2, align 4
  call void @use(float %3)
; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
  store float %v, ptr %2, align 4
; PTX: st.shared.f32 [scalar], %f{{[0-9]+}};
  call void @llvm.nvvm.barrier0()

  ; load/store through a constant-expression gep of a constant-expression cast
  %4 = load float, ptr getelementptr inbounds ([10 x float], ptr addrspacecast (ptr addrspace(3) @array to ptr), i32 0, i32 5), align 4
  call void @use(float %4)
; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
  store float %v, ptr getelementptr inbounds ([10 x float], ptr addrspacecast (ptr addrspace(3) @array to ptr), i32 0, i32 5), align 4
; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
  call void @llvm.nvvm.barrier0()

  ; gep as an instruction on a constant-expression cast, then load/store
  %5 = getelementptr inbounds [10 x float], ptr addrspacecast (ptr addrspace(3) @array to ptr), i32 0, i32 5
  %6 = load float, ptr %5, align 4
  call void @use(float %6)
; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
  store float %v, ptr %5, align 4
; PTX: st.shared.f32 [array+20], %f{{[0-9]+}};
  call void @llvm.nvvm.barrier0()

  ; cast and gep both as instructions, with a variable index
  %7 = addrspacecast ptr addrspace(3) @array to ptr
  %8 = getelementptr inbounds [10 x float], ptr %7, i32 0, i32 %i
  %9 = load float, ptr %8, align 4
  call void @use(float %9)
; PTX: ld.shared.f32 %f{{[0-9]+}}, [%{{(r|rl|rd)[0-9]+}}];
  store float %v, ptr %8, align 4
; PTX: st.shared.f32 [%{{(r|rl|rd)[0-9]+}}], %f{{[0-9]+}};
  call void @llvm.nvvm.barrier0()

  ret void
}

; Even when the loaded type (i32) differs from the value type of the underlying
; shared global (float), the cast should still be hoisted away and the access
; rewritten to address the shared global directly.
define i32 @ld_int_from_float() {
; IR-LABEL: @ld_int_from_float
; IR: load i32, ptr addrspace(3) @scalar
; PTX-LABEL: ld_int_from_float(
; PTX: ld.shared.u{{(32|64)}}
  %1 = load i32, ptr addrspacecast (ptr addrspace(3) @scalar to ptr), align 4
  ret i32 %1
}
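
; A chain of geps on a pointer cast from the global address space is expected to
; be rewritten to operate on ptr addrspace(1) directly, so the final load can use
; ld.global (see the IR-NEXT and PTX checks inside the function).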
define i32 @ld_int_from_global_float(ptr addrspace(1) %input, i32 %i, i32 %j) {
; IR-LABEL: @ld_int_from_global_float(
; PTX-LABEL: ld_int_from_global_float(
  %1 = addrspacecast ptr addrspace(1) %input to ptr
  %2 = getelementptr float, ptr %1, i32 %i
; IR-NEXT: getelementptr float, ptr addrspace(1) %input, i32 %i
  %3 = getelementptr float, ptr %2, i32 %j
; IR-NEXT: getelementptr float, ptr addrspace(1) {{%[^,]+}}, i32 %j
  %4 = load i32, ptr %3
; IR-NEXT: load i32, ptr addrspace(1) {{%.+}}
; PTX: ld.global
  ret i32 %4
}
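
; Nested constant expressions (a gep of an addrspacecast of @array) are expected
; to fold so the store addresses shared memory directly, which the PTX checks
; below verify with st.shared to [array+4].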
define void @nested_const_expr() {
; PTX-LABEL: nested_const_expr(
  ; store 1 to gep(addrspacecast(array), 0, 1)
  store i32 1, ptr getelementptr ([10 x float], ptr addrspacecast (ptr addrspace(3) @array to ptr), i64 0, i64 1), align 4
; PTX: mov.u32 %r1, 1;
; PTX-NEXT: st.shared.u32 [array+4], %r1;
  ret void
}
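
; All generic uses of %generic_input are expected to be rewritten (RAUW) to use
; the global address space pointer %input directly, as the IR-NEXT checks verify.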
define void @rauw(ptr addrspace(1) %input) {
  %generic_input = addrspacecast ptr addrspace(1) %input to ptr
  %addr = getelementptr float, ptr %generic_input, i64 10
  %v = load float, ptr %addr
  store float %v, ptr %addr
  ret void
; IR-LABEL: @rauw(
; IR-NEXT: %addr = getelementptr float, ptr addrspace(1) %input, i64 10
; IR-NEXT: %v = load float, ptr addrspace(1) %addr
; IR-NEXT: store float %v, ptr addrspace(1) %addr
; IR-NEXT: ret void
}
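
; A pointer induction variable whose start and bound both derive from a cast of a
; shared-memory global is expected to be rewritten to iterate in addrspace(3), so
; the loads inside the loop stay in shared memory (see the IR checks in the body).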
define void @loop() {
; IR-LABEL: @loop(
entry:
  %p = addrspacecast ptr addrspace(3) @array to ptr
  %end = getelementptr float, ptr %p, i64 10
  br label %loop

loop:
  %i = phi ptr [ %p, %entry ], [ %i2, %loop ]
; IR: phi ptr addrspace(3) [ @array, %entry ], [ %i2, %loop ]
  %v = load float, ptr %i
; IR: %v = load float, ptr addrspace(3) %i
  call void @use(float %v)
  %i2 = getelementptr float, ptr %i, i64 1
; IR: %i2 = getelementptr float, ptr addrspace(3) %i, i64 1
  %exit_cond = icmp eq ptr %i2, %end
  br i1 %exit_cond, label %exit, label %loop

exit:
  ret void
}

@generic_end = external global ptr
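
; Same loop, but the bound is a generic pointer loaded from @generic_end. The
; induction pointer is still expected to be rewritten to addrspace(3), with a cast
; back to a generic pointer inserted only for the exit comparison (checked below).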
define void @loop_with_generic_bound() {
; IR-LABEL: @loop_with_generic_bound(
entry:
  %p = addrspacecast ptr addrspace(3) @array to ptr
  %end = load ptr, ptr @generic_end
  br label %loop

loop:
  %i = phi ptr [ %p, %entry ], [ %i2, %loop ]
; IR: phi ptr addrspace(3) [ @array, %entry ], [ %i2, %loop ]
  %v = load float, ptr %i
; IR: %v = load float, ptr addrspace(3) %i
  call void @use(float %v)
  %i2 = getelementptr float, ptr %i, i64 1
; IR: %i2 = getelementptr float, ptr addrspace(3) %i, i64 1
  %exit_cond = icmp eq ptr %i2, %end
; IR: addrspacecast ptr addrspace(3) %i2 to ptr
; IR: icmp eq ptr %{{[0-9]+}}, %end
  br i1 %exit_cond, label %exit, label %loop

exit:
  ret void
}

declare void @llvm.nvvm.barrier0() #3

declare void @use(float)

attributes #3 = { noduplicate nounwind }