; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 | %ptxas-verify %}
; Even though general vector types are not supported in PTX, we can still
; optimize loads/stores with pseudo-vector instructions of the form:
;
; ld.v2.f32 {%f0, %f1}, [%r0]
;
; which will load two floats at once into scalar registers.
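;
; For comparison, a purely scalar lowering of the same <2 x float> access would
; presumably need one load per lane (illustrative PTX only, not checked by this
; test; the register names are made up):
;
;   ld.f32 %f0, [%r0];
;   ld.f32 %f1, [%r0+4];
;
; The vectorized form issues a single, wider memory transaction instead.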

define void @foo(ptr %a) {
; CHECK: ld.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}
  %t1 = load <2 x float>, ptr %a
  %t2 = fmul <2 x float> %t1, %t1
  store <2 x float> %t2, ptr %a
  ret void
}

define void @foo2(ptr %a) {
; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
  %t1 = load <4 x float>, ptr %a
  %t2 = fmul <4 x float> %t1, %t1
  store <4 x float> %t2, ptr %a
  ret void
}

define void @foo3(ptr %a) {
; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
; CHECK-NEXT: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
  %t1 = load <8 x float>, ptr %a
  %t2 = fmul <8 x float> %t1, %t1
  store <8 x float> %t2, ptr %a
  ret void
}

define void @foo4(ptr %a) {
; CHECK: ld.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}
  %t1 = load <2 x i32>, ptr %a
  %t2 = mul <2 x i32> %t1, %t1
  store <2 x i32> %t2, ptr %a
  ret void
}

define void @foo5(ptr %a) {
; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
  %t1 = load <4 x i32>, ptr %a
  %t2 = mul <4 x i32> %t1, %t1
  store <4 x i32> %t2, ptr %a
  ret void
}

define void @foo6(ptr %a) {
; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
; CHECK-NEXT: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
  %t1 = load <8 x i32>, ptr %a
  %t2 = mul <8 x i32> %t1, %t1
  store <8 x i32> %t2, ptr %a
  ret void
}

; The following test did not pass previously because the address computation
; was still too complex when the LoadStoreVectorizer (LSV) was run.
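;
; Explanatory note: the two i8 loads in foo_complex read adjacent bytes of the
; same row (%t9 and %t11 differ by exactly one), so once the address
; computation is simplified the LSV should be able to combine them into a
; single vectorized load.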
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x() #0
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x() #0

; CHECK-LABEL: foo_complex
define void @foo_complex(ptr nocapture readonly align 16 dereferenceable(134217728) %alloc0) {
  %t0 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x(), !range !1
  %t1 = tail call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
  ; The original definition of %t2 is elided in this excerpt; the lshr below is
  ; an assumed stand-in (a row index derived from %t1) so the IR stays well formed.
  %t2 = lshr i32 %t1, 8
  %t3 = shl nuw nsw i32 %t1, 9
  %ttile_origin.2 = and i32 %t3, 130560
  %tstart_offset_x_mul = shl nuw nsw i32 %t0, 1
  %t4 = or disjoint i32 %ttile_origin.2, %tstart_offset_x_mul
  %t6 = or disjoint i32 %t4, 1
  %t8 = or disjoint i32 %t4, 128
  %t9 = zext i32 %t8 to i64
  %t10 = or disjoint i32 %t4, 129
  %t11 = zext i32 %t10 to i64
  %t20 = zext i32 %t2 to i64
  %t27 = getelementptr inbounds [1024 x [131072 x i8]], ptr %alloc0, i64 0, i64 %t20, i64 %t9
  %t28 = load i8, ptr %t27, align 2
  %t31 = getelementptr inbounds [1024 x [131072 x i8]], ptr %alloc0, i64 0, i64 %t20, i64 %t11
  %t32 = load i8, ptr %t31, align 1
  %t33 = icmp ult i8 %t28, %t32
  %t34 = select i1 %t33, i8 %t32, i8 %t28
  store i8 %t34, ptr %t31
  ret void
}
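
; The extv8f16_* tests below load <8 x half> and fpext it to <8 x float>. With
; 16-byte alignment the load is expected to vectorize into a single v4.b32
; load, while the 4-byte-aligned variants fall back to scalar b32 loads.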

; CHECK-LABEL: extv8f16_global_a16(
define void @extv8f16_global_a16(ptr addrspace(1) noalias readonly align 16 %dst, ptr addrspace(1) noalias readonly align 16 %src) #0 {
; CHECK: ld.global.v4.b32 {%r
  %v = load <8 x half>, ptr addrspace(1) %src, align 16
; CHECK: mov.b32 {%rs
; CHECK: mov.b32 {%rs
; CHECK: mov.b32 {%rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
  %ext = fpext <8 x half> %v to <8 x float>
; CHECK: st.global.v4.f32
; CHECK: st.global.v4.f32
  store <8 x float> %ext, ptr addrspace(1) %dst, align 16
  ret void
}

; CHECK-LABEL: extv8f16_global_a4(
define void @extv8f16_global_a4(ptr addrspace(1) noalias readonly align 16 %dst, ptr addrspace(1) noalias readonly align 16 %src) #0 {
; CHECK: ld.global.b32 %r
; CHECK: ld.global.b32 %r
; CHECK: ld.global.b32 %r
; CHECK: ld.global.b32 %r
  %v = load <8 x half>, ptr addrspace(1) %src, align 4
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
  %ext = fpext <8 x half> %v to <8 x float>
; CHECK: st.global.v4.f32
; CHECK: st.global.v4.f32
  store <8 x float> %ext, ptr addrspace(1) %dst, align 16
  ret void
}

; CHECK-LABEL: extv8f16_generic_a16(
define void @extv8f16_generic_a16(ptr noalias readonly align 16 %dst, ptr noalias readonly align 16 %src) #0 {
; CHECK: ld.v4.b32 {%r
  %v = load <8 x half>, ptr %src, align 16
; CHECK: mov.b32 {%rs
; CHECK: mov.b32 {%rs
; CHECK: mov.b32 {%rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
  %ext = fpext <8 x half> %v to <8 x float>
  store <8 x float> %ext, ptr %dst, align 16
  ret void
}

; CHECK-LABEL: extv8f16_generic_a4(
define void @extv8f16_generic_a4(ptr noalias readonly align 16 %dst, ptr noalias readonly align 16 %src) #0 {
  %v = load <8 x half>, ptr %src, align 4
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: mov.b32 {%rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
; CHECK: cvt.f32.f16 %f{{.*}}, %rs
  %ext = fpext <8 x half> %v to <8 x float>
  store <8 x float> %ext, ptr %dst, align 16
  ret void
}

; Assumed attribute group: the original definition of #0 is elided in this
; excerpt, but the declarations and definitions above reference it, so a
; placeholder is provided to keep the module parseable.
attributes #0 = { nounwind }

!1 = !{i32 0, i32 64}