; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -show-mc-encoding -verify-machineinstrs < %s | FileCheck %s

declare i32 @llvm.amdgcn.workitem.id.x() readnone

;;;==========================================================================;;;
;;; MUBUF LOAD TESTS
;;;==========================================================================;;;

; MUBUF load with an immediate byte offset that fits into 12-bits
; CHECK-LABEL: {{^}}mubuf_load0:
; CHECK: buffer_load_dword v{{[0-9]}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4 ; encoding: [0x04,0x00,0x30,0xe0
define amdgpu_kernel void @mubuf_load0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
  %0 = getelementptr i32, i32 addrspace(1)* %in, i64 1
  %1 = load i32, i32 addrspace(1)* %0
  store i32 %1, i32 addrspace(1)* %out
  ret void
}

; MUBUF load with the largest possible immediate offset
; CHECK-LABEL: {{^}}mubuf_load1:
; CHECK: buffer_load_ubyte v{{[0-9]}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe0
define amdgpu_kernel void @mubuf_load1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
  %0 = getelementptr i8, i8 addrspace(1)* %in, i64 4095
  %1 = load i8, i8 addrspace(1)* %0
  store i8 %1, i8 addrspace(1)* %out
  ret void
}

; MUBUF load with an immediate byte offset that doesn't fit into 12-bits
; CHECK-LABEL: {{^}}mubuf_load2:
; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x1000
; CHECK: buffer_load_dword v{{[0-9]}}, off, s[{{[0-9]+:[0-9]+}}], [[SOFFSET]] ; encoding: [0x00,0x00,0x30,0xe0
define amdgpu_kernel void @mubuf_load2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
  %0 = getelementptr i32, i32 addrspace(1)* %in, i64 1024
  %1 = load i32, i32 addrspace(1)* %0
  store i32 %1, i32 addrspace(1)* %out
  ret void
}

; MUBUF load with a 12-bit immediate offset and a register offset
; CHECK-LABEL: {{^}}mubuf_load3:
; CHECK: buffer_load_dword v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:4 ; encoding: [0x04,0x80,0x30,0xe0
define amdgpu_kernel void @mubuf_load3(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i64 %offset) {
entry:
  %0 = getelementptr i32, i32 addrspace(1)* %in, i64 %offset
  %1 = getelementptr i32, i32 addrspace(1)* %0, i64 1
  %2 = load i32, i32 addrspace(1)* %1
  store i32 %2, i32 addrspace(1)* %out
  ret void
}

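; 64 is the largest value that can be encoded as an inline constant in the
; soffset operand, so it should be used directly instead of being copied into
; an SGPR first.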
; CHECK-LABEL: {{^}}soffset_max_imm:
; CHECK: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 64 offen glc
define amdgpu_gs void @soffset_max_imm([6 x <4 x i32>] addrspace(4)* inreg, [17 x <4 x i32>] addrspace(4)* inreg, [16 x <4 x i32>] addrspace(4)* inreg, [32 x <8 x i32>] addrspace(4)* inreg, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) {
main_body:
  %tmp0 = getelementptr [6 x <4 x i32>], [6 x <4 x i32>] addrspace(4)* %0, i32 0, i32 0
  %tmp1 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp0
  %tmp2 = shl i32 %6, 2
  %tmp3 = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %tmp1, i32 %tmp2, i32 64, i32 1)
  %tmp4 = add i32 %6, 16
  %tmp1.4xi32 = bitcast <4 x i32> %tmp1 to <4 x i32>
  call void @llvm.amdgcn.tbuffer.store.i32(i32 %tmp3, <4 x i32> %tmp1.4xi32, i32 0, i32 %tmp4, i32 %4, i32 0, i32 4, i32 4, i1 1, i1 1)
  ret void
}

; Make sure immediates that aren't inline constants don't get folded into
; the soffset operand.
; FIXME: for this test we should be smart enough to shift the immediate into
; the instruction offset field.
; CHECK-LABEL: {{^}}soffset_no_fold:
; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x41
; CHECK: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], [[SOFFSET]] offen glc
define amdgpu_gs void @soffset_no_fold([6 x <4 x i32>] addrspace(4)* inreg, [17 x <4 x i32>] addrspace(4)* inreg, [16 x <4 x i32>] addrspace(4)* inreg, [32 x <8 x i32>] addrspace(4)* inreg, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) {
main_body:
  %tmp0 = getelementptr [6 x <4 x i32>], [6 x <4 x i32>] addrspace(4)* %0, i32 0, i32 0
  %tmp1 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp0
  %tmp2 = shl i32 %6, 2
  %tmp3 = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %tmp1, i32 %tmp2, i32 65, i32 1)
  %tmp4 = add i32 %6, 16
  %tmp1.4xi32 = bitcast <4 x i32> %tmp1 to <4 x i32>
  call void @llvm.amdgcn.tbuffer.store.i32(i32 %tmp3, <4 x i32> %tmp1.4xi32, i32 0, i32 %tmp4, i32 %4, i32 0, i32 4, i32 4, i1 1, i1 1)
  ret void
}

;;;==========================================================================;;;
;;; MUBUF STORE TESTS
;;;==========================================================================;;;

; MUBUF store with an immediate byte offset that fits into 12-bits
; CHECK-LABEL: {{^}}mubuf_store0:
; CHECK: buffer_store_dword v{{[0-9]}}, off, s[{{[0-9]:[0-9]}}], 0 offset:4 ; encoding: [0x04,0x00,0x70,0xe0
define amdgpu_kernel void @mubuf_store0(i32 addrspace(1)* %out) {
entry:
  %0 = getelementptr i32, i32 addrspace(1)* %out, i64 1
  store i32 0, i32 addrspace(1)* %0
  ret void
}

; MUBUF store with the largest possible immediate offset
; CHECK-LABEL: {{^}}mubuf_store1:
; CHECK: buffer_store_byte v{{[0-9]}}, off, s[{{[0-9]:[0-9]}}], 0 offset:4095 ; encoding: [0xff,0x0f,0x60,0xe0
define amdgpu_kernel void @mubuf_store1(i8 addrspace(1)* %out) {
entry:
  %0 = getelementptr i8, i8 addrspace(1)* %out, i64 4095
  store i8 0, i8 addrspace(1)* %0
  ret void
}

; MUBUF store with an immediate byte offset that doesn't fit into 12-bits
; CHECK-LABEL: {{^}}mubuf_store2:
; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x1000
; CHECK: buffer_store_dword v{{[0-9]}}, off, s[{{[0-9]:[0-9]}}], [[SOFFSET]] ; encoding: [0x00,0x00,0x70,0xe0
define amdgpu_kernel void @mubuf_store2(i32 addrspace(1)* %out) {
entry:
  %0 = getelementptr i32, i32 addrspace(1)* %out, i64 1024
  store i32 0, i32 addrspace(1)* %0
  ret void
}

; MUBUF store with a 12-bit immediate offset and a register offset
; CHECK-LABEL: {{^}}mubuf_store3:
; CHECK: buffer_store_dword v{{[0-9]}}, v[{{[0-9]:[0-9]}}], s[{{[0-9]:[0-9]}}], 0 addr64 offset:4 ; encoding: [0x04,0x80,0x70,0xe0
define amdgpu_kernel void @mubuf_store3(i32 addrspace(1)* %out, i64 %offset) {
entry:
  %0 = getelementptr i32, i32 addrspace(1)* %out, i64 %offset
  %1 = getelementptr i32, i32 addrspace(1)* %0, i64 1
  store i32 0, i32 addrspace(1)* %1
  ret void
}

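; A store through a uniform (SGPR) pointer with no offset should use a zero
; soffset and no immediate offset field.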
; CHECK-LABEL: {{^}}store_sgpr_ptr:
; CHECK: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0
define amdgpu_kernel void @store_sgpr_ptr(i32 addrspace(1)* %out) #0 {
  store i32 99, i32 addrspace(1)* %out, align 4
  ret void
}

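; An offset of 10 i32s (40 bytes) fits in the 12-bit immediate offset field.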
; CHECK-LABEL: {{^}}store_sgpr_ptr_offset:
; CHECK: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:40
define amdgpu_kernel void @store_sgpr_ptr_offset(i32 addrspace(1)* %out) #0 {
  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 10
  store i32 99, i32 addrspace(1)* %out.gep, align 4
  ret void
}

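; An offset of 32768 i32s is 0x20000 bytes, which does not fit in the 12-bit
; immediate offset field and therefore has to be materialized into an SGPR.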
; CHECK-LABEL: {{^}}store_sgpr_ptr_large_offset:
; CHECK: s_mov_b32 [[SOFFSET:s[0-9]+]], 0x20000
; CHECK: buffer_store_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, [[SOFFSET]]
define amdgpu_kernel void @store_sgpr_ptr_large_offset(i32 addrspace(1)* %out) #0 {
  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 32768
  store i32 99, i32 addrspace(1)* %out.gep, align 4
  ret void
}

; CHECK-LABEL: {{^}}store_sgpr_ptr_large_offset_atomic:
; CHECK: s_mov_b32 [[SOFFSET:s[0-9]+]], 0x20000
; CHECK: buffer_atomic_add v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, [[SOFFSET]]
define amdgpu_kernel void @store_sgpr_ptr_large_offset_atomic(i32 addrspace(1)* %out) #0 {
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 32768
  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 5 seq_cst
  ret void
}

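; A pointer that varies per work item lives in a VGPR, so the store has to use
; the addr64 addressing mode rather than an SGPR base with an immediate offset.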
; CHECK-LABEL: {{^}}store_vgpr_ptr:
; CHECK: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
define amdgpu_kernel void @store_vgpr_ptr(i32 addrspace(1)* %out) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() readnone
  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  store i32 99, i32 addrspace(1)* %out.gep, align 4
  ret void
}

declare i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32>, i32, i32, i32) #0
declare void @llvm.amdgcn.tbuffer.store.i32(i32, <4 x i32>, i32, i32, i32, i32, i32, i32, i1, i1)

attributes #0 = { nounwind readonly }