; RUN: llc -march=amdgcn -show-mc-encoding -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=SICI -check-prefix=SIVI %s
; RUN: llc -march=amdgcn -mcpu=bonaire -show-mc-encoding -verify-machineinstrs < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=SICI %s
; RUN: llc -march=amdgcn -mcpu=tonga -show-mc-encoding -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=SIVI %s
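
; The expected constants differ per target because of how the scalar load
; offset is encoded: SI takes an 8-bit immediate offset in dwords, CI can use
; a 32-bit literal dword offset, and VI takes a 20-bit immediate offset in
; bytes.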

; SMRD load with an immediate offset.
; GCN-LABEL: {{^}}smrd0:
; SICI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x1 ; encoding: [0x01
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4
define amdgpu_kernel void @smrd0(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(2)* %ptr, i64 1
  %tmp1 = load i32, i32 addrspace(2)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; SMRD load with the largest possible immediate offset.
; GCN-LABEL: {{^}}smrd1:
; SICI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff,0x{{[0-9]+[137]}}
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
define amdgpu_kernel void @smrd1(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(2)* %ptr, i64 255
  %tmp1 = load i32, i32 addrspace(2)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; SMRD load with an offset greater than the largest possible immediate.
; GCN-LABEL: {{^}}smrd2:
; SI: s_movk_i32 s[[OFFSET:[0-9]]], 0x400
; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x100
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
define amdgpu_kernel void @smrd2(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(2)* %ptr, i64 256
  %tmp1 = load i32, i32 addrspace(2)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; SMRD load with a 64-bit offset.
; GCN-LABEL: {{^}}smrd3:
; FIXME: There are too many copies here because we don't fold immediates
;        through REG_SEQUENCE
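; The index of 4294967296 (2^32) makes the byte offset 2^34, which does not
; fit in any SMRD immediate, so the address must be computed with a 64-bit add.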
; SI: s_load_dwordx2 s[{{[0-9]:[0-9]}}], s[{{[0-9]:[0-9]}}], 0xb ; encoding: [0x0b
define amdgpu_kernel void @smrd3(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(2)* %ptr, i64 4294967296
  %tmp1 = load i32, i32 addrspace(2)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; SMRD load with the largest possible immediate offset on VI.
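; 262143 dwords is 0xffffc bytes, the largest byte offset that still fits in
; VI's 20-bit immediate field.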
; GCN-LABEL: {{^}}smrd4:
; SI: s_mov_b32 [[OFFSET:s[0-9]+]], 0xffffc
; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3ffff
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xffffc
define amdgpu_kernel void @smrd4(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(2)* %ptr, i64 262143
  %tmp1 = load i32, i32 addrspace(2)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; SMRD load with an offset greater than the largest possible immediate on VI.
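; 262144 dwords is 0x100000 bytes, one past VI's 20-bit limit, so both SI and
; VI have to materialize the offset in an SGPR; CI still fits it in a literal.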
; GCN-LABEL: {{^}}smrd5:
; SIVI: s_mov_b32 [[OFFSET:s[0-9]+]], 0x100000
; SIVI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x40000
define amdgpu_kernel void @smrd5(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(2)* %ptr, i64 262144
  %tmp1 = load i32, i32 addrspace(2)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; SMRD load using the load.const.v4i32 intrinsic with an immediate offset.
; GCN-LABEL: {{^}}smrd_load_const0:
; SICI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4 ; encoding: [0x04
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x10
define amdgpu_ps void @smrd_load_const0(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
  %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 16)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
  ret void
}

; SMRD load using the load.const.v4i32 intrinsic with the largest possible
; immediate offset.
; GCN-LABEL: {{^}}smrd_load_const1:
; SICI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
define amdgpu_ps void @smrd_load_const1(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
  %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1020)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
  ret void
}

; SMRD load using the load.const.v4i32 intrinsic with an offset greater than
; the largest possible immediate.
; GCN-LABEL: {{^}}smrd_load_const2:
; SI: s_movk_i32 s[[OFFSET:[0-9]]], 0x400
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x100
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
define amdgpu_ps void @smrd_load_const2(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
  %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1024)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
  ret void
}

; SMRD load with the largest possible immediate offset on VI.
; GCN-LABEL: {{^}}smrd_load_const3:
; SI: s_mov_b32 [[OFFSET:s[0-9]+]], 0xffffc
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3ffff
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xffffc
define amdgpu_ps void @smrd_load_const3(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
  %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1048572)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
  ret void
}

; SMRD load with an offset greater than the largest possible immediate on VI.
; GCN-LABEL: {{^}}smrd_load_const4:
; SIVI: s_mov_b32 [[OFFSET:s[0-9]+]], 0x100000
; SIVI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x40000
define amdgpu_ps void @smrd_load_const4(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
  %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1048576)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
  ret void
}

declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }